NYTProf Performance Profile
For mentat.storage.mongo.pl
  Run on Tue Jun 24 09:58:41 2014
Reported on Tue Jun 24 09:59:13 2014

Filename: /usr/local/lib/perl/5.14.2/MongoDB/GridFS.pm
Statements: Executed 16 statements in 1.16ms
Subroutines
Calls  P  F  Exclusive Time  Inclusive Time  Subroutine
    1  1  1  504µs           11.9ms          MongoDB::GridFS::BEGIN@26
    1  1  1  17µs            17µs            MongoDB::GridFS::BEGIN@27
    1  1  1  12µs            3.31ms          MongoDB::GridFS::BEGIN@25
    1  1  1  7µs             23µs            MongoDB::GridFS::BEGIN@28
    0  0  0  0s              0s              MongoDB::GridFS::BUILD
    0  0  0  0s              0s              MongoDB::GridFS::_build_chunks
    0  0  0  0s              0s              MongoDB::GridFS::_build_files
    0  0  0  0s              0s              MongoDB::GridFS::_calc_md5
    0  0  0  0s              0s              MongoDB::GridFS::_ensure_indexes
    0  0  0  0s              0s              MongoDB::GridFS::all
    0  0  0  0s              0s              MongoDB::GridFS::delete
    0  0  0  0s              0s              MongoDB::GridFS::drop
    0  0  0  0s              0s              MongoDB::GridFS::find_one
    0  0  0  0s              0s              MongoDB::GridFS::get
    0  0  0  0s              0s              MongoDB::GridFS::insert
    0  0  0  0s              0s              MongoDB::GridFS::put
    0  0  0  0s              0s              MongoDB::GridFS::remove
Call graph for these subroutines as a Graphviz dot language file.
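The subroutine table above lists the public GridFS API (get, put, delete, insert, remove, find_one, all, drop); only the BEGIN blocks were exercised in this run. As context, a minimal usage sketch of these methods, assuming a MongoDB::MongoClient connection from the same driver era (the host, database, and file names are illustrative, not taken from the profiled script):

    use MongoDB;

    # connection details are assumptions
    my $client = MongoDB::MongoClient->new(host => 'localhost');
    my $grid   = $client->get_database('test')->get_gridfs;

    open my $fh, '<', 'test.dat' or die "open: $!";
    my $id   = $grid->put($fh, { filename => 'test.dat' });   # insert() with safe => 1
    my $file = $grid->get($id);                               # find_one({ _id => $id })
    $grid->delete($id);                                       # remove() with safe => 1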
Line  Statements  Time on line  Calls  Time in subs  Code
1#
2# Copyright 2009-2013 MongoDB, Inc.
3#
4# Licensed under the Apache License, Version 2.0 (the "License");
5# you may not use this file except in compliance with the License.
6# You may obtain a copy of the License at
7#
8# http://www.apache.org/licenses/LICENSE-2.0
9#
10# Unless required by applicable law or agreed to in writing, software
11# distributed under the License is distributed on an "AS IS" BASIS,
12# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13# See the License for the specific language governing permissions and
14# limitations under the License.
15#
16
17package MongoDB::GridFS;
18{
19  2  1µs  $MongoDB::GridFS::VERSION = '0.702.2';
20}
21
22
23# ABSTRACT: A file storage utility
24
25  2  33µs  2  6.60ms
# spent 3.31ms (12µs+3.29ms) within MongoDB::GridFS::BEGIN@25 which was called:
# once (12µs+3.29ms) by MongoDB::Database::BEGIN@26 at line 25
use Moose;
# spent 3.31ms making 1 call to MongoDB::GridFS::BEGIN@25
# spent 3.29ms making 1 call to Moose::Exporter::__ANON__[Moose/Exporter.pm:519]
26  2  127µs  1  11.9ms
# spent 11.9ms (504µs+11.4ms) within MongoDB::GridFS::BEGIN@26 which was called:
# once (504µs+11.4ms) by MongoDB::Database::BEGIN@26 at line 26
use MongoDB::GridFS::File;
# spent 11.9ms making 1 call to MongoDB::GridFS::BEGIN@26
27  2  36µs  1  17µs
# spent 17µs within MongoDB::GridFS::BEGIN@27 which was called:
# once (17µs+0s) by MongoDB::Database::BEGIN@26 at line 27
use DateTime;
# spent 17µs making 1 call to MongoDB::GridFS::BEGIN@27
28  2  942µs  2  38µs
# spent 23µs (7µs+16µs) within MongoDB::GridFS::BEGIN@28 which was called:
# once (7µs+16µs) by MongoDB::Database::BEGIN@26 at line 28
use Digest::MD5;
# spent 23µs making 1 call to MongoDB::GridFS::BEGIN@28
# spent 16µs making 1 call to Exporter::import
29
30
31  1  300ns  $MongoDB::GridFS::chunk_size = 1048576;
32
33  1  2µs  1  1.48ms  has _database => (
# spent 1.48ms making 1 call to Moose::has
34 is => 'ro',
35 isa => 'MongoDB::Database',
36 required => 1,
37);
38
39
40  1  2µs  1  5.91ms  has prefix => (
# spent 5.91ms making 1 call to Moose::has
41 is => 'ro',
42 isa => 'Str',
43 default => 'fs'
44);
45
46
47  1  2µs  1  4.14ms  has files => (
# spent 4.14ms making 1 call to Moose::has
48 is => 'ro',
49 isa => 'MongoDB::Collection',
50 lazy_build => 1
51);
52
53sub _build_files {
54 my $self = shift;
55 my $coll = $self->_database->get_collection($self->prefix . '.files');
56 return $coll;
57}
58
59
60  1  1µs  1  3.46ms  has chunks => (
# spent 3.46ms making 1 call to Moose::has
61 is => 'ro',
62 isa => 'MongoDB::Collection',
63 lazy_build => 1
64);
65
66sub _build_chunks {
67 my $self = shift;
68 my $coll = $self->_database->get_collection($self->prefix . '.chunks');
69 return $coll;
70}
71
72# This checks if the required indexes for GridFS exist for the current database.
73# If they are not found, they will be created.
74sub BUILD {
75 my ($self) = @_;
76
77 # check for the required indexes in the system.indexes collection
78 my $count = $self->_database->get_collection('system.indexes')->count({key=>{filename => 1}});
79 $count += $self->_database->get_collection('system.indexes')->count({key=>{files_id => 1, n => 1}});
80
81 # if we don't have the required indexes, create them now.
82 if ($count < 2){
83 $self->_ensure_indexes();
84 }
85}
86
87
88sub _ensure_indexes {
89 my ($self) = @_;
90
91 # ensure the necessary index is present (this may be first usage)
92 $self->files->ensure_index(Tie::IxHash->new(filename => 1), {"safe" => 1});
93 $self->chunks->ensure_index(Tie::IxHash->new(files_id => 1, n => 1), {"safe" => 1, "unique" => 1});
94}
95
96
97sub get {
98 my ($self, $id) = @_;
99
100 return $self->find_one({_id => $id});
101}
102
103
104sub put {
105 my ($self, $fh, $metadata) = @_;
106
107 return $self->insert($fh, $metadata, {safe => 1});
108}
109
110
111sub delete {
112 my ($self, $id) = @_;
113
114 $self->remove({_id => $id}, {safe => 1});
115}
116
117
118sub find_one {
119 my ($self, $criteria, $fields) = @_;
120
121 my $file = $self->files->find_one($criteria, $fields);
122 return undef unless $file;
123 return MongoDB::GridFS::File->new({_grid => $self,info => $file});
124}
125
126
127sub remove {
128 my ($self, $criteria, $options) = @_;
129
130 my $just_one = 0;
131 my $safe = 0;
132
133 if (defined $options) {
134 if (ref $options eq 'HASH') {
135 $just_one = $options->{just_one} && 1;
136 $safe = $options->{safe} && 1;
137 }
138 elsif ($options) {
139 $just_one = $options && 1;
140 }
141 }
142
143 if ($just_one) {
144 my $meta = $self->files->find_one($criteria);
145 $self->chunks->remove({"files_id" => $meta->{'_id'}}, {safe => $safe});
146 $self->files->remove({"_id" => $meta->{'_id'}}, {safe => $safe});
147 }
148 else {
149 my $cursor = $self->files->query($criteria);
150 while (my $meta = $cursor->next) {
151 $self->chunks->remove({"files_id" => $meta->{'_id'}}, {safe => $safe});
152 }
153 $self->files->remove($criteria, {safe => $safe});
154 }
155}
156
- -
159sub insert {
160 my ($self, $fh, $metadata, $options) = @_;
161 $options ||= {};
162
163 confess "not a file handle" unless $fh;
164 $metadata = {} unless $metadata && ref $metadata eq 'HASH';
165
166 my $start_pos = $fh->getpos();
167
168 my $id;
169 if (exists $metadata->{"_id"}) {
170 $id = $metadata->{"_id"};
171 }
172 else {
173 $id = MongoDB::OID->new;
174 }
175
176 my $n = 0;
177 my $length = 0;
178 while ((my $len = $fh->read(my $data, $MongoDB::GridFS::chunk_size)) != 0) {
179 $self->chunks->insert({"files_id" => $id,
180 "n" => $n,
181 "data" => bless(\$data)}, $options);
182 $n++;
183 $length += $len;
184 }
185 $fh->setpos($start_pos);
186
187 # get an md5 hash for the file. set the retry flag to 'true' in case the
188 # database, collection, or indexes are missing. That way we can recreate them
189 # and retry the md5 calc.
190 my $result = $self->_calc_md5($id, $self->prefix, 1);
191
192 # compare the md5 hashes
193 if ($options->{safe}) {
194 my $md5 = Digest::MD5->new;
195 $md5->addfile($fh);
196 my $digest = $md5->hexdigest;
197 if ($digest ne $result->{md5}) {
198 # cleanup and die
199 $self->chunks->remove({files_id => $id});
200 die "md5 hashes don't match: database got $result->{md5}, fs got $digest";
201 }
202 }
203
204 my %copy = %{$metadata};
205 $copy{"_id"} = $id;
206 $copy{"md5"} = $result->{"md5"};
207 $copy{"chunkSize"} = $MongoDB::GridFS::chunk_size;
208 $copy{"uploadDate"} = DateTime->now;
209 $copy{"length"} = $length;
210 return $self->files->insert(\%copy, $options);
211}
212
213# Calculates the md5 of the file on the server
214# $id : reference to the object we want to hash
215# $root : the namespace the file resides in
216# $retry : a flag which controls whether or not to retry the md5 calc.
217# (which is currently only if we are missing our indexes)
218sub _calc_md5 {
219 my ($self, $id, $root, $retry) = @_;
220
221 # Try to get an md5 hash for the file
222 my $result = $self->_database->run_command(["filemd5", $id, "root" => $self->prefix]);
223
224 # If we didn't get a hash back, it means something is wrong (probably to do with gridfs's
225 # indexes, because it's currently the only error that is thrown from the md5 class)
226 if (ref($result) ne 'HASH') {
227 # Yep, indexes are missing. If we have the $retry flag, let's create them and calc the md5 again,
228 # but we won't set the $retry flag again. we don't want an infinite loop for any reason.
229 if ($retry == 1 && $result eq 'need an index on { files_id : 1 , n : 1 }'){
230 $self->_ensure_indexes();
231 $result = $self->_calc_md5($id, $root, 0);
232 }
233 # Well, something bad is happening, so let's clean up and die.
234 else{
235 $self->chunks->remove({files_id => $id});
236 die "recieve an unexpected error from the server: $result";
237 }
238 }
239
240 return $result;
241}
242
- -
245sub drop {
246 my ($self) = @_;
247
248 $self->files->drop;
249 $self->chunks->drop;
250}
251
252
253sub all {
254 my ($self) = @_;
255 my @ret;
256
257 my $cursor = $self->files->query;
258 while (my $meta = $cursor->next) {
259 push @ret, MongoDB::GridFS::File->new(
260 _grid => $self,
261 info => $meta);
262 }
263 return @ret;
264}
265
266  1  8µs  1;
267
268__END__
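
For reference, a sketch of the documents that insert() writes, reconstructed from the fields assigned in the code above ($id, $data, $result, $length, and $metadata are insert()'s own variables; the shape is illustrative, not additional behaviour):

    # one chunk document per read of $MongoDB::GridFS::chunk_size (1048576) bytes;
    # in the code above $data is passed as a blessed scalar ref
    my $chunk_doc = { files_id => $id, n => 0, data => $data };

    # the files document, written after the server-side filemd5 check succeeds
    my $file_doc = {
        %$metadata,                                  # caller-supplied metadata is copied first
        _id        => $id,
        md5        => $result->{md5},
        chunkSize  => $MongoDB::GridFS::chunk_size,
        uploadDate => DateTime->now,
        length     => $length,
    };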