Filename | /usr/local/lib/perl/5.14.2/MongoDB/GridFS.pm |
Statements | Executed 16 statements in 1.16ms |
Calls | P | F | Exclusive Time |
Inclusive Time |
Subroutine |
---|---|---|---|---|---|
1 | 1 | 1 | 504µs | 11.9ms | BEGIN@26 | MongoDB::GridFS::
1 | 1 | 1 | 17µs | 17µs | BEGIN@27 | MongoDB::GridFS::
1 | 1 | 1 | 12µs | 3.31ms | BEGIN@25 | MongoDB::GridFS::
1 | 1 | 1 | 7µs | 23µs | BEGIN@28 | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | BUILD | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | _build_chunks | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | _build_files | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | _calc_md5 | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | _ensure_indexes | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | all | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | delete | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | drop | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | find_one | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | get | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | insert | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | put | MongoDB::GridFS::
0 | 0 | 0 | 0s | 0s | remove | MongoDB::GridFS::
Line | State ments |
Time on line |
Calls | Time in subs |
Code |
---|---|---|---|---|---|
1 | # | ||||
2 | # Copyright 2009-2013 MongoDB, Inc. | ||||
3 | # | ||||
4 | # Licensed under the Apache License, Version 2.0 (the "License"); | ||||
5 | # you may not use this file except in compliance with the License. | ||||
6 | # You may obtain a copy of the License at | ||||
7 | # | ||||
8 | # http://www.apache.org/licenses/LICENSE-2.0 | ||||
9 | # | ||||
10 | # Unless required by applicable law or agreed to in writing, software | ||||
11 | # distributed under the License is distributed on an "AS IS" BASIS, | ||||
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
13 | # See the License for the specific language governing permissions and | ||||
14 | # limitations under the License. | ||||
15 | # | ||||
16 | |||||
17 | package MongoDB::GridFS; | ||||
18 | { | ||||
19 | 2 | 1µs | $MongoDB::GridFS::VERSION = '0.702.2'; | ||
20 | } | ||||
21 | |||||
22 | |||||
23 | # ABSTRACT: A file storage utility | ||||
24 | |||||
25 | 2 | 33µs | 2 | 6.60ms | # spent 3.31ms (12µs+3.29) within MongoDB::GridFS::BEGIN@25 which was called:
# once (12µs+3.29ms) by MongoDB::Database::BEGIN@26 at line 25 # spent 3.31ms making 1 call to MongoDB::GridFS::BEGIN@25
# spent 3.29ms making 1 call to Moose::Exporter::__ANON__[Moose/Exporter.pm:519] |
26 | 2 | 127µs | 1 | 11.9ms | # spent 11.9ms (504µs+11.4) within MongoDB::GridFS::BEGIN@26 which was called:
# once (504µs+11.4ms) by MongoDB::Database::BEGIN@26 at line 26 # spent 11.9ms making 1 call to MongoDB::GridFS::BEGIN@26 |
27 | 2 | 36µs | 1 | 17µs | # spent 17µs within MongoDB::GridFS::BEGIN@27 which was called:
# once (17µs+0s) by MongoDB::Database::BEGIN@26 at line 27 # spent 17µs making 1 call to MongoDB::GridFS::BEGIN@27 |
28 | 2 | 942µs | 2 | 38µs | # spent 23µs (7+16) within MongoDB::GridFS::BEGIN@28 which was called:
# once (7µs+16µs) by MongoDB::Database::BEGIN@26 at line 28 # spent 23µs making 1 call to MongoDB::GridFS::BEGIN@28
# spent 16µs making 1 call to Exporter::import |
29 | |||||
30 | |||||
31 | 1 | 300ns | $MongoDB::GridFS::chunk_size = 1048576; | ||
32 | |||||
33 | 1 | 2µs | 1 | 1.48ms | has _database => ( # spent 1.48ms making 1 call to Moose::has |
34 | is => 'ro', | ||||
35 | isa => 'MongoDB::Database', | ||||
36 | required => 1, | ||||
37 | ); | ||||
38 | |||||
39 | |||||
40 | 1 | 2µs | 1 | 5.91ms | has prefix => ( # spent 5.91ms making 1 call to Moose::has |
41 | is => 'ro', | ||||
42 | isa => 'Str', | ||||
43 | default => 'fs' | ||||
44 | ); | ||||
45 | |||||
46 | |||||
47 | 1 | 2µs | 1 | 4.14ms | has files => ( # spent 4.14ms making 1 call to Moose::has |
48 | is => 'ro', | ||||
49 | isa => 'MongoDB::Collection', | ||||
50 | lazy_build => 1 | ||||
51 | ); | ||||
52 | |||||
53 | sub _build_files { | ||||
54 | my $self = shift; | ||||
55 | my $coll = $self->_database->get_collection($self->prefix . '.files'); | ||||
56 | return $coll; | ||||
57 | } | ||||
58 | |||||
59 | |||||
60 | 1 | 1µs | 1 | 3.46ms | has chunks => ( # spent 3.46ms making 1 call to Moose::has |
61 | is => 'ro', | ||||
62 | isa => 'MongoDB::Collection', | ||||
63 | lazy_build => 1 | ||||
64 | ); | ||||
65 | |||||
66 | sub _build_chunks { | ||||
67 | my $self = shift; | ||||
68 | my $coll = $self->_database->get_collection($self->prefix . '.chunks'); | ||||
69 | return $coll; | ||||
70 | } | ||||
71 | |||||
72 | # This checks if the required indexes for GridFS exist for the current database. | ||||
73 | # If they are not found, they will be created. | ||||
74 | sub BUILD { | ||||
75 | my ($self) = @_; | ||||
76 | |||||
77 | # check for the required indexes in the system.indexes collection | ||||
78 | my $count = $self->_database->get_collection('system.indexes')->count({key=>{filename => 1}}); | ||||
79 | $count += $self->_database->get_collection('system.indexes')->count({key=>{files_id => 1, n => 1}}); | ||||
80 | |||||
81 | # if we don't have the required indexes, create them now. | ||||
82 | if ($count < 2){ | ||||
83 | $self->_ensure_indexes(); | ||||
84 | } | ||||
85 | } | ||||
86 | |||||
87 | |||||
88 | sub _ensure_indexes { | ||||
89 | my ($self) = @_; | ||||
90 | |||||
91 | # ensure the necessary index is present (this may be first usage) | ||||
92 | $self->files->ensure_index(Tie::IxHash->new(filename => 1), {"safe" => 1}); | ||||
93 | $self->chunks->ensure_index(Tie::IxHash->new(files_id => 1, n => 1), {"safe" => 1, "unique" => 1}); | ||||
94 | } | ||||
95 | |||||
96 | |||||
97 | sub get { | ||||
98 | my ($self, $id) = @_; | ||||
99 | |||||
100 | return $self->find_one({_id => $id}); | ||||
101 | } | ||||
102 | |||||
103 | |||||
104 | sub put { | ||||
105 | my ($self, $fh, $metadata) = @_; | ||||
106 | |||||
107 | return $self->insert($fh, $metadata, {safe => 1}); | ||||
108 | } | ||||
109 | |||||
110 | |||||
111 | sub delete { | ||||
112 | my ($self, $id) = @_; | ||||
113 | |||||
114 | $self->remove({_id => $id}, {safe => 1}); | ||||
115 | } | ||||
116 | |||||
117 | |||||
118 | sub find_one { | ||||
119 | my ($self, $criteria, $fields) = @_; | ||||
120 | |||||
121 | my $file = $self->files->find_one($criteria, $fields); | ||||
122 | return undef unless $file; | ||||
123 | return MongoDB::GridFS::File->new({_grid => $self,info => $file}); | ||||
124 | } | ||||
125 | |||||
126 | |||||
127 | sub remove { | ||||
128 | my ($self, $criteria, $options) = @_; | ||||
129 | |||||
130 | my $just_one = 0; | ||||
131 | my $safe = 0; | ||||
132 | |||||
133 | if (defined $options) { | ||||
134 | if (ref $options eq 'HASH') { | ||||
135 | $just_one = $options->{just_one} && 1; | ||||
136 | $safe = $options->{safe} && 1; | ||||
137 | } | ||||
138 | elsif ($options) { | ||||
139 | $just_one = $options && 1; | ||||
140 | } | ||||
141 | } | ||||
142 | |||||
143 | if ($just_one) { | ||||
144 | my $meta = $self->files->find_one($criteria); | ||||
145 | $self->chunks->remove({"files_id" => $meta->{'_id'}}, {safe => $safe}); | ||||
146 | $self->files->remove({"_id" => $meta->{'_id'}}, {safe => $safe}); | ||||
147 | } | ||||
148 | else { | ||||
149 | my $cursor = $self->files->query($criteria); | ||||
150 | while (my $meta = $cursor->next) { | ||||
151 | $self->chunks->remove({"files_id" => $meta->{'_id'}}, {safe => $safe}); | ||||
152 | } | ||||
153 | $self->files->remove($criteria, {safe => $safe}); | ||||
154 | } | ||||
155 | } | ||||
156 | |||||
- - | |||||
159 | sub insert { | ||||
160 | my ($self, $fh, $metadata, $options) = @_; | ||||
161 | $options ||= {}; | ||||
162 | |||||
163 | confess "not a file handle" unless $fh; | ||||
164 | $metadata = {} unless $metadata && ref $metadata eq 'HASH'; | ||||
165 | |||||
166 | my $start_pos = $fh->getpos(); | ||||
167 | |||||
168 | my $id; | ||||
169 | if (exists $metadata->{"_id"}) { | ||||
170 | $id = $metadata->{"_id"}; | ||||
171 | } | ||||
172 | else { | ||||
173 | $id = MongoDB::OID->new; | ||||
174 | } | ||||
175 | |||||
176 | my $n = 0; | ||||
177 | my $length = 0; | ||||
178 | while ((my $len = $fh->read(my $data, $MongoDB::GridFS::chunk_size)) != 0) { | ||||
179 | $self->chunks->insert({"files_id" => $id, | ||||
180 | "n" => $n, | ||||
181 | "data" => bless(\$data)}, $options); | ||||
182 | $n++; | ||||
183 | $length += $len; | ||||
184 | } | ||||
185 | $fh->setpos($start_pos); | ||||
186 | |||||
187 | # get an md5 hash for the file. set the retry flag to 'true' in case the | ||||
188 | # database, collection, or indexes are missing. That way we can recreate them and | ||||
189 | # retry the md5 calc. | ||||
190 | my $result = $self->_calc_md5($id, $self->prefix, 1); | ||||
191 | |||||
192 | # compare the md5 hashes | ||||
193 | if ($options->{safe}) { | ||||
194 | my $md5 = Digest::MD5->new; | ||||
195 | $md5->addfile($fh); | ||||
196 | my $digest = $md5->hexdigest; | ||||
197 | if ($digest ne $result->{md5}) { | ||||
198 | # cleanup and die | ||||
199 | $self->chunks->remove({files_id => $id}); | ||||
200 | die "md5 hashes don't match: database got $result->{md5}, fs got $digest"; | ||||
201 | } | ||||
202 | } | ||||
203 | |||||
204 | my %copy = %{$metadata}; | ||||
205 | $copy{"_id"} = $id; | ||||
206 | $copy{"md5"} = $result->{"md5"}; | ||||
207 | $copy{"chunkSize"} = $MongoDB::GridFS::chunk_size; | ||||
208 | $copy{"uploadDate"} = DateTime->now; | ||||
209 | $copy{"length"} = $length; | ||||
210 | return $self->files->insert(\%copy, $options); | ||||
211 | } | ||||
212 | |||||
213 | # Calculates the md5 of the file on the server | ||||
214 | # $id : reference to the object we want to hash | ||||
215 | # $root : the namespace the file resides in | ||||
216 | # $retry : a flag which controls whether or not to retry the md5 calc. | ||||
217 | # (which is currently only if we are missing our indexes) | ||||
218 | sub _calc_md5 { | ||||
219 | my ($self, $id, $root, $retry) = @_; | ||||
220 | |||||
221 | # Try to get an md5 hash for the file | ||||
222 | my $result = $self->_database->run_command(["filemd5", $id, "root" => $self->prefix]); | ||||
223 | |||||
224 | # If we didn't get a hash back, it means something is wrong (probably to do with gridfs's | ||||
225 | # indexes, because it's currently the only error that is thrown from the md5 class) | ||||
226 | if (ref($result) ne 'HASH') { | ||||
227 | # Yep, indexes are missing. If we have the $retry flag, let's create them and calc the md5 again, | ||||
228 | # but we won't set the $retry flag again. We don't want an infinite loop for any reason. | ||||
229 | if ($retry == 1 && $result eq 'need an index on { files_id : 1 , n : 1 }'){ | ||||
230 | $self->_ensure_indexes(); | ||||
231 | $result = $self->_calc_md5($id, $root, 0); | ||||
232 | } | ||||
233 | # Well, something bad is happening, so let's clean up and die. | ||||
234 | else{ | ||||
235 | $self->chunks->remove({files_id => $id}); | ||||
236 | die "recieve an unexpected error from the server: $result"; | ||||
237 | } | ||||
238 | } | ||||
239 | |||||
240 | return $result; | ||||
241 | } | ||||
242 | |||||
- - | |||||
245 | sub drop { | ||||
246 | my ($self) = @_; | ||||
247 | |||||
248 | $self->files->drop; | ||||
249 | $self->chunks->drop; | ||||
250 | } | ||||
251 | |||||
252 | |||||
253 | sub all { | ||||
254 | my ($self) = @_; | ||||
255 | my @ret; | ||||
256 | |||||
257 | my $cursor = $self->files->query; | ||||
258 | while (my $meta = $cursor->next) { | ||||
259 | push @ret, MongoDB::GridFS::File->new( | ||||
260 | _grid => $self, | ||||
261 | info => $meta); | ||||
262 | } | ||||
263 | return @ret; | ||||
264 | } | ||||
265 | |||||
266 | 1 | 8µs | 1; | ||
267 | |||||
268 | __END__ |