3 from zope.interface import implements
4 from twisted.trial import unittest
5 from twisted.internet import defer
6 from allmydata import uri, dirnode
7 from allmydata.immutable import upload
8 from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
9 INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
10 ExistingChildError, NoSuchChildError, \
11 IDeepCheckResults, IDeepCheckAndRepairResults
12 from allmydata.util import hashutil
13 from allmydata.monitor import Monitor
14 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
15 FakeDirectoryNode, create_chk_filenode, ErrorMixin
16 from allmydata.checker_results import CheckerResults, CheckAndRepairResults
17 import common_util as testutil
19 # to test dirnode.py, we want to construct a tree of real DirectoryNodes that
20 # contain pointers to fake files. We start with a fake MutableFileNode that
21 # stores all of its data in a static table.
24 implements(IFileNode, IMutableFileNode) # sure, why not
25 def __init__(self, nodeuri):
26 if not isinstance(nodeuri, str):
27 nodeuri = nodeuri.to_string()
28 self.nodeuri = nodeuri
29 si = hashutil.tagged_hash("tag1", nodeuri)[:16]
30 self.storage_index = si
31 fp = hashutil.tagged_hash("tag2", nodeuri)
32 self.verifieruri = uri.SSKVerifierURI(storage_index=si, fingerprint=fp)
35 def get_readonly_uri(self):
37 def get_verifier(self):
38 return self.verifieruri
39 def get_storage_index(self):
40 return self.storage_index
    def check(self, monitor, verify=False):
        """Fake check(): always reports the node as recoverable.

        ``monitor`` and ``verify`` are accepted only to satisfy the
        checkable-node interface; this stub ignores both.
        """
        r = CheckerResults("", None)
        # NOTE(review): a line is missing from this copy between the two
        # below -- presumably r.set_healthy(True); confirm upstream.
        r.set_recoverable(True)
        return defer.succeed(r)
    def check_and_repair(self, monitor, verify=False):
        """Fake check-and-repair: reuses check()'s result as both the
        pre-repair and post-repair results (nothing ever gets repaired)."""
        # NOTE(review): self.check() is declared as (monitor, verify); this
        # call passes ``verify`` in the monitor slot. Harmless for the fake
        # (monitor is unused there) but worth confirming intent.
        d = self.check(verify)
        # NOTE(review): the callback wrapper that binds ``cr`` (and the
        # trailing return/addCallback lines) is missing from this copy.
        r = CheckAndRepairResults(None)
        r.pre_repair_results = r.post_repair_results = cr
57 # dirnode requires three methods from the client: upload(),
58 # create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
59 # only used by the convenience composite method add_file().
    def upload(self, uploadable):
        """Fake upload: read the uploadable's full contents, wrap them in a
        fake CHK filenode, and deliver UploadResults carrying its URI."""
        d = uploadable.get_size()
        d.addCallback(lambda size: uploadable.read(size))
        # NOTE(review): the _got_data callback definition is missing from
        # this copy; ``data`` below was presumably its parameter (the
        # joined read chunks), and it returned ``results``.
        n = create_chk_filenode(self, data)
        results = upload.UploadResults()
        results.uri = n.get_uri()
        d.addCallback(_got_data)
    def create_node_from_uri(self, u):
        """Fake node factory: directory caps (read-write or read-only)
        become FakeDirectoryNodes; anything else becomes a Marker stub.

        NOTE(review): one line (likely a precondition or IURI() coercion
        of ``u``) is missing from this copy.
        """
        if (INewDirectoryURI.providedBy(u)
            or IReadonlyNewDirectoryURI.providedBy(u)):
            return FakeDirectoryNode(self).init_from_uri(u)
        return Marker(u.to_string())
    def create_empty_dirnode(self):
        """Create a FakeDirectoryNode and fire a Deferred with it."""
        n = FakeDirectoryNode(self)
        # NOTE(review): the line creating ``d`` (presumably d = n.create())
        # and the trailing 'return d' are missing from this copy.
        d.addCallback(lambda res: n)
90 class Dirnode(unittest.TestCase,
91 testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
93 self.client = FakeClient()
96 d = self.client.create_empty_dirnode()
98 self.failUnless(isinstance(res, FakeDirectoryNode))
100 self.failUnless("RW" in rep)
    def test_corrupt(self):
        """Corrupt a stored dirnode (bit-flip near the encrypted writecap)
        and verify that reading it back raises IntegrityCheckError."""
        d = self.client.create_empty_dirnode()
        # NOTE(review): the _created(dn) callback wrapper that binds ``dn``
        # is missing from this copy; the indented lines ran inside it.
            u = make_mutable_file_uri()
            d = dn.set_uri(u"child", u, {})
            d.addCallback(lambda res: dn.list())
            def _check1(children):
                self.failUnless(u"child" in children)
            d.addCallback(_check1)
            d.addCallback(lambda res:
                          self.shouldFail(NoSuchChildError, "get bogus", None,
            # NOTE(review): the tail of the shouldFail() call above and the
            # _corrupt(res) def line are missing from this copy; ``filenode``
            # below was presumably looked up inside _corrupt.
                si = IURI(filenode.get_uri()).storage_index
                old_contents = filenode.all_contents[si]
                # we happen to know that the writecap is encrypted near the
                # end of the string. Flip one of its bits and make sure we
                # detect the corruption.
                new_contents = testutil.flip_bit(old_contents, -10)
                # TODO: also test flipping bits in the other portions
                filenode.all_contents[si] = new_contents
            d.addCallback(_corrupt)
            # NOTE(review): the addCallback line introducing this shouldFail
            # (and its final argument line) are missing from this copy.
                self.shouldFail(hashutil.IntegrityCheckError, "corrupt",
                                "HMAC does not match, crypttext is corrupted",
            d.addCallback(_check2)
        d.addCallback(_created)
    def test_check(self):
        """A freshly created (empty) dirnode should check as healthy."""
        d = self.client.create_empty_dirnode()
        d.addCallback(lambda dn: dn.check(Monitor()))
        # NOTE(review): the callback def binding ``res`` (and the trailing
        # addCallback/return lines) are missing from this copy.
            self.failUnless(res.is_healthy())
    def _test_deepcheck_create(self):
        """Build the shared fixture for the deep-check tests: a root
        directory containing subdir/file1 and subdir/link, where 'link'
        points back at the root (a directory loop)."""
        # create a small tree with a loop, and some non-directories
        # root/subdir/link -> root
        # NOTE(review): the rest of the tree diagram comment is missing
        # from this copy.
        d = self.client.create_empty_dirnode()
        def _created_root(rootnode):
            self._rootnode = rootnode
            return rootnode.create_empty_directory(u"subdir")
        d.addCallback(_created_root)
        def _created_subdir(subdir):
            self._subdir = subdir
            d = subdir.add_file(u"file1", upload.Data("data", None))
            d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
            # NOTE(review): 'return d' is missing here in this copy.
        d.addCallback(_created_subdir)
        # NOTE(review): the _done(res) wrapper and the trailing
        # addCallback/return are missing from this copy.
            return self._rootnode
    def test_deepcheck(self):
        """Deep-check the 3-object tree from _test_deepcheck_create():
        every object healthy, nothing corrupt or unrecoverable, and the
        loop edge is visited only once."""
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
        def _check_results(r):
            self.failUnless(IDeepCheckResults.providedBy(r))
            # NOTE(review): the line binding ``c`` (presumably
            # c = r.get_counters()) is missing from this copy.
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 3,
                                  "count-objects-healthy": 3,
                                  "count-objects-unhealthy": 0,
                                  "count-objects-unrecoverable": 0,
                                  "count-corrupt-shares": 0,
            # NOTE(review): the dict's closing line is missing here.
            self.failIf(r.get_corrupt_shares())
            self.failUnlessEqual(len(r.get_all_results()), 3)
        d.addCallback(_check_results)
        # NOTE(review): 'return d' is missing from this copy.
    def test_deepcheck_and_repair(self):
        """Deep-check-and-repair the healthy tree: no repairs should be
        attempted and pre/post-repair counters should match."""
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode:
                      rootnode.start_deep_check_and_repair().when_done())
        def _check_results(r):
            self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
            # NOTE(review): the line binding ``c`` (presumably
            # c = r.get_counters()) is missing from this copy.
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 3,
                                  "count-objects-healthy-pre-repair": 3,
                                  "count-objects-unhealthy-pre-repair": 0,
                                  "count-objects-unrecoverable-pre-repair": 0,
                                  "count-corrupt-shares-pre-repair": 0,
                                  "count-objects-healthy-post-repair": 3,
                                  "count-objects-unhealthy-post-repair": 0,
                                  "count-objects-unrecoverable-post-repair": 0,
                                  "count-corrupt-shares-post-repair": 0,
                                  "count-repairs-attempted": 0,
                                  "count-repairs-successful": 0,
                                  "count-repairs-unsuccessful": 0,
            # NOTE(review): the dict's closing line is missing here.
            self.failIf(r.get_corrupt_shares())
            self.failIf(r.get_remaining_corrupt_shares())
            self.failUnlessEqual(len(r.get_all_results()), 3)
        d.addCallback(_check_results)
        # NOTE(review): 'return d' is missing from this copy.
    def _mark_file_bad(self, rootnode):
        """Poison the root's backing share so a later check reports it
        unhealthy. NOTE(review): 'return rootnode' (needed by the callback
        chain in test_deepcheck_problems) is missing from this copy."""
        si = IURI(rootnode.get_uri())._filenode_uri.storage_index
        rootnode._node.bad_shares[si] = "unhealthy"
    def test_deepcheck_problems(self):
        """After marking the root unhealthy, a deep-check should count
        exactly one unhealthy object out of three checked."""
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
        def _check_results(r):
            # NOTE(review): the line binding ``c`` (presumably
            # c = r.get_counters()) is missing from this copy.
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 3,
                                  "count-objects-healthy": 2,
                                  "count-objects-unhealthy": 1,
                                  "count-objects-unrecoverable": 0,
                                  "count-corrupt-shares": 0,
            # NOTE(review): the dict's closing line is missing here.
            #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
        d.addCallback(_check_results)
        # NOTE(review): 'return d' is missing from this copy.
    def test_readonly(self):
        """Every mutating method on a read-only dirnode must raise
        dirnode.NotMutableError, and the read-only view must still list
        children written through the read-write cap."""
        fileuri = make_chk_file_uri(1234)
        filenode = self.client.create_node_from_uri(fileuri)
        uploadable = upload.Data("some data", convergence="some convergence string")

        d = self.client.create_empty_dirnode()
        # NOTE(review): the _created(rw_dn) wrapper that binds ``rw_dn``
        # is missing from this copy.
            d2 = rw_dn.set_uri(u"child", fileuri)
            d2.addCallback(lambda res: rw_dn)
            # NOTE(review): 'return d2' is missing here.
        d.addCallback(_created)
        # NOTE(review): the _ready(rw_dn) def line is missing here.
            ro_uri = rw_dn.get_readonly_uri()
            ro_dn = self.client.create_node_from_uri(ro_uri)
            self.failUnless(ro_dn.is_readonly())
            self.failUnless(ro_dn.is_mutable())

            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_uri, u"newchild", fileuri)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_node, u"newchild", filenode)
            self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
                            ro_dn.set_nodes, [ (u"newchild", filenode) ])
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.add_file, u"newchild", uploadable)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.delete, u"child")
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.create_empty_directory, u"newchild")
            self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
                            ro_dn.set_metadata_for, u"child", {})
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.move_child_to, u"child", rw_dn)
            # moving INTO a read-only target must fail too
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            rw_dn.move_child_to, u"child", ro_dn)
            # NOTE(review): the return of ro_dn.list() is missing here.
        d.addCallback(_ready)
        def _listed(children):
            self.failUnless(u"child" in children)
        d.addCallback(_listed)
        # NOTE(review): 'return d' is missing from this copy.
276 def failUnlessGreaterThan(self, a, b):
277 self.failUnless(a > b, "%r should be > %r" % (a, b))
279 def failUnlessGreaterOrEqualThan(self, a, b):
280 self.failUnless(a >= b, "%r should be >= %r" % (a, b))
    def test_create(self):
        """Exercise the full dirnode API on a fresh directory: child
        add/replace semantics (ExistingChildError on overwrite), metadata
        handling through set_uri/set_node/set_children/set_nodes,
        deep-stats and manifest generation, timestamp (ctime/mtime)
        rules, delete, add_file, and move_child_to.

        NOTE(review): many lines of this method are missing from this
        copy of the file -- most importantly the callback wrapper that
        binds ``n`` (the root dirnode) and assorted closing lines of
        multi-line calls. Restore them from the original before running.
        """
        self.expected_manifest = []
        d = self.client.create_empty_dirnode()
        # NOTE(review): the callback def binding ``n`` (and ``u``) is
        # missing here; the indented lines below ran inside it.
            self.failUnless(n.is_mutable())
            self.failUnless(u.startswith("URI:DIR2:"), u)
            u_ro = n.get_readonly_uri()
            self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
            u_v = n.get_verifier().to_string()
            self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
            self.expected_manifest.append( ((), u) )
            expected_si = n._uri._filenode_uri.storage_index
            self.failUnlessEqual(n.get_storage_index(), expected_si)

            # an empty directory lists no children
            d.addCallback(lambda res: self.failUnlessEqual(res, {}))
            d.addCallback(lambda res: n.has_child(u"missing"))
            d.addCallback(lambda res: self.failIf(res))
            fake_file_uri = make_mutable_file_uri()
            other_file_uri = make_mutable_file_uri()
            m = Marker(fake_file_uri)
            ffu_v = m.get_verifier().to_string()
            self.expected_manifest.append( ((u"child",) , m.get_uri()) )
            d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_uri-no",
                                          "child 'child' already exists",
                                          n.set_uri, u"child", other_file_uri,
            # NOTE(review): the overwrite=False closing line is missing.
            d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
            # /subdir = directory
            def _created(subdir):
                self.failUnless(isinstance(subdir, FakeDirectoryNode))
                # NOTE(review): the line saving self.subdir (used below)
                # is missing from this copy.
                new_v = subdir.get_verifier().to_string()
                assert isinstance(new_v, str)
                self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
            d.addCallback(_created)

            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "mkdir-no",
                                          "child 'subdir' already exists",
                                          n.create_empty_directory, u"subdir",
            # NOTE(review): the overwrite=False closing line is missing.
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child", u"subdir"])))

            d.addCallback(lambda res: n.start_deep_stats().when_done())
            def _check_deepstats(stats):
                self.failUnless(isinstance(stats, dict))
                expected = {"count-immutable-files": 0,
                            "count-mutable-files": 1,
                            "count-literal-files": 0,
                            "count-directories": 2,
                            "size-immutable-files": 0,
                            "size-literal-files": 0,
                            #"size-directories": 616, # varies
                            #"largest-directory": 616,
                            "largest-directory-children": 2,
                            "largest-immutable-file": 0,
                # NOTE(review): the dict's closing line is missing here.
                for k,v in expected.iteritems():
                    self.failUnlessEqual(stats[k], v,
                                         "stats[%s] was %s, not %s" %
                # NOTE(review): the format tuple's closing line is missing.
                self.failUnless(stats["size-directories"] > 500,
                                stats["size-directories"])
                self.failUnless(stats["largest-directory"] > 500,
                                stats["largest-directory"])
                self.failUnlessEqual(stats["size-files-histogram"], [])
            d.addCallback(_check_deepstats)

            d.addCallback(lambda res: n.build_manifest().when_done())
            def _check_manifest(res):
                manifest = res["manifest"]
                self.failUnlessEqual(sorted(manifest),
                                     sorted(self.expected_manifest))
                # NOTE(review): the line binding ``stats`` is missing here.
                _check_deepstats(stats)
            d.addCallback(_check_manifest)

            def _add_subsubdir(res):
                return self.subdir.create_empty_directory(u"subsubdir")
            d.addCallback(_add_subsubdir)
            # /subdir = directory
            # /subdir/subsubdir = directory
            d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
            d.addCallback(lambda subsubdir:
                          self.failUnless(isinstance(subsubdir,
            d.addCallback(lambda res: n.get_child_at_path(u""))
            d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
            d.addCallback(lambda res: n.get_metadata_for(u"child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
            d.addCallback(lambda res:
                          self.shouldFail(NoSuchChildError, "gcamap-no",
                                          n.get_child_and_metadata_at_path,
            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u""))
            def _check_child_and_metadata1(res):
                child, metadata = res
                self.failUnless(isinstance(child, FakeDirectoryNode))
                # edge-metadata needs at least one path segment
                self.failUnlessEqual(sorted(metadata.keys()), [])
            d.addCallback(_check_child_and_metadata1)
            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u"child"))

            def _check_child_and_metadata2(res):
                child, metadata = res
                self.failUnlessEqual(child.get_uri(),
                                     fake_file_uri.to_string())
                self.failUnlessEqual(sorted(metadata.keys()),
            d.addCallback(_check_child_and_metadata2)

            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
            def _check_child_and_metadata3(res):
                child, metadata = res
                self.failUnless(isinstance(child, FakeDirectoryNode))
                self.failUnlessEqual(sorted(metadata.keys()),
            d.addCallback(_check_child_and_metadata3)

            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, {}))
            d.addCallback(lambda res: n.get_metadata_for(u"c2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri))
            d.addCallback(lambda res: n.get_metadata_for(u"c3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
            # or we can add specific metadata at set_uri() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri,
            d.addCallback(lambda res: n.get_metadata_for(u"c4"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete(u"c2"))
            d.addCallback(lambda res: n.delete(u"c3"))
            d.addCallback(lambda res: n.delete(u"c4"))

            # set_node + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_node(u"d2", n, {}))
            d.addCallback(lambda res: self.client.create_empty_dirnode())
            d.addCallback(lambda n2:
                          self.shouldFail(ExistingChildError, "set_node-no",
                                          "child 'd2' already exists",
                                          n.set_node, u"d2", n2,
            d.addCallback(lambda res: n.get_metadata_for(u"d2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_node(u"d3", n))
            d.addCallback(lambda res: n.get_metadata_for(u"d3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
            # or we can add specific metadata at set_node() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_node(u"d4", n,
            d.addCallback(lambda res: n.get_metadata_for(u"d4"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete(u"d2"))
            d.addCallback(lambda res: n.delete(u"d3"))
            d.addCallback(lambda res: n.delete(u"d4"))

            # metadata through set_children()
            d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri),
                                                       (u"e2", fake_file_uri, {}),
                                                       (u"e3", fake_file_uri,
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_children-no",
                                          "child 'e1' already exists",
                                          [ (u"e1", other_file_uri),
                                            (u"new", other_file_uri), ],
            # and 'new' should not have been created
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children: self.failIf(u"new" in children))
            d.addCallback(lambda res: n.get_metadata_for(u"e1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
            d.addCallback(lambda res: n.get_metadata_for(u"e2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
            d.addCallback(lambda res: n.get_metadata_for(u"e3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete(u"e1"))
            d.addCallback(lambda res: n.delete(u"e2"))
            d.addCallback(lambda res: n.delete(u"e3"))

            # metadata through set_nodes()
            d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_nodes-no",
                                          "child 'f1' already exists",
            # and 'new' should not have been created
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children: self.failIf(u"new" in children))
            d.addCallback(lambda res: n.get_metadata_for(u"f1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
            d.addCallback(lambda res: n.get_metadata_for(u"f2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
            d.addCallback(lambda res: n.get_metadata_for(u"f3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))

            d.addCallback(lambda res: n.delete(u"f1"))
            d.addCallback(lambda res: n.delete(u"f2"))
            d.addCallback(lambda res: n.delete(u"f3"))

            d.addCallback(lambda res:
                          n.set_metadata_for(u"child",
                                             {"tags": ["web2.0-compatible"]}))
            d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata,
                                               {"tags": ["web2.0-compatible"]}))

            # NOTE(review): the _start(res) def line is missing here.
                self._start_timestamp = time.time()
            d.addCallback(_start)
            # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
            # floats to hundredeths (it uses str(num) instead of repr(num)).
            # simplejson-1.7.3 does not have this bug. To prevent this bug
            # from causing the test to fail, stall for more than a few
            # hundrededths of a second.
            d.addCallback(self.stall, 0.1)
            d.addCallback(lambda res: n.add_file(u"timestamps",
                upload.Data("stamp me", convergence="some convergence string")))
            d.addCallback(self.stall, 0.1)
            # NOTE(review): the _stop(res) def line (and its addCallback)
            # are missing here.
                self._stop_timestamp = time.time()

            d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
            def _check_timestamp1(metadata):
                self.failUnless("ctime" in metadata)
                self.failUnless("mtime" in metadata)
                self.failUnlessGreaterOrEqualThan(metadata["ctime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                self.failUnlessGreaterOrEqualThan(metadata["mtime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                # Our current timestamp rules say that replacing an existing
                # child should preserve the 'ctime' but update the mtime
                self._old_ctime = metadata["ctime"]
                self._old_mtime = metadata["mtime"]
            d.addCallback(_check_timestamp1)
            d.addCallback(self.stall, 2.0) # accomodate low-res timestamps
            d.addCallback(lambda res: n.set_node(u"timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
            def _check_timestamp2(metadata):
                self.failUnlessEqual(metadata["ctime"], self._old_ctime,
                                     "%s != %s" % (metadata["ctime"],
                self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
                return n.delete(u"timestamps")
            d.addCallback(_check_timestamp2)

            # also make sure we can add/update timestamps on a
            # previously-existing child that didn't have any, since there are
            # a lot of 0.7.0-generated edges around out there
            d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
            d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),
            d.addCallback(lambda res: n.delete(u"no_timestamps"))

            # delete() hands back the old child node
            d.addCallback(lambda res: n.delete(u"subdir"))
            d.addCallback(lambda old_child:
                          self.failUnlessEqual(old_child.get_uri(),
                                               self.subdir.get_uri()))

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
            uploadable = upload.Data("some data", convergence="some convergence string")
            d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            other_uploadable = upload.Data("some data", convergence="stuff")
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "add_file-no",
                                          "child 'newfile' already exists",
                                          n.add_file, u"newfile",
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child", u"newfile"])))
            d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(sorted(metadata.keys()),

            d.addCallback(lambda res: n.add_file(u"newfile-metadata",
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata, {"key": "value"}))
            d.addCallback(lambda res: n.delete(u"newfile-metadata"))

            d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
            def _created2(subdir2):
                self.subdir2 = subdir2
                # put something in the way, to make sure it gets overwritten
                return subdir2.add_file(u"child", upload.Data("overwrite me",
            d.addCallback(_created2)

            d.addCallback(lambda res:
                          n.move_child_to(u"child", self.subdir2))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"newfile", u"subdir2"])))
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
            d.addCallback(lambda res: self.subdir2.get(u"child"))
            d.addCallback(lambda child:
                          self.failUnlessEqual(child.get_uri(),
                                               fake_file_uri.to_string()))

            # move it back, using new_child_name=
            d.addCallback(lambda res:
                          self.subdir2.move_child_to(u"child", n, u"newchild"))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"newchild", u"newfile",
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()), []))

            # now make sure that we honor overwrite=False
            d.addCallback(lambda res:
                          self.subdir2.set_uri(u"newchild", other_file_uri))

            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "move_child_to-no",
                                          "child 'newchild' already exists",
                                          n.move_child_to, u"newchild",
            d.addCallback(lambda res: self.subdir2.get(u"newchild"))
            d.addCallback(lambda child:
                          self.failUnlessEqual(child.get_uri(),
                                               other_file_uri.to_string()))

        # NOTE(review): the closing addCallback for the wrapper binding
        # ``n``, and the final 'return d', are missing from this copy.
        d.addErrback(self.explain_error)
707 class DeepStats(unittest.TestCase):
708 def test_stats(self):
709 ds = dirnode.DeepStats(None)
710 ds.add("count-files")
711 ds.add("size-immutable-files", 123)
712 ds.histogram("size-files-histogram", 123)
713 ds.max("largest-directory", 444)
716 self.failUnlessEqual(s["count-files"], 1)
717 self.failUnlessEqual(s["size-immutable-files"], 123)
718 self.failUnlessEqual(s["largest-directory"], 444)
719 self.failUnlessEqual(s["count-literal-files"], 0)
721 ds.add("count-files")
722 ds.add("size-immutable-files", 321)
723 ds.histogram("size-files-histogram", 321)
724 ds.max("largest-directory", 2)
727 self.failUnlessEqual(s["count-files"], 2)
728 self.failUnlessEqual(s["size-immutable-files"], 444)
729 self.failUnlessEqual(s["largest-directory"], 444)
730 self.failUnlessEqual(s["count-literal-files"], 0)
731 self.failUnlessEqual(s["size-files-histogram"],
732 [ (101, 316, 1), (317, 1000, 1) ])
734 ds = dirnode.DeepStats(None)
735 for i in range(1, 1100):
736 ds.histogram("size-files-histogram", i)
737 ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
739 self.failUnlessEqual(s["size-files-histogram"],
747 (3162277660169L, 10000000000000L, 1),