3 from zope.interface import implements
4 from twisted.trial import unittest
5 from twisted.internet import defer
6 from allmydata import uri, dirnode
7 from allmydata.immutable import upload
8 from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
9 INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
10 ExistingChildError, IDeepCheckResults, IDeepCheckAndRepairResults
11 from allmydata.util import hashutil, testutil
12 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
13 FakeDirectoryNode, create_chk_filenode
14 from allmydata.checker_results import CheckerResults, CheckAndRepairResults
16 # to test dirnode.py, we want to construct a tree of real DirectoryNodes that
17 # contain pointers to fake files. We start with a fake MutableFileNode that
18 # stores all of its data in a static table.
21 implements(IFileNode, IMutableFileNode) # sure, why not
def __init__(self, nodeuri):
    """Build a fake filenode keyed by *nodeuri* (a URI object or string).

    The verifier cap is derived deterministically from the cap string,
    so two Markers made from the same cap compare consistently.
    """
    if not isinstance(nodeuri, str):
        nodeuri = nodeuri.to_string()
    self.nodeuri = nodeuri
    storage_index = hashutil.tagged_hash("tag1", nodeuri)[:16]
    fingerprint = hashutil.tagged_hash("tag2", nodeuri)
    self.verifieruri = uri.SSKVerifierURI(storage_index=storage_index,
                                          fingerprint=fingerprint)
def get_readonly_uri(self):
    # NOTE(review): the body of this accessor is missing from this excerpt;
    # presumably it returns the stored cap (self.nodeuri) -- confirm
    # against the original source file.
def get_verifier(self):
    """Return the SSK verifier URI derived for this node in __init__."""
    return self.verifieruri
def check(self, verify=False):
# Return a Deferred that fires with a CheckerResults for this fake node.
# NOTE(review): this excerpt appears to have lost a line here -- the
# results object is never marked healthy, yet test_check below expects
# res.is_healthy() to be true.  Confirm against the original source.
r = CheckerResults(None)
return defer.succeed(r)
def check_and_repair(self, verify=False):
# Run check() and wrap its result in a CheckAndRepairResults whose
# pre- and post-repair results are identical (nothing is ever repaired).
# NOTE(review): excerpt looks truncated -- 'cr' is unbound as shown (a
# callback such as 'def _got(cr):' seems to be missing) and no value is
# returned.  Confirm against the original source.
d = self.check(verify)
r = CheckAndRepairResults(None)
r.pre_repair_results = r.post_repair_results = cr
50 # dirnode requires three methods from the client: upload(),
51 # create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
52 # only used by the convenience composite method add_file().
def upload(self, uploadable):
# Fake IClient.upload(): read the uploadable's full contents, wrap them
# in a CHK filenode, and (presumably) return UploadResults via a Deferred.
# NOTE(review): excerpt appears damaged -- '_got_data' is referenced but
# never defined (its 'def' line and the trailing 'return' look lost),
# and 'data' is unbound at this level.  Confirm against the original.
d = uploadable.get_size()
d.addCallback(lambda size: uploadable.read(size))
n = create_chk_filenode(self, data)
results = upload.UploadResults()
results.uri = n.get_uri()
d.addCallback(_got_data)
def create_node_from_uri(self, u):
# Directory caps (read-write or read-only) become FakeDirectoryNodes;
# any other cap becomes a Marker stub.
# NOTE(review): a normalizing line such as 'u = IURI(u)' may have been
# lost from this excerpt, since 'u' is treated as a cap object below
# even though callers may pass a string -- confirm against the original.
if (INewDirectoryURI.providedBy(u)
or IReadonlyNewDirectoryURI.providedBy(u)):
return FakeDirectoryNode(self).init_from_uri(u)
return Marker(u.to_string())
def create_empty_dirnode(self):
# Create a fresh FakeDirectoryNode and fire a Deferred with it.
# NOTE(review): 'd' is unbound here (a line such as 'd = n.create()'
# appears to be missing) and nothing is returned -- confirm against the
# original source.
n = FakeDirectoryNode(self)
d.addCallback(lambda res: n)
83 class Dirnode(unittest.TestCase, testutil.ShouldFailMixin, testutil.StallMixin):
85 self.client = FakeClient()
88 d = self.client.create_empty_dirnode()
90 self.failUnless(isinstance(res, FakeDirectoryNode))
92 self.failUnless("RW" in rep)
def test_corrupt(self):
# Flip a bit in the stored (encrypted) writecap of a child entry and
# verify that reading the directory detects the corruption.
# NOTE(review): several interior lines of this test are missing from
# this excerpt ('dn', 'filenode', '_corrupt' and '_check2' are referenced
# but their defining lines are absent); confirm against the original.
d = self.client.create_empty_dirnode()
u = make_mutable_file_uri()
d = dn.set_uri(u"child", u, {})
d.addCallback(lambda res: dn.list())
def _check1(children):
self.failUnless(u"child" in children)
d.addCallback(_check1)
d.addCallback(lambda res:
self.shouldFail(KeyError, "get bogus", None,
si = IURI(filenode.get_uri()).storage_index
old_contents = filenode.all_contents[si]
# we happen to know that the writecap is encrypted near the
# end of the string. Flip one of its bits and make sure we
# detect the corruption.
new_contents = testutil.flip_bit(old_contents, -10)
# TODO: also test flipping bits in the other portions
filenode.all_contents[si] = new_contents
d.addCallback(_corrupt)
self.shouldFail(hashutil.IntegrityCheckError, "corrupt",
"HMAC does not match, crypttext is corrupted",
d.addCallback(_check2)
d.addCallback(_created)
def test_check(self):
# A freshly-created (empty) directory node should report itself healthy.
# NOTE(review): the callback wrapper (e.g. 'def _check_results(res):')
# and the trailing 'return d' appear to be missing from this excerpt;
# 'res' is unbound as shown.
d = self.client.create_empty_dirnode()
d.addCallback(lambda dn: dn.check())
self.failUnless(res.is_healthy())
def _test_deepcheck_create(self):
# create a small tree with a loop, and some non-directories
# root/subdir/link -> root
# Helper shared by the deep-check tests: builds root, root/subdir,
# root/subdir/file1, and root/subdir/link pointing back at root (a
# cycle), saving the nodes on self for later inspection.
# NOTE(review): interior lines appear to be missing -- the inner
# Deferred in _created_subdir is never returned, and the final
# 'return self._rootnode' is presumably inside a lost callback rather
# than at method level.  Confirm against the original source.
d = self.client.create_empty_dirnode()
def _created_root(rootnode):
self._rootnode = rootnode
return rootnode.create_empty_directory(u"subdir")
d.addCallback(_created_root)
def _created_subdir(subdir):
self._subdir = subdir
d = subdir.add_file(u"file1", upload.Data("data", None))
d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
d.addCallback(_created_subdir)
return self._rootnode
def test_deepcheck(self):
# Deep-check the 3-object tree from _test_deepcheck_create() and verify
# the aggregate counters; the root/subdir/link cycle must be traversed
# exactly once (3 objects checked, not an infinite loop).
# NOTE(review): a line binding 'c' (presumably 'c = r.get_counters()'),
# the dict's closing entries, and the trailing 'return d' appear to be
# missing from this excerpt.
d = self._test_deepcheck_create()
d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
def _check_results(r):
self.failUnless(IDeepCheckResults.providedBy(r))
self.failUnlessEqual(c,
{"count-objects-checked": 3,
"count-objects-healthy": 3,
"count-objects-unhealthy": 0,
"count-corrupt-shares": 0,
self.failIf(r.get_corrupt_shares())
self.failUnlessEqual(len(r.get_all_results()), 3)
d.addCallback(_check_results)
def test_deepcheck_and_repair(self):
# Deep check-and-repair over the healthy 3-object tree: everything is
# healthy both before and after, and no repairs are attempted.
# NOTE(review): a line binding 'c' (presumably 'c = r.get_counters()'),
# the dict's closing lines, and the trailing 'return d' appear to be
# missing from this excerpt.
d = self._test_deepcheck_create()
d.addCallback(lambda rootnode:
rootnode.start_deep_check_and_repair().when_done())
def _check_results(r):
self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
self.failUnlessEqual(c,
{"count-objects-checked": 3,
"count-objects-healthy-pre-repair": 3,
"count-objects-unhealthy-pre-repair": 0,
"count-corrupt-shares-pre-repair": 0,
"count-objects-healthy-post-repair": 3,
"count-objects-unhealthy-post-repair": 0,
"count-corrupt-shares-post-repair": 0,
"count-repairs-attempted": 0,
"count-repairs-successful": 0,
"count-repairs-unsuccessful": 0,
self.failIf(r.get_corrupt_shares())
self.failIf(r.get_remaining_corrupt_shares())
self.failUnlessEqual(len(r.get_all_results()), 3)
d.addCallback(_check_results)
def _mark_file_bad(self, rootnode):
    # Flag the root's mutable-file share as unhealthy so a subsequent
    # deep-check reports exactly one unhealthy object (see
    # test_deepcheck_problems, which expects 2 healthy / 1 unhealthy).
    si = IURI(rootnode.get_uri())._filenode_uri.storage_index
    rootnode._node.bad_shares[si] = "unhealthy"
    # FIX: return the node so the Deferred callback chain keeps working:
    # test_deepcheck_problems' next callback invokes
    # rootnode.start_deep_check() on this method's return value, which
    # would otherwise be None.
    return rootnode
def test_deepcheck_problems(self):
# Mark one object bad, then deep-check: expect 2 healthy / 1 unhealthy.
# NOTE(review): a line binding 'c' (presumably 'c = r.get_counters()'),
# the dict's closing lines, and the trailing 'return d' appear to be
# missing from this excerpt.
d = self._test_deepcheck_create()
d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
def _check_results(r):
self.failUnlessEqual(c,
{"count-objects-checked": 3,
"count-objects-healthy": 2,
"count-objects-unhealthy": 1,
"count-corrupt-shares": 0,
#self.failUnlessEqual(len(r.get_problems()), 1) # TODO
d.addCallback(_check_results)
def test_readonly(self):
# Derive a read-only cap from a writable dirnode and verify that every
# mutating method on the read-only view raises NotMutableError, while
# listing the children still works.
# NOTE(review): the '_created' and '_ready' callback 'def' lines appear
# to be missing from this excerpt ('rw_dn' is unbound as shown), along
# with the trailing 'return d'.
# NOTE(review): several shouldFail labels below say "set_uri ro" for
# operations that are not set_uri (set_node, add_file, delete,
# create_empty_directory, move_child_to) -- apparent copy-paste labels;
# harmless at runtime but worth tidying upstream.
fileuri = make_chk_file_uri(1234)
filenode = self.client.create_node_from_uri(fileuri)
uploadable = upload.Data("some data", convergence="some convergence string")
d = self.client.create_empty_dirnode()
d2 = rw_dn.set_uri(u"child", fileuri)
d2.addCallback(lambda res: rw_dn)
d.addCallback(_created)
ro_uri = rw_dn.get_readonly_uri()
ro_dn = self.client.create_node_from_uri(ro_uri)
self.failUnless(ro_dn.is_readonly())
self.failUnless(ro_dn.is_mutable())
self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
ro_dn.set_uri, u"newchild", fileuri)
self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
ro_dn.set_node, u"newchild", filenode)
self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
ro_dn.set_nodes, [ (u"newchild", filenode) ])
self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
ro_dn.add_file, u"newchild", uploadable)
self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
ro_dn.delete, u"child")
self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
ro_dn.create_empty_directory, u"newchild")
self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
ro_dn.set_metadata_for, u"child", {})
self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
ro_dn.move_child_to, u"child", rw_dn)
self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
rw_dn.move_child_to, u"child", ro_dn)
d.addCallback(_ready)
def _listed(children):
self.failUnless(u"child" in children)
d.addCallback(_listed)
def failUnlessGreaterThan(self, a, b):
    """Assert that *a* is strictly greater than *b*."""
    message = "%r should be > %r" % (a, b)
    self.failUnless(a > b, message)
def failUnlessGreaterOrEqualThan(self, a, b):
    """Assert that *a* is greater than or equal to *b*."""
    message = "%r should be >= %r" % (a, b)
    self.failUnless(a >= b, message)
def test_create(self):
# End-to-end exercise of dirnode behavior against the fake client:
# caps/verifiers, children via set_uri/set_node/set_children/set_nodes,
# metadata defaults and overrides, manifests, deep-stats, timestamps,
# delete, add_file, and move_child_to (with and without overwrite).
# NOTE(review): this excerpt is missing many interior lines (several
# callback 'def' headers such as '_created'/'_start', dict closers, and
# continuation lines), so it does not parse as shown; 'n', 'time', and
# 'self.subdir' are unbound at the points they first appear.  The
# comments below annotate only the surviving structure.
self.expected_manifest = []
d = self.client.create_empty_dirnode()
self.failUnless(n.is_mutable())
self.failUnless(u.startswith("URI:DIR2:"), u)
u_ro = n.get_readonly_uri()
self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
u_v = n.get_verifier().to_string()
self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
self.expected_manifest.append( ((), u) )
expected_si = n._uri._filenode_uri.storage_index
self.failUnlessEqual(n.get_storage_index(), expected_si)
d.addCallback(lambda res: self.failUnlessEqual(res, {}))
d.addCallback(lambda res: n.has_child(u"missing"))
d.addCallback(lambda res: self.failIf(res))
fake_file_uri = make_mutable_file_uri()
other_file_uri = make_mutable_file_uri()
m = Marker(fake_file_uri)
ffu_v = m.get_verifier().to_string()
self.expected_manifest.append( ((u"child",) , m.get_uri()) )
d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri))
# adding the same child a second time (without overwrite) must fail
d.addCallback(lambda res:
self.shouldFail(ExistingChildError, "set_uri-no",
"child 'child' already exists",
n.set_uri, u"child", other_file_uri,
d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
# /subdir = directory
def _created(subdir):
self.failUnless(isinstance(subdir, FakeDirectoryNode))
new_v = subdir.get_verifier().to_string()
assert isinstance(new_v, str)
self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
d.addCallback(_created)
d.addCallback(lambda res:
self.shouldFail(ExistingChildError, "mkdir-no",
"child 'subdir' already exists",
n.create_empty_directory, u"subdir",
d.addCallback(lambda res: n.list())
d.addCallback(lambda children:
self.failUnlessEqual(sorted(children.keys()),
sorted([u"child", u"subdir"])))
d.addCallback(lambda res: n.build_manifest().when_done())
def _check_manifest(manifest):
self.failUnlessEqual(sorted(manifest),
sorted(self.expected_manifest))
d.addCallback(_check_manifest)
d.addCallback(lambda res: n.start_deep_stats().when_done())
def _check_deepstats(stats):
self.failUnless(isinstance(stats, dict))
expected = {"count-immutable-files": 0,
"count-mutable-files": 1,
"count-literal-files": 0,
"count-directories": 2,
"size-immutable-files": 0,
"size-literal-files": 0,
#"size-directories": 616, # varies
#"largest-directory": 616,
"largest-directory-children": 2,
"largest-immutable-file": 0,
for k,v in expected.iteritems():
self.failUnlessEqual(stats[k], v,
"stats[%s] was %s, not %s" %
self.failUnless(stats["size-directories"] > 500,
stats["size-directories"])
self.failUnless(stats["largest-directory"] > 500,
stats["largest-directory"])
self.failUnlessEqual(stats["size-files-histogram"], [])
d.addCallback(_check_deepstats)
def _add_subsubdir(res):
return self.subdir.create_empty_directory(u"subsubdir")
d.addCallback(_add_subsubdir)
# /subdir = directory
# /subdir/subsubdir = directory
d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
d.addCallback(lambda subsubdir:
self.failUnless(isinstance(subsubdir,
d.addCallback(lambda res: n.get_child_at_path(u""))
d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
d.addCallback(lambda res: n.get_metadata_for(u"child"))
d.addCallback(lambda metadata:
self.failUnlessEqual(sorted(metadata.keys()),
d.addCallback(lambda res:
self.shouldFail(KeyError, "gcamap-no",
n.get_child_and_metadata_at_path,
d.addCallback(lambda res:
n.get_child_and_metadata_at_path(u""))
def _check_child_and_metadata1(res):
child, metadata = res
self.failUnless(isinstance(child, FakeDirectoryNode))
# edge-metadata needs at least one path segment
self.failUnlessEqual(sorted(metadata.keys()), [])
d.addCallback(_check_child_and_metadata1)
d.addCallback(lambda res:
n.get_child_and_metadata_at_path(u"child"))
def _check_child_and_metadata2(res):
child, metadata = res
self.failUnlessEqual(child.get_uri(),
fake_file_uri.to_string())
self.failUnlessEqual(sorted(metadata.keys()),
d.addCallback(_check_child_and_metadata2)
d.addCallback(lambda res:
n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
def _check_child_and_metadata3(res):
child, metadata = res
self.failUnless(isinstance(child, FakeDirectoryNode))
self.failUnlessEqual(sorted(metadata.keys()),
d.addCallback(_check_child_and_metadata3)
# set_uri + metadata
# it should be possible to add a child without any metadata
d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, {}))
d.addCallback(lambda res: n.get_metadata_for(u"c2"))
d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
# if we don't set any defaults, the child should get timestamps
d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri))
d.addCallback(lambda res: n.get_metadata_for(u"c3"))
d.addCallback(lambda metadata:
self.failUnlessEqual(sorted(metadata.keys()),
# or we can add specific metadata at set_uri() time, which
# overrides the timestamps
d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri,
d.addCallback(lambda res: n.get_metadata_for(u"c4"))
d.addCallback(lambda metadata:
self.failUnlessEqual(metadata, {"key": "value"}))
d.addCallback(lambda res: n.delete(u"c2"))
d.addCallback(lambda res: n.delete(u"c3"))
d.addCallback(lambda res: n.delete(u"c4"))
# set_node + metadata
# it should be possible to add a child without any metadata
d.addCallback(lambda res: n.set_node(u"d2", n, {}))
d.addCallback(lambda res: self.client.create_empty_dirnode())
d.addCallback(lambda n2:
self.shouldFail(ExistingChildError, "set_node-no",
"child 'd2' already exists",
n.set_node, u"d2", n2,
d.addCallback(lambda res: n.get_metadata_for(u"d2"))
d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
# if we don't set any defaults, the child should get timestamps
d.addCallback(lambda res: n.set_node(u"d3", n))
d.addCallback(lambda res: n.get_metadata_for(u"d3"))
d.addCallback(lambda metadata:
self.failUnlessEqual(sorted(metadata.keys()),
# or we can add specific metadata at set_node() time, which
# overrides the timestamps
d.addCallback(lambda res: n.set_node(u"d4", n,
d.addCallback(lambda res: n.get_metadata_for(u"d4"))
d.addCallback(lambda metadata:
self.failUnlessEqual(metadata, {"key": "value"}))
d.addCallback(lambda res: n.delete(u"d2"))
d.addCallback(lambda res: n.delete(u"d3"))
d.addCallback(lambda res: n.delete(u"d4"))
# metadata through set_children()
d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri),
(u"e2", fake_file_uri, {}),
(u"e3", fake_file_uri,
d.addCallback(lambda res:
self.shouldFail(ExistingChildError, "set_children-no",
"child 'e1' already exists",
[ (u"e1", other_file_uri),
(u"new", other_file_uri), ],
# and 'new' should not have been created
d.addCallback(lambda res: n.list())
d.addCallback(lambda children: self.failIf(u"new" in children))
d.addCallback(lambda res: n.get_metadata_for(u"e1"))
d.addCallback(lambda metadata:
self.failUnlessEqual(sorted(metadata.keys()),
d.addCallback(lambda res: n.get_metadata_for(u"e2"))
d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
d.addCallback(lambda res: n.get_metadata_for(u"e3"))
d.addCallback(lambda metadata:
self.failUnlessEqual(metadata, {"key": "value"}))
d.addCallback(lambda res: n.delete(u"e1"))
d.addCallback(lambda res: n.delete(u"e2"))
d.addCallback(lambda res: n.delete(u"e3"))
# metadata through set_nodes()
d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
d.addCallback(lambda res:
self.shouldFail(ExistingChildError, "set_nodes-no",
"child 'f1' already exists",
# and 'new' should not have been created
d.addCallback(lambda res: n.list())
d.addCallback(lambda children: self.failIf(u"new" in children))
d.addCallback(lambda res: n.get_metadata_for(u"f1"))
d.addCallback(lambda metadata:
self.failUnlessEqual(sorted(metadata.keys()),
d.addCallback(lambda res: n.get_metadata_for(u"f2"))
d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
d.addCallback(lambda res: n.get_metadata_for(u"f3"))
d.addCallback(lambda metadata:
self.failUnlessEqual(metadata, {"key": "value"}))
d.addCallback(lambda res: n.delete(u"f1"))
d.addCallback(lambda res: n.delete(u"f2"))
d.addCallback(lambda res: n.delete(u"f3"))
# replacing metadata on an existing child via set_metadata_for()
d.addCallback(lambda res:
n.set_metadata_for(u"child",
{"tags": ["web2.0-compatible"]}))
d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
d.addCallback(lambda metadata:
self.failUnlessEqual(metadata,
{"tags": ["web2.0-compatible"]}))
# timestamp handling
# NOTE(review): 'def _start(res):' appears to be missing above the next
# line, and 'import time' is not present in this excerpt's import block.
self._start_timestamp = time.time()
d.addCallback(_start)
# simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
# floats to hundredths (it uses str(num) instead of repr(num)).
# simplejson-1.7.3 does not have this bug. To prevent this bug
# from causing the test to fail, stall for more than a few
# hundredths of a second.
d.addCallback(self.stall, 0.1)
d.addCallback(lambda res: n.add_file(u"timestamps",
upload.Data("stamp me", convergence="some convergence string")))
d.addCallback(self.stall, 0.1)
self._stop_timestamp = time.time()
d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
def _check_timestamp1(metadata):
self.failUnless("ctime" in metadata)
self.failUnless("mtime" in metadata)
self.failUnlessGreaterOrEqualThan(metadata["ctime"],
self._start_timestamp)
self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
self.failUnlessGreaterOrEqualThan(metadata["mtime"],
self._start_timestamp)
self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
# Our current timestamp rules say that replacing an existing
# child should preserve the 'ctime' but update the mtime
self._old_ctime = metadata["ctime"]
self._old_mtime = metadata["mtime"]
d.addCallback(_check_timestamp1)
d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
d.addCallback(lambda res: n.set_node(u"timestamps", n))
d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
def _check_timestamp2(metadata):
self.failUnlessEqual(metadata["ctime"], self._old_ctime,
"%s != %s" % (metadata["ctime"],
self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
return n.delete(u"timestamps")
d.addCallback(_check_timestamp2)
# also make sure we can add/update timestamps on a
# previously-existing child that didn't have any, since there are
# a lot of 0.7.0-generated edges around out there
d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
d.addCallback(lambda metadata:
self.failUnlessEqual(sorted(metadata.keys()),
d.addCallback(lambda res: n.delete(u"no_timestamps"))
# delete() returns the old child node
d.addCallback(lambda res: n.delete(u"subdir"))
d.addCallback(lambda old_child:
self.failUnlessEqual(old_child.get_uri(),
self.subdir.get_uri()))
d.addCallback(lambda res: n.list())
d.addCallback(lambda children:
self.failUnlessEqual(sorted(children.keys()),
# add_file: convenience upload-and-link
uploadable = upload.Data("some data", convergence="some convergence string")
d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
d.addCallback(lambda newnode:
self.failUnless(IFileNode.providedBy(newnode)))
other_uploadable = upload.Data("some data", convergence="stuff")
d.addCallback(lambda res:
self.shouldFail(ExistingChildError, "add_file-no",
"child 'newfile' already exists",
n.add_file, u"newfile",
d.addCallback(lambda res: n.list())
d.addCallback(lambda children:
self.failUnlessEqual(sorted(children.keys()),
sorted([u"child", u"newfile"])))
d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
d.addCallback(lambda metadata:
self.failUnlessEqual(sorted(metadata.keys()),
d.addCallback(lambda res: n.add_file(u"newfile-metadata",
d.addCallback(lambda newnode:
self.failUnless(IFileNode.providedBy(newnode)))
d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
d.addCallback(lambda metadata:
self.failUnlessEqual(metadata, {"key": "value"}))
d.addCallback(lambda res: n.delete(u"newfile-metadata"))
# move_child_to: default overwrite semantics
d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
def _created2(subdir2):
self.subdir2 = subdir2
# put something in the way, to make sure it gets overwritten
return subdir2.add_file(u"child", upload.Data("overwrite me",
d.addCallback(_created2)
d.addCallback(lambda res:
n.move_child_to(u"child", self.subdir2))
d.addCallback(lambda res: n.list())
d.addCallback(lambda children:
self.failUnlessEqual(sorted(children.keys()),
sorted([u"newfile", u"subdir2"])))
d.addCallback(lambda res: self.subdir2.list())
d.addCallback(lambda children:
self.failUnlessEqual(sorted(children.keys()),
d.addCallback(lambda res: self.subdir2.get(u"child"))
d.addCallback(lambda child:
self.failUnlessEqual(child.get_uri(),
fake_file_uri.to_string()))
# move it back, using new_child_name=
d.addCallback(lambda res:
self.subdir2.move_child_to(u"child", n, u"newchild"))
d.addCallback(lambda res: n.list())
d.addCallback(lambda children:
self.failUnlessEqual(sorted(children.keys()),
sorted([u"newchild", u"newfile",
d.addCallback(lambda res: self.subdir2.list())
d.addCallback(lambda children:
self.failUnlessEqual(sorted(children.keys()), []))
# now make sure that we honor overwrite=False
d.addCallback(lambda res:
self.subdir2.set_uri(u"newchild", other_file_uri))
d.addCallback(lambda res:
self.shouldFail(ExistingChildError, "move_child_to-no",
"child 'newchild' already exists",
n.move_child_to, u"newchild",
d.addCallback(lambda res: self.subdir2.get(u"newchild"))
d.addCallback(lambda child:
self.failUnlessEqual(child.get_uri(),
other_file_uri.to_string()))
691 class DeepStats(unittest.TestCase):
692 def test_stats(self):
693 ds = dirnode.DeepStats(None)
694 ds.add("count-files")
695 ds.add("size-immutable-files", 123)
696 ds.histogram("size-files-histogram", 123)
697 ds.max("largest-directory", 444)
700 self.failUnlessEqual(s["count-files"], 1)
701 self.failUnlessEqual(s["size-immutable-files"], 123)
702 self.failUnlessEqual(s["largest-directory"], 444)
703 self.failUnlessEqual(s["count-literal-files"], 0)
705 ds.add("count-files")
706 ds.add("size-immutable-files", 321)
707 ds.histogram("size-files-histogram", 321)
708 ds.max("largest-directory", 2)
711 self.failUnlessEqual(s["count-files"], 2)
712 self.failUnlessEqual(s["size-immutable-files"], 444)
713 self.failUnlessEqual(s["largest-directory"], 444)
714 self.failUnlessEqual(s["count-literal-files"], 0)
715 self.failUnlessEqual(s["size-files-histogram"],
716 [ (101, 316, 1), (317, 1000, 1) ])
718 ds = dirnode.DeepStats(None)
719 for i in range(1, 1100):
720 ds.histogram("size-files-histogram", i)
721 ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
723 self.failUnlessEqual(s["size-files-histogram"],
731 (3162277660169L, 10000000000000L, 1),