3 from zope.interface import implements
4 from twisted.trial import unittest
5 from twisted.internet import defer
6 from allmydata import uri, dirnode
7 from allmydata.immutable import upload
8 from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
9 INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
10 ExistingChildError, IDeepCheckResults, IDeepCheckAndRepairResults
11 from allmydata.util import hashutil, testutil
12 from allmydata.monitor import Monitor
13 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
14 FakeDirectoryNode, create_chk_filenode
15 from allmydata.checker_results import CheckerResults, CheckAndRepairResults
17 # to test dirnode.py, we want to construct a tree of real DirectoryNodes that
18 # contain pointers to fake files. We start with a fake MutableFileNode that
19 # stores all of its data in a static table.
# Marker: a stand-in child node used by these tests. It declares both
# IFileNode and IMutableFileNode (zope.interface style).
# NOTE(review): the `class Marker...` header line (orig ~20-21) is missing
# from this excerpt, as is all leading indentation.
22 implements(IFileNode, IMutableFileNode) # sure, why not
# Accept either a URI string or a URI object (anything with to_string());
# normalize to a string, then derive the verifier cap from it.
23 def __init__(self, nodeuri):
24 if not isinstance(nodeuri, str):
25 nodeuri = nodeuri.to_string()
26 self.nodeuri = nodeuri
# storage index = first 16 bytes of the "tag1" tagged hash; fingerprint is
# the full "tag2" tagged hash of the same URI string.
27 si = hashutil.tagged_hash("tag1", nodeuri)[:16]
28 fp = hashutil.tagged_hash("tag2", nodeuri)
29 self.verifieruri = uri.SSKVerifierURI(storage_index=si, fingerprint=fp)
# NOTE(review): the body of get_readonly_uri (orig line 33, presumably
# `return self.nodeuri`) is missing from this excerpt — confirm upstream.
32 def get_readonly_uri(self):
# Return the SSKVerifierURI object built in __init__.
34 def get_verifier(self):
35 return self.verifieruri
# Fake check(): always reports a result immediately via a fired Deferred.
# NOTE(review): orig line 39 is missing here — presumably it marks `r` as
# healthy (e.g. r.set_healthy(True)); verify against upstream.
37 def check(self, verify=False):
38 r = CheckerResults(None)
40 return defer.succeed(r)
# Fake check_and_repair(): wraps check() and reports the same results for
# both pre- and post-repair (no repair is ever attempted).
# NOTE(review): orig line 44 (presumably `def _got(cr):`, binding the free
# variable `cr` used below) and orig lines 47-50 (attaching the callback and
# returning d) are missing from this excerpt.
42 def check_and_repair(self, verify=False):
43 d = self.check(verify)
45 r = CheckAndRepairResults(None)
46 r.pre_repair_results = r.post_repair_results = cr
51 # dirnode requires three methods from the client: upload(),
52 # create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
53 # only used by the convenience composite method add_file().
# Fake upload(): reads the whole uploadable into memory, stores it as a fake
# CHK filenode, and returns UploadResults carrying the new node's URI.
# NOTE(review): orig lines 61-62 (presumably `def _got_data(datav):` plus
# `data = "".join(datav)`), orig 66 (`return results`) and orig 68
# (`return d`) are missing from this excerpt; the `FakeClient` class header
# (orig ~54-57) is also not visible.
58 def upload(self, uploadable):
59 d = uploadable.get_size()
60 d.addCallback(lambda size: uploadable.read(size))
63 n = create_chk_filenode(self, data)
64 results = upload.UploadResults()
65 results.uri = n.get_uri()
67 d.addCallback(_got_data)
# Dispatch on cap type: directory caps (read-write or read-only) become
# FakeDirectoryNodes; anything else becomes a Marker stand-in.
# NOTE(review): orig line 71 is missing — presumably `u = IURI(u)` to adapt
# a string cap to a URI object; confirm upstream.
70 def create_node_from_uri(self, u):
72 if (INewDirectoryURI.providedBy(u)
73 or IReadonlyNewDirectoryURI.providedBy(u)):
74 return FakeDirectoryNode(self).init_from_uri(u)
75 return Marker(u.to_string())
# Create a fresh FakeDirectoryNode and fire it back through a Deferred.
# NOTE(review): orig line 79 (presumably `d = n.create()`) and orig 81
# (`return d`) are missing from this excerpt.
77 def create_empty_dirnode(self):
78 n = FakeDirectoryNode(self)
80 d.addCallback(lambda res: n)
# Dirnode: trial-based tests for allmydata.dirnode, driven entirely by the
# fakes above. ShouldFailMixin supplies self.shouldFail(); StallMixin
# supplies self.stall() used by the timestamp tests below.
84 class Dirnode(unittest.TestCase, testutil.ShouldFailMixin, testutil.StallMixin):
# NOTE(review): the `def setUp(self):` line (orig 85) and the surrounding
# basic-creation test's def/callback lines (orig 87-88, 90, 92, 94-96) are
# missing from this excerpt; these are orphaned fragments of setUp and a
# create-and-repr test ("RW" expected in the node's repr).
86 self.client = FakeClient()
89 d = self.client.create_empty_dirnode()
91 self.failUnless(isinstance(res, FakeDirectoryNode))
93 self.failUnless("RW" in rep)
# test_corrupt: add a mutable child, flip a bit near the end of the stored
# ciphertext (where the encrypted writecap lives), and assert that listing
# the directory then fails the integrity check with the expected HMAC error.
# NOTE(review): several structural lines are missing from this excerpt —
# orig 99 (`def _created(dn):`), 108-110 (the shouldFail tail and
# `def _corrupt(res):`), 120 (`def _check2(res):`-style wrapper), 123, 125,
# 127-128 (the shouldFail call target, `return d`). Confirm upstream.
97 def test_corrupt(self):
98 d = self.client.create_empty_dirnode()
100 u = make_mutable_file_uri()
101 d = dn.set_uri(u"child", u, {})
102 d.addCallback(lambda res: dn.list())
103 def _check1(children):
104 self.failUnless(u"child" in children)
105 d.addCallback(_check1)
106 d.addCallback(lambda res:
107 self.shouldFail(KeyError, "get bogus", None,
# _corrupt body: locate the stored share by storage index and flip one bit
# 10 bytes from the end, inside the encrypted writecap region.
111 si = IURI(filenode.get_uri()).storage_index
112 old_contents = filenode.all_contents[si]
113 # we happen to know that the writecap is encrypted near the
114 # end of the string. Flip one of its bits and make sure we
115 # detect the corruption.
116 new_contents = testutil.flip_bit(old_contents, -10)
117 # TODO: also test flipping bits in the other portions
118 filenode.all_contents[si] = new_contents
119 d.addCallback(_corrupt)
121 self.shouldFail(hashutil.IntegrityCheckError, "corrupt",
122 "HMAC does not match, crypttext is corrupted",
124 d.addCallback(_check2)
126 d.addCallback(_created)
# test_check: a freshly created directory should check as healthy.
# NOTE(review): orig 132 (`def _done(res):`-style wrapper) and 134-135
# (callback attach / `return d`) are missing from this excerpt.
129 def test_check(self):
130 d = self.client.create_empty_dirnode()
131 d.addCallback(lambda dn: dn.check(Monitor()))
133 self.failUnless(res.is_healthy())
# Shared fixture for the deep-check tests: builds
#   root/
#   root/subdir/
#   root/subdir/file1
#   root/subdir/link -> root    (a deliberate cycle)
# and returns (via Deferred) the root node, stashing it on self._rootnode.
# NOTE(review): orig 139-141 (comment lines listing the tree), 152
# (`return d` inside _created_subdir) and 154 (final addCallback returning
# the root) are missing from this excerpt.
137 def _test_deepcheck_create(self):
138 # create a small tree with a loop, and some non-directories
142 # root/subdir/link -> root
143 d = self.client.create_empty_dirnode()
144 def _created_root(rootnode):
145 self._rootnode = rootnode
146 return rootnode.create_empty_directory(u"subdir")
147 d.addCallback(_created_root)
148 def _created_subdir(subdir):
149 self._subdir = subdir
150 d = subdir.add_file(u"file1", upload.Data("data", None))
151 d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
153 d.addCallback(_created_subdir)
155 return self._rootnode
# test_deepcheck: deep-check the 3-object tree (root, subdir, file1 — the
# cycle back to root must be visited only once) and verify the counters.
# NOTE(review): orig 164 (`c = r.get_counters()`-style line), 170 (closing
# brace of the expected dict) and 174-175 (`return d`) are missing.
159 def test_deepcheck(self):
160 d = self._test_deepcheck_create()
161 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
162 def _check_results(r):
163 self.failUnless(IDeepCheckResults.providedBy(r))
165 self.failUnlessEqual(c,
166 {"count-objects-checked": 3,
167 "count-objects-healthy": 3,
168 "count-objects-unhealthy": 0,
169 "count-corrupt-shares": 0,
171 self.failIf(r.get_corrupt_shares())
172 self.failUnlessEqual(len(r.get_all_results()), 3)
173 d.addCallback(_check_results)
# test_deepcheck_and_repair: same traversal as test_deepcheck but through
# the check-and-repair path; everything is healthy, so every repair counter
# must be zero and pre-/post-repair counts identical.
# NOTE(review): orig 182 (`c = r.get_counters()`-style line), 194 (closing
# brace of the expected dict) and 199-200 (`return d`) are missing.
176 def test_deepcheck_and_repair(self):
177 d = self._test_deepcheck_create()
178 d.addCallback(lambda rootnode:
179 rootnode.start_deep_check_and_repair().when_done())
180 def _check_results(r):
181 self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
183 self.failUnlessEqual(c,
184 {"count-objects-checked": 3,
185 "count-objects-healthy-pre-repair": 3,
186 "count-objects-unhealthy-pre-repair": 0,
187 "count-corrupt-shares-pre-repair": 0,
188 "count-objects-healthy-post-repair": 3,
189 "count-objects-unhealthy-post-repair": 0,
190 "count-corrupt-shares-post-repair": 0,
191 "count-repairs-attempted": 0,
192 "count-repairs-successful": 0,
193 "count-repairs-unsuccessful": 0,
195 self.failIf(r.get_corrupt_shares())
196 self.failIf(r.get_remaining_corrupt_shares())
197 self.failUnlessEqual(len(r.get_all_results()), 3)
198 d.addCallback(_check_results)
# Poke directly into the fake storage to mark the root node's share as
# "unhealthy", so a subsequent deep-check sees one unhealthy object.
# NOTE(review): orig 204 (presumably `return rootnode`) is missing from this
# excerpt — the caller in test_deepcheck_problems chains
# `.addCallback(lambda rootnode: rootnode.start_deep_check()...)`, which
# requires this method to return the node. Confirm upstream.
201 def _mark_file_bad(self, rootnode):
202 si = IURI(rootnode.get_uri())._filenode_uri.storage_index
203 rootnode._node.bad_shares[si] = "unhealthy"
# test_deepcheck_problems: mark the root bad, deep-check, and expect exactly
# one unhealthy object out of three.
# NOTE(review): orig 211 (`c = r.get_counters()`-style line), 217 (closing
# brace of the expected dict) and 220-221 (`return d`) are missing.
206 def test_deepcheck_problems(self):
207 d = self._test_deepcheck_create()
208 d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
209 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
210 def _check_results(r):
212 self.failUnlessEqual(c,
213 {"count-objects-checked": 3,
214 "count-objects-healthy": 2,
215 "count-objects-unhealthy": 1,
216 "count-corrupt-shares": 0,
218 #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
219 d.addCallback(_check_results)
# test_readonly: build a read-write directory with one child, derive its
# read-only cap, and verify that every mutating operation on the read-only
# view (and moving INTO a read-only target) raises NotMutableError, while
# listing the child still works.
# NOTE(review): orig 226, 228, 231, 233-234 (`def _created(...)` /
# `def _ready(...)` wrappers), 239, and 258 (`return ro_dn.list()` or
# similar feeding _listed) are missing from this excerpt.
# NOTE(review): the shouldFail label "set_uri ro" is copy-pasted onto the
# set_node/add_file/delete/create_empty_directory/move_child_to cases —
# misleading failure messages; should be distinct labels per operation.
222 def test_readonly(self):
223 fileuri = make_chk_file_uri(1234)
224 filenode = self.client.create_node_from_uri(fileuri)
225 uploadable = upload.Data("some data", convergence="some convergence string")
227 d = self.client.create_empty_dirnode()
229 d2 = rw_dn.set_uri(u"child", fileuri)
230 d2.addCallback(lambda res: rw_dn)
232 d.addCallback(_created)
235 ro_uri = rw_dn.get_readonly_uri()
236 ro_dn = self.client.create_node_from_uri(ro_uri)
237 self.failUnless(ro_dn.is_readonly())
238 self.failUnless(ro_dn.is_mutable())
240 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
241 ro_dn.set_uri, u"newchild", fileuri)
242 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
243 ro_dn.set_node, u"newchild", filenode)
244 self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
245 ro_dn.set_nodes, [ (u"newchild", filenode) ])
246 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
247 ro_dn.add_file, u"newchild", uploadable)
248 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
249 ro_dn.delete, u"child")
250 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
251 ro_dn.create_empty_directory, u"newchild")
252 self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
253 ro_dn.set_metadata_for, u"child", {})
# moving a child INTO a read-only directory must also fail
254 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
255 ro_dn.move_child_to, u"child", rw_dn)
256 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
257 rw_dn.move_child_to, u"child", ro_dn)
259 d.addCallback(_ready)
260 def _listed(children):
261 self.failUnless(u"child" in children)
262 d.addCallback(_listed)
def failUnlessGreaterThan(self, a, b):
    """Assert that a is strictly greater than b."""
    message = "%r should be > %r" % (a, b)
    self.failUnless(a > b, message)
def failUnlessGreaterOrEqualThan(self, a, b):
    """Assert that a is greater than or equal to b."""
    message = "%r should be >= %r" % (a, b)
    self.failUnless(a >= b, message)
# test_create: the big end-to-end exercise of the directory-node API —
# cap formats, manifest, deep-stats, child lookup by path, metadata
# semantics for set_uri/set_node/set_children/set_nodes/add_file,
# timestamp (ctime/mtime) rules, delete, and move_child_to.
# NOTE(review): many interior lines are missing from this excerpt (e.g.
# orig 273, 275-276, 278-279, 288-289, 303-306, 308-310, 314, 319, 324-325,
# 330, 336, 343, 351, 355, 362, 366-367, 373, 376-377, 381-382, 385, 387,
# 398, 404, 406, 413, 415-416, 421, 427-428, 432, 436, 440, 449, 452,
# 458-459, 463, 467, 471, 476-477, 481, 484, 491, 497, 501, 504-507,
# 511-514, 521, 527, 531-532, 540-541, 553, 555-556, 564, 568, 580, 584,
# 593, 595, 600, 604-605, 615-616, 624-625, 627-628, 635, 641, 643, 653,
# 658, 666, 670, 674, 679-680, 685-691) — several `def` lines, dict tails,
# and `return d` are among them. Do not treat this excerpt as runnable.
271 def test_create(self):
272 self.expected_manifest = []
274 d = self.client.create_empty_dirnode()
# cap-format checks: write cap, read-only cap, verifier cap prefixes
277 self.failUnless(n.is_mutable())
280 self.failUnless(u.startswith("URI:DIR2:"), u)
281 u_ro = n.get_readonly_uri()
282 self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
283 u_v = n.get_verifier().to_string()
284 self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
285 self.expected_manifest.append( ((), u) )
286 expected_si = n._uri._filenode_uri.storage_index
287 self.failUnlessEqual(n.get_storage_index(), expected_si)
# a fresh directory lists empty and has no children
290 d.addCallback(lambda res: self.failUnlessEqual(res, {}))
291 d.addCallback(lambda res: n.has_child(u"missing"))
292 d.addCallback(lambda res: self.failIf(res))
293 fake_file_uri = make_mutable_file_uri()
294 other_file_uri = make_mutable_file_uri()
295 m = Marker(fake_file_uri)
296 ffu_v = m.get_verifier().to_string()
297 self.expected_manifest.append( ((u"child",) , m.get_uri()) )
298 d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri))
# overwrite=False must refuse to replace an existing child
299 d.addCallback(lambda res:
300 self.shouldFail(ExistingChildError, "set_uri-no",
301 "child 'child' already exists",
302 n.set_uri, u"child", other_file_uri,
307 d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
311 # /subdir = directory
312 def _created(subdir):
313 self.failUnless(isinstance(subdir, FakeDirectoryNode))
315 new_v = subdir.get_verifier().to_string()
316 assert isinstance(new_v, str)
317 self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
318 d.addCallback(_created)
320 d.addCallback(lambda res:
321 self.shouldFail(ExistingChildError, "mkdir-no",
322 "child 'subdir' already exists",
323 n.create_empty_directory, u"subdir",
326 d.addCallback(lambda res: n.list())
327 d.addCallback(lambda children:
328 self.failUnlessEqual(sorted(children.keys()),
329 sorted([u"child", u"subdir"])))
# manifest must cover root, child, and subdir (order-insensitive)
331 d.addCallback(lambda res: n.build_manifest().when_done())
332 def _check_manifest(manifest):
333 self.failUnlessEqual(sorted(manifest),
334 sorted(self.expected_manifest))
335 d.addCallback(_check_manifest)
337 d.addCallback(lambda res: n.start_deep_stats().when_done())
338 def _check_deepstats(stats):
339 self.failUnless(isinstance(stats, dict))
340 expected = {"count-immutable-files": 0,
341 "count-mutable-files": 1,
342 "count-literal-files": 0,
344 "count-directories": 2,
345 "size-immutable-files": 0,
346 "size-literal-files": 0,
347 #"size-directories": 616, # varies
348 #"largest-directory": 616,
349 "largest-directory-children": 2,
350 "largest-immutable-file": 0,
352 for k,v in expected.iteritems():
353 self.failUnlessEqual(stats[k], v,
354 "stats[%s] was %s, not %s" %
356 self.failUnless(stats["size-directories"] > 500,
357 stats["size-directories"])
358 self.failUnless(stats["largest-directory"] > 500,
359 stats["largest-directory"])
360 self.failUnlessEqual(stats["size-files-histogram"], [])
361 d.addCallback(_check_deepstats)
363 def _add_subsubdir(res):
364 return self.subdir.create_empty_directory(u"subsubdir")
365 d.addCallback(_add_subsubdir)
368 # /subdir = directory
369 # /subdir/subsubdir = directory
370 d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
371 d.addCallback(lambda subsubdir:
372 self.failUnless(isinstance(subsubdir,
# an empty path resolves to the directory itself
374 d.addCallback(lambda res: n.get_child_at_path(u""))
375 d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
378 d.addCallback(lambda res: n.get_metadata_for(u"child"))
379 d.addCallback(lambda metadata:
380 self.failUnlessEqual(sorted(metadata.keys()),
383 d.addCallback(lambda res:
384 self.shouldFail(KeyError, "gcamap-no",
386 n.get_child_and_metadata_at_path,
388 d.addCallback(lambda res:
389 n.get_child_and_metadata_at_path(u""))
390 def _check_child_and_metadata1(res):
391 child, metadata = res
392 self.failUnless(isinstance(child, FakeDirectoryNode))
393 # edge-metadata needs at least one path segment
394 self.failUnlessEqual(sorted(metadata.keys()), [])
395 d.addCallback(_check_child_and_metadata1)
396 d.addCallback(lambda res:
397 n.get_child_and_metadata_at_path(u"child"))
399 def _check_child_and_metadata2(res):
400 child, metadata = res
401 self.failUnlessEqual(child.get_uri(),
402 fake_file_uri.to_string())
403 self.failUnlessEqual(sorted(metadata.keys()),
405 d.addCallback(_check_child_and_metadata2)
407 d.addCallback(lambda res:
408 n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
409 def _check_child_and_metadata3(res):
410 child, metadata = res
411 self.failUnless(isinstance(child, FakeDirectoryNode))
412 self.failUnlessEqual(sorted(metadata.keys()),
414 d.addCallback(_check_child_and_metadata3)
# --- set_uri + metadata semantics (children c2/c3/c4) ---
417 # it should be possible to add a child without any metadata
418 d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, {}))
419 d.addCallback(lambda res: n.get_metadata_for(u"c2"))
420 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
422 # if we don't set any defaults, the child should get timestamps
423 d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri))
424 d.addCallback(lambda res: n.get_metadata_for(u"c3"))
425 d.addCallback(lambda metadata:
426 self.failUnlessEqual(sorted(metadata.keys()),
429 # or we can add specific metadata at set_uri() time, which
430 # overrides the timestamps
431 d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri,
433 d.addCallback(lambda res: n.get_metadata_for(u"c4"))
434 d.addCallback(lambda metadata:
435 self.failUnlessEqual(metadata, {"key": "value"}))
437 d.addCallback(lambda res: n.delete(u"c2"))
438 d.addCallback(lambda res: n.delete(u"c3"))
439 d.addCallback(lambda res: n.delete(u"c4"))
# --- set_node + metadata semantics (children d2/d3/d4) ---
441 # set_node + metadata
442 # it should be possible to add a child without any metadata
443 d.addCallback(lambda res: n.set_node(u"d2", n, {}))
444 d.addCallback(lambda res: self.client.create_empty_dirnode())
445 d.addCallback(lambda n2:
446 self.shouldFail(ExistingChildError, "set_node-no",
447 "child 'd2' already exists",
448 n.set_node, u"d2", n2,
450 d.addCallback(lambda res: n.get_metadata_for(u"d2"))
451 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
453 # if we don't set any defaults, the child should get timestamps
454 d.addCallback(lambda res: n.set_node(u"d3", n))
455 d.addCallback(lambda res: n.get_metadata_for(u"d3"))
456 d.addCallback(lambda metadata:
457 self.failUnlessEqual(sorted(metadata.keys()),
460 # or we can add specific metadata at set_node() time, which
461 # overrides the timestamps
462 d.addCallback(lambda res: n.set_node(u"d4", n,
464 d.addCallback(lambda res: n.get_metadata_for(u"d4"))
465 d.addCallback(lambda metadata:
466 self.failUnlessEqual(metadata, {"key": "value"}))
468 d.addCallback(lambda res: n.delete(u"d2"))
469 d.addCallback(lambda res: n.delete(u"d3"))
470 d.addCallback(lambda res: n.delete(u"d4"))
# --- set_children batch semantics (children e1/e2/e3) ---
472 # metadata through set_children()
473 d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri),
474 (u"e2", fake_file_uri, {}),
475 (u"e3", fake_file_uri,
478 d.addCallback(lambda res:
479 self.shouldFail(ExistingChildError, "set_children-no",
480 "child 'e1' already exists",
482 [ (u"e1", other_file_uri),
483 (u"new", other_file_uri), ],
# a failed batch must be atomic: the non-conflicting entry is not added
485 # and 'new' should not have been created
486 d.addCallback(lambda res: n.list())
487 d.addCallback(lambda children: self.failIf(u"new" in children))
488 d.addCallback(lambda res: n.get_metadata_for(u"e1"))
489 d.addCallback(lambda metadata:
490 self.failUnlessEqual(sorted(metadata.keys()),
492 d.addCallback(lambda res: n.get_metadata_for(u"e2"))
493 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
494 d.addCallback(lambda res: n.get_metadata_for(u"e3"))
495 d.addCallback(lambda metadata:
496 self.failUnlessEqual(metadata, {"key": "value"}))
498 d.addCallback(lambda res: n.delete(u"e1"))
499 d.addCallback(lambda res: n.delete(u"e2"))
500 d.addCallback(lambda res: n.delete(u"e3"))
# --- set_nodes batch semantics (children f1/f2/f3) ---
502 # metadata through set_nodes()
503 d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
508 d.addCallback(lambda res:
509 self.shouldFail(ExistingChildError, "set_nodes-no",
510 "child 'f1' already exists",
515 # and 'new' should not have been created
516 d.addCallback(lambda res: n.list())
517 d.addCallback(lambda children: self.failIf(u"new" in children))
518 d.addCallback(lambda res: n.get_metadata_for(u"f1"))
519 d.addCallback(lambda metadata:
520 self.failUnlessEqual(sorted(metadata.keys()),
522 d.addCallback(lambda res: n.get_metadata_for(u"f2"))
523 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
524 d.addCallback(lambda res: n.get_metadata_for(u"f3"))
525 d.addCallback(lambda metadata:
526 self.failUnlessEqual(metadata, {"key": "value"}))
528 d.addCallback(lambda res: n.delete(u"f1"))
529 d.addCallback(lambda res: n.delete(u"f2"))
530 d.addCallback(lambda res: n.delete(u"f3"))
# set_metadata_for replaces the child's metadata wholesale
533 d.addCallback(lambda res:
534 n.set_metadata_for(u"child",
535 {"tags": ["web2.0-compatible"]}))
536 d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
537 d.addCallback(lambda metadata:
538 self.failUnlessEqual(metadata,
539 {"tags": ["web2.0-compatible"]}))
# --- timestamp (ctime/mtime) rules ---
# NOTE(review): uses the `time` module; its import (orig lines 1-2) is not
# visible in this excerpt — confirm it exists at the top of the file.
542 self._start_timestamp = time.time()
543 d.addCallback(_start)
544 # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
545 # floats to hundredths (it uses str(num) instead of repr(num)).
546 # simplejson-1.7.3 does not have this bug. To prevent this bug
547 # from causing the test to fail, stall for more than a few
548 # hundredths of a second.
549 d.addCallback(self.stall, 0.1)
550 d.addCallback(lambda res: n.add_file(u"timestamps",
551 upload.Data("stamp me", convergence="some convergence string")))
552 d.addCallback(self.stall, 0.1)
554 self._stop_timestamp = time.time()
557 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
558 def _check_timestamp1(metadata):
559 self.failUnless("ctime" in metadata)
560 self.failUnless("mtime" in metadata)
# both stamps must fall inside the [start, stop] window measured above
561 self.failUnlessGreaterOrEqualThan(metadata["ctime"],
562 self._start_timestamp)
563 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
565 self.failUnlessGreaterOrEqualThan(metadata["mtime"],
566 self._start_timestamp)
567 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
569 # Our current timestamp rules say that replacing an existing
570 # child should preserve the 'ctime' but update the mtime
571 self._old_ctime = metadata["ctime"]
572 self._old_mtime = metadata["mtime"]
573 d.addCallback(_check_timestamp1)
574 d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
575 d.addCallback(lambda res: n.set_node(u"timestamps", n))
576 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
577 def _check_timestamp2(metadata):
578 self.failUnlessEqual(metadata["ctime"], self._old_ctime,
579 "%s != %s" % (metadata["ctime"],
581 self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
582 return n.delete(u"timestamps")
583 d.addCallback(_check_timestamp2)
585 # also make sure we can add/update timestamps on a
586 # previously-existing child that didn't have any, since there are
587 # a lot of 0.7.0-generated edges around out there
588 d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
589 d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
590 d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
591 d.addCallback(lambda metadata:
592 self.failUnlessEqual(sorted(metadata.keys()),
594 d.addCallback(lambda res: n.delete(u"no_timestamps"))
# delete() returns the removed child node
596 d.addCallback(lambda res: n.delete(u"subdir"))
597 d.addCallback(lambda old_child:
598 self.failUnlessEqual(old_child.get_uri(),
599 self.subdir.get_uri()))
601 d.addCallback(lambda res: n.list())
602 d.addCallback(lambda children:
603 self.failUnlessEqual(sorted(children.keys()),
# --- add_file semantics ---
606 uploadable = upload.Data("some data", convergence="some convergence string")
607 d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
608 d.addCallback(lambda newnode:
609 self.failUnless(IFileNode.providedBy(newnode)))
610 other_uploadable = upload.Data("some data", convergence="stuff")
611 d.addCallback(lambda res:
612 self.shouldFail(ExistingChildError, "add_file-no",
613 "child 'newfile' already exists",
614 n.add_file, u"newfile",
617 d.addCallback(lambda res: n.list())
618 d.addCallback(lambda children:
619 self.failUnlessEqual(sorted(children.keys()),
620 sorted([u"child", u"newfile"])))
621 d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
622 d.addCallback(lambda metadata:
623 self.failUnlessEqual(sorted(metadata.keys()),
626 d.addCallback(lambda res: n.add_file(u"newfile-metadata",
629 d.addCallback(lambda newnode:
630 self.failUnless(IFileNode.providedBy(newnode)))
631 d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
632 d.addCallback(lambda metadata:
633 self.failUnlessEqual(metadata, {"key": "value"}))
634 d.addCallback(lambda res: n.delete(u"newfile-metadata"))
# --- move_child_to semantics ---
636 d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
637 def _created2(subdir2):
638 self.subdir2 = subdir2
639 # put something in the way, to make sure it gets overwritten
640 return subdir2.add_file(u"child", upload.Data("overwrite me",
642 d.addCallback(_created2)
644 d.addCallback(lambda res:
645 n.move_child_to(u"child", self.subdir2))
646 d.addCallback(lambda res: n.list())
647 d.addCallback(lambda children:
648 self.failUnlessEqual(sorted(children.keys()),
649 sorted([u"newfile", u"subdir2"])))
650 d.addCallback(lambda res: self.subdir2.list())
651 d.addCallback(lambda children:
652 self.failUnlessEqual(sorted(children.keys()),
654 d.addCallback(lambda res: self.subdir2.get(u"child"))
655 d.addCallback(lambda child:
656 self.failUnlessEqual(child.get_uri(),
657 fake_file_uri.to_string()))
659 # move it back, using new_child_name=
660 d.addCallback(lambda res:
661 self.subdir2.move_child_to(u"child", n, u"newchild"))
662 d.addCallback(lambda res: n.list())
663 d.addCallback(lambda children:
664 self.failUnlessEqual(sorted(children.keys()),
665 sorted([u"newchild", u"newfile",
667 d.addCallback(lambda res: self.subdir2.list())
668 d.addCallback(lambda children:
669 self.failUnlessEqual(sorted(children.keys()), []))
671 # now make sure that we honor overwrite=False
672 d.addCallback(lambda res:
673 self.subdir2.set_uri(u"newchild", other_file_uri))
675 d.addCallback(lambda res:
676 self.shouldFail(ExistingChildError, "move_child_to-no",
677 "child 'newchild' already exists",
678 n.move_child_to, u"newchild",
# the failed move must leave the destination's child untouched
681 d.addCallback(lambda res: self.subdir2.get(u"newchild"))
682 d.addCallback(lambda child:
683 self.failUnlessEqual(child.get_uri(),
684 other_file_uri.to_string()))
# DeepStats: unit tests for the dirnode.DeepStats accumulator — add()
# increments/sums counters, histogram() buckets sizes into the fixed
# size-files-histogram buckets, and max() keeps the running maximum.
# NOTE(review): the `s = ds.get_results()`-style lines (orig ~699-700,
# 710-711, 723) are missing from this excerpt, and the method continues past
# the end of what is visible here (orig 725-731, 733+).
692 class DeepStats(unittest.TestCase):
693 def test_stats(self):
694 ds = dirnode.DeepStats(None)
695 ds.add("count-files")
696 ds.add("size-immutable-files", 123)
697 ds.histogram("size-files-histogram", 123)
698 ds.max("largest-directory", 444)
# after one sample of each
701 self.failUnlessEqual(s["count-files"], 1)
702 self.failUnlessEqual(s["size-immutable-files"], 123)
703 self.failUnlessEqual(s["largest-directory"], 444)
704 self.failUnlessEqual(s["count-literal-files"], 0)
706 ds.add("count-files")
707 ds.add("size-immutable-files", 321)
708 ds.histogram("size-files-histogram", 321)
# max() with a smaller value (2) must not lower the recorded maximum
709 ds.max("largest-directory", 2)
712 self.failUnlessEqual(s["count-files"], 2)
713 self.failUnlessEqual(s["size-immutable-files"], 444)
714 self.failUnlessEqual(s["largest-directory"], 444)
715 self.failUnlessEqual(s["count-literal-files"], 0)
# histogram buckets are (low, high, count) tuples: 123 lands in 101-316,
# 321 lands in 317-1000
716 self.failUnlessEqual(s["size-files-histogram"],
717 [ (101, 316, 1), (317, 1000, 1) ])
# fresh accumulator: 1099 small samples plus one 4TB outlier
719 ds = dirnode.DeepStats(None)
720 for i in range(1, 1100):
721 ds.histogram("size-files-histogram", i)
722 ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
724 self.failUnlessEqual(s["size-files-histogram"],
732 (3162277660169L, 10000000000000L, 1),