import time

from zope.interface import implements
from twisted.trial import unittest
from twisted.internet import defer

from allmydata import uri, dirnode
from allmydata.immutable import upload
from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
     INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
     ExistingChildError, NoSuchChildError, \
     IDeepCheckResults, IDeepCheckAndRepairResults
from allmydata.util import hashutil
from allmydata.monitor import Monitor
from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
     FakeDirectoryNode, create_chk_filenode
from allmydata.checker_results import CheckerResults, CheckAndRepairResults
import common_util as testutil
19 # to test dirnode.py, we want to construct a tree of real DirectoryNodes that
20 # contain pointers to fake files. We start with a fake MutableFileNode that
21 # stores all of its data in a static table.
24 implements(IFileNode, IMutableFileNode) # sure, why not
25 def __init__(self, nodeuri):
26 if not isinstance(nodeuri, str):
27 nodeuri = nodeuri.to_string()
28 self.nodeuri = nodeuri
29 si = hashutil.tagged_hash("tag1", nodeuri)[:16]
30 fp = hashutil.tagged_hash("tag2", nodeuri)
31 self.verifieruri = uri.SSKVerifierURI(storage_index=si, fingerprint=fp)
34 def get_readonly_uri(self):
36 def get_verifier(self):
37 return self.verifieruri
39 def check(self, monitor, verify=False):
40 r = CheckerResults("", None)
42 r.set_recoverable(True)
43 return defer.succeed(r)
45 def check_and_repair(self, monitor, verify=False):
46 d = self.check(verify)
48 r = CheckAndRepairResults(None)
49 r.pre_repair_results = r.post_repair_results = cr
54 # dirnode requires three methods from the client: upload(),
55 # create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
56 # only used by the convenience composite method add_file().
61 def upload(self, uploadable):
62 d = uploadable.get_size()
63 d.addCallback(lambda size: uploadable.read(size))
66 n = create_chk_filenode(self, data)
67 results = upload.UploadResults()
68 results.uri = n.get_uri()
70 d.addCallback(_got_data)
73 def create_node_from_uri(self, u):
75 if (INewDirectoryURI.providedBy(u)
76 or IReadonlyNewDirectoryURI.providedBy(u)):
77 return FakeDirectoryNode(self).init_from_uri(u)
78 return Marker(u.to_string())
80 def create_empty_dirnode(self):
81 n = FakeDirectoryNode(self)
83 d.addCallback(lambda res: n)
87 class Dirnode(unittest.TestCase, testutil.ShouldFailMixin, testutil.StallMixin):
89 self.client = FakeClient()
92 d = self.client.create_empty_dirnode()
94 self.failUnless(isinstance(res, FakeDirectoryNode))
96 self.failUnless("RW" in rep)
100 def test_corrupt(self):
101 d = self.client.create_empty_dirnode()
103 u = make_mutable_file_uri()
104 d = dn.set_uri(u"child", u, {})
105 d.addCallback(lambda res: dn.list())
106 def _check1(children):
107 self.failUnless(u"child" in children)
108 d.addCallback(_check1)
109 d.addCallback(lambda res:
110 self.shouldFail(NoSuchChildError, "get bogus", None,
114 si = IURI(filenode.get_uri()).storage_index
115 old_contents = filenode.all_contents[si]
116 # we happen to know that the writecap is encrypted near the
117 # end of the string. Flip one of its bits and make sure we
118 # detect the corruption.
119 new_contents = testutil.flip_bit(old_contents, -10)
120 # TODO: also test flipping bits in the other portions
121 filenode.all_contents[si] = new_contents
122 d.addCallback(_corrupt)
124 self.shouldFail(hashutil.IntegrityCheckError, "corrupt",
125 "HMAC does not match, crypttext is corrupted",
127 d.addCallback(_check2)
129 d.addCallback(_created)
132 def test_check(self):
133 d = self.client.create_empty_dirnode()
134 d.addCallback(lambda dn: dn.check(Monitor()))
136 self.failUnless(res.is_healthy())
140 def _test_deepcheck_create(self):
141 # create a small tree with a loop, and some non-directories
145 # root/subdir/link -> root
146 d = self.client.create_empty_dirnode()
147 def _created_root(rootnode):
148 self._rootnode = rootnode
149 return rootnode.create_empty_directory(u"subdir")
150 d.addCallback(_created_root)
151 def _created_subdir(subdir):
152 self._subdir = subdir
153 d = subdir.add_file(u"file1", upload.Data("data", None))
154 d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
156 d.addCallback(_created_subdir)
158 return self._rootnode
162 def test_deepcheck(self):
163 d = self._test_deepcheck_create()
164 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
165 def _check_results(r):
166 self.failUnless(IDeepCheckResults.providedBy(r))
168 self.failUnlessEqual(c,
169 {"count-objects-checked": 3,
170 "count-objects-healthy": 3,
171 "count-objects-unhealthy": 0,
172 "count-objects-unrecoverable": 0,
173 "count-corrupt-shares": 0,
175 self.failIf(r.get_corrupt_shares())
176 self.failUnlessEqual(len(r.get_all_results()), 3)
177 d.addCallback(_check_results)
180 def test_deepcheck_and_repair(self):
181 d = self._test_deepcheck_create()
182 d.addCallback(lambda rootnode:
183 rootnode.start_deep_check_and_repair().when_done())
184 def _check_results(r):
185 self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
187 self.failUnlessEqual(c,
188 {"count-objects-checked": 3,
189 "count-objects-healthy-pre-repair": 3,
190 "count-objects-unhealthy-pre-repair": 0,
191 "count-objects-unrecoverable-pre-repair": 0,
192 "count-corrupt-shares-pre-repair": 0,
193 "count-objects-healthy-post-repair": 3,
194 "count-objects-unhealthy-post-repair": 0,
195 "count-objects-unrecoverable-post-repair": 0,
196 "count-corrupt-shares-post-repair": 0,
197 "count-repairs-attempted": 0,
198 "count-repairs-successful": 0,
199 "count-repairs-unsuccessful": 0,
201 self.failIf(r.get_corrupt_shares())
202 self.failIf(r.get_remaining_corrupt_shares())
203 self.failUnlessEqual(len(r.get_all_results()), 3)
204 d.addCallback(_check_results)
207 def _mark_file_bad(self, rootnode):
208 si = IURI(rootnode.get_uri())._filenode_uri.storage_index
209 rootnode._node.bad_shares[si] = "unhealthy"
212 def test_deepcheck_problems(self):
213 d = self._test_deepcheck_create()
214 d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
215 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
216 def _check_results(r):
218 self.failUnlessEqual(c,
219 {"count-objects-checked": 3,
220 "count-objects-healthy": 2,
221 "count-objects-unhealthy": 1,
222 "count-objects-unrecoverable": 0,
223 "count-corrupt-shares": 0,
225 #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
226 d.addCallback(_check_results)
229 def test_readonly(self):
230 fileuri = make_chk_file_uri(1234)
231 filenode = self.client.create_node_from_uri(fileuri)
232 uploadable = upload.Data("some data", convergence="some convergence string")
234 d = self.client.create_empty_dirnode()
236 d2 = rw_dn.set_uri(u"child", fileuri)
237 d2.addCallback(lambda res: rw_dn)
239 d.addCallback(_created)
242 ro_uri = rw_dn.get_readonly_uri()
243 ro_dn = self.client.create_node_from_uri(ro_uri)
244 self.failUnless(ro_dn.is_readonly())
245 self.failUnless(ro_dn.is_mutable())
247 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
248 ro_dn.set_uri, u"newchild", fileuri)
249 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
250 ro_dn.set_node, u"newchild", filenode)
251 self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
252 ro_dn.set_nodes, [ (u"newchild", filenode) ])
253 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
254 ro_dn.add_file, u"newchild", uploadable)
255 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
256 ro_dn.delete, u"child")
257 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
258 ro_dn.create_empty_directory, u"newchild")
259 self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
260 ro_dn.set_metadata_for, u"child", {})
261 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
262 ro_dn.move_child_to, u"child", rw_dn)
263 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
264 rw_dn.move_child_to, u"child", ro_dn)
266 d.addCallback(_ready)
267 def _listed(children):
268 self.failUnless(u"child" in children)
269 d.addCallback(_listed)
272 def failUnlessGreaterThan(self, a, b):
273 self.failUnless(a > b, "%r should be > %r" % (a, b))
275 def failUnlessGreaterOrEqualThan(self, a, b):
276 self.failUnless(a >= b, "%r should be >= %r" % (a, b))
278 def test_create(self):
279 self.expected_manifest = []
281 d = self.client.create_empty_dirnode()
284 self.failUnless(n.is_mutable())
287 self.failUnless(u.startswith("URI:DIR2:"), u)
288 u_ro = n.get_readonly_uri()
289 self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
290 u_v = n.get_verifier().to_string()
291 self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
292 self.expected_manifest.append( ((), u) )
293 expected_si = n._uri._filenode_uri.storage_index
294 self.failUnlessEqual(n.get_storage_index(), expected_si)
297 d.addCallback(lambda res: self.failUnlessEqual(res, {}))
298 d.addCallback(lambda res: n.has_child(u"missing"))
299 d.addCallback(lambda res: self.failIf(res))
300 fake_file_uri = make_mutable_file_uri()
301 other_file_uri = make_mutable_file_uri()
302 m = Marker(fake_file_uri)
303 ffu_v = m.get_verifier().to_string()
304 self.expected_manifest.append( ((u"child",) , m.get_uri()) )
305 d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri))
306 d.addCallback(lambda res:
307 self.shouldFail(ExistingChildError, "set_uri-no",
308 "child 'child' already exists",
309 n.set_uri, u"child", other_file_uri,
314 d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
318 # /subdir = directory
319 def _created(subdir):
320 self.failUnless(isinstance(subdir, FakeDirectoryNode))
322 new_v = subdir.get_verifier().to_string()
323 assert isinstance(new_v, str)
324 self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
325 d.addCallback(_created)
327 d.addCallback(lambda res:
328 self.shouldFail(ExistingChildError, "mkdir-no",
329 "child 'subdir' already exists",
330 n.create_empty_directory, u"subdir",
333 d.addCallback(lambda res: n.list())
334 d.addCallback(lambda children:
335 self.failUnlessEqual(sorted(children.keys()),
336 sorted([u"child", u"subdir"])))
338 d.addCallback(lambda res: n.start_deep_stats().when_done())
339 def _check_deepstats(stats):
340 self.failUnless(isinstance(stats, dict))
341 expected = {"count-immutable-files": 0,
342 "count-mutable-files": 1,
343 "count-literal-files": 0,
345 "count-directories": 2,
346 "size-immutable-files": 0,
347 "size-literal-files": 0,
348 #"size-directories": 616, # varies
349 #"largest-directory": 616,
350 "largest-directory-children": 2,
351 "largest-immutable-file": 0,
353 for k,v in expected.iteritems():
354 self.failUnlessEqual(stats[k], v,
355 "stats[%s] was %s, not %s" %
357 self.failUnless(stats["size-directories"] > 500,
358 stats["size-directories"])
359 self.failUnless(stats["largest-directory"] > 500,
360 stats["largest-directory"])
361 self.failUnlessEqual(stats["size-files-histogram"], [])
362 d.addCallback(_check_deepstats)
364 d.addCallback(lambda res: n.build_manifest().when_done())
365 def _check_manifest(res):
366 manifest = res["manifest"]
367 self.failUnlessEqual(sorted(manifest),
368 sorted(self.expected_manifest))
370 _check_deepstats(stats)
371 d.addCallback(_check_manifest)
373 def _add_subsubdir(res):
374 return self.subdir.create_empty_directory(u"subsubdir")
375 d.addCallback(_add_subsubdir)
378 # /subdir = directory
379 # /subdir/subsubdir = directory
380 d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
381 d.addCallback(lambda subsubdir:
382 self.failUnless(isinstance(subsubdir,
384 d.addCallback(lambda res: n.get_child_at_path(u""))
385 d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
388 d.addCallback(lambda res: n.get_metadata_for(u"child"))
389 d.addCallback(lambda metadata:
390 self.failUnlessEqual(sorted(metadata.keys()),
393 d.addCallback(lambda res:
394 self.shouldFail(NoSuchChildError, "gcamap-no",
396 n.get_child_and_metadata_at_path,
398 d.addCallback(lambda res:
399 n.get_child_and_metadata_at_path(u""))
400 def _check_child_and_metadata1(res):
401 child, metadata = res
402 self.failUnless(isinstance(child, FakeDirectoryNode))
403 # edge-metadata needs at least one path segment
404 self.failUnlessEqual(sorted(metadata.keys()), [])
405 d.addCallback(_check_child_and_metadata1)
406 d.addCallback(lambda res:
407 n.get_child_and_metadata_at_path(u"child"))
409 def _check_child_and_metadata2(res):
410 child, metadata = res
411 self.failUnlessEqual(child.get_uri(),
412 fake_file_uri.to_string())
413 self.failUnlessEqual(sorted(metadata.keys()),
415 d.addCallback(_check_child_and_metadata2)
417 d.addCallback(lambda res:
418 n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
419 def _check_child_and_metadata3(res):
420 child, metadata = res
421 self.failUnless(isinstance(child, FakeDirectoryNode))
422 self.failUnlessEqual(sorted(metadata.keys()),
424 d.addCallback(_check_child_and_metadata3)
427 # it should be possible to add a child without any metadata
428 d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, {}))
429 d.addCallback(lambda res: n.get_metadata_for(u"c2"))
430 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
432 # if we don't set any defaults, the child should get timestamps
433 d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri))
434 d.addCallback(lambda res: n.get_metadata_for(u"c3"))
435 d.addCallback(lambda metadata:
436 self.failUnlessEqual(sorted(metadata.keys()),
439 # or we can add specific metadata at set_uri() time, which
440 # overrides the timestamps
441 d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri,
443 d.addCallback(lambda res: n.get_metadata_for(u"c4"))
444 d.addCallback(lambda metadata:
445 self.failUnlessEqual(metadata, {"key": "value"}))
447 d.addCallback(lambda res: n.delete(u"c2"))
448 d.addCallback(lambda res: n.delete(u"c3"))
449 d.addCallback(lambda res: n.delete(u"c4"))
451 # set_node + metadata
452 # it should be possible to add a child without any metadata
453 d.addCallback(lambda res: n.set_node(u"d2", n, {}))
454 d.addCallback(lambda res: self.client.create_empty_dirnode())
455 d.addCallback(lambda n2:
456 self.shouldFail(ExistingChildError, "set_node-no",
457 "child 'd2' already exists",
458 n.set_node, u"d2", n2,
460 d.addCallback(lambda res: n.get_metadata_for(u"d2"))
461 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
463 # if we don't set any defaults, the child should get timestamps
464 d.addCallback(lambda res: n.set_node(u"d3", n))
465 d.addCallback(lambda res: n.get_metadata_for(u"d3"))
466 d.addCallback(lambda metadata:
467 self.failUnlessEqual(sorted(metadata.keys()),
470 # or we can add specific metadata at set_node() time, which
471 # overrides the timestamps
472 d.addCallback(lambda res: n.set_node(u"d4", n,
474 d.addCallback(lambda res: n.get_metadata_for(u"d4"))
475 d.addCallback(lambda metadata:
476 self.failUnlessEqual(metadata, {"key": "value"}))
478 d.addCallback(lambda res: n.delete(u"d2"))
479 d.addCallback(lambda res: n.delete(u"d3"))
480 d.addCallback(lambda res: n.delete(u"d4"))
482 # metadata through set_children()
483 d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri),
484 (u"e2", fake_file_uri, {}),
485 (u"e3", fake_file_uri,
488 d.addCallback(lambda res:
489 self.shouldFail(ExistingChildError, "set_children-no",
490 "child 'e1' already exists",
492 [ (u"e1", other_file_uri),
493 (u"new", other_file_uri), ],
495 # and 'new' should not have been created
496 d.addCallback(lambda res: n.list())
497 d.addCallback(lambda children: self.failIf(u"new" in children))
498 d.addCallback(lambda res: n.get_metadata_for(u"e1"))
499 d.addCallback(lambda metadata:
500 self.failUnlessEqual(sorted(metadata.keys()),
502 d.addCallback(lambda res: n.get_metadata_for(u"e2"))
503 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
504 d.addCallback(lambda res: n.get_metadata_for(u"e3"))
505 d.addCallback(lambda metadata:
506 self.failUnlessEqual(metadata, {"key": "value"}))
508 d.addCallback(lambda res: n.delete(u"e1"))
509 d.addCallback(lambda res: n.delete(u"e2"))
510 d.addCallback(lambda res: n.delete(u"e3"))
512 # metadata through set_nodes()
513 d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
518 d.addCallback(lambda res:
519 self.shouldFail(ExistingChildError, "set_nodes-no",
520 "child 'f1' already exists",
525 # and 'new' should not have been created
526 d.addCallback(lambda res: n.list())
527 d.addCallback(lambda children: self.failIf(u"new" in children))
528 d.addCallback(lambda res: n.get_metadata_for(u"f1"))
529 d.addCallback(lambda metadata:
530 self.failUnlessEqual(sorted(metadata.keys()),
532 d.addCallback(lambda res: n.get_metadata_for(u"f2"))
533 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
534 d.addCallback(lambda res: n.get_metadata_for(u"f3"))
535 d.addCallback(lambda metadata:
536 self.failUnlessEqual(metadata, {"key": "value"}))
538 d.addCallback(lambda res: n.delete(u"f1"))
539 d.addCallback(lambda res: n.delete(u"f2"))
540 d.addCallback(lambda res: n.delete(u"f3"))
543 d.addCallback(lambda res:
544 n.set_metadata_for(u"child",
545 {"tags": ["web2.0-compatible"]}))
546 d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
547 d.addCallback(lambda metadata:
548 self.failUnlessEqual(metadata,
549 {"tags": ["web2.0-compatible"]}))
552 self._start_timestamp = time.time()
553 d.addCallback(_start)
        # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
        # floats to hundredths (it uses str(num) instead of repr(num)).
        # simplejson-1.7.3 does not have this bug. To prevent this bug
        # from causing the test to fail, stall for more than a few
        # hundredths of a second.
559 d.addCallback(self.stall, 0.1)
560 d.addCallback(lambda res: n.add_file(u"timestamps",
561 upload.Data("stamp me", convergence="some convergence string")))
562 d.addCallback(self.stall, 0.1)
564 self._stop_timestamp = time.time()
567 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
568 def _check_timestamp1(metadata):
569 self.failUnless("ctime" in metadata)
570 self.failUnless("mtime" in metadata)
571 self.failUnlessGreaterOrEqualThan(metadata["ctime"],
572 self._start_timestamp)
573 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
575 self.failUnlessGreaterOrEqualThan(metadata["mtime"],
576 self._start_timestamp)
577 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
579 # Our current timestamp rules say that replacing an existing
580 # child should preserve the 'ctime' but update the mtime
581 self._old_ctime = metadata["ctime"]
582 self._old_mtime = metadata["mtime"]
583 d.addCallback(_check_timestamp1)
584 d.addCallback(self.stall, 2.0) # accomodate low-res timestamps
585 d.addCallback(lambda res: n.set_node(u"timestamps", n))
586 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
587 def _check_timestamp2(metadata):
588 self.failUnlessEqual(metadata["ctime"], self._old_ctime,
589 "%s != %s" % (metadata["ctime"],
591 self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
592 return n.delete(u"timestamps")
593 d.addCallback(_check_timestamp2)
595 # also make sure we can add/update timestamps on a
596 # previously-existing child that didn't have any, since there are
597 # a lot of 0.7.0-generated edges around out there
598 d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
599 d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
600 d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
601 d.addCallback(lambda metadata:
602 self.failUnlessEqual(sorted(metadata.keys()),
604 d.addCallback(lambda res: n.delete(u"no_timestamps"))
606 d.addCallback(lambda res: n.delete(u"subdir"))
607 d.addCallback(lambda old_child:
608 self.failUnlessEqual(old_child.get_uri(),
609 self.subdir.get_uri()))
611 d.addCallback(lambda res: n.list())
612 d.addCallback(lambda children:
613 self.failUnlessEqual(sorted(children.keys()),
616 uploadable = upload.Data("some data", convergence="some convergence string")
617 d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
618 d.addCallback(lambda newnode:
619 self.failUnless(IFileNode.providedBy(newnode)))
620 other_uploadable = upload.Data("some data", convergence="stuff")
621 d.addCallback(lambda res:
622 self.shouldFail(ExistingChildError, "add_file-no",
623 "child 'newfile' already exists",
624 n.add_file, u"newfile",
627 d.addCallback(lambda res: n.list())
628 d.addCallback(lambda children:
629 self.failUnlessEqual(sorted(children.keys()),
630 sorted([u"child", u"newfile"])))
631 d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
632 d.addCallback(lambda metadata:
633 self.failUnlessEqual(sorted(metadata.keys()),
636 d.addCallback(lambda res: n.add_file(u"newfile-metadata",
639 d.addCallback(lambda newnode:
640 self.failUnless(IFileNode.providedBy(newnode)))
641 d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
642 d.addCallback(lambda metadata:
643 self.failUnlessEqual(metadata, {"key": "value"}))
644 d.addCallback(lambda res: n.delete(u"newfile-metadata"))
646 d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
647 def _created2(subdir2):
648 self.subdir2 = subdir2
649 # put something in the way, to make sure it gets overwritten
650 return subdir2.add_file(u"child", upload.Data("overwrite me",
652 d.addCallback(_created2)
654 d.addCallback(lambda res:
655 n.move_child_to(u"child", self.subdir2))
656 d.addCallback(lambda res: n.list())
657 d.addCallback(lambda children:
658 self.failUnlessEqual(sorted(children.keys()),
659 sorted([u"newfile", u"subdir2"])))
660 d.addCallback(lambda res: self.subdir2.list())
661 d.addCallback(lambda children:
662 self.failUnlessEqual(sorted(children.keys()),
664 d.addCallback(lambda res: self.subdir2.get(u"child"))
665 d.addCallback(lambda child:
666 self.failUnlessEqual(child.get_uri(),
667 fake_file_uri.to_string()))
669 # move it back, using new_child_name=
670 d.addCallback(lambda res:
671 self.subdir2.move_child_to(u"child", n, u"newchild"))
672 d.addCallback(lambda res: n.list())
673 d.addCallback(lambda children:
674 self.failUnlessEqual(sorted(children.keys()),
675 sorted([u"newchild", u"newfile",
677 d.addCallback(lambda res: self.subdir2.list())
678 d.addCallback(lambda children:
679 self.failUnlessEqual(sorted(children.keys()), []))
681 # now make sure that we honor overwrite=False
682 d.addCallback(lambda res:
683 self.subdir2.set_uri(u"newchild", other_file_uri))
685 d.addCallback(lambda res:
686 self.shouldFail(ExistingChildError, "move_child_to-no",
687 "child 'newchild' already exists",
688 n.move_child_to, u"newchild",
691 d.addCallback(lambda res: self.subdir2.get(u"newchild"))
692 d.addCallback(lambda child:
693 self.failUnlessEqual(child.get_uri(),
694 other_file_uri.to_string()))
702 class DeepStats(unittest.TestCase):
703 def test_stats(self):
704 ds = dirnode.DeepStats(None)
705 ds.add("count-files")
706 ds.add("size-immutable-files", 123)
707 ds.histogram("size-files-histogram", 123)
708 ds.max("largest-directory", 444)
711 self.failUnlessEqual(s["count-files"], 1)
712 self.failUnlessEqual(s["size-immutable-files"], 123)
713 self.failUnlessEqual(s["largest-directory"], 444)
714 self.failUnlessEqual(s["count-literal-files"], 0)
716 ds.add("count-files")
717 ds.add("size-immutable-files", 321)
718 ds.histogram("size-files-histogram", 321)
719 ds.max("largest-directory", 2)
722 self.failUnlessEqual(s["count-files"], 2)
723 self.failUnlessEqual(s["size-immutable-files"], 444)
724 self.failUnlessEqual(s["largest-directory"], 444)
725 self.failUnlessEqual(s["count-literal-files"], 0)
726 self.failUnlessEqual(s["size-files-histogram"],
727 [ (101, 316, 1), (317, 1000, 1) ])
729 ds = dirnode.DeepStats(None)
730 for i in range(1, 1100):
731 ds.histogram("size-files-histogram", i)
732 ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
734 self.failUnlessEqual(s["size-files-histogram"],
742 (3162277660169L, 10000000000000L, 1),