3 from zope.interface import implements
4 from twisted.trial import unittest
5 from twisted.internet import defer
6 from allmydata import uri, dirnode
7 from allmydata.immutable import upload
8 from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
9 INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
10 ExistingChildError, NoSuchChildError, \
11 IDeepCheckResults, IDeepCheckAndRepairResults
12 from allmydata.mutable.filenode import MutableFileNode
13 from allmydata.mutable.common import UncoordinatedWriteError
14 from allmydata.util import hashutil, base32
15 from allmydata.monitor import Monitor
16 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
17 FakeDirectoryNode, create_chk_filenode, ErrorMixin
18 from allmydata.test.no_network import GridTestMixin
19 from allmydata.check_results import CheckResults, CheckAndRepairResults
20 import common_util as testutil
# To test dirnode.py, we want to construct a tree of real DirectoryNodes that
# contain pointers to fake files. We start with a fake MutableFileNode that
# stores all of its data in a static table.
27 implements(IFileNode, IMutableFileNode) # sure, why not
def __init__(self, nodeuri):
    """Record a node URI and derive a fake storage index and verifier cap.

    `nodeuri` may be either a raw URI string or a URI object providing
    to_string().
    """
    # Normalize to a string form first.
    if isinstance(nodeuri, str):
        uri_string = nodeuri
    else:
        uri_string = nodeuri.to_string()
    self.nodeuri = uri_string
    # Both the storage index and the fingerprint are deterministic
    # tagged hashes of the URI, so equal URIs yield equal caps.
    storage_index = hashutil.tagged_hash("tag1", uri_string)[:16]
    fingerprint = hashutil.tagged_hash("tag2", uri_string)
    self.storage_index = storage_index
    self.verifieruri = uri.SSKVerifierURI(storage_index=storage_index,
                                          fingerprint=fingerprint)
38 def get_readonly_uri(self):
def get_verify_cap(self):
    """Return the verifier cap stored on this node at construction time."""
    return self.verifieruri
def get_storage_index(self):
    """Return the storage index stored on this node at construction time."""
    return self.storage_index
45 def check(self, monitor, verify=False, add_lease=False):
46 r = CheckResults(uri.from_string(self.nodeuri), None)
48 r.set_recoverable(True)
49 return defer.succeed(r)
51 def check_and_repair(self, monitor, verify=False, add_lease=False):
52 d = self.check(verify)
54 r = CheckAndRepairResults(None)
55 r.pre_repair_results = r.post_repair_results = cr
# dirnode requires three methods from the client: upload(),
# create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
# only used by the convenience composite method add_file().
67 def upload(self, uploadable):
68 d = uploadable.get_size()
69 d.addCallback(lambda size: uploadable.read(size))
72 n = create_chk_filenode(self, data)
73 results = upload.UploadResults()
74 results.uri = n.get_uri()
76 d.addCallback(_got_data)
79 def create_node_from_uri(self, u):
81 if (INewDirectoryURI.providedBy(u)
82 or IReadonlyNewDirectoryURI.providedBy(u)):
83 return FakeDirectoryNode(self).init_from_uri(u)
84 return Marker(u.to_string())
86 def create_empty_dirnode(self):
87 n = FakeDirectoryNode(self)
89 d.addCallback(lambda res: n)
93 class Dirnode(unittest.TestCase,
94 testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
96 self.client = FakeClient()
99 d = self.client.create_empty_dirnode()
101 self.failUnless(isinstance(res, FakeDirectoryNode))
103 self.failUnless("RW" in rep)
107 def test_corrupt(self):
108 d = self.client.create_empty_dirnode()
110 u = make_mutable_file_uri()
111 d = dn.set_uri(u"child", u.to_string(), {})
112 d.addCallback(lambda res: dn.list())
113 def _check1(children):
114 self.failUnless(u"child" in children)
115 d.addCallback(_check1)
116 d.addCallback(lambda res:
117 self.shouldFail(NoSuchChildError, "get bogus", None,
121 si = IURI(filenode.get_uri()).storage_index
122 old_contents = filenode.all_contents[si]
123 # We happen to know that the writecap MAC is near the end of the string. Flip
124 # one of its bits and make sure we ignore the corruption.
125 new_contents = testutil.flip_bit(old_contents, -10)
126 # TODO: also test flipping bits in the other portions
127 filenode.all_contents[si] = new_contents
128 d.addCallback(_corrupt)
132 self.failUnless(res.has_key('child'))
134 d.addCallback(_check2)
136 d.addCallback(_created)
139 def test_check(self):
140 d = self.client.create_empty_dirnode()
141 d.addCallback(lambda dn: dn.check(Monitor()))
143 self.failUnless(res.is_healthy())
147 def _test_deepcheck_create(self):
148 # create a small tree with a loop, and some non-directories
152 # root/subdir/link -> root
154 d = self.client.create_empty_dirnode()
155 def _created_root(rootnode):
156 self._rootnode = rootnode
157 return rootnode.create_empty_directory(u"subdir")
158 d.addCallback(_created_root)
159 def _created_subdir(subdir):
160 self._subdir = subdir
161 d = subdir.add_file(u"file1", upload.Data("data", None))
162 d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
163 d.addCallback(lambda res: self.client.create_empty_dirnode())
164 d.addCallback(lambda dn:
165 self._rootnode.set_uri(u"rodir",
166 dn.get_readonly_uri()))
168 d.addCallback(_created_subdir)
170 return self._rootnode
174 def test_deepcheck(self):
175 d = self._test_deepcheck_create()
176 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
177 def _check_results(r):
178 self.failUnless(IDeepCheckResults.providedBy(r))
180 self.failUnlessEqual(c,
181 {"count-objects-checked": 4,
182 "count-objects-healthy": 4,
183 "count-objects-unhealthy": 0,
184 "count-objects-unrecoverable": 0,
185 "count-corrupt-shares": 0,
187 self.failIf(r.get_corrupt_shares())
188 self.failUnlessEqual(len(r.get_all_results()), 4)
189 d.addCallback(_check_results)
192 def test_deepcheck_and_repair(self):
193 d = self._test_deepcheck_create()
194 d.addCallback(lambda rootnode:
195 rootnode.start_deep_check_and_repair().when_done())
196 def _check_results(r):
197 self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
199 self.failUnlessEqual(c,
200 {"count-objects-checked": 4,
201 "count-objects-healthy-pre-repair": 4,
202 "count-objects-unhealthy-pre-repair": 0,
203 "count-objects-unrecoverable-pre-repair": 0,
204 "count-corrupt-shares-pre-repair": 0,
205 "count-objects-healthy-post-repair": 4,
206 "count-objects-unhealthy-post-repair": 0,
207 "count-objects-unrecoverable-post-repair": 0,
208 "count-corrupt-shares-post-repair": 0,
209 "count-repairs-attempted": 0,
210 "count-repairs-successful": 0,
211 "count-repairs-unsuccessful": 0,
213 self.failIf(r.get_corrupt_shares())
214 self.failIf(r.get_remaining_corrupt_shares())
215 self.failUnlessEqual(len(r.get_all_results()), 4)
216 d.addCallback(_check_results)
219 def _mark_file_bad(self, rootnode):
220 si = IURI(rootnode.get_uri())._filenode_uri.storage_index
221 rootnode._node.bad_shares[si] = "unhealthy"
224 def test_deepcheck_problems(self):
225 d = self._test_deepcheck_create()
226 d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
227 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
228 def _check_results(r):
230 self.failUnlessEqual(c,
231 {"count-objects-checked": 4,
232 "count-objects-healthy": 3,
233 "count-objects-unhealthy": 1,
234 "count-objects-unrecoverable": 0,
235 "count-corrupt-shares": 0,
237 #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
238 d.addCallback(_check_results)
241 def test_readonly(self):
242 fileuri = make_chk_file_uri(1234)
243 filenode = self.client.create_node_from_uri(fileuri)
244 uploadable = upload.Data("some data", convergence="some convergence string")
246 d = self.client.create_empty_dirnode()
248 d2 = rw_dn.set_uri(u"child", fileuri.to_string())
249 d2.addCallback(lambda res: rw_dn)
251 d.addCallback(_created)
254 ro_uri = rw_dn.get_readonly_uri()
255 ro_dn = self.client.create_node_from_uri(ro_uri)
256 self.failUnless(ro_dn.is_readonly())
257 self.failUnless(ro_dn.is_mutable())
259 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
260 ro_dn.set_uri, u"newchild", fileuri.to_string())
261 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
262 ro_dn.set_node, u"newchild", filenode)
263 self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
264 ro_dn.set_nodes, [ (u"newchild", filenode) ])
265 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
266 ro_dn.add_file, u"newchild", uploadable)
267 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
268 ro_dn.delete, u"child")
269 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
270 ro_dn.create_empty_directory, u"newchild")
271 self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
272 ro_dn.set_metadata_for, u"child", {})
273 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
274 ro_dn.move_child_to, u"child", rw_dn)
275 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
276 rw_dn.move_child_to, u"child", ro_dn)
278 d.addCallback(_ready)
279 def _listed(children):
280 self.failUnless(u"child" in children)
281 d.addCallback(_listed)
def failUnlessGreaterThan(self, a, b):
    """Assert that `a` is strictly greater than `b`."""
    message = "%r should be > %r" % (a, b)
    self.failUnless(a > b, message)
def failUnlessGreaterOrEqualThan(self, a, b):
    """Assert that `a` is greater than or equal to `b`."""
    message = "%r should be >= %r" % (a, b)
    self.failUnless(a >= b, message)
290 def test_create(self):
291 self.expected_manifest = []
292 self.expected_verifycaps = set()
293 self.expected_storage_indexes = set()
295 d = self.client.create_empty_dirnode()
298 self.failUnless(n.is_mutable())
301 self.failUnless(u.startswith("URI:DIR2:"), u)
302 u_ro = n.get_readonly_uri()
303 self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
304 u_v = n.get_verify_cap().to_string()
305 self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
306 u_r = n.get_repair_cap().to_string()
307 self.failUnlessEqual(u_r, u)
308 self.expected_manifest.append( ((), u) )
309 self.expected_verifycaps.add(u_v)
310 si = n.get_storage_index()
311 self.expected_storage_indexes.add(base32.b2a(si))
312 expected_si = n._uri._filenode_uri.storage_index
313 self.failUnlessEqual(si, expected_si)
316 d.addCallback(lambda res: self.failUnlessEqual(res, {}))
317 d.addCallback(lambda res: n.has_child(u"missing"))
318 d.addCallback(lambda res: self.failIf(res))
319 fake_file_uri = make_mutable_file_uri()
320 other_file_uri = make_mutable_file_uri()
321 m = Marker(fake_file_uri)
322 ffu_v = m.get_verify_cap().to_string()
323 self.expected_manifest.append( ((u"child",) , m.get_uri()) )
324 self.expected_verifycaps.add(ffu_v)
325 self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
326 d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri.to_string()))
327 d.addCallback(lambda res:
328 self.shouldFail(ExistingChildError, "set_uri-no",
329 "child 'child' already exists",
330 n.set_uri, u"child", other_file_uri.to_string(),
335 d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
339 # /subdir = directory
340 def _created(subdir):
341 self.failUnless(isinstance(subdir, FakeDirectoryNode))
343 new_v = subdir.get_verify_cap().to_string()
344 assert isinstance(new_v, str)
345 self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
346 self.expected_verifycaps.add(new_v)
347 si = subdir.get_storage_index()
348 self.expected_storage_indexes.add(base32.b2a(si))
349 d.addCallback(_created)
351 d.addCallback(lambda res:
352 self.shouldFail(ExistingChildError, "mkdir-no",
353 "child 'subdir' already exists",
354 n.create_empty_directory, u"subdir",
357 d.addCallback(lambda res: n.list())
358 d.addCallback(lambda children:
359 self.failUnlessEqual(sorted(children.keys()),
360 sorted([u"child", u"subdir"])))
362 d.addCallback(lambda res: n.start_deep_stats().when_done())
363 def _check_deepstats(stats):
364 self.failUnless(isinstance(stats, dict))
365 expected = {"count-immutable-files": 0,
366 "count-mutable-files": 1,
367 "count-literal-files": 0,
369 "count-directories": 2,
370 "size-immutable-files": 0,
371 "size-literal-files": 0,
372 #"size-directories": 616, # varies
373 #"largest-directory": 616,
374 "largest-directory-children": 2,
375 "largest-immutable-file": 0,
377 for k,v in expected.iteritems():
378 self.failUnlessEqual(stats[k], v,
379 "stats[%s] was %s, not %s" %
381 self.failUnless(stats["size-directories"] > 500,
382 stats["size-directories"])
383 self.failUnless(stats["largest-directory"] > 500,
384 stats["largest-directory"])
385 self.failUnlessEqual(stats["size-files-histogram"], [])
386 d.addCallback(_check_deepstats)
388 d.addCallback(lambda res: n.build_manifest().when_done())
389 def _check_manifest(res):
390 manifest = res["manifest"]
391 self.failUnlessEqual(sorted(manifest),
392 sorted(self.expected_manifest))
394 _check_deepstats(stats)
395 self.failUnlessEqual(self.expected_verifycaps,
397 self.failUnlessEqual(self.expected_storage_indexes,
398 res["storage-index"])
399 d.addCallback(_check_manifest)
401 def _add_subsubdir(res):
402 return self.subdir.create_empty_directory(u"subsubdir")
403 d.addCallback(_add_subsubdir)
406 # /subdir = directory
407 # /subdir/subsubdir = directory
408 d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
409 d.addCallback(lambda subsubdir:
410 self.failUnless(isinstance(subsubdir,
412 d.addCallback(lambda res: n.get_child_at_path(u""))
413 d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
416 d.addCallback(lambda res: n.get_metadata_for(u"child"))
417 d.addCallback(lambda metadata:
418 self.failUnlessEqual(sorted(metadata.keys()),
421 d.addCallback(lambda res:
422 self.shouldFail(NoSuchChildError, "gcamap-no",
424 n.get_child_and_metadata_at_path,
426 d.addCallback(lambda res:
427 n.get_child_and_metadata_at_path(u""))
428 def _check_child_and_metadata1(res):
429 child, metadata = res
430 self.failUnless(isinstance(child, FakeDirectoryNode))
431 # edge-metadata needs at least one path segment
432 self.failUnlessEqual(sorted(metadata.keys()), [])
433 d.addCallback(_check_child_and_metadata1)
434 d.addCallback(lambda res:
435 n.get_child_and_metadata_at_path(u"child"))
437 def _check_child_and_metadata2(res):
438 child, metadata = res
439 self.failUnlessEqual(child.get_uri(),
440 fake_file_uri.to_string())
441 self.failUnlessEqual(sorted(metadata.keys()),
443 d.addCallback(_check_child_and_metadata2)
445 d.addCallback(lambda res:
446 n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
447 def _check_child_and_metadata3(res):
448 child, metadata = res
449 self.failUnless(isinstance(child, FakeDirectoryNode))
450 self.failUnlessEqual(sorted(metadata.keys()),
452 d.addCallback(_check_child_and_metadata3)
455 # it should be possible to add a child without any metadata
456 d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri.to_string(), {}))
457 d.addCallback(lambda res: n.get_metadata_for(u"c2"))
458 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
460 # if we don't set any defaults, the child should get timestamps
461 d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri.to_string()))
462 d.addCallback(lambda res: n.get_metadata_for(u"c3"))
463 d.addCallback(lambda metadata:
464 self.failUnlessEqual(sorted(metadata.keys()),
467 # or we can add specific metadata at set_uri() time, which
468 # overrides the timestamps
469 d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri.to_string(),
471 d.addCallback(lambda res: n.get_metadata_for(u"c4"))
472 d.addCallback(lambda metadata:
473 self.failUnlessEqual(metadata, {"key": "value"}))
475 d.addCallback(lambda res: n.delete(u"c2"))
476 d.addCallback(lambda res: n.delete(u"c3"))
477 d.addCallback(lambda res: n.delete(u"c4"))
479 # set_node + metadata
480 # it should be possible to add a child without any metadata
481 d.addCallback(lambda res: n.set_node(u"d2", n, {}))
482 d.addCallback(lambda res: self.client.create_empty_dirnode())
483 d.addCallback(lambda n2:
484 self.shouldFail(ExistingChildError, "set_node-no",
485 "child 'd2' already exists",
486 n.set_node, u"d2", n2,
488 d.addCallback(lambda res: n.get_metadata_for(u"d2"))
489 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
491 # if we don't set any defaults, the child should get timestamps
492 d.addCallback(lambda res: n.set_node(u"d3", n))
493 d.addCallback(lambda res: n.get_metadata_for(u"d3"))
494 d.addCallback(lambda metadata:
495 self.failUnlessEqual(sorted(metadata.keys()),
498 # or we can add specific metadata at set_node() time, which
499 # overrides the timestamps
500 d.addCallback(lambda res: n.set_node(u"d4", n,
502 d.addCallback(lambda res: n.get_metadata_for(u"d4"))
503 d.addCallback(lambda metadata:
504 self.failUnlessEqual(metadata, {"key": "value"}))
506 d.addCallback(lambda res: n.delete(u"d2"))
507 d.addCallback(lambda res: n.delete(u"d3"))
508 d.addCallback(lambda res: n.delete(u"d4"))
510 # metadata through set_children()
511 d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri.to_string()),
512 (u"e2", fake_file_uri.to_string(), {}),
513 (u"e3", fake_file_uri.to_string(),
516 d.addCallback(lambda res:
517 self.shouldFail(ExistingChildError, "set_children-no",
518 "child 'e1' already exists",
520 [ (u"e1", other_file_uri),
521 (u"new", other_file_uri), ],
523 # and 'new' should not have been created
524 d.addCallback(lambda res: n.list())
525 d.addCallback(lambda children: self.failIf(u"new" in children))
526 d.addCallback(lambda res: n.get_metadata_for(u"e1"))
527 d.addCallback(lambda metadata:
528 self.failUnlessEqual(sorted(metadata.keys()),
530 d.addCallback(lambda res: n.get_metadata_for(u"e2"))
531 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
532 d.addCallback(lambda res: n.get_metadata_for(u"e3"))
533 d.addCallback(lambda metadata:
534 self.failUnlessEqual(metadata, {"key": "value"}))
536 d.addCallback(lambda res: n.delete(u"e1"))
537 d.addCallback(lambda res: n.delete(u"e2"))
538 d.addCallback(lambda res: n.delete(u"e3"))
540 # metadata through set_nodes()
541 d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
546 d.addCallback(lambda res:
547 self.shouldFail(ExistingChildError, "set_nodes-no",
548 "child 'f1' already exists",
553 # and 'new' should not have been created
554 d.addCallback(lambda res: n.list())
555 d.addCallback(lambda children: self.failIf(u"new" in children))
556 d.addCallback(lambda res: n.get_metadata_for(u"f1"))
557 d.addCallback(lambda metadata:
558 self.failUnlessEqual(sorted(metadata.keys()),
560 d.addCallback(lambda res: n.get_metadata_for(u"f2"))
561 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
562 d.addCallback(lambda res: n.get_metadata_for(u"f3"))
563 d.addCallback(lambda metadata:
564 self.failUnlessEqual(metadata, {"key": "value"}))
566 d.addCallback(lambda res: n.delete(u"f1"))
567 d.addCallback(lambda res: n.delete(u"f2"))
568 d.addCallback(lambda res: n.delete(u"f3"))
571 d.addCallback(lambda res:
572 n.set_metadata_for(u"child",
573 {"tags": ["web2.0-compatible"]}))
574 d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
575 d.addCallback(lambda metadata:
576 self.failUnlessEqual(metadata,
577 {"tags": ["web2.0-compatible"]}))
580 self._start_timestamp = time.time()
581 d.addCallback(_start)
582 # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
583 # floats to hundredeths (it uses str(num) instead of repr(num)).
584 # simplejson-1.7.3 does not have this bug. To prevent this bug
585 # from causing the test to fail, stall for more than a few
586 # hundrededths of a second.
587 d.addCallback(self.stall, 0.1)
588 d.addCallback(lambda res: n.add_file(u"timestamps",
589 upload.Data("stamp me", convergence="some convergence string")))
590 d.addCallback(self.stall, 0.1)
592 self._stop_timestamp = time.time()
595 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
596 def _check_timestamp1(metadata):
597 self.failUnless("ctime" in metadata)
598 self.failUnless("mtime" in metadata)
599 self.failUnlessGreaterOrEqualThan(metadata["ctime"],
600 self._start_timestamp)
601 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
603 self.failUnlessGreaterOrEqualThan(metadata["mtime"],
604 self._start_timestamp)
605 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
607 # Our current timestamp rules say that replacing an existing
608 # child should preserve the 'ctime' but update the mtime
609 self._old_ctime = metadata["ctime"]
610 self._old_mtime = metadata["mtime"]
611 d.addCallback(_check_timestamp1)
612 d.addCallback(self.stall, 2.0) # accomodate low-res timestamps
613 d.addCallback(lambda res: n.set_node(u"timestamps", n))
614 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
615 def _check_timestamp2(metadata):
616 self.failUnlessEqual(metadata["ctime"], self._old_ctime,
617 "%s != %s" % (metadata["ctime"],
619 self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
620 return n.delete(u"timestamps")
621 d.addCallback(_check_timestamp2)
623 # also make sure we can add/update timestamps on a
624 # previously-existing child that didn't have any, since there are
625 # a lot of 0.7.0-generated edges around out there
626 d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
627 d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
628 d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
629 d.addCallback(lambda metadata:
630 self.failUnlessEqual(sorted(metadata.keys()),
632 d.addCallback(lambda res: n.delete(u"no_timestamps"))
634 d.addCallback(lambda res: n.delete(u"subdir"))
635 d.addCallback(lambda old_child:
636 self.failUnlessEqual(old_child.get_uri(),
637 self.subdir.get_uri()))
639 d.addCallback(lambda res: n.list())
640 d.addCallback(lambda children:
641 self.failUnlessEqual(sorted(children.keys()),
644 uploadable = upload.Data("some data", convergence="some convergence string")
645 d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
646 d.addCallback(lambda newnode:
647 self.failUnless(IFileNode.providedBy(newnode)))
648 other_uploadable = upload.Data("some data", convergence="stuff")
649 d.addCallback(lambda res:
650 self.shouldFail(ExistingChildError, "add_file-no",
651 "child 'newfile' already exists",
652 n.add_file, u"newfile",
655 d.addCallback(lambda res: n.list())
656 d.addCallback(lambda children:
657 self.failUnlessEqual(sorted(children.keys()),
658 sorted([u"child", u"newfile"])))
659 d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
660 d.addCallback(lambda metadata:
661 self.failUnlessEqual(sorted(metadata.keys()),
664 d.addCallback(lambda res: n.add_file(u"newfile-metadata",
667 d.addCallback(lambda newnode:
668 self.failUnless(IFileNode.providedBy(newnode)))
669 d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
670 d.addCallback(lambda metadata:
671 self.failUnlessEqual(metadata, {"key": "value"}))
672 d.addCallback(lambda res: n.delete(u"newfile-metadata"))
674 d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
675 def _created2(subdir2):
676 self.subdir2 = subdir2
677 # put something in the way, to make sure it gets overwritten
678 return subdir2.add_file(u"child", upload.Data("overwrite me",
680 d.addCallback(_created2)
682 d.addCallback(lambda res:
683 n.move_child_to(u"child", self.subdir2))
684 d.addCallback(lambda res: n.list())
685 d.addCallback(lambda children:
686 self.failUnlessEqual(sorted(children.keys()),
687 sorted([u"newfile", u"subdir2"])))
688 d.addCallback(lambda res: self.subdir2.list())
689 d.addCallback(lambda children:
690 self.failUnlessEqual(sorted(children.keys()),
692 d.addCallback(lambda res: self.subdir2.get(u"child"))
693 d.addCallback(lambda child:
694 self.failUnlessEqual(child.get_uri(),
695 fake_file_uri.to_string()))
697 # move it back, using new_child_name=
698 d.addCallback(lambda res:
699 self.subdir2.move_child_to(u"child", n, u"newchild"))
700 d.addCallback(lambda res: n.list())
701 d.addCallback(lambda children:
702 self.failUnlessEqual(sorted(children.keys()),
703 sorted([u"newchild", u"newfile",
705 d.addCallback(lambda res: self.subdir2.list())
706 d.addCallback(lambda children:
707 self.failUnlessEqual(sorted(children.keys()), []))
709 # now make sure that we honor overwrite=False
710 d.addCallback(lambda res:
711 self.subdir2.set_uri(u"newchild", other_file_uri.to_string()))
713 d.addCallback(lambda res:
714 self.shouldFail(ExistingChildError, "move_child_to-no",
715 "child 'newchild' already exists",
716 n.move_child_to, u"newchild",
719 d.addCallback(lambda res: self.subdir2.get(u"newchild"))
720 d.addCallback(lambda child:
721 self.failUnlessEqual(child.get_uri(),
722 other_file_uri.to_string()))
728 d.addErrback(self.explain_error)
731 class DeepStats(unittest.TestCase):
732 def test_stats(self):
733 ds = dirnode.DeepStats(None)
734 ds.add("count-files")
735 ds.add("size-immutable-files", 123)
736 ds.histogram("size-files-histogram", 123)
737 ds.max("largest-directory", 444)
740 self.failUnlessEqual(s["count-files"], 1)
741 self.failUnlessEqual(s["size-immutable-files"], 123)
742 self.failUnlessEqual(s["largest-directory"], 444)
743 self.failUnlessEqual(s["count-literal-files"], 0)
745 ds.add("count-files")
746 ds.add("size-immutable-files", 321)
747 ds.histogram("size-files-histogram", 321)
748 ds.max("largest-directory", 2)
751 self.failUnlessEqual(s["count-files"], 2)
752 self.failUnlessEqual(s["size-immutable-files"], 444)
753 self.failUnlessEqual(s["largest-directory"], 444)
754 self.failUnlessEqual(s["count-literal-files"], 0)
755 self.failUnlessEqual(s["size-files-histogram"],
756 [ (101, 316, 1), (317, 1000, 1) ])
758 ds = dirnode.DeepStats(None)
759 for i in range(1, 1100):
760 ds.histogram("size-files-histogram", i)
761 ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
763 self.failUnlessEqual(s["size-files-histogram"],
771 (3162277660169L, 10000000000000L, 1),
774 class UCWEingMutableFileNode(MutableFileNode):
775 please_ucwe_after_next_upload = False
777 def _upload(self, new_contents, servermap):
778 d = MutableFileNode._upload(self, new_contents, servermap)
780 if self.please_ucwe_after_next_upload:
781 self.please_ucwe_after_next_upload = False
782 raise UncoordinatedWriteError()
class UCWEingNewDirectoryNode(dirnode.NewDirectoryNode):
    """A directory node whose backing mutable file is a
    UCWEingMutableFileNode, so tests can make it raise
    UncoordinatedWriteError on demand after an upload."""
    filenode_class = UCWEingMutableFileNode
790 class Deleter(GridTestMixin, unittest.TestCase):
791 def test_retry(self):
792 # ticket #550, a dirnode.delete which experiences an
793 # UncoordinatedWriteError will fail with an incorrect "you're
794 # deleting something which isn't there" NoSuchChildError exception.
796 # to trigger this, we start by creating a directory with a single
797 # file in it. Then we create a special dirnode that uses a modified
798 # MutableFileNode which will raise UncoordinatedWriteError once on
799 # demand. We then call dirnode.delete, which ought to retry and
802 self.basedir = self.mktemp()
804 c0 = self.g.clients[0]
805 d = c0.create_empty_dirnode()
806 small = upload.Data("Small enough for a LIT", None)
807 def _created_dir(dn):
809 self.root_uri = dn.get_uri()
810 return dn.add_file(u"file", small)
811 d.addCallback(_created_dir)
812 def _do_delete(ignored):
813 n = UCWEingNewDirectoryNode(c0).init_from_uri(self.root_uri)
814 assert n._node.please_ucwe_after_next_upload == False
815 n._node.please_ucwe_after_next_upload = True
816 # This should succeed, not raise an exception
817 return n.delete(u"file")
818 d.addCallback(_do_delete)