3 from zope.interface import implements
4 from twisted.trial import unittest
5 from twisted.internet import defer
6 from allmydata import uri, dirnode
7 from allmydata.immutable import upload
8 from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
9 INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
10 ExistingChildError, NoSuchChildError, \
11 IDeepCheckResults, IDeepCheckAndRepairResults
12 from allmydata.mutable.filenode import MutableFileNode
13 from allmydata.mutable.common import UncoordinatedWriteError
14 from allmydata.util import hashutil, base32
15 from allmydata.monitor import Monitor
16 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
17 FakeDirectoryNode, create_chk_filenode, ErrorMixin, SystemTestMixin
18 from allmydata.checker_results import CheckerResults, CheckAndRepairResults
19 import common_util as testutil
21 # to test dirnode.py, we want to construct a tree of real DirectoryNodes that
22 # contain pointers to fake files. We start with a fake MutableFileNode that
23 # stores all of its data in a static table.
# NOTE(review): the class header (original line 25, presumably `class Marker:`)
# is missing from this capture -- this `implements` class-advice line and the
# methods below are the body of that class.
26 implements(IFileNode, IMutableFileNode) # sure, why not
def __init__(self, nodeuri):
    """Fake filenode: derive a storage index and verifier cap from the
    given URI, which may be a string or a URI object."""
    if not isinstance(nodeuri, str):
        nodeuri = nodeuri.to_string()  # normalize URI objects to strings
    self.nodeuri = nodeuri
    # the storage index is the first 16 bytes of a tagged hash of the URI
    self.storage_index = hashutil.tagged_hash("tag1", nodeuri)[:16]
    fingerprint = hashutil.tagged_hash("tag2", nodeuri)
    self.verifieruri = uri.SSKVerifierURI(storage_index=self.storage_index,
                                          fingerprint=fingerprint)
# NOTE(review): the body of get_readonly_uri (original line 38, presumably
# `return self.nodeuri`) is missing from this capture -- TODO recover.
37 def get_readonly_uri(self):
def get_verify_cap(self):
    """Return the verifier cap stored on this node at construction time."""
    verifier = self.verifieruri
    return verifier
def get_storage_index(self):
    """Return the storage index computed for this node's URI."""
    index = self.storage_index
    return index
# NOTE(review): this capture is damaged -- the numeric prefixes are original
# line numbers and several lines are missing below (46, 49, 52, 55-58).
44 def check(self, monitor, verify=False):
45 r = CheckerResults(uri.from_string(self.nodeuri), None)
# line 46 missing: presumably `r.set_healthy(True)` -- TODO confirm
47 r.set_recoverable(True)
48 return defer.succeed(r)
50 def check_and_repair(self, monitor, verify=False):
# NOTE(review): `self.check(verify)` passes `verify` positionally into the
# `monitor` parameter slot of check() -- looks like a latent bug; confirm.
51 d = self.check(verify)
# line 52 missing: presumably a callback header (e.g. `def _got(cr):`) that
# binds the `cr` used two lines below
53 r = CheckAndRepairResults(None)
54 r.pre_repair_results = r.post_repair_results = cr
# lines 55-58 missing: presumably the rest of the callback and `return d`
59 # dirnode requires three methods from the client: upload(),
60 # create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
61 # only used by the convenience composite method add_file().
# NOTE(review): the class header (~original lines 62-65, presumably
# `class FakeClient:` with `implements(IClient)`) is missing from this
# capture, along with several interior lines flagged below.
66 def upload(self, uploadable):
67 d = uploadable.get_size()
68 d.addCallback(lambda size: uploadable.read(size))
# lines 69-70 missing: presumably `def _got_data(data):` opening the
# callback that the next three lines belong to -- TODO recover
71 n = create_chk_filenode(self, data)
72 results = upload.UploadResults()
73 results.uri = n.get_uri()
# line 74 missing: presumably `return results`
75 d.addCallback(_got_data)
# line 76 missing: presumably `return d`
78 def create_node_from_uri(self, u):
# line 79 missing: presumably `u = IURI(u)` (adapt strings to URI objects)
80 if (INewDirectoryURI.providedBy(u)
81 or IReadonlyNewDirectoryURI.providedBy(u)):
82 return FakeDirectoryNode(self).init_from_uri(u)
83 return Marker(u.to_string())
85 def create_empty_dirnode(self):
86 n = FakeDirectoryNode(self)
# line 87 missing: presumably the deferred creation, e.g. `d = n.create()`
88 d.addCallback(lambda res: n)
# lines 89-90 missing: presumably `return d`
92 class Dirnode(unittest.TestCase,
93 testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
95 self.client = FakeClient()
98 d = self.client.create_empty_dirnode()
100 self.failUnless(isinstance(res, FakeDirectoryNode))
102 self.failUnless("RW" in rep)
106 def test_corrupt(self):
107 d = self.client.create_empty_dirnode()
109 u = make_mutable_file_uri()
110 d = dn.set_uri(u"child", u.to_string(), {})
111 d.addCallback(lambda res: dn.list())
112 def _check1(children):
113 self.failUnless(u"child" in children)
114 d.addCallback(_check1)
115 d.addCallback(lambda res:
116 self.shouldFail(NoSuchChildError, "get bogus", None,
120 si = IURI(filenode.get_uri()).storage_index
121 old_contents = filenode.all_contents[si]
122 # we happen to know that the writecap is encrypted near the
123 # end of the string. Flip one of its bits and make sure we
124 # detect the corruption.
125 new_contents = testutil.flip_bit(old_contents, -10)
126 # TODO: also test flipping bits in the other portions
127 filenode.all_contents[si] = new_contents
128 d.addCallback(_corrupt)
130 self.shouldFail(hashutil.IntegrityCheckError, "corrupt",
131 "HMAC does not match, crypttext is corrupted",
133 d.addCallback(_check2)
135 d.addCallback(_created)
138 def test_check(self):
139 d = self.client.create_empty_dirnode()
140 d.addCallback(lambda dn: dn.check(Monitor()))
142 self.failUnless(res.is_healthy())
146 def _test_deepcheck_create(self):
147 # create a small tree with a loop, and some non-directories
151 # root/subdir/link -> root
152 d = self.client.create_empty_dirnode()
153 def _created_root(rootnode):
154 self._rootnode = rootnode
155 return rootnode.create_empty_directory(u"subdir")
156 d.addCallback(_created_root)
157 def _created_subdir(subdir):
158 self._subdir = subdir
159 d = subdir.add_file(u"file1", upload.Data("data", None))
160 d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
162 d.addCallback(_created_subdir)
164 return self._rootnode
168 def test_deepcheck(self):
169 d = self._test_deepcheck_create()
170 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
171 def _check_results(r):
172 self.failUnless(IDeepCheckResults.providedBy(r))
174 self.failUnlessEqual(c,
175 {"count-objects-checked": 3,
176 "count-objects-healthy": 3,
177 "count-objects-unhealthy": 0,
178 "count-objects-unrecoverable": 0,
179 "count-corrupt-shares": 0,
181 self.failIf(r.get_corrupt_shares())
182 self.failUnlessEqual(len(r.get_all_results()), 3)
183 d.addCallback(_check_results)
186 def test_deepcheck_and_repair(self):
187 d = self._test_deepcheck_create()
188 d.addCallback(lambda rootnode:
189 rootnode.start_deep_check_and_repair().when_done())
190 def _check_results(r):
191 self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
193 self.failUnlessEqual(c,
194 {"count-objects-checked": 3,
195 "count-objects-healthy-pre-repair": 3,
196 "count-objects-unhealthy-pre-repair": 0,
197 "count-objects-unrecoverable-pre-repair": 0,
198 "count-corrupt-shares-pre-repair": 0,
199 "count-objects-healthy-post-repair": 3,
200 "count-objects-unhealthy-post-repair": 0,
201 "count-objects-unrecoverable-post-repair": 0,
202 "count-corrupt-shares-post-repair": 0,
203 "count-repairs-attempted": 0,
204 "count-repairs-successful": 0,
205 "count-repairs-unsuccessful": 0,
207 self.failIf(r.get_corrupt_shares())
208 self.failIf(r.get_remaining_corrupt_shares())
209 self.failUnlessEqual(len(r.get_all_results()), 3)
210 d.addCallback(_check_results)
213 def _mark_file_bad(self, rootnode):
214 si = IURI(rootnode.get_uri())._filenode_uri.storage_index
215 rootnode._node.bad_shares[si] = "unhealthy"
218 def test_deepcheck_problems(self):
219 d = self._test_deepcheck_create()
220 d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
221 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
222 def _check_results(r):
224 self.failUnlessEqual(c,
225 {"count-objects-checked": 3,
226 "count-objects-healthy": 2,
227 "count-objects-unhealthy": 1,
228 "count-objects-unrecoverable": 0,
229 "count-corrupt-shares": 0,
231 #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
232 d.addCallback(_check_results)
235 def test_readonly(self):
236 fileuri = make_chk_file_uri(1234)
237 filenode = self.client.create_node_from_uri(fileuri)
238 uploadable = upload.Data("some data", convergence="some convergence string")
240 d = self.client.create_empty_dirnode()
242 d2 = rw_dn.set_uri(u"child", fileuri.to_string())
243 d2.addCallback(lambda res: rw_dn)
245 d.addCallback(_created)
248 ro_uri = rw_dn.get_readonly_uri()
249 ro_dn = self.client.create_node_from_uri(ro_uri)
250 self.failUnless(ro_dn.is_readonly())
251 self.failUnless(ro_dn.is_mutable())
253 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
254 ro_dn.set_uri, u"newchild", fileuri.to_string())
255 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
256 ro_dn.set_node, u"newchild", filenode)
257 self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
258 ro_dn.set_nodes, [ (u"newchild", filenode) ])
259 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
260 ro_dn.add_file, u"newchild", uploadable)
261 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
262 ro_dn.delete, u"child")
263 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
264 ro_dn.create_empty_directory, u"newchild")
265 self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
266 ro_dn.set_metadata_for, u"child", {})
267 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
268 ro_dn.move_child_to, u"child", rw_dn)
269 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
270 rw_dn.move_child_to, u"child", ro_dn)
272 d.addCallback(_ready)
273 def _listed(children):
274 self.failUnless(u"child" in children)
275 d.addCallback(_listed)
def failUnlessGreaterThan(self, a, b):
    """Assert that a is strictly greater than b."""
    message = "%r should be > %r" % (a, b)
    self.failUnless(a > b, message)
def failUnlessGreaterOrEqualThan(self, a, b):
    """Assert that a is greater than or equal to b."""
    message = "%r should be >= %r" % (a, b)
    self.failUnless(a >= b, message)
284 def test_create(self):
285 self.expected_manifest = []
286 self.expected_verifycaps = set()
287 self.expected_storage_indexes = set()
289 d = self.client.create_empty_dirnode()
292 self.failUnless(n.is_mutable())
295 self.failUnless(u.startswith("URI:DIR2:"), u)
296 u_ro = n.get_readonly_uri()
297 self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
298 u_v = n.get_verify_cap().to_string()
299 self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
300 self.expected_manifest.append( ((), u) )
301 self.expected_verifycaps.add(u_v)
302 si = n.get_storage_index()
303 self.expected_storage_indexes.add(base32.b2a(si))
304 expected_si = n._uri._filenode_uri.storage_index
305 self.failUnlessEqual(si, expected_si)
308 d.addCallback(lambda res: self.failUnlessEqual(res, {}))
309 d.addCallback(lambda res: n.has_child(u"missing"))
310 d.addCallback(lambda res: self.failIf(res))
311 fake_file_uri = make_mutable_file_uri()
312 other_file_uri = make_mutable_file_uri()
313 m = Marker(fake_file_uri)
314 ffu_v = m.get_verify_cap().to_string()
315 self.expected_manifest.append( ((u"child",) , m.get_uri()) )
316 self.expected_verifycaps.add(ffu_v)
317 self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
318 d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri.to_string()))
319 d.addCallback(lambda res:
320 self.shouldFail(ExistingChildError, "set_uri-no",
321 "child 'child' already exists",
322 n.set_uri, u"child", other_file_uri.to_string(),
327 d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
331 # /subdir = directory
332 def _created(subdir):
333 self.failUnless(isinstance(subdir, FakeDirectoryNode))
335 new_v = subdir.get_verify_cap().to_string()
336 assert isinstance(new_v, str)
337 self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
338 self.expected_verifycaps.add(new_v)
339 si = subdir.get_storage_index()
340 self.expected_storage_indexes.add(base32.b2a(si))
341 d.addCallback(_created)
343 d.addCallback(lambda res:
344 self.shouldFail(ExistingChildError, "mkdir-no",
345 "child 'subdir' already exists",
346 n.create_empty_directory, u"subdir",
349 d.addCallback(lambda res: n.list())
350 d.addCallback(lambda children:
351 self.failUnlessEqual(sorted(children.keys()),
352 sorted([u"child", u"subdir"])))
354 d.addCallback(lambda res: n.start_deep_stats().when_done())
355 def _check_deepstats(stats):
356 self.failUnless(isinstance(stats, dict))
357 expected = {"count-immutable-files": 0,
358 "count-mutable-files": 1,
359 "count-literal-files": 0,
361 "count-directories": 2,
362 "size-immutable-files": 0,
363 "size-literal-files": 0,
364 #"size-directories": 616, # varies
365 #"largest-directory": 616,
366 "largest-directory-children": 2,
367 "largest-immutable-file": 0,
369 for k,v in expected.iteritems():
370 self.failUnlessEqual(stats[k], v,
371 "stats[%s] was %s, not %s" %
373 self.failUnless(stats["size-directories"] > 500,
374 stats["size-directories"])
375 self.failUnless(stats["largest-directory"] > 500,
376 stats["largest-directory"])
377 self.failUnlessEqual(stats["size-files-histogram"], [])
378 d.addCallback(_check_deepstats)
380 d.addCallback(lambda res: n.build_manifest().when_done())
381 def _check_manifest(res):
382 manifest = res["manifest"]
383 self.failUnlessEqual(sorted(manifest),
384 sorted(self.expected_manifest))
386 _check_deepstats(stats)
387 self.failUnlessEqual(self.expected_verifycaps,
389 self.failUnlessEqual(self.expected_storage_indexes,
390 res["storage-index"])
391 d.addCallback(_check_manifest)
393 def _add_subsubdir(res):
394 return self.subdir.create_empty_directory(u"subsubdir")
395 d.addCallback(_add_subsubdir)
398 # /subdir = directory
399 # /subdir/subsubdir = directory
400 d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
401 d.addCallback(lambda subsubdir:
402 self.failUnless(isinstance(subsubdir,
404 d.addCallback(lambda res: n.get_child_at_path(u""))
405 d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
408 d.addCallback(lambda res: n.get_metadata_for(u"child"))
409 d.addCallback(lambda metadata:
410 self.failUnlessEqual(sorted(metadata.keys()),
413 d.addCallback(lambda res:
414 self.shouldFail(NoSuchChildError, "gcamap-no",
416 n.get_child_and_metadata_at_path,
418 d.addCallback(lambda res:
419 n.get_child_and_metadata_at_path(u""))
420 def _check_child_and_metadata1(res):
421 child, metadata = res
422 self.failUnless(isinstance(child, FakeDirectoryNode))
423 # edge-metadata needs at least one path segment
424 self.failUnlessEqual(sorted(metadata.keys()), [])
425 d.addCallback(_check_child_and_metadata1)
426 d.addCallback(lambda res:
427 n.get_child_and_metadata_at_path(u"child"))
429 def _check_child_and_metadata2(res):
430 child, metadata = res
431 self.failUnlessEqual(child.get_uri(),
432 fake_file_uri.to_string())
433 self.failUnlessEqual(sorted(metadata.keys()),
435 d.addCallback(_check_child_and_metadata2)
437 d.addCallback(lambda res:
438 n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
439 def _check_child_and_metadata3(res):
440 child, metadata = res
441 self.failUnless(isinstance(child, FakeDirectoryNode))
442 self.failUnlessEqual(sorted(metadata.keys()),
444 d.addCallback(_check_child_and_metadata3)
447 # it should be possible to add a child without any metadata
448 d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri.to_string(), {}))
449 d.addCallback(lambda res: n.get_metadata_for(u"c2"))
450 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
452 # if we don't set any defaults, the child should get timestamps
453 d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri.to_string()))
454 d.addCallback(lambda res: n.get_metadata_for(u"c3"))
455 d.addCallback(lambda metadata:
456 self.failUnlessEqual(sorted(metadata.keys()),
459 # or we can add specific metadata at set_uri() time, which
460 # overrides the timestamps
461 d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri.to_string(),
463 d.addCallback(lambda res: n.get_metadata_for(u"c4"))
464 d.addCallback(lambda metadata:
465 self.failUnlessEqual(metadata, {"key": "value"}))
467 d.addCallback(lambda res: n.delete(u"c2"))
468 d.addCallback(lambda res: n.delete(u"c3"))
469 d.addCallback(lambda res: n.delete(u"c4"))
471 # set_node + metadata
472 # it should be possible to add a child without any metadata
473 d.addCallback(lambda res: n.set_node(u"d2", n, {}))
474 d.addCallback(lambda res: self.client.create_empty_dirnode())
475 d.addCallback(lambda n2:
476 self.shouldFail(ExistingChildError, "set_node-no",
477 "child 'd2' already exists",
478 n.set_node, u"d2", n2,
480 d.addCallback(lambda res: n.get_metadata_for(u"d2"))
481 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
483 # if we don't set any defaults, the child should get timestamps
484 d.addCallback(lambda res: n.set_node(u"d3", n))
485 d.addCallback(lambda res: n.get_metadata_for(u"d3"))
486 d.addCallback(lambda metadata:
487 self.failUnlessEqual(sorted(metadata.keys()),
490 # or we can add specific metadata at set_node() time, which
491 # overrides the timestamps
492 d.addCallback(lambda res: n.set_node(u"d4", n,
494 d.addCallback(lambda res: n.get_metadata_for(u"d4"))
495 d.addCallback(lambda metadata:
496 self.failUnlessEqual(metadata, {"key": "value"}))
498 d.addCallback(lambda res: n.delete(u"d2"))
499 d.addCallback(lambda res: n.delete(u"d3"))
500 d.addCallback(lambda res: n.delete(u"d4"))
502 # metadata through set_children()
503 d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri.to_string()),
504 (u"e2", fake_file_uri.to_string(), {}),
505 (u"e3", fake_file_uri.to_string(),
508 d.addCallback(lambda res:
509 self.shouldFail(ExistingChildError, "set_children-no",
510 "child 'e1' already exists",
512 [ (u"e1", other_file_uri),
513 (u"new", other_file_uri), ],
515 # and 'new' should not have been created
516 d.addCallback(lambda res: n.list())
517 d.addCallback(lambda children: self.failIf(u"new" in children))
518 d.addCallback(lambda res: n.get_metadata_for(u"e1"))
519 d.addCallback(lambda metadata:
520 self.failUnlessEqual(sorted(metadata.keys()),
522 d.addCallback(lambda res: n.get_metadata_for(u"e2"))
523 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
524 d.addCallback(lambda res: n.get_metadata_for(u"e3"))
525 d.addCallback(lambda metadata:
526 self.failUnlessEqual(metadata, {"key": "value"}))
528 d.addCallback(lambda res: n.delete(u"e1"))
529 d.addCallback(lambda res: n.delete(u"e2"))
530 d.addCallback(lambda res: n.delete(u"e3"))
532 # metadata through set_nodes()
533 d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
538 d.addCallback(lambda res:
539 self.shouldFail(ExistingChildError, "set_nodes-no",
540 "child 'f1' already exists",
545 # and 'new' should not have been created
546 d.addCallback(lambda res: n.list())
547 d.addCallback(lambda children: self.failIf(u"new" in children))
548 d.addCallback(lambda res: n.get_metadata_for(u"f1"))
549 d.addCallback(lambda metadata:
550 self.failUnlessEqual(sorted(metadata.keys()),
552 d.addCallback(lambda res: n.get_metadata_for(u"f2"))
553 d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
554 d.addCallback(lambda res: n.get_metadata_for(u"f3"))
555 d.addCallback(lambda metadata:
556 self.failUnlessEqual(metadata, {"key": "value"}))
558 d.addCallback(lambda res: n.delete(u"f1"))
559 d.addCallback(lambda res: n.delete(u"f2"))
560 d.addCallback(lambda res: n.delete(u"f3"))
563 d.addCallback(lambda res:
564 n.set_metadata_for(u"child",
565 {"tags": ["web2.0-compatible"]}))
566 d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
567 d.addCallback(lambda metadata:
568 self.failUnlessEqual(metadata,
569 {"tags": ["web2.0-compatible"]}))
572 self._start_timestamp = time.time()
573 d.addCallback(_start)
574 # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
575 # floats to hundredths (it uses str(num) instead of repr(num)).
576 # simplejson-1.7.3 does not have this bug. To prevent this bug
577 # from causing the test to fail, stall for more than a few
578 # hundredths of a second.
579 d.addCallback(self.stall, 0.1)
580 d.addCallback(lambda res: n.add_file(u"timestamps",
581 upload.Data("stamp me", convergence="some convergence string")))
582 d.addCallback(self.stall, 0.1)
584 self._stop_timestamp = time.time()
587 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
588 def _check_timestamp1(metadata):
589 self.failUnless("ctime" in metadata)
590 self.failUnless("mtime" in metadata)
591 self.failUnlessGreaterOrEqualThan(metadata["ctime"],
592 self._start_timestamp)
593 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
595 self.failUnlessGreaterOrEqualThan(metadata["mtime"],
596 self._start_timestamp)
597 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
599 # Our current timestamp rules say that replacing an existing
600 # child should preserve the 'ctime' but update the mtime
601 self._old_ctime = metadata["ctime"]
602 self._old_mtime = metadata["mtime"]
603 d.addCallback(_check_timestamp1)
604 d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
605 d.addCallback(lambda res: n.set_node(u"timestamps", n))
606 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
607 def _check_timestamp2(metadata):
608 self.failUnlessEqual(metadata["ctime"], self._old_ctime,
609 "%s != %s" % (metadata["ctime"],
611 self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
612 return n.delete(u"timestamps")
613 d.addCallback(_check_timestamp2)
615 # also make sure we can add/update timestamps on a
616 # previously-existing child that didn't have any, since there are
617 # a lot of 0.7.0-generated edges around out there
618 d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
619 d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
620 d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
621 d.addCallback(lambda metadata:
622 self.failUnlessEqual(sorted(metadata.keys()),
624 d.addCallback(lambda res: n.delete(u"no_timestamps"))
626 d.addCallback(lambda res: n.delete(u"subdir"))
627 d.addCallback(lambda old_child:
628 self.failUnlessEqual(old_child.get_uri(),
629 self.subdir.get_uri()))
631 d.addCallback(lambda res: n.list())
632 d.addCallback(lambda children:
633 self.failUnlessEqual(sorted(children.keys()),
636 uploadable = upload.Data("some data", convergence="some convergence string")
637 d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
638 d.addCallback(lambda newnode:
639 self.failUnless(IFileNode.providedBy(newnode)))
640 other_uploadable = upload.Data("some data", convergence="stuff")
641 d.addCallback(lambda res:
642 self.shouldFail(ExistingChildError, "add_file-no",
643 "child 'newfile' already exists",
644 n.add_file, u"newfile",
647 d.addCallback(lambda res: n.list())
648 d.addCallback(lambda children:
649 self.failUnlessEqual(sorted(children.keys()),
650 sorted([u"child", u"newfile"])))
651 d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
652 d.addCallback(lambda metadata:
653 self.failUnlessEqual(sorted(metadata.keys()),
656 d.addCallback(lambda res: n.add_file(u"newfile-metadata",
659 d.addCallback(lambda newnode:
660 self.failUnless(IFileNode.providedBy(newnode)))
661 d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
662 d.addCallback(lambda metadata:
663 self.failUnlessEqual(metadata, {"key": "value"}))
664 d.addCallback(lambda res: n.delete(u"newfile-metadata"))
666 d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
667 def _created2(subdir2):
668 self.subdir2 = subdir2
669 # put something in the way, to make sure it gets overwritten
670 return subdir2.add_file(u"child", upload.Data("overwrite me",
672 d.addCallback(_created2)
674 d.addCallback(lambda res:
675 n.move_child_to(u"child", self.subdir2))
676 d.addCallback(lambda res: n.list())
677 d.addCallback(lambda children:
678 self.failUnlessEqual(sorted(children.keys()),
679 sorted([u"newfile", u"subdir2"])))
680 d.addCallback(lambda res: self.subdir2.list())
681 d.addCallback(lambda children:
682 self.failUnlessEqual(sorted(children.keys()),
684 d.addCallback(lambda res: self.subdir2.get(u"child"))
685 d.addCallback(lambda child:
686 self.failUnlessEqual(child.get_uri(),
687 fake_file_uri.to_string()))
689 # move it back, using new_child_name=
690 d.addCallback(lambda res:
691 self.subdir2.move_child_to(u"child", n, u"newchild"))
692 d.addCallback(lambda res: n.list())
693 d.addCallback(lambda children:
694 self.failUnlessEqual(sorted(children.keys()),
695 sorted([u"newchild", u"newfile",
697 d.addCallback(lambda res: self.subdir2.list())
698 d.addCallback(lambda children:
699 self.failUnlessEqual(sorted(children.keys()), []))
701 # now make sure that we honor overwrite=False
702 d.addCallback(lambda res:
703 self.subdir2.set_uri(u"newchild", other_file_uri.to_string()))
705 d.addCallback(lambda res:
706 self.shouldFail(ExistingChildError, "move_child_to-no",
707 "child 'newchild' already exists",
708 n.move_child_to, u"newchild",
711 d.addCallback(lambda res: self.subdir2.get(u"newchild"))
712 d.addCallback(lambda child:
713 self.failUnlessEqual(child.get_uri(),
714 other_file_uri.to_string()))
720 d.addErrback(self.explain_error)
# Exercises the dirnode.DeepStats accumulator: counters, summed sizes,
# max-tracking, and the size-files-histogram bucketing.
# NOTE(review): this capture is damaged -- the numeric prefixes are original
# line numbers; the `s = ds.get_results()` lines (730-731, 741-742, 754) and
# most of the final expected-histogram literal (756-762, 764-765) are missing.
723 class DeepStats(unittest.TestCase):
724 def test_stats(self):
725 ds = dirnode.DeepStats(None)
726 ds.add("count-files")
727 ds.add("size-immutable-files", 123)
728 ds.histogram("size-files-histogram", 123)
729 ds.max("largest-directory", 444)
# lines 730-731 missing: presumably `s = ds.get_results()`
732 self.failUnlessEqual(s["count-files"], 1)
733 self.failUnlessEqual(s["size-immutable-files"], 123)
734 self.failUnlessEqual(s["largest-directory"], 444)
735 self.failUnlessEqual(s["count-literal-files"], 0)
737 ds.add("count-files")
738 ds.add("size-immutable-files", 321)
739 ds.histogram("size-files-histogram", 321)
740 ds.max("largest-directory", 2)
# lines 741-742 missing: presumably `s = ds.get_results()`
743 self.failUnlessEqual(s["count-files"], 2)
744 self.failUnlessEqual(s["size-immutable-files"], 444)
745 self.failUnlessEqual(s["largest-directory"], 444)
746 self.failUnlessEqual(s["count-literal-files"], 0)
747 self.failUnlessEqual(s["size-files-histogram"],
748 [ (101, 316, 1), (317, 1000, 1) ])
749 ds = dirnode.DeepStats(None)
750 for i in range(1, 1100):
751 ds.histogram("size-files-histogram", i)
752 ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
# line 754 missing: presumably `s = ds.get_results()`
755 self.failUnlessEqual(s["size-files-histogram"],
# lines 756-762 and 764-765 missing: the expected bucket list is truncated;
# only one (lower, upper, count) row survives. The `L` suffixes are
# Python 2 long-integer literals.
763 (3162277660169L, 10000000000000L, 1),
# A MutableFileNode test double that can raise UncoordinatedWriteError
# once, on demand, after an otherwise-successful upload (set the
# please_ucwe_after_next_upload flag to arm it).
766 class UCWEingMutableFileNode(MutableFileNode):
767 please_ucwe_after_next_upload = False
769 def _upload(self, new_contents, servermap):
770 d = MutableFileNode._upload(self, new_contents, servermap)
# line 771 missing: presumably `def _ucwe(res):` -- the next three lines
# read like the body of that callback (note the flag is cleared before
# raising, so the error fires exactly once)
772 if self.please_ucwe_after_next_upload:
773 self.please_ucwe_after_next_upload = False
774 raise UncoordinatedWriteError()
# lines 775-777 missing: presumably `return res`, `d.addCallback(_ucwe)`,
# and `return d`
# A directory node whose backing mutable-file class is the UCWE-raising
# test double above, so directory modifications can be made to fail with
# UncoordinatedWriteError on demand.
778 class UCWEingNewDirectoryNode(dirnode.NewDirectoryNode):
779 filenode_class = UCWEingMutableFileNode
# NOTE(review): this capture is damaged -- the numeric prefixes are original
# line numbers; lines 787, 792-793, 799, and 810+ are missing (including,
# presumably, the trailing `return d` that Trial needs to wait on).
782 class Deleter(SystemTestMixin, unittest.TestCase):
783 def test_retry(self):
784 # ticket #550, a dirnode.delete which experiences an
785 # UncoordinatedWriteError will fail with an incorrect "you're
786 # deleting something which isn't there" NoSuchChildError exception.
788 # to trigger this, we start by creating a directory with a single
789 # file in it. Then we create a special dirnode that uses a modified
790 # MutableFileNode which will raise UncoordinatedWriteError once on
791 # demand. We then call dirnode.delete, which ought to retry and
# lines 792-793 missing: presumably the end of this comment (e.g.
# "succeed despite the UCWE")
794 self.basedir = self.mktemp()
795 d = self.set_up_nodes()
796 d.addCallback(lambda ignored: self.clients[0].create_empty_dirnode())
797 small = upload.Data("Small enough for a LIT", None)
798 def _created_dir(dn):
# line 799 missing: presumably `self.root = dn` -- TODO confirm
800 self.root_uri = dn.get_uri()
801 return dn.add_file(u"file", small)
802 d.addCallback(_created_dir)
803 def _do_delete(ignored):
804 n = UCWEingNewDirectoryNode(self.clients[0]).init_from_uri(self.root_uri)
805 assert n._node.please_ucwe_after_next_upload == False
806 n._node.please_ucwe_after_next_upload = True
807 # This should succeed, not raise an exception
808 return n.delete(u"file")
809 d.addCallback(_do_delete)
# lines 810+ missing: presumably `return d`