import time

from zope.interface import implements

from twisted.trial import unittest
from twisted.internet import defer

from allmydata import uri, dirnode
from allmydata.immutable import upload
from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
     INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, \
     ExistingChildError, NoSuchChildError, \
     IDeepCheckResults, IDeepCheckAndRepairResults
from allmydata.mutable.filenode import MutableFileNode
from allmydata.mutable.common import UncoordinatedWriteError
from allmydata.util import hashutil, base32
from allmydata.monitor import Monitor
from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
     FakeDirectoryNode, create_chk_filenode, ErrorMixin
from allmydata.test.no_network import GridTestMixin
from allmydata.check_results import CheckResults, CheckAndRepairResults
import common_util as testutil
22 # to test dirnode.py, we want to construct a tree of real DirectoryNodes that
23 # contain pointers to fake files. We start with a fake MutableFileNode that
24 # stores all of its data in a static table.
# NOTE(review): this region of the paste lost its `class Marker:` header and
# several method bodies; reconstructed from the surrounding lines. Verify
# against the pristine upstream test_dirnode.py.
class Marker:
    """A fake file node that remembers only its URI.

    Stands in for real file nodes inside the fake directory tree so that
    dirnode's traversal/check logic can be exercised without the grid.
    """
    implements(IFileNode, IMutableFileNode) # sure, why not

    def __init__(self, nodeuri):
        # accept either a string cap or a URI object
        if not isinstance(nodeuri, str):
            nodeuri = nodeuri.to_string()
        self.nodeuri = nodeuri
        si = hashutil.tagged_hash("tag1", nodeuri)[:16]
        self.storage_index = si
        fp = hashutil.tagged_hash("tag2", nodeuri)
        self.verifieruri = uri.SSKVerifierURI(storage_index=si, fingerprint=fp)

    def get_uri(self):
        return self.nodeuri
    def get_readonly_uri(self):
        return self.nodeuri
    def get_verify_cap(self):
        return self.verifieruri
    def get_storage_index(self):
        return self.storage_index

    def check(self, monitor, verify=False, add_lease=False):
        """Always report a healthy, recoverable node."""
        r = CheckResults(uri.from_string(self.nodeuri), None)
        r.set_healthy(True)
        r.set_recoverable(True)
        return defer.succeed(r)

    def check_and_repair(self, monitor, verify=False, add_lease=False):
        # BUGFIX(review): the original called self.check(verify), passing the
        # verify flag in the 'monitor' slot; pass both arguments through.
        d = self.check(monitor, verify)
        def _got(cr):
            r = CheckAndRepairResults(None)
            # no repair ever happens, so pre == post
            r.pre_repair_results = r.post_repair_results = cr
            return r
        d.addCallback(_got)
        return d
60 # dirnode requires three methods from the client: upload(),
61 # create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
62 # only used by the convenience composite method add_file().
# NOTE(review): the `class FakeClient` header and a few interior lines were
# dropped from this paste; reconstructed from context — verify upstream.
class FakeClient:
    """Stub client providing the three methods dirnode requires:
    upload(), create_node_from_uri(), and create_empty_dirnode().
    upload() is only used by the composite convenience method add_file().
    """
    implements(IClient)

    def upload(self, uploadable):
        """Read all of `uploadable` and wrap it in a fake CHK filenode.

        Returns a Deferred firing with an UploadResults whose .uri is the
        new node's cap.
        """
        d = uploadable.get_size()
        d.addCallback(lambda size: uploadable.read(size))
        def _got_data(datav):
            data = "".join(datav)
            n = create_chk_filenode(self, data)
            results = upload.UploadResults()
            results.uri = n.get_uri()
            return results
        d.addCallback(_got_data)
        return d

    def create_node_from_uri(self, u, readcap=None):
        """Directory caps become FakeDirectoryNodes; anything else a Marker."""
        if not u:
            u = readcap
        u = IURI(u)
        if (INewDirectoryURI.providedBy(u)
            or IReadonlyNewDirectoryURI.providedBy(u)):
            return FakeDirectoryNode(self).init_from_uri(u)
        return Marker(u.to_string())

    def create_empty_dirnode(self):
        """Create a fresh fake directory node; fires with the node itself."""
        n = FakeDirectoryNode(self)
        d = n.create()
        d.addCallback(lambda res: n)
        return d
class Dirnode(unittest.TestCase,
              testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.

    # NOTE(review): the `def setUp` line was dropped from this paste;
    # restored so self.client is populated before each test.
    def setUp(self):
        self.client = FakeClient()
101 def test_basic(self):
102 d = self.client.create_empty_dirnode()
104 self.failUnless(isinstance(res, FakeDirectoryNode))
106 self.failUnless("RW" in rep)
110 def test_check(self):
111 d = self.client.create_empty_dirnode()
112 d.addCallback(lambda dn: dn.check(Monitor()))
114 self.failUnless(res.is_healthy())
118 def _test_deepcheck_create(self):
119 # create a small tree with a loop, and some non-directories
123 # root/subdir/link -> root
125 d = self.client.create_empty_dirnode()
126 def _created_root(rootnode):
127 self._rootnode = rootnode
128 return rootnode.create_empty_directory(u"subdir")
129 d.addCallback(_created_root)
130 def _created_subdir(subdir):
131 self._subdir = subdir
132 d = subdir.add_file(u"file1", upload.Data("data", None))
133 d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
134 d.addCallback(lambda res: self.client.create_empty_dirnode())
135 d.addCallback(lambda dn:
136 self._rootnode.set_uri(u"rodir",
137 dn.get_readonly_uri()))
139 d.addCallback(_created_subdir)
141 return self._rootnode
145 def test_deepcheck(self):
146 d = self._test_deepcheck_create()
147 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
148 def _check_results(r):
149 self.failUnless(IDeepCheckResults.providedBy(r))
151 self.failUnlessEqual(c,
152 {"count-objects-checked": 4,
153 "count-objects-healthy": 4,
154 "count-objects-unhealthy": 0,
155 "count-objects-unrecoverable": 0,
156 "count-corrupt-shares": 0,
158 self.failIf(r.get_corrupt_shares())
159 self.failUnlessEqual(len(r.get_all_results()), 4)
160 d.addCallback(_check_results)
163 def test_deepcheck_and_repair(self):
164 d = self._test_deepcheck_create()
165 d.addCallback(lambda rootnode:
166 rootnode.start_deep_check_and_repair().when_done())
167 def _check_results(r):
168 self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
170 self.failUnlessEqual(c,
171 {"count-objects-checked": 4,
172 "count-objects-healthy-pre-repair": 4,
173 "count-objects-unhealthy-pre-repair": 0,
174 "count-objects-unrecoverable-pre-repair": 0,
175 "count-corrupt-shares-pre-repair": 0,
176 "count-objects-healthy-post-repair": 4,
177 "count-objects-unhealthy-post-repair": 0,
178 "count-objects-unrecoverable-post-repair": 0,
179 "count-corrupt-shares-post-repair": 0,
180 "count-repairs-attempted": 0,
181 "count-repairs-successful": 0,
182 "count-repairs-unsuccessful": 0,
184 self.failIf(r.get_corrupt_shares())
185 self.failIf(r.get_remaining_corrupt_shares())
186 self.failUnlessEqual(len(r.get_all_results()), 4)
187 d.addCallback(_check_results)
190 def _mark_file_bad(self, rootnode):
191 si = IURI(rootnode.get_uri())._filenode_uri.storage_index
192 rootnode._node.bad_shares[si] = "unhealthy"
195 def test_deepcheck_problems(self):
196 d = self._test_deepcheck_create()
197 d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
198 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
199 def _check_results(r):
201 self.failUnlessEqual(c,
202 {"count-objects-checked": 4,
203 "count-objects-healthy": 3,
204 "count-objects-unhealthy": 1,
205 "count-objects-unrecoverable": 0,
206 "count-corrupt-shares": 0,
208 #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
209 d.addCallback(_check_results)
212 def test_readonly(self):
213 fileuri = make_chk_file_uri(1234)
214 filenode = self.client.create_node_from_uri(fileuri)
215 uploadable = upload.Data("some data", convergence="some convergence string")
217 d = self.client.create_empty_dirnode()
219 d2 = rw_dn.set_uri(u"child", fileuri.to_string())
220 d2.addCallback(lambda res: rw_dn)
222 d.addCallback(_created)
225 ro_uri = rw_dn.get_readonly_uri()
226 ro_dn = self.client.create_node_from_uri(ro_uri)
227 self.failUnless(ro_dn.is_readonly())
228 self.failUnless(ro_dn.is_mutable())
230 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
231 ro_dn.set_uri, u"newchild", fileuri.to_string())
232 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
233 ro_dn.set_node, u"newchild", filenode)
234 self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
235 ro_dn.set_nodes, [ (u"newchild", filenode) ])
236 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
237 ro_dn.add_file, u"newchild", uploadable)
238 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
239 ro_dn.delete, u"child")
240 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
241 ro_dn.create_empty_directory, u"newchild")
242 self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
243 ro_dn.set_metadata_for, u"child", {})
244 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
245 ro_dn.move_child_to, u"child", rw_dn)
246 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
247 rw_dn.move_child_to, u"child", ro_dn)
249 d.addCallback(_ready)
250 def _listed(children):
251 self.failUnless(u"child" in children)
252 d.addCallback(_listed)
255 def failUnlessGreaterThan(self, a, b):
256 self.failUnless(a > b, "%r should be > %r" % (a, b))
258 def failUnlessGreaterOrEqualThan(self, a, b):
259 self.failUnless(a >= b, "%r should be >= %r" % (a, b))
261 def test_create(self):
262 self.expected_manifest = []
263 self.expected_verifycaps = set()
264 self.expected_storage_indexes = set()
266 d = self.client.create_empty_dirnode()
269 self.failUnless(n.is_mutable())
272 self.failUnless(u.startswith("URI:DIR2:"), u)
273 u_ro = n.get_readonly_uri()
274 self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
275 u_v = n.get_verify_cap().to_string()
276 self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
277 u_r = n.get_repair_cap().to_string()
278 self.failUnlessEqual(u_r, u)
279 self.expected_manifest.append( ((), u) )
280 self.expected_verifycaps.add(u_v)
281 si = n.get_storage_index()
282 self.expected_storage_indexes.add(base32.b2a(si))
283 expected_si = n._uri._filenode_uri.storage_index
284 self.failUnlessEqual(si, expected_si)
287 d.addCallback(lambda res: self.failUnlessEqual(res, {}))
288 d.addCallback(lambda res: n.has_child(u"missing"))
289 d.addCallback(lambda res: self.failIf(res))
290 fake_file_uri = make_mutable_file_uri()
291 other_file_uri = make_mutable_file_uri()
292 m = Marker(fake_file_uri)
293 ffu_v = m.get_verify_cap().to_string()
294 self.expected_manifest.append( ((u"child",) , m.get_uri()) )
295 self.expected_verifycaps.add(ffu_v)
296 self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
297 d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri.to_string()))
298 d.addCallback(lambda res:
299 self.shouldFail(ExistingChildError, "set_uri-no",
300 "child 'child' already exists",
301 n.set_uri, u"child", other_file_uri.to_string(),
306 d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
310 # /subdir = directory
311 def _created(subdir):
312 self.failUnless(isinstance(subdir, FakeDirectoryNode))
314 new_v = subdir.get_verify_cap().to_string()
315 assert isinstance(new_v, str)
316 self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
317 self.expected_verifycaps.add(new_v)
318 si = subdir.get_storage_index()
319 self.expected_storage_indexes.add(base32.b2a(si))
320 d.addCallback(_created)
322 d.addCallback(lambda res:
323 self.shouldFail(ExistingChildError, "mkdir-no",
324 "child 'subdir' already exists",
325 n.create_empty_directory, u"subdir",
328 d.addCallback(lambda res: n.list())
329 d.addCallback(lambda children:
330 self.failUnlessEqual(sorted(children.keys()),
331 sorted([u"child", u"subdir"])))
333 d.addCallback(lambda res: n.start_deep_stats().when_done())
334 def _check_deepstats(stats):
335 self.failUnless(isinstance(stats, dict))
336 expected = {"count-immutable-files": 0,
337 "count-mutable-files": 1,
338 "count-literal-files": 0,
340 "count-directories": 2,
341 "size-immutable-files": 0,
342 "size-literal-files": 0,
343 #"size-directories": 616, # varies
344 #"largest-directory": 616,
345 "largest-directory-children": 2,
346 "largest-immutable-file": 0,
348 for k,v in expected.iteritems():
349 self.failUnlessEqual(stats[k], v,
350 "stats[%s] was %s, not %s" %
352 self.failUnless(stats["size-directories"] > 500,
353 stats["size-directories"])
354 self.failUnless(stats["largest-directory"] > 500,
355 stats["largest-directory"])
356 self.failUnlessEqual(stats["size-files-histogram"], [])
357 d.addCallback(_check_deepstats)
359 d.addCallback(lambda res: n.build_manifest().when_done())
360 def _check_manifest(res):
361 manifest = res["manifest"]
362 self.failUnlessEqual(sorted(manifest),
363 sorted(self.expected_manifest))
365 _check_deepstats(stats)
366 self.failUnlessEqual(self.expected_verifycaps,
368 self.failUnlessEqual(self.expected_storage_indexes,
369 res["storage-index"])
370 d.addCallback(_check_manifest)
372 def _add_subsubdir(res):
373 return self.subdir.create_empty_directory(u"subsubdir")
374 d.addCallback(_add_subsubdir)
377 # /subdir = directory
378 # /subdir/subsubdir = directory
379 d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
380 d.addCallback(lambda subsubdir:
381 self.failUnless(isinstance(subsubdir,
383 d.addCallback(lambda res: n.get_child_at_path(u""))
384 d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
387 d.addCallback(lambda res: n.get_metadata_for(u"child"))
388 d.addCallback(lambda metadata:
389 self.failUnlessEqual(set(metadata.keys()),
390 set(["tahoe", "ctime", "mtime"])))
392 d.addCallback(lambda res:
393 self.shouldFail(NoSuchChildError, "gcamap-no",
395 n.get_child_and_metadata_at_path,
397 d.addCallback(lambda res:
398 n.get_child_and_metadata_at_path(u""))
399 def _check_child_and_metadata1(res):
400 child, metadata = res
401 self.failUnless(isinstance(child, FakeDirectoryNode))
402 # edge-metadata needs at least one path segment
403 self.failUnlessEqual(sorted(metadata.keys()), [])
404 d.addCallback(_check_child_and_metadata1)
405 d.addCallback(lambda res:
406 n.get_child_and_metadata_at_path(u"child"))
408 def _check_child_and_metadata2(res):
409 child, metadata = res
410 self.failUnlessEqual(child.get_uri(),
411 fake_file_uri.to_string())
412 self.failUnlessEqual(set(metadata.keys()),
413 set(["tahoe", "ctime", "mtime"]))
414 d.addCallback(_check_child_and_metadata2)
416 d.addCallback(lambda res:
417 n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
418 def _check_child_and_metadata3(res):
419 child, metadata = res
420 self.failUnless(isinstance(child, FakeDirectoryNode))
421 self.failUnlessEqual(set(metadata.keys()),
422 set(["tahoe", "ctime", "mtime"]))
423 d.addCallback(_check_child_and_metadata3)
426 # it should be possible to add a child without any metadata
427 d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri.to_string(), {}))
428 d.addCallback(lambda res: n.get_metadata_for(u"c2"))
429 d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
431 # You can't override the link timestamps.
432 d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri.to_string(), { 'tahoe': {'linkcrtime': "bogus"}}))
433 d.addCallback(lambda res: n.get_metadata_for(u"c2"))
434 def _has_good_linkcrtime(metadata):
435 self.failUnless(metadata.has_key('tahoe'))
436 self.failUnless(metadata['tahoe'].has_key('linkcrtime'))
437 self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus')
438 d.addCallback(_has_good_linkcrtime)
440 # if we don't set any defaults, the child should get timestamps
441 d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri.to_string()))
442 d.addCallback(lambda res: n.get_metadata_for(u"c3"))
443 d.addCallback(lambda metadata:
444 self.failUnlessEqual(set(metadata.keys()),
445 set(["tahoe", "ctime", "mtime"])))
447 # or we can add specific metadata at set_uri() time, which
448 # overrides the timestamps
449 d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri.to_string(),
451 d.addCallback(lambda res: n.get_metadata_for(u"c4"))
452 d.addCallback(lambda metadata:
453 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
454 (metadata['key'] == "value"), metadata))
456 d.addCallback(lambda res: n.delete(u"c2"))
457 d.addCallback(lambda res: n.delete(u"c3"))
458 d.addCallback(lambda res: n.delete(u"c4"))
460 # set_node + metadata
461 # it should be possible to add a child without any metadata
462 d.addCallback(lambda res: n.set_node(u"d2", n, {}))
463 d.addCallback(lambda res: self.client.create_empty_dirnode())
464 d.addCallback(lambda n2:
465 self.shouldFail(ExistingChildError, "set_node-no",
466 "child 'd2' already exists",
467 n.set_node, u"d2", n2,
469 d.addCallback(lambda res: n.get_metadata_for(u"d2"))
470 d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
472 # if we don't set any defaults, the child should get timestamps
473 d.addCallback(lambda res: n.set_node(u"d3", n))
474 d.addCallback(lambda res: n.get_metadata_for(u"d3"))
475 d.addCallback(lambda metadata:
476 self.failUnlessEqual(set(metadata.keys()),
477 set(["tahoe", "ctime", "mtime"])))
479 # or we can add specific metadata at set_node() time, which
480 # overrides the timestamps
481 d.addCallback(lambda res: n.set_node(u"d4", n,
483 d.addCallback(lambda res: n.get_metadata_for(u"d4"))
484 d.addCallback(lambda metadata:
485 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
486 (metadata['key'] == "value"), metadata))
488 d.addCallback(lambda res: n.delete(u"d2"))
489 d.addCallback(lambda res: n.delete(u"d3"))
490 d.addCallback(lambda res: n.delete(u"d4"))
492 # metadata through set_children()
493 d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri.to_string()),
494 (u"e2", fake_file_uri.to_string(), {}),
495 (u"e3", fake_file_uri.to_string(),
498 d.addCallback(lambda res:
499 self.shouldFail(ExistingChildError, "set_children-no",
500 "child 'e1' already exists",
502 [ (u"e1", other_file_uri),
503 (u"new", other_file_uri), ],
505 # and 'new' should not have been created
506 d.addCallback(lambda res: n.list())
507 d.addCallback(lambda children: self.failIf(u"new" in children))
508 d.addCallback(lambda res: n.get_metadata_for(u"e1"))
509 d.addCallback(lambda metadata:
510 self.failUnlessEqual(set(metadata.keys()),
511 set(["tahoe", "ctime", "mtime"])))
512 d.addCallback(lambda res: n.get_metadata_for(u"e2"))
513 d.addCallback(lambda metadata:
514 self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
515 d.addCallback(lambda res: n.get_metadata_for(u"e3"))
516 d.addCallback(lambda metadata:
517 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"]))
518 and (metadata['key'] == "value"), metadata))
520 d.addCallback(lambda res: n.delete(u"e1"))
521 d.addCallback(lambda res: n.delete(u"e2"))
522 d.addCallback(lambda res: n.delete(u"e3"))
524 # metadata through set_nodes()
525 d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
530 d.addCallback(lambda res:
531 self.shouldFail(ExistingChildError, "set_nodes-no",
532 "child 'f1' already exists",
537 # and 'new' should not have been created
538 d.addCallback(lambda res: n.list())
539 d.addCallback(lambda children: self.failIf(u"new" in children))
540 d.addCallback(lambda res: n.get_metadata_for(u"f1"))
541 d.addCallback(lambda metadata:
542 self.failUnlessEqual(set(metadata.keys()),
543 set(["tahoe", "ctime", "mtime"])))
544 d.addCallback(lambda res: n.get_metadata_for(u"f2"))
546 lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
547 d.addCallback(lambda res: n.get_metadata_for(u"f3"))
548 d.addCallback(lambda metadata:
549 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
550 (metadata['key'] == "value"), metadata))
552 d.addCallback(lambda res: n.delete(u"f1"))
553 d.addCallback(lambda res: n.delete(u"f2"))
554 d.addCallback(lambda res: n.delete(u"f3"))
557 d.addCallback(lambda res:
558 n.set_metadata_for(u"child",
559 {"tags": ["web2.0-compatible"]}))
560 d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
561 d.addCallback(lambda metadata:
562 self.failUnlessEqual(metadata,
563 {"tags": ["web2.0-compatible"]}))
566 self._start_timestamp = time.time()
567 d.addCallback(_start)
568 # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
569 # floats to hundredeths (it uses str(num) instead of repr(num)).
570 # simplejson-1.7.3 does not have this bug. To prevent this bug
571 # from causing the test to fail, stall for more than a few
572 # hundrededths of a second.
573 d.addCallback(self.stall, 0.1)
574 d.addCallback(lambda res: n.add_file(u"timestamps",
575 upload.Data("stamp me", convergence="some convergence string")))
576 d.addCallback(self.stall, 0.1)
578 self._stop_timestamp = time.time()
581 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
582 def _check_timestamp1(metadata):
583 self.failUnless("ctime" in metadata)
584 self.failUnless("mtime" in metadata)
585 self.failUnlessGreaterOrEqualThan(metadata["ctime"],
586 self._start_timestamp)
587 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
589 self.failUnlessGreaterOrEqualThan(metadata["mtime"],
590 self._start_timestamp)
591 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
593 # Our current timestamp rules say that replacing an existing
594 # child should preserve the 'ctime' but update the mtime
595 self._old_ctime = metadata["ctime"]
596 self._old_mtime = metadata["mtime"]
597 d.addCallback(_check_timestamp1)
598 d.addCallback(self.stall, 2.0) # accomodate low-res timestamps
599 d.addCallback(lambda res: n.set_node(u"timestamps", n))
600 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
601 def _check_timestamp2(metadata):
602 self.failUnlessEqual(metadata["ctime"], self._old_ctime,
603 "%s != %s" % (metadata["ctime"],
605 self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
606 return n.delete(u"timestamps")
607 d.addCallback(_check_timestamp2)
609 # also make sure we can add/update timestamps on a
610 # previously-existing child that didn't have any, since there are
611 # a lot of 0.7.0-generated edges around out there
612 d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
613 d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
614 d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
615 d.addCallback(lambda metadata:
616 self.failUnlessEqual(set(metadata.keys()),
617 set(["tahoe", "ctime", "mtime"])))
618 d.addCallback(lambda res: n.delete(u"no_timestamps"))
620 d.addCallback(lambda res: n.delete(u"subdir"))
621 d.addCallback(lambda old_child:
622 self.failUnlessEqual(old_child.get_uri(),
623 self.subdir.get_uri()))
625 d.addCallback(lambda res: n.list())
626 d.addCallback(lambda children:
627 self.failUnlessEqual(sorted(children.keys()),
630 uploadable = upload.Data("some data", convergence="some convergence string")
631 d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
632 d.addCallback(lambda newnode:
633 self.failUnless(IFileNode.providedBy(newnode)))
634 other_uploadable = upload.Data("some data", convergence="stuff")
635 d.addCallback(lambda res:
636 self.shouldFail(ExistingChildError, "add_file-no",
637 "child 'newfile' already exists",
638 n.add_file, u"newfile",
641 d.addCallback(lambda res: n.list())
642 d.addCallback(lambda children:
643 self.failUnlessEqual(sorted(children.keys()),
644 sorted([u"child", u"newfile"])))
645 d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
646 d.addCallback(lambda metadata:
647 self.failUnlessEqual(set(metadata.keys()),
648 set(["tahoe", "ctime", "mtime"])))
650 d.addCallback(lambda res: n.add_file(u"newfile-metadata",
653 d.addCallback(lambda newnode:
654 self.failUnless(IFileNode.providedBy(newnode)))
655 d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
656 d.addCallback(lambda metadata:
657 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
658 (metadata['key'] == "value"), metadata))
659 d.addCallback(lambda res: n.delete(u"newfile-metadata"))
661 d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
662 def _created2(subdir2):
663 self.subdir2 = subdir2
664 # put something in the way, to make sure it gets overwritten
665 return subdir2.add_file(u"child", upload.Data("overwrite me",
667 d.addCallback(_created2)
669 d.addCallback(lambda res:
670 n.move_child_to(u"child", self.subdir2))
671 d.addCallback(lambda res: n.list())
672 d.addCallback(lambda children:
673 self.failUnlessEqual(sorted(children.keys()),
674 sorted([u"newfile", u"subdir2"])))
675 d.addCallback(lambda res: self.subdir2.list())
676 d.addCallback(lambda children:
677 self.failUnlessEqual(sorted(children.keys()),
679 d.addCallback(lambda res: self.subdir2.get(u"child"))
680 d.addCallback(lambda child:
681 self.failUnlessEqual(child.get_uri(),
682 fake_file_uri.to_string()))
684 # move it back, using new_child_name=
685 d.addCallback(lambda res:
686 self.subdir2.move_child_to(u"child", n, u"newchild"))
687 d.addCallback(lambda res: n.list())
688 d.addCallback(lambda children:
689 self.failUnlessEqual(sorted(children.keys()),
690 sorted([u"newchild", u"newfile",
692 d.addCallback(lambda res: self.subdir2.list())
693 d.addCallback(lambda children:
694 self.failUnlessEqual(sorted(children.keys()), []))
696 # now make sure that we honor overwrite=False
697 d.addCallback(lambda res:
698 self.subdir2.set_uri(u"newchild", other_file_uri.to_string()))
700 d.addCallback(lambda res:
701 self.shouldFail(ExistingChildError, "move_child_to-no",
702 "child 'newchild' already exists",
703 n.move_child_to, u"newchild",
706 d.addCallback(lambda res: self.subdir2.get(u"newchild"))
707 d.addCallback(lambda child:
708 self.failUnlessEqual(child.get_uri(),
709 other_file_uri.to_string()))
715 d.addErrback(self.explain_error)
718 class DeepStats(unittest.TestCase):
719 timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
720 def test_stats(self):
721 ds = dirnode.DeepStats(None)
722 ds.add("count-files")
723 ds.add("size-immutable-files", 123)
724 ds.histogram("size-files-histogram", 123)
725 ds.max("largest-directory", 444)
728 self.failUnlessEqual(s["count-files"], 1)
729 self.failUnlessEqual(s["size-immutable-files"], 123)
730 self.failUnlessEqual(s["largest-directory"], 444)
731 self.failUnlessEqual(s["count-literal-files"], 0)
733 ds.add("count-files")
734 ds.add("size-immutable-files", 321)
735 ds.histogram("size-files-histogram", 321)
736 ds.max("largest-directory", 2)
739 self.failUnlessEqual(s["count-files"], 2)
740 self.failUnlessEqual(s["size-immutable-files"], 444)
741 self.failUnlessEqual(s["largest-directory"], 444)
742 self.failUnlessEqual(s["count-literal-files"], 0)
743 self.failUnlessEqual(s["size-files-histogram"],
744 [ (101, 316, 1), (317, 1000, 1) ])
746 ds = dirnode.DeepStats(None)
747 for i in range(1, 1100):
748 ds.histogram("size-files-histogram", i)
749 ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
751 self.failUnlessEqual(s["size-files-histogram"],
759 (3162277660169L, 10000000000000L, 1),
class UCWEingMutableFileNode(MutableFileNode):
    """MutableFileNode that raises UncoordinatedWriteError once, on demand,
    after its next successful upload. Used to exercise dirnode's retry
    logic (ticket #550)."""
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        d = MutableFileNode._upload(self, new_contents, servermap)
        # NOTE(review): the callback wrapper lines were dropped from the
        # paste; reconstructed so the UCWE fires *after* the upload lands.
        def _ucwe(res):
            if self.please_ucwe_after_next_upload:
                self.please_ucwe_after_next_upload = False
                raise UncoordinatedWriteError()
            return res
        d.addCallback(_ucwe)
        return d
class UCWEingNewDirectoryNode(dirnode.NewDirectoryNode):
    """Directory node whose backing mutable file can raise an
    UncoordinatedWriteError on demand (see UCWEingMutableFileNode)."""
    filenode_class = UCWEingMutableFileNode
778 class Deleter(GridTestMixin, unittest.TestCase):
779 timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.
780 def test_retry(self):
781 # ticket #550, a dirnode.delete which experiences an
782 # UncoordinatedWriteError will fail with an incorrect "you're
783 # deleting something which isn't there" NoSuchChildError exception.
785 # to trigger this, we start by creating a directory with a single
786 # file in it. Then we create a special dirnode that uses a modified
787 # MutableFileNode which will raise UncoordinatedWriteError once on
788 # demand. We then call dirnode.delete, which ought to retry and
791 self.basedir = self.mktemp()
793 c0 = self.g.clients[0]
794 d = c0.create_empty_dirnode()
795 small = upload.Data("Small enough for a LIT", None)
796 def _created_dir(dn):
798 self.root_uri = dn.get_uri()
799 return dn.add_file(u"file", small)
800 d.addCallback(_created_dir)
801 def _do_delete(ignored):
802 n = UCWEingNewDirectoryNode(c0).init_from_uri(self.root_uri)
803 assert n._node.please_ucwe_after_next_upload == False
804 n._node.please_ucwe_after_next_upload = True
805 # This should succeed, not raise an exception
806 return n.delete(u"file")
807 d.addCallback(_do_delete)