3 from twisted.trial import unittest
4 from twisted.internet import defer
5 from allmydata import uri, dirnode
6 from allmydata.client import Client
7 from allmydata.immutable import upload
8 from allmydata.interfaces import IFileNode, \
9 ExistingChildError, NoSuchChildError, \
10 IDeepCheckResults, IDeepCheckAndRepairResults, CannotPackUnknownNodeError
11 from allmydata.mutable.filenode import MutableFileNode
12 from allmydata.mutable.common import UncoordinatedWriteError
13 from allmydata.util import hashutil, base32
14 from allmydata.monitor import Monitor
15 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
17 from allmydata.test.no_network import GridTestMixin
18 from allmydata.unknown import UnknownNode
19 from allmydata.nodemaker import NodeMaker
20 from base64 import b32decode
21 import common_util as testutil
23 class Dirnode(GridTestMixin, unittest.TestCase,
24 testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
25 timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
28 self.basedir = "dirnode/Dirnode/test_basic"
31 d = c.create_dirnode()
33 self.failUnless(isinstance(res, dirnode.DirectoryNode))
35 self.failUnless("RW" in rep)
40 self.basedir = "dirnode/Dirnode/test_check"
43 d = c.create_dirnode()
44 d.addCallback(lambda dn: dn.check(Monitor()))
46 self.failUnless(res.is_healthy())
50 def _test_deepcheck_create(self):
51 # create a small tree with a loop, and some non-directories
55 # root/subdir/link -> root
58 d = c.create_dirnode()
59 def _created_root(rootnode):
60 self._rootnode = rootnode
61 return rootnode.create_empty_directory(u"subdir")
62 d.addCallback(_created_root)
63 def _created_subdir(subdir):
65 d = subdir.add_file(u"file1", upload.Data("data"*100, None))
66 d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
67 d.addCallback(lambda res: c.create_dirnode())
68 d.addCallback(lambda dn:
69 self._rootnode.set_uri(u"rodir",
71 dn.get_readonly_uri()))
73 d.addCallback(_created_subdir)
79 def test_deepcheck(self):
80 self.basedir = "dirnode/Dirnode/test_deepcheck"
82 d = self._test_deepcheck_create()
83 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
84 def _check_results(r):
85 self.failUnless(IDeepCheckResults.providedBy(r))
87 self.failUnlessEqual(c,
88 {"count-objects-checked": 4,
89 "count-objects-healthy": 4,
90 "count-objects-unhealthy": 0,
91 "count-objects-unrecoverable": 0,
92 "count-corrupt-shares": 0,
94 self.failIf(r.get_corrupt_shares())
95 self.failUnlessEqual(len(r.get_all_results()), 4)
96 d.addCallback(_check_results)
99 def test_deepcheck_and_repair(self):
100 self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair"
102 d = self._test_deepcheck_create()
103 d.addCallback(lambda rootnode:
104 rootnode.start_deep_check_and_repair().when_done())
105 def _check_results(r):
106 self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
108 self.failUnlessEqual(c,
109 {"count-objects-checked": 4,
110 "count-objects-healthy-pre-repair": 4,
111 "count-objects-unhealthy-pre-repair": 0,
112 "count-objects-unrecoverable-pre-repair": 0,
113 "count-corrupt-shares-pre-repair": 0,
114 "count-objects-healthy-post-repair": 4,
115 "count-objects-unhealthy-post-repair": 0,
116 "count-objects-unrecoverable-post-repair": 0,
117 "count-corrupt-shares-post-repair": 0,
118 "count-repairs-attempted": 0,
119 "count-repairs-successful": 0,
120 "count-repairs-unsuccessful": 0,
122 self.failIf(r.get_corrupt_shares())
123 self.failIf(r.get_remaining_corrupt_shares())
124 self.failUnlessEqual(len(r.get_all_results()), 4)
125 d.addCallback(_check_results)
128 def _mark_file_bad(self, rootnode):
129 si = rootnode.get_storage_index()
130 self.delete_shares_numbered(rootnode.get_uri(), [0])
133 def test_deepcheck_problems(self):
134 self.basedir = "dirnode/Dirnode/test_deepcheck_problems"
136 d = self._test_deepcheck_create()
137 d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
138 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
139 def _check_results(r):
141 self.failUnlessEqual(c,
142 {"count-objects-checked": 4,
143 "count-objects-healthy": 3,
144 "count-objects-unhealthy": 1,
145 "count-objects-unrecoverable": 0,
146 "count-corrupt-shares": 0,
148 #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
149 d.addCallback(_check_results)
152 def test_readonly(self):
153 self.basedir = "dirnode/Dirnode/test_readonly"
155 c = self.g.clients[0]
157 filecap = make_chk_file_uri(1234)
158 filenode = nm.create_from_cap(filecap)
159 uploadable = upload.Data("some data", convergence="some convergence string")
161 d = c.create_dirnode()
163 d2 = rw_dn.set_uri(u"child", filecap, filecap)
164 d2.addCallback(lambda res: rw_dn)
166 d.addCallback(_created)
169 ro_uri = rw_dn.get_readonly_uri()
170 ro_dn = c.create_node_from_uri(ro_uri)
171 self.failUnless(ro_dn.is_readonly())
172 self.failUnless(ro_dn.is_mutable())
174 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
175 ro_dn.set_uri, u"newchild", filecap, filecap)
176 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
177 ro_dn.set_node, u"newchild", filenode)
178 self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
179 ro_dn.set_nodes, [ (u"newchild", filenode) ])
180 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
181 ro_dn.add_file, u"newchild", uploadable)
182 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
183 ro_dn.delete, u"child")
184 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
185 ro_dn.create_empty_directory, u"newchild")
186 self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
187 ro_dn.set_metadata_for, u"child", {})
188 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
189 ro_dn.move_child_to, u"child", rw_dn)
190 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
191 rw_dn.move_child_to, u"child", ro_dn)
193 d.addCallback(_ready)
194 def _listed(children):
195 self.failUnless(u"child" in children)
196 d.addCallback(_listed)
199 def failUnlessGreaterThan(self, a, b):
200 self.failUnless(a > b, "%r should be > %r" % (a, b))
202 def failUnlessGreaterOrEqualThan(self, a, b):
203 self.failUnless(a >= b, "%r should be >= %r" % (a, b))
    def test_create(self):
        """End-to-end exercise of DirectoryNode: set_uri/set_node/
        set_children/set_nodes, metadata handling, link timestamps,
        deep-stats, manifest building, move_child_to, add_file and delete.

        NOTE(review): this method reached review with many source lines
        missing (most notably the callback wrapper that binds `n`, the new
        dirnode, and `u`, its writecap, plus several argument lists and
        closing parentheses). The statements below are preserved exactly as
        found; restore the gaps from upstream history before running.
        """
        self.basedir = "dirnode/Dirnode/test_create"
        c = self.g.clients[0]
        self.expected_manifest = []
        self.expected_verifycaps = set()
        self.expected_storage_indexes = set()
        d = c.create_dirnode()
        # NOTE(review): callback binding `n` and `u` is missing here
        self.failUnless(n.is_mutable())
        self.failUnless(u.startswith("URI:DIR2:"), u)
        u_ro = n.get_readonly_uri()
        self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
        u_v = n.get_verify_cap().to_string()
        self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
        u_r = n.get_repair_cap().to_string()
        # a mutable directory repairs with its full writecap
        self.failUnlessEqual(u_r, u)
        self.expected_manifest.append( ((), u) )
        self.expected_verifycaps.add(u_v)
        si = n.get_storage_index()
        self.expected_storage_indexes.add(base32.b2a(si))
        expected_si = n._uri._filenode_uri.storage_index
        self.failUnlessEqual(si, expected_si)
        # a fresh directory lists empty and has no children
        d.addCallback(lambda res: self.failUnlessEqual(res, {}))
        d.addCallback(lambda res: n.has_child(u"missing"))
        d.addCallback(lambda res: self.failIf(res))
        fake_file_uri = make_mutable_file_uri()
        other_file_uri = make_mutable_file_uri()
        m = c.nodemaker.create_from_cap(fake_file_uri)
        ffu_v = m.get_verify_cap().to_string()
        self.expected_manifest.append( ((u"child",) , m.get_uri()) )
        self.expected_verifycaps.add(ffu_v)
        self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
        d.addCallback(lambda res: n.set_uri(u"child",
                                            fake_file_uri, fake_file_uri))
        # overwrite=False: adding a duplicate name must fail
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "set_uri-no",
                                      "child 'child' already exists",
                                      other_file_uri, other_file_uri,
        d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
        # /subdir = directory
        def _created(subdir):
            self.failUnless(isinstance(subdir, dirnode.DirectoryNode))
            new_v = subdir.get_verify_cap().to_string()
            assert isinstance(new_v, str)
            self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
            self.expected_verifycaps.add(new_v)
            si = subdir.get_storage_index()
            self.expected_storage_indexes.add(base32.b2a(si))
        d.addCallback(_created)
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "mkdir-no",
                                      "child 'subdir' already exists",
                                      n.create_empty_directory, u"subdir",
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
                                           sorted([u"child", u"subdir"])))
        d.addCallback(lambda res: n.start_deep_stats().when_done())
        def _check_deepstats(stats):
            self.failUnless(isinstance(stats, dict))
            expected = {"count-immutable-files": 0,
                        "count-mutable-files": 1,
                        "count-literal-files": 0,
                        "count-directories": 2,
                        "size-immutable-files": 0,
                        "size-literal-files": 0,
                        #"size-directories": 616, # varies
                        #"largest-directory": 616,
                        "largest-directory-children": 2,
                        "largest-immutable-file": 0,
            for k,v in expected.iteritems():
                self.failUnlessEqual(stats[k], v,
                                     "stats[%s] was %s, not %s" %
            # directory sizes vary with encoding details, so only
            # lower-bound them
            self.failUnless(stats["size-directories"] > 500,
                            stats["size-directories"])
            self.failUnless(stats["largest-directory"] > 500,
                            stats["largest-directory"])
            self.failUnlessEqual(stats["size-files-histogram"], [])
        d.addCallback(_check_deepstats)
        d.addCallback(lambda res: n.build_manifest().when_done())
        def _check_manifest(res):
            manifest = res["manifest"]
            self.failUnlessEqual(sorted(manifest),
                                 sorted(self.expected_manifest))
            _check_deepstats(stats)
            self.failUnlessEqual(self.expected_verifycaps,
            self.failUnlessEqual(self.expected_storage_indexes,
                                 res["storage-index"])
        d.addCallback(_check_manifest)
        def _add_subsubdir(res):
            return self.subdir.create_empty_directory(u"subsubdir")
        d.addCallback(_add_subsubdir)
        # /subdir = directory
        # /subdir/subsubdir = directory
        d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
        d.addCallback(lambda subsubdir:
                      self.failUnless(isinstance(subsubdir,
                                                 dirnode.DirectoryNode)))
        # an empty path resolves to the node itself
        d.addCallback(lambda res: n.get_child_at_path(u""))
        d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
        d.addCallback(lambda res: n.get_metadata_for(u"child"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(set(metadata.keys()),
                                           set(["tahoe", "ctime", "mtime"])))
        d.addCallback(lambda res:
                      self.shouldFail(NoSuchChildError, "gcamap-no",
                                      n.get_child_and_metadata_at_path,
        d.addCallback(lambda res:
                      n.get_child_and_metadata_at_path(u""))
        def _check_child_and_metadata1(res):
            child, metadata = res
            self.failUnless(isinstance(child, dirnode.DirectoryNode))
            # edge-metadata needs at least one path segment
            self.failUnlessEqual(sorted(metadata.keys()), [])
        d.addCallback(_check_child_and_metadata1)
        d.addCallback(lambda res:
                      n.get_child_and_metadata_at_path(u"child"))
        def _check_child_and_metadata2(res):
            child, metadata = res
            self.failUnlessEqual(child.get_uri(),
            self.failUnlessEqual(set(metadata.keys()),
                                 set(["tahoe", "ctime", "mtime"]))
        d.addCallback(_check_child_and_metadata2)
        d.addCallback(lambda res:
                      n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
        def _check_child_and_metadata3(res):
            child, metadata = res
            self.failUnless(isinstance(child, dirnode.DirectoryNode))
            self.failUnlessEqual(set(metadata.keys()),
                                 set(["tahoe", "ctime", "mtime"]))
        d.addCallback(_check_child_and_metadata3)
        # it should be possible to add a child without any metadata
        d.addCallback(lambda res: n.set_uri(u"c2",
                                            fake_file_uri, fake_file_uri,
        d.addCallback(lambda res: n.get_metadata_for(u"c2"))
        d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
        # You can't override the link timestamps.
        d.addCallback(lambda res: n.set_uri(u"c2",
                                            fake_file_uri, fake_file_uri,
                                            { 'tahoe': {'linkcrtime': "bogus"}}))
        d.addCallback(lambda res: n.get_metadata_for(u"c2"))
        def _has_good_linkcrtime(metadata):
            self.failUnless(metadata.has_key('tahoe'))
            self.failUnless(metadata['tahoe'].has_key('linkcrtime'))
            self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus')
        d.addCallback(_has_good_linkcrtime)
        # if we don't set any defaults, the child should get timestamps
        d.addCallback(lambda res: n.set_uri(u"c3",
                                            fake_file_uri, fake_file_uri))
        d.addCallback(lambda res: n.get_metadata_for(u"c3"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(set(metadata.keys()),
                                           set(["tahoe", "ctime", "mtime"])))
        # or we can add specific metadata at set_uri() time, which
        # overrides the timestamps
        d.addCallback(lambda res: n.set_uri(u"c4",
                                            fake_file_uri, fake_file_uri,
        d.addCallback(lambda res: n.get_metadata_for(u"c4"))
        d.addCallback(lambda metadata:
                      self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                      (metadata['key'] == "value"), metadata))
        d.addCallback(lambda res: n.delete(u"c2"))
        d.addCallback(lambda res: n.delete(u"c3"))
        d.addCallback(lambda res: n.delete(u"c4"))
        # set_node + metadata
        # it should be possible to add a child without any metadata
        d.addCallback(lambda res: n.set_node(u"d2", n, {}))
        d.addCallback(lambda res: c.create_dirnode())
        d.addCallback(lambda n2:
                      self.shouldFail(ExistingChildError, "set_node-no",
                                      "child 'd2' already exists",
                                      n.set_node, u"d2", n2,
        d.addCallback(lambda res: n.get_metadata_for(u"d2"))
        d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
        # if we don't set any defaults, the child should get timestamps
        d.addCallback(lambda res: n.set_node(u"d3", n))
        d.addCallback(lambda res: n.get_metadata_for(u"d3"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(set(metadata.keys()),
                                           set(["tahoe", "ctime", "mtime"])))
        # or we can add specific metadata at set_node() time, which
        # overrides the timestamps
        d.addCallback(lambda res: n.set_node(u"d4", n,
        d.addCallback(lambda res: n.get_metadata_for(u"d4"))
        d.addCallback(lambda metadata:
                      self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                      (metadata['key'] == "value"), metadata))
        d.addCallback(lambda res: n.delete(u"d2"))
        d.addCallback(lambda res: n.delete(u"d3"))
        d.addCallback(lambda res: n.delete(u"d4"))
        # metadata through set_children()
        d.addCallback(lambda res:
                      u"e1": (fake_file_uri, fake_file_uri),
                      u"e2": (fake_file_uri, fake_file_uri, {}),
                      u"e3": (fake_file_uri, fake_file_uri,
        d.addCallback(lambda n2: self.failUnlessIdentical(n2, n))
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "set_children-no",
                                      "child 'e1' already exists",
                                      { u"e1": (other_file_uri,
                                        u"new": (other_file_uri,
        # and 'new' should not have been created
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children: self.failIf(u"new" in children))
        d.addCallback(lambda res: n.get_metadata_for(u"e1"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(set(metadata.keys()),
                                           set(["tahoe", "ctime", "mtime"])))
        d.addCallback(lambda res: n.get_metadata_for(u"e2"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
        d.addCallback(lambda res: n.get_metadata_for(u"e3"))
        d.addCallback(lambda metadata:
                      self.failUnless((set(metadata.keys()) == set(["key", "tahoe"]))
                                      and (metadata['key'] == "value"), metadata))
        d.addCallback(lambda res: n.delete(u"e1"))
        d.addCallback(lambda res: n.delete(u"e2"))
        d.addCallback(lambda res: n.delete(u"e3"))
        # metadata through set_nodes()
        d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
        d.addCallback(lambda n2: self.failUnlessIdentical(n2, n))
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "set_nodes-no",
                                      "child 'f1' already exists",
        # and 'new' should not have been created
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children: self.failIf(u"new" in children))
        d.addCallback(lambda res: n.get_metadata_for(u"f1"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(set(metadata.keys()),
                                           set(["tahoe", "ctime", "mtime"])))
        d.addCallback(lambda res: n.get_metadata_for(u"f2"))
        lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
        d.addCallback(lambda res: n.get_metadata_for(u"f3"))
        d.addCallback(lambda metadata:
                      self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                      (metadata['key'] == "value"), metadata))
        d.addCallback(lambda res: n.delete(u"f1"))
        d.addCallback(lambda res: n.delete(u"f2"))
        d.addCallback(lambda res: n.delete(u"f3"))
        # replacing metadata outright (not merging)
        d.addCallback(lambda res:
                      n.set_metadata_for(u"child",
                                         {"tags": ["web2.0-compatible"]}))
        d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(metadata,
                                           {"tags": ["web2.0-compatible"]}))
        # NOTE(review): the `_start` wrapper that records this timestamp
        # is missing here
        self._start_timestamp = time.time()
        d.addCallback(_start)
        # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
        # floats to hundredeths (it uses str(num) instead of repr(num)).
        # simplejson-1.7.3 does not have this bug. To prevent this bug
        # from causing the test to fail, stall for more than a few
        # hundrededths of a second.
        d.addCallback(self.stall, 0.1)
        d.addCallback(lambda res: n.add_file(u"timestamps",
                                             upload.Data("stamp me", convergence="some convergence string")))
        d.addCallback(self.stall, 0.1)
        self._stop_timestamp = time.time()
        d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
        def _check_timestamp1(metadata):
            self.failUnless("ctime" in metadata)
            self.failUnless("mtime" in metadata)
            self.failUnlessGreaterOrEqualThan(metadata["ctime"],
                                              self._start_timestamp)
            self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
            self.failUnlessGreaterOrEqualThan(metadata["mtime"],
                                              self._start_timestamp)
            self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
            # Our current timestamp rules say that replacing an existing
            # child should preserve the 'ctime' but update the mtime
            self._old_ctime = metadata["ctime"]
            self._old_mtime = metadata["mtime"]
        d.addCallback(_check_timestamp1)
        d.addCallback(self.stall, 2.0) # accomodate low-res timestamps
        d.addCallback(lambda res: n.set_node(u"timestamps", n))
        d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
        def _check_timestamp2(metadata):
            self.failUnlessEqual(metadata["ctime"], self._old_ctime,
                                 "%s != %s" % (metadata["ctime"],
            self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
            return n.delete(u"timestamps")
        d.addCallback(_check_timestamp2)
        # also make sure we can add/update timestamps on a
        # previously-existing child that didn't have any, since there are
        # a lot of 0.7.0-generated edges around out there
        d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
        d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
        d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(set(metadata.keys()),
                                           set(["tahoe", "ctime", "mtime"])))
        d.addCallback(lambda res: n.delete(u"no_timestamps"))
        # delete returns the old child node
        d.addCallback(lambda res: n.delete(u"subdir"))
        d.addCallback(lambda old_child:
                      self.failUnlessEqual(old_child.get_uri(),
                                           self.subdir.get_uri()))
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
        uploadable1 = upload.Data("some data", convergence="converge")
        d.addCallback(lambda res: n.add_file(u"newfile", uploadable1))
        d.addCallback(lambda newnode:
                      self.failUnless(IFileNode.providedBy(newnode)))
        uploadable2 = upload.Data("some data", convergence="stuff")
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "add_file-no",
                                      "child 'newfile' already exists",
                                      n.add_file, u"newfile",
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
                                           sorted([u"child", u"newfile"])))
        d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(set(metadata.keys()),
                                           set(["tahoe", "ctime", "mtime"])))
        uploadable3 = upload.Data("some data", convergence="converge")
        d.addCallback(lambda res: n.add_file(u"newfile-metadata",
        d.addCallback(lambda newnode:
                      self.failUnless(IFileNode.providedBy(newnode)))
        d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
        d.addCallback(lambda metadata:
                      self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                      (metadata['key'] == "value"), metadata))
        d.addCallback(lambda res: n.delete(u"newfile-metadata"))
        d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
        def _created2(subdir2):
            self.subdir2 = subdir2
            # put something in the way, to make sure it gets overwritten
            return subdir2.add_file(u"child", upload.Data("overwrite me",
        d.addCallback(_created2)
        d.addCallback(lambda res:
                      n.move_child_to(u"child", self.subdir2))
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
                                           sorted([u"newfile", u"subdir2"])))
        d.addCallback(lambda res: self.subdir2.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
        d.addCallback(lambda res: self.subdir2.get(u"child"))
        d.addCallback(lambda child:
                      self.failUnlessEqual(child.get_uri(),
        # move it back, using new_child_name=
        d.addCallback(lambda res:
                      self.subdir2.move_child_to(u"child", n, u"newchild"))
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
                                           sorted([u"newchild", u"newfile",
        d.addCallback(lambda res: self.subdir2.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()), []))
        # now make sure that we honor overwrite=False
        d.addCallback(lambda res:
                      self.subdir2.set_uri(u"newchild",
                                           other_file_uri, other_file_uri))
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "move_child_to-no",
                                      "child 'newchild' already exists",
                                      n.move_child_to, u"newchild",
        d.addCallback(lambda res: self.subdir2.get(u"newchild"))
        d.addCallback(lambda child:
                      self.failUnlessEqual(child.get_uri(),
        d.addErrback(self.explain_error)
class Packing(unittest.TestCase):
    """Tests for dirnode serialization: _pack_contents/_unpack_contents and
    the CachingDict that backs them.
    """
    # This is a base32-encoded representation of the directory tree
    # containing file1/file2/file3, as represented after being fed to
    # _pack_contents. We have it here so we can decode it, feed it to
    # _unpack_contents, and verify that the unpacked children round-trip.
    known_tree = "GM4TOORVHJTGS3DFGEWDSNJ2KVJESOSDJBFTU33MPB2GS3LZNVYG6N3GGI3WU5TIORTXC3DOMJ2G4NB2MVWXUZDONBVTE5LNGRZWK2LYN55GY23XGNYXQMTOMZUWU5TENN4DG23ZG5UTO2L2NQ2DO6LFMRWDMZJWGRQTUMZ2GEYDUMJQFQYTIMZ22XZKZORX5XS7CAQCSK3URR6QOHISHRCMGER5LRFSZRNAS5ZSALCS6TWFQAE754IVOIKJVK73WZPP3VUUEDTX3WHTBBZ5YX3CEKHCPG3ZWQLYA4QM6LDRCF7TJQYWLIZHKGN5ROA3AUZPXESBNLQQ6JTC2DBJU2D47IZJTLR3PKZ4RVF57XLPWY7FX7SZV3T6IJ3ORFW37FXUPGOE3ROPFNUX5DCGMAQJ3PGGULBRGM3TU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGI3TKNRWGEWCAITUMFUG6ZJCHIQHWITMNFXGW3LPORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBSG42TMNRRFQQCE3DJNZVWG4TUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQQCE3LUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQWDGOJRHI2TUZTJNRSTELBZGQ5FKUSJHJBUQSZ2MFYGKZ3SOBSWQ43IO52WO23CNAZWU3DUGVSWSNTIOE5DK33POVTW4ZLNMNWDK6DHPA2GS2THNF2W25DEN5VGY2LQNFRGG5DKNNRHO5TZPFTWI6LNMRYGQ2LCGJTHM4J2GM5DCMB2GQWDCNBSHKVVQBGRYMACKJ27CVQ6O6B4QPR72RFVTGOZUI76XUSWAX73JRV5PYRHMIFYZIA25MXDPGUGML6M2NMRSG4YD4W4K37ZDYSXHMJ3IUVT4F64YTQQVBJFFFOUC7J7LAB2VFCL5UKKGMR2D3F4EPOYC7UYWQZNR5KXHBSNXLCNBX2SNF22DCXJIHSMEKWEWOG5XCJEVVZ7UW5IB6I64XXQSJ34B5CAYZGZIIMR6LBRGMZTU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYFQQCE5DBNBXWKIR2EB5SE3DJNZVW233UNFWWKIR2EAYTENBWGY3DGOBZG4XDIMZQGIYTQLBAEJWGS3TLMNZHI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTAMRRHB6SYIBCNV2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYPUWCYMZZGU5DKOTGNFWGKMZMHE2DUVKSJE5EGSCLHJRW25DDPBYTO2DXPB3GM6DBNYZTI6LJMV3DM2LWNB4TU4LWMNSWW3LKORXWK5DEMN3TI23NNE3WEM3SORRGY5THPA3TKNBUMNZG453BOF2GSZLXMVWWI3DJOFZW623RHIZTUMJQHI2SYMJUGI5BOSHWDPG3WKPAVXCF3XMKA7QVIWPRMWJHDTQHD27AHDCPJWDQENQ5H5ZZILTXQNIXXCIW4LKQABU2GCFRG5FHQN7CHD7HF4EKNRZFIV2ZYQIBM7IQU7F4RGB3XCX3FREPBKQ7UCICHVWPCYFGA6OLH3J45LXQ6GWWICJ3PGWJNLZ7PCRNLAPNYUGU6BENS7OXMBEOOFRIZV3PF2FFWZ5WHDPKXERYP7GNHKRMGEZTOOT3EJRXI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTGNRSGY4SYIBCORQWQ33FEI5CA6ZCNRUW423NN52GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMZTMMRWHEWCAITMNFXGWY3SORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCAITNORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCY==="

    def test_unpack_and_pack_behavior(self):
        known_tree = b32decode(self.known_tree)
        # NOTE(review): the NodeMaker positional args were reconstructed
        # (three dropped Nones) -- confirm against the NodeMaker signature
        nodemaker = NodeMaker(None, None, None,
                              None, None, None,
                              {"k": 3, "n": 10}, None)
        writecap = "URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q"
        filenode = nodemaker.create_from_cap(writecap)
        node = dirnode.DirectoryNode(filenode, nodemaker, None)
        children = node._unpack_contents(known_tree)
        self._check_children(children)

        # a pack/unpack round-trip must preserve children and metadata
        packed_children = node._pack_contents(children)
        children = node._unpack_contents(packed_children)
        self._check_children(children)

    def _check_children(self, children):
        """Verify that `children` matches the known tree exactly: the three
        expected names, their CHK caps (read-only and read-write forms are
        identical for CHK files), and their link metadata."""
        # (name, cap, link-timestamp) for each expected child
        expected = [
            (u'file1',
             "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10",
             1246663897.4275661),
            (u'file2',
             "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4",
             1246663897.430218),
            (u'file3',
             "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5",
             1246663897.4336269),
        ]
        for name, cap, ts in expected:
            self.failUnless(children.has_key(name))
            childnode, metadata = children[name]
            expected_metadata = {'ctime': ts,
                                 'tahoe': {'linkmotime': ts,
                                           'linkcrtime': ts},
                                 'mtime': ts}
            self.failUnlessEqual(expected_metadata, metadata)
            self.failUnlessEqual(cap, childnode.get_readonly_uri())
            self.failUnlessEqual(cap, childnode.get_uri())

    def test_caching_dict(self):
        d = dirnode.CachingDict()
        d.set_both_items("test", "test2", ("test3", "test4"))
        cached, value = d.get_both_items("test")

        self.failUnlessEqual(cached, "test2")
        self.failUnlessEqual(value, ("test3", "test4"))

        # a plain __setitem__ must invalidate the cached serialization
        d['test'] = ("test3", "test2")

        cached, value = d.get_both_items("test")

        self.failUnlessEqual(cached, None)
        self.failUnlessEqual(value, ("test3", "test2"))
class FakeMutableFile:
    """Minimal in-memory stand-in for a mutable file node.

    Each instance derives a distinct writeable SSK URI from a class-level
    counter, so fake files created in one test run do not collide.
    """
    counter = 0  # bumped per instance to give each fake a unique cap
    def __init__(self, initial_contents=""):
        self.data = initial_contents
        counter = FakeMutableFile.counter
        FakeMutableFile.counter += 1
        writekey = hashutil.ssk_writekey_hash(str(counter))
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(str(counter))
        self.uri = uri.WriteableSSKFileURI(writekey, fingerprint)
    def get_uri(self):
        return self.uri.to_string()
    def download_best_version(self):
        return defer.succeed(self.data)
    def get_writekey(self):
        # NOTE(review): return value reconstructed -- callers only need
        # something true-ish; confirm against upstream history
        return "writekey"
    def is_readonly(self):
        return False
    def is_mutable(self):
        return True
    def modify(self, modifier):
        # mimic MutableFileNode.modify: (old_contents, servermap, first_time)
        self.data = modifier(self.data, None, True)
        return defer.succeed(None)
class FakeNodeMaker(NodeMaker):
    """NodeMaker variant whose mutable files are in-memory fakes."""
    def create_mutable_file(self, contents="", keysize=None):
        fake = FakeMutableFile(contents)
        return defer.succeed(fake)
class FakeClient2(Client):
    """Client whose nodemaker builds FakeMutableFiles instead of real ones."""
    def __init__(self):
        # NOTE(review): the constructor header and three of the Nones in
        # this arg list were missing from the reviewed copy; reconstructed
        # to match the NodeMaker call in Packing -- confirm the signature
        self.nodemaker = FakeNodeMaker(None, None, None,
                                       None, None, None,
                                       {"k":3,"n":10}, None)
    def create_node_from_uri(self, rwcap, rocap):
        return self.nodemaker.create_from_cap(rwcap, rocap)
class Dirnode2(unittest.TestCase, testutil.ShouldFailMixin):
    """Tests for how dirnodes tolerate children with unknown URI types."""
    def setUp(self):
        self.client = FakeClient2()
        self.nodemaker = self.client.nodemaker

    def test_from_future(self):
        # create a dirnode that contains unknown URI types, and make sure we
        # tolerate them properly. Since dirnodes aren't allowed to add
        # unknown node types, we have to be tricky.
        d = self.nodemaker.create_new_mutable_directory()
        future_writecap = "x-tahoe-crazy://I_am_from_the_future."
        future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future."
        future_node = UnknownNode(future_writecap, future_readcap)
        def _stash_and_add(n):
            self._node = n
            # set_node bypasses the cap-format checks that set_uri performs
            return n.set_node(u"future", future_node)
        d.addCallback(_stash_and_add)

        # we should be prohibited from adding an unknown URI to a directory,
        # since we don't know how to diminish the cap to a readcap (for the
        # dirnode's rocap slot), and we don't want to accidentally grant
        # write access to a holder of the dirnode's readcap.
        d.addCallback(lambda ign:
                      self.shouldFail(CannotPackUnknownNodeError,
                                      "copy unknown",
                                      "cannot pack unknown node as child add",
                                      self._node.set_uri, u"add",
                                      future_writecap, future_readcap))
        d.addCallback(lambda ign: self._node.list())
        def _check(children):
            self.failUnlessEqual(len(children), 1)
            (fn, metadata) = children[u"future"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
            # but we *should* be allowed to copy this node, because the
            # UnknownNode contains all the information that was in the
            # original directory (readcap and writecap), so we're preserving
            # everything.
            return self._node.set_node(u"copy", fn)
        d.addCallback(_check)
        d.addCallback(lambda ign: self._node.list())
        def _check2(children):
            self.failUnlessEqual(len(children), 2)
            (fn, metadata) = children[u"copy"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
        d.addCallback(_check2)
        return d
class DeepStats(unittest.TestCase):
    # Unit tests for the dirnode.DeepStats accumulator (simple counters,
    # size histograms, and running maxima). No grid is needed: DeepStats
    # is exercised directly, so the origin node argument is None.
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.

    def test_stats(self):
        ds = dirnode.DeepStats(None)
        ds.add("count-files")                      # counter, default increment
        ds.add("size-immutable-files", 123)        # counter += 123
        ds.histogram("size-files-histogram", 123)  # one sample in one bucket
        ds.max("largest-directory", 444)           # running maximum

        # NOTE(review): this listing elides a line here; `s` is presumably
        # bound from the stats results (something like s = ds.get_results())
        # -- confirm against the full file.
        self.failUnlessEqual(s["count-files"], 1)
        self.failUnlessEqual(s["size-immutable-files"], 123)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)

        ds.add("count-files")
        ds.add("size-immutable-files", 321)
        ds.histogram("size-files-histogram", 321)
        ds.max("largest-directory", 2)             # 2 < 444: max must not regress

        # NOTE(review): `s` is presumably re-fetched here (elided line)
        # -- confirm against the full file.
        self.failUnlessEqual(s["count-files"], 2)
        self.failUnlessEqual(s["size-immutable-files"], 444)  # 123 + 321
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)
        # 123 and 321 fall into adjacent histogram buckets.
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (101, 316, 1), (317, 1000, 1) ])

        # Fresh accumulator: one sample at every size 1..1099 plus a single
        # 4TB outlier, to exercise the full range of histogram buckets.
        ds = dirnode.DeepStats(None)
        for i in range(1, 1100):
            ds.histogram("size-files-histogram", i)
        ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
        # NOTE(review): the expected-histogram literal below is truncated in
        # this listing (several leading buckets and the closing bracket of
        # the list, plus the `s` binding, are elided) -- confirm.
        self.failUnlessEqual(s["size-files-histogram"],
                             (3162277660169L, 10000000000000L, 1),
class UCWEingMutableFileNode(MutableFileNode):
    # A MutableFileNode that can be armed to raise UncoordinatedWriteError
    # exactly once on the next upload. Tests (e.g. Deleter.test_retry,
    # ticket #550) use it to verify that callers retry after a UCWE
    # instead of failing spuriously.
    # One-shot trigger: set to True on an instance to make the next
    # upload raise; it is reset to False before raising.
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        # Delegate the real upload to the base class.
        d = MutableFileNode._upload(self, new_contents, servermap)
        # NOTE(review): this listing elides lines here; the flag check
        # below presumably lives inside a callback added to `d` (with the
        # Deferred returned afterwards), so the UCWE fires after the
        # upload completes -- confirm against the full file.
        if self.please_ucwe_after_next_upload:
            self.please_ucwe_after_next_upload = False  # fire only once
            raise UncoordinatedWriteError()
class UCWEingNodeMaker(NodeMaker):
    # NodeMaker that produces UCWEingMutableFileNode instances in place of
    # plain MutableFileNodes, so tests can inject a one-shot
    # UncoordinatedWriteError into mutable-file uploads.
    def _create_mutable(self, cap):
        n = UCWEingMutableFileNode(self.storage_broker, self.secret_holder,
                                   self.default_encoding_parameters,
                                   # NOTE(review): one constructor argument
                                   # line is elided from this listing
                                   # (the call is unbalanced as shown)
                                   # -- confirm against the full file.
        return n.init_from_uri(cap)
class Deleter(GridTestMixin, unittest.TestCase):
    # Regression test for ticket #550: dirnode.delete must retry after an
    # UncoordinatedWriteError rather than failing with NoSuchChildError.
    timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.

    def test_retry(self):
        # ticket #550, a dirnode.delete which experiences an
        # UncoordinatedWriteError will fail with an incorrect "you're
        # deleting something which isn't there" NoSuchChildError exception.

        # to trigger this, we start by creating a directory with a single
        # file in it. Then we create a special dirnode that uses a modified
        # MutableFileNode which will raise UncoordinatedWriteError once on
        # demand. We then call dirnode.delete, which ought to retry and
        # (NOTE(review): the tail of this comment is elided in this listing.)
        self.basedir = self.mktemp()
        # NOTE(review): a grid-setup call (e.g. self.set_up_grid()) is
        # presumably elided here -- confirm against the full file.
        c0 = self.g.clients[0]
        d = c0.create_dirnode()
        # Small enough to be stored as a LIT (literal) cap.
        small = upload.Data("Small enough for a LIT", None)
        def _created_dir(dn):
            # Remember the root's writecap, then add the one child file.
            # NOTE(review): a preceding statement is elided here -- confirm.
            self.root_uri = dn.get_uri()
            return dn.add_file(u"file", small)
        d.addCallback(_created_dir)
        def _do_delete(ignored):
            # Rebuild a node for the same directory, but through the
            # UCWE-injecting NodeMaker, so the delete's write hits a UCWE.
            nm = UCWEingNodeMaker(c0.storage_broker, c0._secret_holder,
                                  c0.get_history(), c0.getServiceNamed("uploader"),
                                  # NOTE(review): a constructor argument line
                                  # is elided from this listing -- confirm.
                                  c0.download_cache_dirman,
                                  c0.get_encoding_parameters(),
                                  # NOTE(review): trailing argument(s) and the
                                  # closing paren are elided -- confirm.
            n = nm.create_from_cap(self.root_uri)
            # Arm the one-shot UCWE for the next upload (the delete's write).
            assert n._node.please_ucwe_after_next_upload == False
            n._node.please_ucwe_after_next_upload = True
            # This should succeed, not raise an exception
            return n.delete(u"file")
        d.addCallback(_do_delete)
952 class Adder(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
954 def test_overwrite(self):
955 # note: This functionality could be tested without actually creating
956 # several RSA keys. It would be faster without the GridTestMixin: use
957 # dn.set_node(nodemaker.create_from_cap(make_chk_file_uri())) instead
958 # of dn.add_file, and use a special NodeMaker that creates fake
960 self.basedir = "dirnode/Adder/test_overwrite"
962 c = self.g.clients[0]
963 fileuri = make_chk_file_uri(1234)
964 filenode = c.nodemaker.create_from_cap(fileuri)
965 d = c.create_dirnode()
967 def _create_directory_tree(root_node):
972 d = root_node.add_file(u'file1', upload.Data("Important Things",
974 d.addCallback(lambda res:
975 root_node.add_file(u'file2', upload.Data("Sekrit Codes", None)))
976 d.addCallback(lambda res:
977 root_node.create_empty_directory(u"dir1"))
978 d.addCallback(lambda res: root_node)
981 d.addCallback(_create_directory_tree)
983 def _test_adder(root_node):
984 d = root_node.set_node(u'file1', filenode)
985 # We've overwritten file1. Let's try it with a directory
986 d.addCallback(lambda res:
987 root_node.create_empty_directory(u'dir2'))
988 d.addCallback(lambda res:
989 root_node.set_node(u'dir2', filenode))
990 # We try overwriting a file with a child while also specifying
991 # overwrite=False. We should receive an ExistingChildError
993 d.addCallback(lambda res:
994 self.shouldFail(ExistingChildError, "set_node",
995 "child 'file1' already exists",
996 root_node.set_node, u"file1",
997 filenode, overwrite=False))
998 # If we try with a directory, we should see the same thing
999 d.addCallback(lambda res:
1000 self.shouldFail(ExistingChildError, "set_node",
1001 "child 'dir1' already exists",
1002 root_node.set_node, u'dir1', filenode,
1004 d.addCallback(lambda res:
1005 root_node.set_node(u'file1', filenode,
1006 overwrite="only-files"))
1007 d.addCallback(lambda res:
1008 self.shouldFail(ExistingChildError, "set_node",
1009 "child 'dir1' already exists",
1010 root_node.set_node, u'dir1', filenode,
1011 overwrite="only-files"))
1014 d.addCallback(_test_adder)