3 from twisted.trial import unittest
4 from twisted.internet import defer
5 from allmydata import uri, dirnode
6 from allmydata.client import Client
7 from allmydata.immutable import upload
8 from allmydata.interfaces import IFileNode, \
9 ExistingChildError, NoSuchChildError, \
10 IDeepCheckResults, IDeepCheckAndRepairResults, CannotPackUnknownNodeError
11 from allmydata.mutable.filenode import MutableFileNode
12 from allmydata.mutable.common import UncoordinatedWriteError
13 from allmydata.util import hashutil, base32
14 from allmydata.monitor import Monitor
15 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
17 from allmydata.test.no_network import GridTestMixin
18 from allmydata.unknown import UnknownNode
19 from allmydata.nodemaker import NodeMaker
20 from base64 import b32decode
21 import common_util as testutil
class Dirnode(GridTestMixin, unittest.TestCase,
              testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
    """Integration-style tests for dirnode.DirectoryNode, run against the
    in-process no_network grid supplied by GridTestMixin."""
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
28 self.basedir = "dirnode/Dirnode/test_basic"
31 d = c.create_dirnode()
33 self.failUnless(isinstance(res, dirnode.DirectoryNode))
35 self.failUnless("RW" in rep)
def test_initial_children(self):
    """Creating a dirnode with pre-populated children should store them,
    and bogus kids (UnknownNode, non-dict metadata) must be rejected."""
    self.basedir = "dirnode/Dirnode/test_initial_children"
    # NOTE(review): grid setup and the lines binding `c` and `nm` are
    # elided from this chunk.
    setup_py_uri = "URI:CHK:n7r3m6wmomelk4sep3kw5cvduq:os7ijw5c3maek7pg65e5254k2fzjflavtpejjyhshpsxuqzhcwwq:3:20:14861"
    one_uri = "URI:LIT:n5xgk" # LIT for "one"
    kids = {u"one": (nm.create_from_cap(one_uri), {}),
            u"two": (nm.create_from_cap(setup_py_uri),
                     {"metakey": "metavalue"}),
    # NOTE(review): the closing brace of `kids` is elided from this chunk.
    d = c.create_dirnode(kids)
    # NOTE(review): the `def _created(dn):` header is elided.
    self.failUnless(isinstance(dn, dirnode.DirectoryNode))
    self.failUnless("RW" in rep)
    d.addCallback(_created)
    def _check_kids(children):
        # both initial kids must be present, with sizes matching their caps
        self.failUnlessEqual(sorted(children.keys()), [u"one", u"two"])
        one_node, one_metadata = children[u"one"]
        two_node, two_metadata = children[u"two"]
        self.failUnlessEqual(one_node.get_size(), 3)
        self.failUnlessEqual(two_node.get_size(), 14861)
        self.failUnless(isinstance(one_metadata, dict), one_metadata)
        self.failUnlessEqual(two_metadata["metakey"], "metavalue")
    d.addCallback(_check_kids)
    # the same kids dict must also work through the nodemaker API
    d.addCallback(lambda ign: nm.create_new_mutable_directory(kids))
    d.addCallback(lambda dn: dn.list())
    d.addCallback(_check_kids)
    future_writecap = "x-tahoe-crazy://I_am_from_the_future."
    future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future."
    future_node = UnknownNode(future_writecap, future_readcap)
    bad_kids1 = {u"one": (future_node, {})}
    d.addCallback(lambda ign:
                  self.shouldFail(AssertionError, "bad_kids1",
                                  "does not accept UnknownNode",
                                  nm.create_new_mutable_directory,
    bad_kids2 = {u"one": (nm.create_from_cap(one_uri), None)}
    d.addCallback(lambda ign:
                  self.shouldFail(AssertionError, "bad_kids2",
                                  "requires metadata to be a dict",
                                  nm.create_new_mutable_directory,
87 self.basedir = "dirnode/Dirnode/test_check"
90 d = c.create_dirnode()
91 d.addCallback(lambda dn: dn.check(Monitor()))
93 self.failUnless(res.is_healthy())
def _test_deepcheck_create(self):
    """Build the 4-object fixture used by the deep-check tests:
    root, root/subdir, root/subdir/file1, and a loop edge
    root/subdir/link -> root, plus a read-only child root/rodir."""
    # create a small tree with a loop, and some non-directories
    # root/subdir/link -> root
    c = self.g.clients[0]
    d = c.create_dirnode()
    def _created_root(rootnode):
        # stash the root for later callbacks, then add a subdirectory
        self._rootnode = rootnode
        return rootnode.create_subdirectory(u"subdir")
    d.addCallback(_created_root)
    def _created_subdir(subdir):
        self._subdir = subdir
        d = subdir.add_file(u"file1", upload.Data("data"*100, None))
        # loop edge: subdir/link points back at the root
        d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
        d.addCallback(lambda res: c.create_dirnode())
        d.addCallback(lambda dn:
                      self._rootnode.set_uri(u"rodir",
                                             dn.get_readonly_uri()))
        # NOTE(review): one set_uri argument line and the `return d`
        # of this callback are elided from this chunk.
    d.addCallback(_created_subdir)
    # NOTE(review): the enclosing callback that yields the root is elided.
    return self._rootnode
def test_deepcheck(self):
    """Deep-check the healthy 4-object fixture; every object should be
    healthy and no shares corrupt."""
    self.basedir = "dirnode/Dirnode/test_deepcheck"
    d = self._test_deepcheck_create()
    d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
    def _check_results(r):
        self.failUnless(IDeepCheckResults.providedBy(r))
        # NOTE(review): the line binding `c` (the counters dict from the
        # results object) is elided from this chunk.
        self.failUnlessEqual(c,
                             {"count-objects-checked": 4,
                              "count-objects-healthy": 4,
                              "count-objects-unhealthy": 0,
                              "count-objects-unrecoverable": 0,
                              "count-corrupt-shares": 0,
        self.failIf(r.get_corrupt_shares())
        self.failUnlessEqual(len(r.get_all_results()), 4)
    d.addCallback(_check_results)
def test_deepcheck_and_repair(self):
    """Deep-check-and-repair the healthy fixture; nothing should need
    (or attempt) repair, pre- or post-repair."""
    self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair"
    d = self._test_deepcheck_create()
    d.addCallback(lambda rootnode:
                  rootnode.start_deep_check_and_repair().when_done())
    def _check_results(r):
        self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
        # NOTE(review): the line binding `c` (the counters dict) is elided.
        self.failUnlessEqual(c,
                             {"count-objects-checked": 4,
                              "count-objects-healthy-pre-repair": 4,
                              "count-objects-unhealthy-pre-repair": 0,
                              "count-objects-unrecoverable-pre-repair": 0,
                              "count-corrupt-shares-pre-repair": 0,
                              "count-objects-healthy-post-repair": 4,
                              "count-objects-unhealthy-post-repair": 0,
                              "count-objects-unrecoverable-post-repair": 0,
                              "count-corrupt-shares-post-repair": 0,
                              "count-repairs-attempted": 0,
                              "count-repairs-successful": 0,
                              "count-repairs-unsuccessful": 0,
        self.failIf(r.get_corrupt_shares())
        self.failIf(r.get_remaining_corrupt_shares())
        self.failUnlessEqual(len(r.get_all_results()), 4)
    d.addCallback(_check_results)
def _mark_file_bad(self, rootnode):
    """Damage rootnode by deleting its share #0, then return the node.

    Returning rootnode is required: this helper is used inside a Deferred
    callback chain (see test_deepcheck_problems), and the next callback
    expects to receive the root node so it can start a deep-check on it.
    """
    self.delete_shares_numbered(rootnode.get_uri(), [0])
    return rootnode
def test_deepcheck_problems(self):
    """After deleting one share of the root, deep-check should report
    exactly one unhealthy (but still recoverable) object."""
    self.basedir = "dirnode/Dirnode/test_deepcheck_problems"
    d = self._test_deepcheck_create()
    d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
    d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
    def _check_results(r):
        # NOTE(review): the line binding `c` (the counters dict) is elided.
        self.failUnlessEqual(c,
                             {"count-objects-checked": 4,
                              "count-objects-healthy": 3,
                              "count-objects-unhealthy": 1,
                              "count-objects-unrecoverable": 0,
                              "count-corrupt-shares": 0,
        #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
    d.addCallback(_check_results)
def test_readonly(self):
    """A read-only view of a mutable dirnode must reject every mutating
    operation (set_uri/set_node/set_nodes/add_file/delete/mkdir/
    set_metadata_for/move_child_to) while still allowing list()."""
    self.basedir = "dirnode/Dirnode/test_readonly"
    c = self.g.clients[0]
    # NOTE(review): the line binding `nm` is elided from this chunk.
    filecap = make_chk_file_uri(1234)
    filenode = nm.create_from_cap(filecap)
    uploadable = upload.Data("some data", convergence="some convergence string")
    d = c.create_dirnode()
    # NOTE(review): the `def _created(rw_dn):` header is elided.
    d2 = rw_dn.set_uri(u"child", filecap, filecap)
    d2.addCallback(lambda res: rw_dn)
    d.addCallback(_created)
    # NOTE(review): the `def _ready(rw_dn):` header is elided.
    ro_uri = rw_dn.get_readonly_uri()
    ro_dn = c.create_node_from_uri(ro_uri)
    self.failUnless(ro_dn.is_readonly())
    self.failUnless(ro_dn.is_mutable())
    # every mutator on the read-only view must raise NotMutableError
    self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                    ro_dn.set_uri, u"newchild", filecap, filecap)
    self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                    ro_dn.set_node, u"newchild", filenode)
    self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
                    ro_dn.set_nodes, { u"newchild": (filenode, None) })
    self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                    ro_dn.add_file, u"newchild", uploadable)
    self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                    ro_dn.delete, u"child")
    self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                    ro_dn.create_subdirectory, u"newchild")
    self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
                    ro_dn.set_metadata_for, u"child", {})
    self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                    ro_dn.move_child_to, u"child", rw_dn)
    # moving INTO a read-only target must fail too
    self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                    rw_dn.move_child_to, u"child", ro_dn)
    d.addCallback(_ready)
    def _listed(children):
        # reading through the read-only view still works
        self.failUnless(u"child" in children)
    d.addCallback(_listed)
def failUnlessGreaterThan(self, a, b):
    """Assert that a is strictly greater than b, with a readable message."""
    message = "%r should be > %r" % (a, b)
    self.failUnless(a > b, message)
def failUnlessGreaterOrEqualThan(self, a, b):
    """Assert that a is greater than or equal to b, with a readable message."""
    message = "%r should be >= %r" % (a, b)
    self.failUnless(a >= b, message)
def test_create(self):
    """End-to-end exercise of DirectoryNode: caps, manifest/deep-stats,
    child CRUD via set_uri/set_node/set_children/set_nodes/add_file,
    metadata defaults and overrides, timestamps, and move_child_to.

    NOTE(review): many original lines are elided from this chunk
    (closing parens, helper headers, and blank lines); elisions are
    marked inline where they matter.
    """
    self.basedir = "dirnode/Dirnode/test_create"
    c = self.g.clients[0]
    self.expected_manifest = []
    self.expected_verifycaps = set()
    self.expected_storage_indexes = set()
    d = c.create_dirnode()
    # NOTE(review): the `def _check(n):` header and the line binding `u`
    # are elided; the following assertions belong to that callback.
    self.failUnless(n.is_mutable())
    self.failUnless(u.startswith("URI:DIR2:"), u)
    u_ro = n.get_readonly_uri()
    self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
    u_v = n.get_verify_cap().to_string()
    self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
    u_r = n.get_repair_cap().to_string()
    # for a mutable directory the repair cap equals the write cap
    self.failUnlessEqual(u_r, u)
    self.expected_manifest.append( ((), u) )
    self.expected_verifycaps.add(u_v)
    si = n.get_storage_index()
    self.expected_storage_indexes.add(base32.b2a(si))
    expected_si = n._uri._filenode_uri.storage_index
    self.failUnlessEqual(si, expected_si)
    # a new directory starts out empty
    d.addCallback(lambda res: self.failUnlessEqual(res, {}))
    d.addCallback(lambda res: n.has_child(u"missing"))
    d.addCallback(lambda res: self.failIf(res))
    fake_file_uri = make_mutable_file_uri()
    other_file_uri = make_mutable_file_uri()
    m = c.nodemaker.create_from_cap(fake_file_uri)
    ffu_v = m.get_verify_cap().to_string()
    self.expected_manifest.append( ((u"child",) , m.get_uri()) )
    self.expected_verifycaps.add(ffu_v)
    self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
    d.addCallback(lambda res: n.set_uri(u"child",
                                        fake_file_uri, fake_file_uri))
    d.addCallback(lambda res:
                  self.shouldFail(ExistingChildError, "set_uri-no",
                                  "child 'child' already exists",
                                  other_file_uri, other_file_uri,
    d.addCallback(lambda res: n.create_subdirectory(u"subdir"))
    # /subdir = directory
    def _created(subdir):
        self.failUnless(isinstance(subdir, dirnode.DirectoryNode))
        new_v = subdir.get_verify_cap().to_string()
        assert isinstance(new_v, str)
        self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
        self.expected_verifycaps.add(new_v)
        si = subdir.get_storage_index()
        self.expected_storage_indexes.add(base32.b2a(si))
    d.addCallback(_created)
    # creating the same subdirectory twice without overwrite must fail
    d.addCallback(lambda res:
                  self.shouldFail(ExistingChildError, "mkdir-no",
                                  "child 'subdir' already exists",
                                  n.create_subdirectory, u"subdir",
    d.addCallback(lambda res: n.list())
    d.addCallback(lambda children:
                  self.failUnlessEqual(sorted(children.keys()),
                                       sorted([u"child", u"subdir"])))
    d.addCallback(lambda res: n.start_deep_stats().when_done())
    def _check_deepstats(stats):
        self.failUnless(isinstance(stats, dict))
        expected = {"count-immutable-files": 0,
                    "count-mutable-files": 1,
                    "count-literal-files": 0,
                    "count-directories": 2,
                    "size-immutable-files": 0,
                    "size-literal-files": 0,
                    #"size-directories": 616, # varies
                    #"largest-directory": 616,
                    "largest-directory-children": 2,
                    "largest-immutable-file": 0,
        for k,v in expected.iteritems():
            self.failUnlessEqual(stats[k], v,
                                 "stats[%s] was %s, not %s" %
        # directory sizes vary with encoding; only bound them
        self.failUnless(stats["size-directories"] > 500,
                        stats["size-directories"])
        self.failUnless(stats["largest-directory"] > 500,
                        stats["largest-directory"])
        self.failUnlessEqual(stats["size-files-histogram"], [])
    d.addCallback(_check_deepstats)
    d.addCallback(lambda res: n.build_manifest().when_done())
    def _check_manifest(res):
        manifest = res["manifest"]
        self.failUnlessEqual(sorted(manifest),
                             sorted(self.expected_manifest))
        _check_deepstats(stats)
        self.failUnlessEqual(self.expected_verifycaps,
        self.failUnlessEqual(self.expected_storage_indexes,
                             res["storage-index"])
    d.addCallback(_check_manifest)
    def _add_subsubdir(res):
        return self.subdir.create_subdirectory(u"subsubdir")
    d.addCallback(_add_subsubdir)
    # /subdir = directory
    # /subdir/subsubdir = directory
    d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
    d.addCallback(lambda subsubdir:
                  self.failUnless(isinstance(subsubdir,
                                             dirnode.DirectoryNode)))
    # an empty path resolves to the directory itself
    d.addCallback(lambda res: n.get_child_at_path(u""))
    d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
    d.addCallback(lambda res: n.get_metadata_for(u"child"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(set(metadata.keys()),
                                       set(["tahoe", "ctime", "mtime"])))
    d.addCallback(lambda res:
                  self.shouldFail(NoSuchChildError, "gcamap-no",
                                  n.get_child_and_metadata_at_path,
    d.addCallback(lambda res:
                  n.get_child_and_metadata_at_path(u""))
    def _check_child_and_metadata1(res):
        child, metadata = res
        self.failUnless(isinstance(child, dirnode.DirectoryNode))
        # edge-metadata needs at least one path segment
        self.failUnlessEqual(sorted(metadata.keys()), [])
    d.addCallback(_check_child_and_metadata1)
    d.addCallback(lambda res:
                  n.get_child_and_metadata_at_path(u"child"))
    def _check_child_and_metadata2(res):
        child, metadata = res
        self.failUnlessEqual(child.get_uri(),
        self.failUnlessEqual(set(metadata.keys()),
                             set(["tahoe", "ctime", "mtime"]))
    d.addCallback(_check_child_and_metadata2)
    d.addCallback(lambda res:
                  n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
    def _check_child_and_metadata3(res):
        child, metadata = res
        self.failUnless(isinstance(child, dirnode.DirectoryNode))
        self.failUnlessEqual(set(metadata.keys()),
                             set(["tahoe", "ctime", "mtime"]))
    d.addCallback(_check_child_and_metadata3)
    # it should be possible to add a child without any metadata
    d.addCallback(lambda res: n.set_uri(u"c2",
                                        fake_file_uri, fake_file_uri,
    d.addCallback(lambda res: n.get_metadata_for(u"c2"))
    d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
    # You can't override the link timestamps.
    d.addCallback(lambda res: n.set_uri(u"c2",
                                        fake_file_uri, fake_file_uri,
                                        { 'tahoe': {'linkcrtime': "bogus"}}))
    d.addCallback(lambda res: n.get_metadata_for(u"c2"))
    def _has_good_linkcrtime(metadata):
        self.failUnless(metadata.has_key('tahoe'))
        self.failUnless(metadata['tahoe'].has_key('linkcrtime'))
        self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus')
    d.addCallback(_has_good_linkcrtime)
    # if we don't set any defaults, the child should get timestamps
    d.addCallback(lambda res: n.set_uri(u"c3",
                                        fake_file_uri, fake_file_uri))
    d.addCallback(lambda res: n.get_metadata_for(u"c3"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(set(metadata.keys()),
                                       set(["tahoe", "ctime", "mtime"])))
    # or we can add specific metadata at set_uri() time, which
    # overrides the timestamps
    d.addCallback(lambda res: n.set_uri(u"c4",
                                        fake_file_uri, fake_file_uri,
    d.addCallback(lambda res: n.get_metadata_for(u"c4"))
    d.addCallback(lambda metadata:
                  self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                  (metadata['key'] == "value"), metadata))
    d.addCallback(lambda res: n.delete(u"c2"))
    d.addCallback(lambda res: n.delete(u"c3"))
    d.addCallback(lambda res: n.delete(u"c4"))
    # set_node + metadata
    # it should be possible to add a child without any metadata
    d.addCallback(lambda res: n.set_node(u"d2", n, {}))
    d.addCallback(lambda res: c.create_dirnode())
    d.addCallback(lambda n2:
                  self.shouldFail(ExistingChildError, "set_node-no",
                                  "child 'd2' already exists",
                                  n.set_node, u"d2", n2,
    d.addCallback(lambda res: n.get_metadata_for(u"d2"))
    d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
    # if we don't set any defaults, the child should get timestamps
    d.addCallback(lambda res: n.set_node(u"d3", n))
    d.addCallback(lambda res: n.get_metadata_for(u"d3"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(set(metadata.keys()),
                                       set(["tahoe", "ctime", "mtime"])))
    # or we can add specific metadata at set_node() time, which
    # overrides the timestamps
    d.addCallback(lambda res: n.set_node(u"d4", n,
    d.addCallback(lambda res: n.get_metadata_for(u"d4"))
    d.addCallback(lambda metadata:
                  self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                  (metadata['key'] == "value"), metadata))
    d.addCallback(lambda res: n.delete(u"d2"))
    d.addCallback(lambda res: n.delete(u"d3"))
    d.addCallback(lambda res: n.delete(u"d4"))
    # metadata through set_children()
    d.addCallback(lambda res:
                  u"e1": (fake_file_uri, fake_file_uri),
                  u"e2": (fake_file_uri, fake_file_uri, {}),
                  u"e3": (fake_file_uri, fake_file_uri,
    d.addCallback(lambda n2: self.failUnlessIdentical(n2, n))
    d.addCallback(lambda res:
                  self.shouldFail(ExistingChildError, "set_children-no",
                                  "child 'e1' already exists",
                                  { u"e1": (other_file_uri,
                                    u"new": (other_file_uri,
    # and 'new' should not have been created
    d.addCallback(lambda res: n.list())
    d.addCallback(lambda children: self.failIf(u"new" in children))
    d.addCallback(lambda res: n.get_metadata_for(u"e1"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(set(metadata.keys()),
                                       set(["tahoe", "ctime", "mtime"])))
    d.addCallback(lambda res: n.get_metadata_for(u"e2"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
    d.addCallback(lambda res: n.get_metadata_for(u"e3"))
    d.addCallback(lambda metadata:
                  self.failUnless((set(metadata.keys()) == set(["key", "tahoe"]))
                                  and (metadata['key'] == "value"), metadata))
    d.addCallback(lambda res: n.delete(u"e1"))
    d.addCallback(lambda res: n.delete(u"e2"))
    d.addCallback(lambda res: n.delete(u"e3"))
    # metadata through set_nodes()
    d.addCallback(lambda res:
                  n.set_nodes({ u"f1": (n, None),
                                u"f3": (n, {"key": "value"}),
    d.addCallback(lambda n2: self.failUnlessIdentical(n2, n))
    d.addCallback(lambda res:
                  self.shouldFail(ExistingChildError, "set_nodes-no",
                                  "child 'f1' already exists",
                                  n.set_nodes, { u"f1": (n, None),
                                                 u"new": (n, None), },
    # and 'new' should not have been created
    d.addCallback(lambda res: n.list())
    d.addCallback(lambda children: self.failIf(u"new" in children))
    d.addCallback(lambda res: n.get_metadata_for(u"f1"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(set(metadata.keys()),
                                       set(["tahoe", "ctime", "mtime"])))
    d.addCallback(lambda res: n.get_metadata_for(u"f2"))
    lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
    d.addCallback(lambda res: n.get_metadata_for(u"f3"))
    d.addCallback(lambda metadata:
                  self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                  (metadata['key'] == "value"), metadata))
    d.addCallback(lambda res: n.delete(u"f1"))
    d.addCallback(lambda res: n.delete(u"f2"))
    d.addCallback(lambda res: n.delete(u"f3"))
    # set_metadata_for replaces the whole metadata dict for a child
    d.addCallback(lambda res:
                  n.set_metadata_for(u"child",
                                     {"tags": ["web2.0-compatible"]}))
    d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(metadata,
                                       {"tags": ["web2.0-compatible"]}))
    # NOTE(review): the `def _start(res):` header is elided.
    self._start_timestamp = time.time()
    d.addCallback(_start)
    # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
    # floats to hundredths (it uses str(num) instead of repr(num)).
    # simplejson-1.7.3 does not have this bug. To prevent this bug
    # from causing the test to fail, stall for more than a few
    # hundredths of a second.
    d.addCallback(self.stall, 0.1)
    d.addCallback(lambda res: n.add_file(u"timestamps",
                                         upload.Data("stamp me", convergence="some convergence string")))
    d.addCallback(self.stall, 0.1)
    # NOTE(review): the `def _stop(res):` header is elided.
    self._stop_timestamp = time.time()
    d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
    def _check_timestamp1(metadata):
        self.failUnless("ctime" in metadata)
        self.failUnless("mtime" in metadata)
        # both timestamps must fall within [start, stop]
        self.failUnlessGreaterOrEqualThan(metadata["ctime"],
                                          self._start_timestamp)
        self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
        self.failUnlessGreaterOrEqualThan(metadata["mtime"],
                                          self._start_timestamp)
        self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
        # Our current timestamp rules say that replacing an existing
        # child should preserve the 'ctime' but update the mtime
        self._old_ctime = metadata["ctime"]
        self._old_mtime = metadata["mtime"]
    d.addCallback(_check_timestamp1)
    d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
    d.addCallback(lambda res: n.set_node(u"timestamps", n))
    d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
    def _check_timestamp2(metadata):
        self.failUnlessEqual(metadata["ctime"], self._old_ctime,
                             "%s != %s" % (metadata["ctime"],
        self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
        return n.delete(u"timestamps")
    d.addCallback(_check_timestamp2)
    # also make sure we can add/update timestamps on a
    # previously-existing child that didn't have any, since there are
    # a lot of 0.7.0-generated edges around out there
    d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
    d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
    d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(set(metadata.keys()),
                                       set(["tahoe", "ctime", "mtime"])))
    d.addCallback(lambda res: n.delete(u"no_timestamps"))
    # delete() fires with the old child node
    d.addCallback(lambda res: n.delete(u"subdir"))
    d.addCallback(lambda old_child:
                  self.failUnlessEqual(old_child.get_uri(),
                                       self.subdir.get_uri()))
    d.addCallback(lambda res: n.list())
    d.addCallback(lambda children:
                  self.failUnlessEqual(sorted(children.keys()),
    uploadable1 = upload.Data("some data", convergence="converge")
    d.addCallback(lambda res: n.add_file(u"newfile", uploadable1))
    d.addCallback(lambda newnode:
                  self.failUnless(IFileNode.providedBy(newnode)))
    uploadable2 = upload.Data("some data", convergence="stuff")
    d.addCallback(lambda res:
                  self.shouldFail(ExistingChildError, "add_file-no",
                                  "child 'newfile' already exists",
                                  n.add_file, u"newfile",
    d.addCallback(lambda res: n.list())
    d.addCallback(lambda children:
                  self.failUnlessEqual(sorted(children.keys()),
                                       sorted([u"child", u"newfile"])))
    d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
    d.addCallback(lambda metadata:
                  self.failUnlessEqual(set(metadata.keys()),
                                       set(["tahoe", "ctime", "mtime"])))
    uploadable3 = upload.Data("some data", convergence="converge")
    d.addCallback(lambda res: n.add_file(u"newfile-metadata",
    d.addCallback(lambda newnode:
                  self.failUnless(IFileNode.providedBy(newnode)))
    d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
    d.addCallback(lambda metadata:
                  self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                  (metadata['key'] == "value"), metadata))
    d.addCallback(lambda res: n.delete(u"newfile-metadata"))
    d.addCallback(lambda res: n.create_subdirectory(u"subdir2"))
    def _created2(subdir2):
        self.subdir2 = subdir2
        # put something in the way, to make sure it gets overwritten
        return subdir2.add_file(u"child", upload.Data("overwrite me",
    d.addCallback(_created2)
    d.addCallback(lambda res:
                  n.move_child_to(u"child", self.subdir2))
    d.addCallback(lambda res: n.list())
    d.addCallback(lambda children:
                  self.failUnlessEqual(sorted(children.keys()),
                                       sorted([u"newfile", u"subdir2"])))
    d.addCallback(lambda res: self.subdir2.list())
    d.addCallback(lambda children:
                  self.failUnlessEqual(sorted(children.keys()),
    d.addCallback(lambda res: self.subdir2.get(u"child"))
    d.addCallback(lambda child:
                  self.failUnlessEqual(child.get_uri(),
    # move it back, using new_child_name=
    d.addCallback(lambda res:
                  self.subdir2.move_child_to(u"child", n, u"newchild"))
    d.addCallback(lambda res: n.list())
    d.addCallback(lambda children:
                  self.failUnlessEqual(sorted(children.keys()),
                                       sorted([u"newchild", u"newfile",
    d.addCallback(lambda res: self.subdir2.list())
    d.addCallback(lambda children:
                  self.failUnlessEqual(sorted(children.keys()), []))
    # now make sure that we honor overwrite=False
    d.addCallback(lambda res:
                  self.subdir2.set_uri(u"newchild",
                                       other_file_uri, other_file_uri))
    d.addCallback(lambda res:
                  self.shouldFail(ExistingChildError, "move_child_to-no",
                                  "child 'newchild' already exists",
                                  n.move_child_to, u"newchild",
    d.addCallback(lambda res: self.subdir2.get(u"newchild"))
    d.addCallback(lambda child:
                  self.failUnlessEqual(child.get_uri(),
    d.addErrback(self.explain_error)
def test_create_subdirectory(self):
    """create_subdirectory() with initial kids should store both the
    children and their per-child metadata."""
    self.basedir = "dirnode/Dirnode/test_create_subdirectory"
    c = self.g.clients[0]
    d = c.create_dirnode()
    # NOTE(review): the callback header binding `n` and `nm` is elided.
    fake_file_uri = make_mutable_file_uri()
    other_file_uri = make_mutable_file_uri()
    md = {"metakey": "metavalue"}
    kids = {u"kid1": (nm.create_from_cap(fake_file_uri), {}),
            u"kid2": (nm.create_from_cap(other_file_uri), md),
    d = n.create_subdirectory(u"subdir", kids)
    # NOTE(review): the `def _check(sub):` header is elided; the next
    # lines belong to that callback.
    d = n.get_child_at_path(u"subdir")
    d.addCallback(lambda sub2: self.failUnlessEqual(sub2.get_uri(),
    d.addCallback(lambda ign: sub.list())
    d.addCallback(_check)
    def _check_kids(kids2):
        # same names as the kids we passed in, and kid2 keeps its metadata
        self.failUnlessEqual(sorted(kids.keys()), sorted(kids2.keys()))
        self.failUnlessEqual(kids2[u"kid2"][1]["metakey"], "metavalue")
    d.addCallback(_check_kids)
class MinimalFakeMutableFile:
    """Minimal stand-in for a mutable file node: only the interface that
    the packing tests below actually touch."""
    def get_writekey(self):
        """Return the write-enabler key.

        NOTE(review): the body of this method is elided from this chunk;
        only the signature is visible here.
        """
class Packing(unittest.TestCase):
    """Tests for DirectoryNode's _pack_contents/_unpack_contents round-trip
    and for dirnode.pack_children's deep_immutable enforcement."""
    # This is a base32-encoded representation of the directory tree
    # as represented after being fed to _pack_contents.
    # We have it here so we can decode it, feed it to
    # _unpack_contents, and verify that _unpack_contents
    known_tree = "GM4TOORVHJTGS3DFGEWDSNJ2KVJESOSDJBFTU33MPB2GS3LZNVYG6N3GGI3WU5TIORTXC3DOMJ2G4NB2MVWXUZDONBVTE5LNGRZWK2LYN55GY23XGNYXQMTOMZUWU5TENN4DG23ZG5UTO2L2NQ2DO6LFMRWDMZJWGRQTUMZ2GEYDUMJQFQYTIMZ22XZKZORX5XS7CAQCSK3URR6QOHISHRCMGER5LRFSZRNAS5ZSALCS6TWFQAE754IVOIKJVK73WZPP3VUUEDTX3WHTBBZ5YX3CEKHCPG3ZWQLYA4QM6LDRCF7TJQYWLIZHKGN5ROA3AUZPXESBNLQQ6JTC2DBJU2D47IZJTLR3PKZ4RVF57XLPWY7FX7SZV3T6IJ3ORFW37FXUPGOE3ROPFNUX5DCGMAQJ3PGGULBRGM3TU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGI3TKNRWGEWCAITUMFUG6ZJCHIQHWITMNFXGW3LPORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBSG42TMNRRFQQCE3DJNZVWG4TUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQQCE3LUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQWDGOJRHI2TUZTJNRSTELBZGQ5FKUSJHJBUQSZ2MFYGKZ3SOBSWQ43IO52WO23CNAZWU3DUGVSWSNTIOE5DK33POVTW4ZLNMNWDK6DHPA2GS2THNF2W25DEN5VGY2LQNFRGG5DKNNRHO5TZPFTWI6LNMRYGQ2LCGJTHM4J2GM5DCMB2GQWDCNBSHKVVQBGRYMACKJ27CVQ6O6B4QPR72RFVTGOZUI76XUSWAX73JRV5PYRHMIFYZIA25MXDPGUGML6M2NMRSG4YD4W4K37ZDYSXHMJ3IUVT4F64YTQQVBJFFFOUC7J7LAB2VFCL5UKKGMR2D3F4EPOYC7UYWQZNR5KXHBSNXLCNBX2SNF22DCXJIHSMEKWEWOG5XCJEVVZ7UW5IB6I64XXQSJ34B5CAYZGZIIMR6LBRGMZTU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYFQQCE5DBNBXWKIR2EB5SE3DJNZVW233UNFWWKIR2EAYTENBWGY3DGOBZG4XDIMZQGIYTQLBAEJWGS3TLMNZHI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTAMRRHB6SYIBCNV2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYPUWCYMZZGU5DKOTGNFWGKMZMHE2DUVKSJE5EGSCLHJRW25DDPBYTO2DXPB3GM6DBNYZTI6LJMV3DM2LWNB4TU4LWMNSWW3LKORXWK5DEMN3TI23NNE3WEM3SORRGY5THPA3TKNBUMNZG453BOF2GSZLXMVWWI3DJOFZW623RHIZTUMJQHI2SYMJUGI5BOSHWDPG3WKPAVXCF3XMKA7QVIWPRMWJHDTQHD27AHDCPJWDQENQ5H5ZZILTXQNIXXCIW4LKQABU2GCFRG5FHQN7CHD7HF4EKNRZFIV2ZYQIBM7IQU7F4RGB3XCX3FREPBKQ7UCICHVWPCYFGA6OLH3J45LXQ6GWWICJ3PGWJNLZ7PCRNLAPNYUGU6BENS7OXMBEOOFRIZV3PF2FFWZ5WHDPKXERYP7GNHKRMGEZTOOT3EJRXI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTGNRSGY4SYIBCORQWQ33FEI5CA6ZCNRUW423NN52GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMZTMMRWHEWCAITMNFXGWY3SORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCAITNORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCY==="
def test_unpack_and_pack_behavior(self):
    """Unpack known_tree, verify its children, then pack and re-unpack
    to confirm the round-trip preserves caps and metadata."""
    known_tree = b32decode(self.known_tree)
    # NOTE(review): some NodeMaker constructor argument lines are elided
    # from this chunk.
    nodemaker = NodeMaker(None, None, None,
                          {"k": 3, "n": 10}, None)
    writecap = "URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q"
    filenode = nodemaker.create_from_cap(writecap)
    node = dirnode.DirectoryNode(filenode, nodemaker, None)
    children = node._unpack_contents(known_tree)
    self._check_children(children)
    # round-trip: pack, unpack, and the children must be unchanged
    packed_children = node._pack_contents(children)
    children = node._unpack_contents(packed_children)
    self._check_children(children)
def _check_children(self, children):
    """Verify that `children` contains exactly the three files baked
    into Packing.known_tree, with the expected read/write caps and
    link-timestamp metadata.

    `children` maps child name -> (node, metadata dict), as produced by
    DirectoryNode._unpack_contents.
    """
    # Are all the expected child nodes there?
    # (use `in` rather than the deprecated dict.has_key; identical
    # behavior, and still valid on Python 2)
    self.failUnless(u'file1' in children)
    self.failUnless(u'file2' in children)
    self.failUnless(u'file3' in children)

    # Are the metadata for child 3 right?
    file3_rocap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
    file3_rwcap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
    file3_metadata = {'ctime': 1246663897.4336269, 'tahoe': {'linkmotime': 1246663897.4336269, 'linkcrtime': 1246663897.4336269}, 'mtime': 1246663897.4336269}
    self.failUnlessEqual(file3_metadata, children[u'file3'][1])
    self.failUnlessEqual(file3_rocap,
                         children[u'file3'][0].get_readonly_uri())
    self.failUnlessEqual(file3_rwcap,
                         children[u'file3'][0].get_uri())

    # Are the metadata for child 2 right?
    file2_rocap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
    file2_rwcap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
    file2_metadata = {'ctime': 1246663897.430218, 'tahoe': {'linkmotime': 1246663897.430218, 'linkcrtime': 1246663897.430218}, 'mtime': 1246663897.430218}
    self.failUnlessEqual(file2_metadata, children[u'file2'][1])
    self.failUnlessEqual(file2_rocap,
                         children[u'file2'][0].get_readonly_uri())
    self.failUnlessEqual(file2_rwcap,
                         children[u'file2'][0].get_uri())

    # Are the metadata for child 1 right?
    file1_rocap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
    file1_rwcap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
    file1_metadata = {'ctime': 1246663897.4275661, 'tahoe': {'linkmotime': 1246663897.4275661, 'linkcrtime': 1246663897.4275661}, 'mtime': 1246663897.4275661}
    self.failUnlessEqual(file1_metadata, children[u'file1'][1])
    self.failUnlessEqual(file1_rocap,
                         children[u'file1'][0].get_readonly_uri())
    self.failUnlessEqual(file1_rwcap,
                         children[u'file1'][0].get_uri())
def _make_kids(self, nm, which):
    """Build a children dict mapping each cap flavor named in `which`
    to (node created from a canned cap, empty metadata)."""
    caps = {"imm": "URI:CHK:n7r3m6wmomelk4sep3kw5cvduq:os7ijw5c3maek7pg65e5254k2fzjflavtpejjyhshpsxuqzhcwwq:3:20:14861",
            "lit": "URI:LIT:n5xgk", # LIT for "one"
            "write": "URI:SSK:vfvcbdfbszyrsaxchgevhmmlii:euw4iw7bbnkrrwpzuburbhppuxhc3gwxv26f6imekhz7zyw2ojnq",
            "read": "URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q",
            "dirwrite": "URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq",
            "dirread": "URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq",
    # NOTE(review): the dict's closing brace, the `kids = {}` initializer,
    # the `for name in which:` loop header, and the `return kids` are
    # elided from this chunk; the next line is the loop body.
    kids[unicode(name)] = (nm.create_from_cap(caps[name]), {})
def test_deep_immutable(self):
    """pack_children(deep_immutable=True) must accept only immutable
    children (imm/lit) and raise MustBeDeepImmutable for mutable or
    directory caps, even read-only ones."""
    # NOTE(review): the NodeMaker call's closing argument line is elided.
    nm = NodeMaker(None, None, None, None, None, None, {"k": 3, "n": 10},
    fn = MinimalFakeMutableFile()
    # without deep_immutable, any mix of children packs fine
    kids = self._make_kids(nm, ["imm", "lit", "write", "read",
                                "dirwrite", "dirread"])
    packed = dirnode.pack_children(fn, kids, deep_immutable=False)
    self.failUnlessIn("lit", packed)
    # all-immutable kids pack fine even with deep_immutable=True
    kids = self._make_kids(nm, ["imm", "lit"])
    packed = dirnode.pack_children(fn, kids, deep_immutable=True)
    self.failUnlessIn("lit", packed)
    # a writeable mutable child is rejected
    kids = self._make_kids(nm, ["imm", "lit", "write"])
    e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
                              dirnode.pack_children,
                              fn, kids, deep_immutable=True)
    # read-only is not enough: all children must be immutable
    kids = self._make_kids(nm, ["imm", "lit", "read"])
    e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
                              dirnode.pack_children,
                              fn, kids, deep_immutable=True)
    # writeable directory child is rejected
    kids = self._make_kids(nm, ["imm", "lit", "dirwrite"])
    e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
                              dirnode.pack_children,
                              fn, kids, deep_immutable=True)
    # read-only directory child is rejected too
    kids = self._make_kids(nm, ["imm", "lit", "dirread"])
    e = self.failUnlessRaises(dirnode.MustBeDeepImmutable,
                              dirnode.pack_children,
                              fn, kids, deep_immutable=True)
class FakeMutableFile:
    """In-memory stand-in for a mutable file node.

    Each instance derives a distinct fake SSK write-URI from a class-level
    counter. Contents live in self.data and are manipulated synchronously,
    with results wrapped in already-fired Deferreds to mimic the real
    asynchronous interface.
    """
    counter = 0  # class-wide, so every instance gets a unique fake URI

    def __init__(self, initial_contents=""):
        self.data = self._get_initial_contents(initial_contents)
        counter = FakeMutableFile.counter
        FakeMutableFile.counter += 1
        writekey = hashutil.ssk_writekey_hash(str(counter))
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(str(counter))
        self.uri = uri.WriteableSSKFileURI(writekey, fingerprint)

    def _get_initial_contents(self, contents):
        # accept a plain string, None (meaning empty), or a callable that
        # is handed this node and returns the initial contents
        if isinstance(contents, str):
            return contents
        if contents is None:
            return ""
        assert callable(contents), "%s should be callable, not %s" % \
               (contents, type(contents))
        return contents(self)

    def get_uri(self):
        return self.uri.to_string()

    def download_best_version(self):
        return defer.succeed(self.data)

    def get_writekey(self):
        return "writekey"

    def is_readonly(self):
        return False

    def is_mutable(self):
        return True

    def modify(self, modifier):
        # modifier(old_contents, servermap, first_time) -> new_contents
        self.data = modifier(self.data, None, True)
        return defer.succeed(None)
class FakeNodeMaker(NodeMaker):
    """NodeMaker variant whose mutable files are in-memory fakes."""
    def create_mutable_file(self, contents="", keysize=None):
        # No RSA key generation: build the fake synchronously and hand it
        # back in an already-fired Deferred. keysize is accepted but unused.
        node = FakeMutableFile(contents)
        return defer.succeed(node)
class FakeClient2(Client):
    """Client stand-in providing only a nodemaker.

    Deliberately does NOT call Client.__init__, so no storage or network
    services are started; tests that only need cap->node conversion use
    the FakeNodeMaker directly.
    """
    def __init__(self):
        self.nodemaker = FakeNodeMaker(None, None, None,
                                       None, None, None,
                                       {"k":3,"n":10}, None)

    def create_node_from_uri(self, rwcap, rocap):
        return self.nodemaker.create_from_cap(rwcap, rocap)
# Tests for how dirnodes tolerate unknown ("from the future") cap types.
# NOTE(review): this excerpt carries stale line-number prefixes and elides
# several lines (visible as jumps in the embedded numbering) — flagged below.
923 class Dirnode2(unittest.TestCase, testutil.ShouldFailMixin):
# setUp (its "def" header is elided here) builds a FakeClient2 and keeps a
# direct reference to its nodemaker for the tests.
925         self.client = FakeClient2()
926         self.nodemaker = self.client.nodemaker
928     def test_from_future(self):
929         # create a dirnode that contains unknown URI types, and make sure we
930         # tolerate them properly. Since dirnodes aren't allowed to add
931         # unknown node types, we have to be tricky.
932         d = self.nodemaker.create_new_mutable_directory()
933         future_writecap = "x-tahoe-crazy://I_am_from_the_future."
934         future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future."
935         future_node = UnknownNode(future_writecap, future_readcap)
# NOTE(review): the callback header that stashes the new dirnode as
# self._node is elided here (lines 936-937) — self._node is used below.
938             return n.set_node(u"future", future_node)
941         # we should be prohibited from adding an unknown URI to a directory,
942         # since we don't know how to diminish the cap to a readcap (for the
943         # dirnode's rocap slot), and we don't want to accidentally grant
944         # write access to a holder of the dirnode's readcap.
945         d.addCallback(lambda ign:
946              self.shouldFail(CannotPackUnknownNodeError,
# NOTE(review): shouldFail's "which" label argument (line 947) is elided.
948                              "cannot pack unknown node as child add",
949                              self._node.set_uri, u"add",
950                              future_writecap, future_readcap))
951         d.addCallback(lambda ign: self._node.list())
952         def _check(children):
953             self.failUnlessEqual(len(children), 1)
954             (fn, metadata) = children[u"future"]
955             self.failUnless(isinstance(fn, UnknownNode), fn)
956             self.failUnlessEqual(fn.get_uri(), future_writecap)
957             self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
958             # but we *should* be allowed to copy this node, because the
959             # UnknownNode contains all the information that was in the
960             # original directory (readcap and writecap), so we're preserving
# set_node (as opposed to set_uri) carries the whole UnknownNode across.
962             return self._node.set_node(u"copy", fn)
963         d.addCallback(_check)
964         d.addCallback(lambda ign: self._node.list())
965         def _check2(children):
966             self.failUnlessEqual(len(children), 2)
967             (fn, metadata) = children[u"copy"]
968             self.failUnless(isinstance(fn, UnknownNode), fn)
969             self.failUnlessEqual(fn.get_uri(), future_writecap)
970             self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
# NOTE(review): the trailing d.addCallback(_check2) / return d (lines
# 971-972) are elided from this excerpt — presumably present; confirm.
973 class DeepStats(unittest.TestCase):
974 timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
975 def test_stats(self):
976 ds = dirnode.DeepStats(None)
977 ds.add("count-files")
978 ds.add("size-immutable-files", 123)
979 ds.histogram("size-files-histogram", 123)
980 ds.max("largest-directory", 444)
983 self.failUnlessEqual(s["count-files"], 1)
984 self.failUnlessEqual(s["size-immutable-files"], 123)
985 self.failUnlessEqual(s["largest-directory"], 444)
986 self.failUnlessEqual(s["count-literal-files"], 0)
988 ds.add("count-files")
989 ds.add("size-immutable-files", 321)
990 ds.histogram("size-files-histogram", 321)
991 ds.max("largest-directory", 2)
994 self.failUnlessEqual(s["count-files"], 2)
995 self.failUnlessEqual(s["size-immutable-files"], 444)
996 self.failUnlessEqual(s["largest-directory"], 444)
997 self.failUnlessEqual(s["count-literal-files"], 0)
998 self.failUnlessEqual(s["size-files-histogram"],
999 [ (101, 316, 1), (317, 1000, 1) ])
1001 ds = dirnode.DeepStats(None)
1002 for i in range(1, 1100):
1003 ds.histogram("size-files-histogram", i)
1004 ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
1005 s = ds.get_results()
1006 self.failUnlessEqual(s["size-files-histogram"],
1014 (3162277660169L, 10000000000000L, 1),
class UCWEingMutableFileNode(MutableFileNode):
    """MutableFileNode that raises UncoordinatedWriteError once on demand.

    Set please_ucwe_after_next_upload=True and the next _upload will run
    to completion, then fail with UncoordinatedWriteError; the flag is
    cleared first so that a retry succeeds.
    """
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        d = MutableFileNode._upload(self, new_contents, servermap)
        def _ucwe(res):
            if self.please_ucwe_after_next_upload:
                self.please_ucwe_after_next_upload = False
                raise UncoordinatedWriteError()
            return res
        d.addCallback(_ucwe)
        return d
class UCWEingNodeMaker(NodeMaker):
    """NodeMaker whose mutable nodes are UCWEingMutableFileNodes."""
    def _create_mutable(self, cap):
        n = UCWEingMutableFileNode(self.storage_broker, self.secret_holder,
                                   self.default_encoding_parameters,
                                   self.history)
        return n.init_from_uri(cap)
# Regression test for ticket #550: dirnode.delete must survive a transient
# UncoordinatedWriteError by retrying, instead of surfacing a bogus
# NoSuchChildError on the retry.
# NOTE(review): this excerpt carries stale line-number prefixes and elides a
# few lines (jumps in the embedded numbering) — flagged below.
1038 class Deleter(GridTestMixin, unittest.TestCase):
1039     timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.
1040     def test_retry(self):
1041         # ticket #550, a dirnode.delete which experiences an
1042         # UncoordinatedWriteError will fail with an incorrect "you're
1043         # deleting something which isn't there" NoSuchChildError exception.
1045         # to trigger this, we start by creating a directory with a single
1046         # file in it. Then we create a special dirnode that uses a modified
1047         # MutableFileNode which will raise UncoordinatedWriteError once on
1048         # demand. We then call dirnode.delete, which ought to retry and
1051         self.basedir = self.mktemp()
# NOTE(review): the grid-setup call (line 1052, presumably self.set_up_grid())
# is elided here — self.g is used below, so something must create it; confirm.
1053         c0 = self.g.clients[0]
1054         d = c0.create_dirnode()
1055         small = upload.Data("Small enough for a LIT", None)
1056         def _created_dir(dn):
# the dirnode's writecap is stashed so _do_delete can re-create the node
# through the UCWE-injecting nodemaker below
1058             self.root_uri = dn.get_uri()
1059             return dn.add_file(u"file", small)
1060         d.addCallback(_created_dir)
1061         def _do_delete(ignored):
1062             nm = UCWEingNodeMaker(c0.storage_broker, c0._secret_holder,
1063                                   c0.get_history(), c0.getServiceNamed("uploader"),
# NOTE(review): one constructor argument line (1064) is elided here —
# presumably the downloader slot of the NodeMaker signature; confirm.
1065                                   c0.download_cache_dirman,
1066                                   c0.get_encoding_parameters(),
# NOTE(review): the closing argument line (1067, presumably the
# key-generator slot plus ')') is elided here; confirm.
1068             n = nm.create_from_cap(self.root_uri)
1069             assert n._node.please_ucwe_after_next_upload == False
# arm the one-shot UCWE so the delete's first write attempt fails
1070             n._node.please_ucwe_after_next_upload = True
1071             # This should succeed, not raise an exception
1072             return n.delete(u"file")
1073         d.addCallback(_do_delete)
# NOTE(review): the trailing "return d" (around line 1075) is elided from
# this excerpt — presumably present so trial waits on the Deferred; confirm.
1077 class Adder(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
1079 def test_overwrite(self):
1080 # note: This functionality could be tested without actually creating
1081 # several RSA keys. It would be faster without the GridTestMixin: use
1082 # dn.set_node(nodemaker.create_from_cap(make_chk_file_uri())) instead
1083 # of dn.add_file, and use a special NodeMaker that creates fake
1085 self.basedir = "dirnode/Adder/test_overwrite"
1087 c = self.g.clients[0]
1088 fileuri = make_chk_file_uri(1234)
1089 filenode = c.nodemaker.create_from_cap(fileuri)
1090 d = c.create_dirnode()
1092 def _create_directory_tree(root_node):
1097 d = root_node.add_file(u'file1', upload.Data("Important Things",
1099 d.addCallback(lambda res:
1100 root_node.add_file(u'file2', upload.Data("Sekrit Codes", None)))
1101 d.addCallback(lambda res:
1102 root_node.create_subdirectory(u"dir1"))
1103 d.addCallback(lambda res: root_node)
1106 d.addCallback(_create_directory_tree)
1108 def _test_adder(root_node):
1109 d = root_node.set_node(u'file1', filenode)
1110 # We've overwritten file1. Let's try it with a directory
1111 d.addCallback(lambda res:
1112 root_node.create_subdirectory(u'dir2'))
1113 d.addCallback(lambda res:
1114 root_node.set_node(u'dir2', filenode))
1115 # We try overwriting a file with a child while also specifying
1116 # overwrite=False. We should receive an ExistingChildError
1118 d.addCallback(lambda res:
1119 self.shouldFail(ExistingChildError, "set_node",
1120 "child 'file1' already exists",
1121 root_node.set_node, u"file1",
1122 filenode, overwrite=False))
1123 # If we try with a directory, we should see the same thing
1124 d.addCallback(lambda res:
1125 self.shouldFail(ExistingChildError, "set_node",
1126 "child 'dir1' already exists",
1127 root_node.set_node, u'dir1', filenode,
1129 d.addCallback(lambda res:
1130 root_node.set_node(u'file1', filenode,
1131 overwrite="only-files"))
1132 d.addCallback(lambda res:
1133 self.shouldFail(ExistingChildError, "set_node",
1134 "child 'dir1' already exists",
1135 root_node.set_node, u'dir1', filenode,
1136 overwrite="only-files"))
1139 d.addCallback(_test_adder)