
import time
from twisted.trial import unittest
from twisted.internet import defer
from allmydata import uri, dirnode
from allmydata.client import Client
from allmydata.immutable import upload
from allmydata.interfaces import IFileNode, \
     ExistingChildError, NoSuchChildError, \
     IDeepCheckResults, IDeepCheckAndRepairResults, CannotPackUnknownNodeError
from allmydata.mutable.filenode import MutableFileNode
from allmydata.mutable.common import UncoordinatedWriteError
from allmydata.util import hashutil, base32
from allmydata.monitor import Monitor
from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
     ErrorMixin
from allmydata.test.no_network import GridTestMixin
from allmydata.unknown import UnknownNode
from allmydata.nodemaker import NodeMaker
from base64 import b32decode
import common_util as testutil

class Dirnode(GridTestMixin, unittest.TestCase,
              testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
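    # End-to-end DirectoryNode tests, run against the small in-process
    # no-network grid provided by GridTestMixin.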
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.

    def test_basic(self):
        self.basedir = "dirnode/Dirnode/test_basic"
        self.set_up_grid()
        c = self.g.clients[0]
        d = c.create_dirnode()
        def _done(res):
            self.failUnless(isinstance(res, dirnode.DirectoryNode))
            rep = str(res)
            self.failUnless("RW" in rep)
        d.addCallback(_done)
        return d

    def test_check(self):
        self.basedir = "dirnode/Dirnode/test_check"
        self.set_up_grid()
        c = self.g.clients[0]
        d = c.create_dirnode()
        d.addCallback(lambda dn: dn.check(Monitor()))
        def _done(res):
            self.failUnless(res.is_healthy())
        d.addCallback(_done)
        return d

    def _test_deepcheck_create(self):
        # create a small tree with a loop, and some non-directories
        #  root/
        #  root/subdir/
        #  root/subdir/file1
        #  root/subdir/link -> root
        #  root/rodir
        c = self.g.clients[0]
        d = c.create_dirnode()
        def _created_root(rootnode):
            self._rootnode = rootnode
            return rootnode.create_empty_directory(u"subdir")
        d.addCallback(_created_root)
        def _created_subdir(subdir):
            self._subdir = subdir
            d = subdir.add_file(u"file1", upload.Data("data"*100, None))
            d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
            d.addCallback(lambda res: c.create_dirnode())
            d.addCallback(lambda dn:
                          self._rootnode.set_uri(u"rodir",
                                                 dn.get_uri(),
                                                 dn.get_readonly_uri()))
            return d
        d.addCallback(_created_subdir)
        def _done(res):
            return self._rootnode
        d.addCallback(_done)
        return d

    def test_deepcheck(self):
        self.basedir = "dirnode/Dirnode/test_deepcheck"
        self.set_up_grid()
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
        def _check_results(r):
            self.failUnless(IDeepCheckResults.providedBy(r))
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 4,
                                  "count-objects-healthy": 4,
                                  "count-objects-unhealthy": 0,
                                  "count-objects-unrecoverable": 0,
                                  "count-corrupt-shares": 0,
                                  })
            self.failIf(r.get_corrupt_shares())
            self.failUnlessEqual(len(r.get_all_results()), 4)
        d.addCallback(_check_results)
        return d

    def test_deepcheck_and_repair(self):
        self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair"
        self.set_up_grid()
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode:
                      rootnode.start_deep_check_and_repair().when_done())
        def _check_results(r):
            self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 4,
                                  "count-objects-healthy-pre-repair": 4,
                                  "count-objects-unhealthy-pre-repair": 0,
                                  "count-objects-unrecoverable-pre-repair": 0,
                                  "count-corrupt-shares-pre-repair": 0,
                                  "count-objects-healthy-post-repair": 4,
                                  "count-objects-unhealthy-post-repair": 0,
                                  "count-objects-unrecoverable-post-repair": 0,
                                  "count-corrupt-shares-post-repair": 0,
                                  "count-repairs-attempted": 0,
                                  "count-repairs-successful": 0,
                                  "count-repairs-unsuccessful": 0,
                                  })
            self.failIf(r.get_corrupt_shares())
            self.failIf(r.get_remaining_corrupt_shares())
            self.failUnlessEqual(len(r.get_all_results()), 4)
        d.addCallback(_check_results)
        return d

    def _mark_file_bad(self, rootnode):
        si = rootnode.get_storage_index()
        self.delete_shares_numbered(rootnode.get_uri(), [0])
        return rootnode

    def test_deepcheck_problems(self):
        self.basedir = "dirnode/Dirnode/test_deepcheck_problems"
        self.set_up_grid()
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
        def _check_results(r):
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 4,
                                  "count-objects-healthy": 3,
                                  "count-objects-unhealthy": 1,
                                  "count-objects-unrecoverable": 0,
                                  "count-corrupt-shares": 0,
                                  })
            #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
        d.addCallback(_check_results)
        return d

    def test_readonly(self):
        self.basedir = "dirnode/Dirnode/test_readonly"
        self.set_up_grid()
        c = self.g.clients[0]
        nm = c.nodemaker
        filecap = make_chk_file_uri(1234)
        filenode = nm.create_from_cap(filecap)
        uploadable = upload.Data("some data", convergence="some convergence string")

        d = c.create_dirnode()
        def _created(rw_dn):
            d2 = rw_dn.set_uri(u"child", filecap, filecap)
            d2.addCallback(lambda res: rw_dn)
            return d2
        d.addCallback(_created)

        def _ready(rw_dn):
            ro_uri = rw_dn.get_readonly_uri()
            ro_dn = c.create_node_from_uri(ro_uri)
            self.failUnless(ro_dn.is_readonly())
            self.failUnless(ro_dn.is_mutable())

            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_uri, u"newchild", filecap, filecap)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_node, u"newchild", filenode)
            self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
                            ro_dn.set_nodes, [ (u"newchild", filenode) ])
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.add_file, u"newchild", uploadable)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.delete, u"child")
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.create_empty_directory, u"newchild")
            self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
                            ro_dn.set_metadata_for, u"child", {})
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.move_child_to, u"child", rw_dn)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            rw_dn.move_child_to, u"child", ro_dn)
            return ro_dn.list()
        d.addCallback(_ready)
        def _listed(children):
            self.failUnless(u"child" in children)
        d.addCallback(_listed)
        return d

    def failUnlessGreaterThan(self, a, b):
        self.failUnless(a > b, "%r should be > %r" % (a, b))

    def failUnlessGreaterOrEqualThan(self, a, b):
        self.failUnless(a >= b, "%r should be >= %r" % (a, b))

    def test_create(self):
        self.basedir = "dirnode/Dirnode/test_create"
        self.set_up_grid()
        c = self.g.clients[0]

        self.expected_manifest = []
        self.expected_verifycaps = set()
        self.expected_storage_indexes = set()

        d = c.create_dirnode()
        def _then(n):
            # /
            self.rootnode = n
            self.failUnless(n.is_mutable())
            u = n.get_uri()
            self.failUnless(u)
            self.failUnless(u.startswith("URI:DIR2:"), u)
            u_ro = n.get_readonly_uri()
            self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
            u_v = n.get_verify_cap().to_string()
            self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
            u_r = n.get_repair_cap().to_string()
            self.failUnlessEqual(u_r, u)
            self.expected_manifest.append( ((), u) )
            self.expected_verifycaps.add(u_v)
            si = n.get_storage_index()
            self.expected_storage_indexes.add(base32.b2a(si))
            expected_si = n._uri._filenode_uri.storage_index
            self.failUnlessEqual(si, expected_si)

            d = n.list()
            d.addCallback(lambda res: self.failUnlessEqual(res, {}))
            d.addCallback(lambda res: n.has_child(u"missing"))
            d.addCallback(lambda res: self.failIf(res))

            fake_file_uri = make_mutable_file_uri()
            other_file_uri = make_mutable_file_uri()
            m = c.nodemaker.create_from_cap(fake_file_uri)
            ffu_v = m.get_verify_cap().to_string()
            self.expected_manifest.append( ((u"child",) , m.get_uri()) )
            self.expected_verifycaps.add(ffu_v)
            self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
            d.addCallback(lambda res: n.set_uri(u"child",
                                                fake_file_uri, fake_file_uri))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_uri-no",
                                          "child 'child' already exists",
                                          n.set_uri, u"child",
                                          other_file_uri, other_file_uri,
                                          overwrite=False))
            # /
            # /child = mutable

            d.addCallback(lambda res: n.create_empty_directory(u"subdir"))

            # /
            # /child = mutable
            # /subdir = directory
            def _created(subdir):
                self.failUnless(isinstance(subdir, dirnode.DirectoryNode))
                self.subdir = subdir
                new_v = subdir.get_verify_cap().to_string()
                assert isinstance(new_v, str)
                self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
                self.expected_verifycaps.add(new_v)
                si = subdir.get_storage_index()
                self.expected_storage_indexes.add(base32.b2a(si))
            d.addCallback(_created)

            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "mkdir-no",
                                          "child 'subdir' already exists",
                                          n.create_empty_directory, u"subdir",
                                          overwrite=False))

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child", u"subdir"])))

            d.addCallback(lambda res: n.start_deep_stats().when_done())
            def _check_deepstats(stats):
                self.failUnless(isinstance(stats, dict))
                expected = {"count-immutable-files": 0,
                            "count-mutable-files": 1,
                            "count-literal-files": 0,
                            "count-files": 1,
                            "count-directories": 2,
                            "size-immutable-files": 0,
                            "size-literal-files": 0,
                            #"size-directories": 616, # varies
                            #"largest-directory": 616,
                            "largest-directory-children": 2,
                            "largest-immutable-file": 0,
                            }
                for k,v in expected.iteritems():
                    self.failUnlessEqual(stats[k], v,
                                         "stats[%s] was %s, not %s" %
                                         (k, stats[k], v))
                self.failUnless(stats["size-directories"] > 500,
                                stats["size-directories"])
                self.failUnless(stats["largest-directory"] > 500,
                                stats["largest-directory"])
                self.failUnlessEqual(stats["size-files-histogram"], [])
            d.addCallback(_check_deepstats)

            d.addCallback(lambda res: n.build_manifest().when_done())
            def _check_manifest(res):
                manifest = res["manifest"]
                self.failUnlessEqual(sorted(manifest),
                                     sorted(self.expected_manifest))
                stats = res["stats"]
                _check_deepstats(stats)
                self.failUnlessEqual(self.expected_verifycaps,
                                     res["verifycaps"])
                self.failUnlessEqual(self.expected_storage_indexes,
                                     res["storage-index"])
            d.addCallback(_check_manifest)

            def _add_subsubdir(res):
                return self.subdir.create_empty_directory(u"subsubdir")
            d.addCallback(_add_subsubdir)
            # /
            # /child = mutable
            # /subdir = directory
            # /subdir/subsubdir = directory
            d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
            d.addCallback(lambda subsubdir:
                          self.failUnless(isinstance(subsubdir,
                                                     dirnode.DirectoryNode)))
            d.addCallback(lambda res: n.get_child_at_path(u""))
            d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
                                                           n.get_uri()))

            d.addCallback(lambda res: n.get_metadata_for(u"child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))

            d.addCallback(lambda res:
                          self.shouldFail(NoSuchChildError, "gcamap-no",
                                          "nope",
                                          n.get_child_and_metadata_at_path,
                                          u"subdir/nope"))
            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u""))
            def _check_child_and_metadata1(res):
                child, metadata = res
                self.failUnless(isinstance(child, dirnode.DirectoryNode))
                # edge-metadata needs at least one path segment
                self.failUnlessEqual(sorted(metadata.keys()), [])
            d.addCallback(_check_child_and_metadata1)
            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u"child"))

            def _check_child_and_metadata2(res):
                child, metadata = res
                self.failUnlessEqual(child.get_uri(),
                                     fake_file_uri)
                self.failUnlessEqual(set(metadata.keys()),
                                     set(["tahoe", "ctime", "mtime"]))
            d.addCallback(_check_child_and_metadata2)

            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
            def _check_child_and_metadata3(res):
                child, metadata = res
                self.failUnless(isinstance(child, dirnode.DirectoryNode))
                self.failUnlessEqual(set(metadata.keys()),
                                     set(["tahoe", "ctime", "mtime"]))
            d.addCallback(_check_child_and_metadata3)

            # set_uri + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_uri(u"c2",
                                                fake_file_uri, fake_file_uri,
                                                {}))
            d.addCallback(lambda res: n.get_metadata_for(u"c2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))

            # You can't override the link timestamps.
            d.addCallback(lambda res: n.set_uri(u"c2",
                                                fake_file_uri, fake_file_uri,
                                                { 'tahoe': {'linkcrtime': "bogus"}}))
            d.addCallback(lambda res: n.get_metadata_for(u"c2"))
            def _has_good_linkcrtime(metadata):
                self.failUnless(metadata.has_key('tahoe'))
                self.failUnless(metadata['tahoe'].has_key('linkcrtime'))
                self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus')
            d.addCallback(_has_good_linkcrtime)

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_uri(u"c3",
                                                fake_file_uri, fake_file_uri))
            d.addCallback(lambda res: n.get_metadata_for(u"c3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))

            # or we can add specific metadata at set_uri() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_uri(u"c4",
                                                fake_file_uri, fake_file_uri,
                                                {"key": "value"}))
            d.addCallback(lambda res: n.get_metadata_for(u"c4"))
            d.addCallback(lambda metadata:
                              self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                              (metadata['key'] == "value"), metadata))

            d.addCallback(lambda res: n.delete(u"c2"))
            d.addCallback(lambda res: n.delete(u"c3"))
            d.addCallback(lambda res: n.delete(u"c4"))

            # set_node + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_node(u"d2", n, {}))
            d.addCallback(lambda res: c.create_dirnode())
            d.addCallback(lambda n2:
                          self.shouldFail(ExistingChildError, "set_node-no",
                                          "child 'd2' already exists",
                                          n.set_node, u"d2", n2,
                                          overwrite=False))
            d.addCallback(lambda res: n.get_metadata_for(u"d2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_node(u"d3", n))
            d.addCallback(lambda res: n.get_metadata_for(u"d3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))

            # or we can add specific metadata at set_node() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_node(u"d4", n,
                                                {"key": "value"}))
            d.addCallback(lambda res: n.get_metadata_for(u"d4"))
            d.addCallback(lambda metadata:
                          self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                          (metadata['key'] == "value"), metadata))

            d.addCallback(lambda res: n.delete(u"d2"))
            d.addCallback(lambda res: n.delete(u"d3"))
            d.addCallback(lambda res: n.delete(u"d4"))

            # metadata through set_children()
            d.addCallback(lambda res:
                          n.set_children([
                              (u"e1", fake_file_uri, fake_file_uri),
                              (u"e2", fake_file_uri, fake_file_uri, {}),
                              (u"e3", fake_file_uri, fake_file_uri,
                               {"key": "value"}),
                              ]))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_children-no",
                                          "child 'e1' already exists",
                                          n.set_children,
                                          [ (u"e1", other_file_uri, other_file_uri),
                                            (u"new", other_file_uri, other_file_uri), ],
                                          overwrite=False))
            # and 'new' should not have been created
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children: self.failIf(u"new" in children))
            d.addCallback(lambda res: n.get_metadata_for(u"e1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))
            d.addCallback(lambda res: n.get_metadata_for(u"e2"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
            d.addCallback(lambda res: n.get_metadata_for(u"e3"))
            d.addCallback(lambda metadata:
                              self.failUnless((set(metadata.keys()) == set(["key", "tahoe"]))
                                              and (metadata['key'] == "value"), metadata))

            d.addCallback(lambda res: n.delete(u"e1"))
            d.addCallback(lambda res: n.delete(u"e2"))
            d.addCallback(lambda res: n.delete(u"e3"))

            # metadata through set_nodes()
            d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
                                                    (u"f2", n, {}),
                                                    (u"f3", n,
                                                     {"key": "value"}),
                                                    ]))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_nodes-no",
                                          "child 'f1' already exists",
                                          n.set_nodes,
                                          [ (u"f1", n),
                                            (u"new", n), ],
                                          overwrite=False))
            # and 'new' should not have been created
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children: self.failIf(u"new" in children))
            d.addCallback(lambda res: n.get_metadata_for(u"f1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))
            d.addCallback(lambda res: n.get_metadata_for(u"f2"))
            d.addCallback(
                lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
            d.addCallback(lambda res: n.get_metadata_for(u"f3"))
            d.addCallback(lambda metadata:
                              self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                              (metadata['key'] == "value"), metadata))

            d.addCallback(lambda res: n.delete(u"f1"))
            d.addCallback(lambda res: n.delete(u"f2"))
            d.addCallback(lambda res: n.delete(u"f3"))


            d.addCallback(lambda res:
                          n.set_metadata_for(u"child",
                                             {"tags": ["web2.0-compatible"]}))
            d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata,
                                               {"tags": ["web2.0-compatible"]}))

            def _start(res):
                self._start_timestamp = time.time()
            d.addCallback(_start)
            # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
            # floats to hundredths (it uses str(num) instead of repr(num)).
            # simplejson-1.7.3 does not have this bug. To prevent this bug
            # from causing the test to fail, stall for more than a few
            # hundredths of a second.
            d.addCallback(self.stall, 0.1)
            d.addCallback(lambda res: n.add_file(u"timestamps",
                                                 upload.Data("stamp me", convergence="some convergence string")))
            d.addCallback(self.stall, 0.1)
            def _stop(res):
                self._stop_timestamp = time.time()
            d.addCallback(_stop)

            d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
            def _check_timestamp1(metadata):
                self.failUnless("ctime" in metadata)
                self.failUnless("mtime" in metadata)
                self.failUnlessGreaterOrEqualThan(metadata["ctime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                                                  metadata["ctime"])
                self.failUnlessGreaterOrEqualThan(metadata["mtime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                                                  metadata["mtime"])
                # Our current timestamp rules say that replacing an existing
                # child should preserve the 'ctime' but update the mtime
                self._old_ctime = metadata["ctime"]
                self._old_mtime = metadata["mtime"]
            d.addCallback(_check_timestamp1)
            d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
            d.addCallback(lambda res: n.set_node(u"timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
            def _check_timestamp2(metadata):
                self.failUnlessEqual(metadata["ctime"], self._old_ctime,
                                     "%s != %s" % (metadata["ctime"],
                                                   self._old_ctime))
                self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
                return n.delete(u"timestamps")
            d.addCallback(_check_timestamp2)

            # also make sure we can add/update timestamps on a
            # previously-existing child that didn't have any, since there are
            # a lot of 0.7.0-generated edges around out there
            d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
            d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))
            d.addCallback(lambda res: n.delete(u"no_timestamps"))

            d.addCallback(lambda res: n.delete(u"subdir"))
            d.addCallback(lambda old_child:
                          self.failUnlessEqual(old_child.get_uri(),
                                               self.subdir.get_uri()))

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child"])))

            uploadable1 = upload.Data("some data", convergence="converge")
            d.addCallback(lambda res: n.add_file(u"newfile", uploadable1))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            uploadable2 = upload.Data("some data", convergence="stuff")
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "add_file-no",
                                          "child 'newfile' already exists",
                                          n.add_file, u"newfile",
                                          uploadable2,
                                          overwrite=False))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child", u"newfile"])))
            d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))

            uploadable3 = upload.Data("some data", convergence="converge")
            d.addCallback(lambda res: n.add_file(u"newfile-metadata",
                                                 uploadable3,
                                                 {"key": "value"}))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
            d.addCallback(lambda metadata:
                              self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                              (metadata['key'] == "value"), metadata))
            d.addCallback(lambda res: n.delete(u"newfile-metadata"))

            d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
            def _created2(subdir2):
                self.subdir2 = subdir2
                # put something in the way, to make sure it gets overwritten
                return subdir2.add_file(u"child", upload.Data("overwrite me",
                                                              "converge"))
            d.addCallback(_created2)

            d.addCallback(lambda res:
                          n.move_child_to(u"child", self.subdir2))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"newfile", u"subdir2"])))
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child"])))
            d.addCallback(lambda res: self.subdir2.get(u"child"))
            d.addCallback(lambda child:
                          self.failUnlessEqual(child.get_uri(),
                                               fake_file_uri))

            # move it back, using new_child_name=
            d.addCallback(lambda res:
                          self.subdir2.move_child_to(u"child", n, u"newchild"))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"newchild", u"newfile",
                                                       u"subdir2"])))
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()), []))

            # now make sure that we honor overwrite=False
            d.addCallback(lambda res:
                          self.subdir2.set_uri(u"newchild",
                                               other_file_uri, other_file_uri))

            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "move_child_to-no",
                                          "child 'newchild' already exists",
                                          n.move_child_to, u"newchild",
                                          self.subdir2,
                                          overwrite=False))
            d.addCallback(lambda res: self.subdir2.get(u"newchild"))
            d.addCallback(lambda child:
                          self.failUnlessEqual(child.get_uri(),
                                               other_file_uri))

            return d

        d.addCallback(_then)

        d.addErrback(self.explain_error)
        return d

class Packing(unittest.TestCase):
    # This is a base32-encoded representation of the directory tree
    # root/file1
    # root/file2
    # root/file3
    # as represented after being fed to _pack_contents.
    # We have it here so we can decode it, feed it to
    # _unpack_contents, and verify that _unpack_contents
    # works correctly.

    known_tree = "GM4TOORVHJTGS3DFGEWDSNJ2KVJESOSDJBFTU33MPB2GS3LZNVYG6N3GGI3WU5TIORTXC3DOMJ2G4NB2MVWXUZDONBVTE5LNGRZWK2LYN55GY23XGNYXQMTOMZUWU5TENN4DG23ZG5UTO2L2NQ2DO6LFMRWDMZJWGRQTUMZ2GEYDUMJQFQYTIMZ22XZKZORX5XS7CAQCSK3URR6QOHISHRCMGER5LRFSZRNAS5ZSALCS6TWFQAE754IVOIKJVK73WZPP3VUUEDTX3WHTBBZ5YX3CEKHCPG3ZWQLYA4QM6LDRCF7TJQYWLIZHKGN5ROA3AUZPXESBNLQQ6JTC2DBJU2D47IZJTLR3PKZ4RVF57XLPWY7FX7SZV3T6IJ3ORFW37FXUPGOE3ROPFNUX5DCGMAQJ3PGGULBRGM3TU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGI3TKNRWGEWCAITUMFUG6ZJCHIQHWITMNFXGW3LPORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBSG42TMNRRFQQCE3DJNZVWG4TUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQQCE3LUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQWDGOJRHI2TUZTJNRSTELBZGQ5FKUSJHJBUQSZ2MFYGKZ3SOBSWQ43IO52WO23CNAZWU3DUGVSWSNTIOE5DK33POVTW4ZLNMNWDK6DHPA2GS2THNF2W25DEN5VGY2LQNFRGG5DKNNRHO5TZPFTWI6LNMRYGQ2LCGJTHM4J2GM5DCMB2GQWDCNBSHKVVQBGRYMACKJ27CVQ6O6B4QPR72RFVTGOZUI76XUSWAX73JRV5PYRHMIFYZIA25MXDPGUGML6M2NMRSG4YD4W4K37ZDYSXHMJ3IUVT4F64YTQQVBJFFFOUC7J7LAB2VFCL5UKKGMR2D3F4EPOYC7UYWQZNR5KXHBSNXLCNBX2SNF22DCXJIHSMEKWEWOG5XCJEVVZ7UW5IB6I64XXQSJ34B5CAYZGZIIMR6LBRGMZTU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYFQQCE5DBNBXWKIR2EB5SE3DJNZVW233UNFWWKIR2EAYTENBWGY3DGOBZG4XDIMZQGIYTQLBAEJWGS3TLMNZHI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTAMRRHB6SYIBCNV2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYPUWCYMZZGU5DKOTGNFWGKMZMHE2DUVKSJE5EGSCLHJRW25DDPBYTO2DXPB3GM6DBNYZTI6LJMV3DM2LWNB4TU4LWMNSWW3LKORXWK5DEMN3TI23NNE3WEM3SORRGY5THPA3TKNBUMNZG453BOF2GSZLXMVWWI3DJOFZW623RHIZTUMJQHI2SYMJUGI5BOSHWDPG3WKPAVXCF3XMKA7QVIWPRMWJHDTQHD27AHDCPJWDQENQ5H5ZZILTXQNIXXCIW4LKQABU2GCFRG5FHQN7CHD7HF4EKNRZFIV2ZYQIBM7IQU7F4RGB3XCX3FREPBKQ7UCICHVWPCYFGA6OLH3J45LXQ6GWWICJ3PGWJNLZ7PCRNLAPNYUGU6BENS7OXMBEOOFRIZV3PF2FFWZ5WHDPKXERYP7GNHKRMGEZTOOT3EJRXI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTGNRSGY4SYIBCORQWQ33FEI5CA6ZCNRUW423NN52GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMZTMMRWHEWCAITMNFXGWY3SORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCAITNORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCY==="

    def test_unpack_and_pack_behavior(self):
        known_tree = b32decode(self.known_tree)
        nodemaker = NodeMaker(None, None, None,
                              None, None, None,
                              {"k": 3, "n": 10}, None)
        writecap = "URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q"
        filenode = nodemaker.create_from_cap(writecap)
        node = dirnode.DirectoryNode(filenode, nodemaker, None)
        children = node._unpack_contents(known_tree)
        self._check_children(children)

        packed_children = node._pack_contents(children)
        children = node._unpack_contents(packed_children)
        self._check_children(children)

    def _check_children(self, children):
        # Are all the expected child nodes there?
        self.failUnless(children.has_key(u'file1'))
        self.failUnless(children.has_key(u'file2'))
        self.failUnless(children.has_key(u'file3'))

        # Are the metadata for child 3 right?
        file3_rocap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
        file3_rwcap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
        file3_metadata = {'ctime': 1246663897.4336269, 'tahoe': {'linkmotime': 1246663897.4336269, 'linkcrtime': 1246663897.4336269}, 'mtime': 1246663897.4336269}
        self.failUnlessEqual(file3_metadata, children[u'file3'][1])
        self.failUnlessEqual(file3_rocap,
                             children[u'file3'][0].get_readonly_uri())
        self.failUnlessEqual(file3_rwcap,
                             children[u'file3'][0].get_uri())

        # Are the metadata for child 2 right?
        file2_rocap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
        file2_rwcap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
        file2_metadata = {'ctime': 1246663897.430218, 'tahoe': {'linkmotime': 1246663897.430218, 'linkcrtime': 1246663897.430218}, 'mtime': 1246663897.430218}
        self.failUnlessEqual(file2_metadata, children[u'file2'][1])
        self.failUnlessEqual(file2_rocap,
                             children[u'file2'][0].get_readonly_uri())
        self.failUnlessEqual(file2_rwcap,
                             children[u'file2'][0].get_uri())

        # Are the metadata for child 1 right?
        file1_rocap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
        file1_rwcap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
        file1_metadata = {'ctime': 1246663897.4275661, 'tahoe': {'linkmotime': 1246663897.4275661, 'linkcrtime': 1246663897.4275661}, 'mtime': 1246663897.4275661}
        self.failUnlessEqual(file1_metadata, children[u'file1'][1])
        self.failUnlessEqual(file1_rocap,
                             children[u'file1'][0].get_readonly_uri())
        self.failUnlessEqual(file1_rwcap,
                             children[u'file1'][0].get_uri())

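    # dirnode.CachingDict stores an extra cached item alongside each value;
    # the test below expects set_both_items/get_both_items to round-trip both,
    # and a plain __setitem__ to leave the cached item as None.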
    def test_caching_dict(self):
        d = dirnode.CachingDict()
        d.set_both_items("test", "test2", ("test3", "test4"))
        cached, value = d.get_both_items("test")

        self.failUnlessEqual(cached, "test2")
        self.failUnlessEqual(value, ("test3", "test4"))

        d['test'] = ("test3", "test2")

        cached, value = d.get_both_items("test")

        self.failUnlessEqual(cached, None)
        self.failUnlessEqual(value, ("test3", "test2"))

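# A minimal in-memory stand-in for a mutable file node: each instance gets its
# own fake SSK write cap, and modify()/download_best_version() just operate on
# a plain string, which is all the directory-node tests below need.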
class FakeMutableFile:
    counter = 0
    def __init__(self, initial_contents=""):
        self.data = initial_contents
        counter = FakeMutableFile.counter
        FakeMutableFile.counter += 1
        writekey = hashutil.ssk_writekey_hash(str(counter))
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(str(counter))
        self.uri = uri.WriteableSSKFileURI(writekey, fingerprint)
    def get_uri(self):
        return self.uri.to_string()
    def download_best_version(self):
        return defer.succeed(self.data)
    def get_writekey(self):
        return "writekey"
    def is_readonly(self):
        return False
    def is_mutable(self):
        return True
    def modify(self, modifier):
        self.data = modifier(self.data, None, True)
        return defer.succeed(None)

class FakeNodeMaker(NodeMaker):
    def create_mutable_file(self, contents="", keysize=None):
        return defer.succeed(FakeMutableFile(contents))

class FakeClient2(Client):
    def __init__(self):
        self.nodemaker = FakeNodeMaker(None, None, None,
                                       None, None, None,
                                       {"k":3,"n":10}, None)
    def create_node_from_uri(self, rwcap, rocap):
        return self.nodemaker.create_from_cap(rwcap, rocap)

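# Dirnode tests that run against the in-memory fakes above instead of a real grid.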
class Dirnode2(unittest.TestCase, testutil.ShouldFailMixin):
    def setUp(self):
        self.client = FakeClient2()
        self.nodemaker = self.client.nodemaker

    def test_from_future(self):
        # create a dirnode that contains unknown URI types, and make sure we
        # tolerate them properly. Since dirnodes aren't allowed to add
        # unknown node types, we have to be tricky.
        d = self.nodemaker.create_new_mutable_directory()
        future_writecap = "x-tahoe-crazy://I_am_from_the_future."
        future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future."
        future_node = UnknownNode(future_writecap, future_readcap)
        def _then(n):
            self._node = n
            return n.set_node(u"future", future_node)
        d.addCallback(_then)

        # we should be prohibited from adding an unknown URI to a directory,
        # since we don't know how to diminish the cap to a readcap (for the
        # dirnode's rocap slot), and we don't want to accidentally grant
        # write access to a holder of the dirnode's readcap.
        d.addCallback(lambda ign:
             self.shouldFail(CannotPackUnknownNodeError,
                             "copy unknown",
                             "cannot pack unknown node as child add",
                             self._node.set_uri, u"add",
                             future_writecap, future_readcap))
        d.addCallback(lambda ign: self._node.list())
        def _check(children):
            self.failUnlessEqual(len(children), 1)
            (fn, metadata) = children[u"future"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
            # but we *should* be allowed to copy this node, because the
            # UnknownNode contains all the information that was in the
            # original directory (readcap and writecap), so we're preserving
            # everything.
            return self._node.set_node(u"copy", fn)
        d.addCallback(_check)
        d.addCallback(lambda ign: self._node.list())
        def _check2(children):
            self.failUnlessEqual(len(children), 2)
            (fn, metadata) = children[u"copy"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
        d.addCallback(_check2)
        return d

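# Unit tests for the dirnode.DeepStats accumulator (counters, max-trackers, and
# the size-files-histogram buckets) that backs the deep-stats/deep-check results.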
class DeepStats(unittest.TestCase):
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
    def test_stats(self):
        ds = dirnode.DeepStats(None)
        ds.add("count-files")
        ds.add("size-immutable-files", 123)
        ds.histogram("size-files-histogram", 123)
        ds.max("largest-directory", 444)

        s = ds.get_results()
        self.failUnlessEqual(s["count-files"], 1)
        self.failUnlessEqual(s["size-immutable-files"], 123)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)

        ds.add("count-files")
        ds.add("size-immutable-files", 321)
        ds.histogram("size-files-histogram", 321)
        ds.max("largest-directory", 2)

        s = ds.get_results()
        self.failUnlessEqual(s["count-files"], 2)
        self.failUnlessEqual(s["size-immutable-files"], 444)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (101, 316, 1), (317, 1000, 1) ])

        ds = dirnode.DeepStats(None)
        for i in range(1, 1100):
            ds.histogram("size-files-histogram", i)
        ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
        s = ds.get_results()
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (1, 3, 3),
                               (4, 10, 7),
                               (11, 31, 21),
                               (32, 100, 69),
                               (101, 316, 216),
                               (317, 1000, 684),
                               (1001, 3162, 99),
                               (3162277660169L, 10000000000000L, 1),
                               ])

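# A MutableFileNode that raises UncoordinatedWriteError exactly once, on demand,
# after an otherwise-successful upload. Deleter.test_retry (below) uses it, via
# UCWEingNodeMaker, to check that dirnode.delete retries after a UCWE.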
class UCWEingMutableFileNode(MutableFileNode):
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        d = MutableFileNode._upload(self, new_contents, servermap)
        def _ucwe(res):
            if self.please_ucwe_after_next_upload:
                self.please_ucwe_after_next_upload = False
                raise UncoordinatedWriteError()
            return res
        d.addCallback(_ucwe)
        return d

class UCWEingNodeMaker(NodeMaker):
    def _create_mutable(self, cap):
        n = UCWEingMutableFileNode(self.storage_broker, self.secret_holder,
                                   self.default_encoding_parameters,
                                   self.history)
        return n.init_from_uri(cap)


class Deleter(GridTestMixin, unittest.TestCase):
    timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.
    def test_retry(self):
        # ticket #550, a dirnode.delete which experiences an
        # UncoordinatedWriteError will fail with an incorrect "you're
        # deleting something which isn't there" NoSuchChildError exception.

        # to trigger this, we start by creating a directory with a single
        # file in it. Then we create a special dirnode that uses a modified
        # MutableFileNode which will raise UncoordinatedWriteError once on
        # demand. We then call dirnode.delete, which ought to retry and
        # succeed.

        self.basedir = self.mktemp()
        self.set_up_grid()
        c0 = self.g.clients[0]
        d = c0.create_dirnode()
        small = upload.Data("Small enough for a LIT", None)
        def _created_dir(dn):
            self.root = dn
            self.root_uri = dn.get_uri()
            return dn.add_file(u"file", small)
        d.addCallback(_created_dir)
        def _do_delete(ignored):
            nm = UCWEingNodeMaker(c0.storage_broker, c0._secret_holder,
                                  c0.get_history(), c0.getServiceNamed("uploader"),
                                  c0.downloader,
                                  c0.download_cache_dirman,
                                  c0.get_encoding_parameters(),
                                  c0._key_generator)
            n = nm.create_from_cap(self.root_uri)
            assert n._node.please_ucwe_after_next_upload == False
            n._node.please_ucwe_after_next_upload = True
            # This should succeed, not raise an exception
            return n.delete(u"file")
        d.addCallback(_do_delete)

        return d

class Adder(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):

    def test_overwrite(self):
        # note: This functionality could be tested without actually creating
        # several RSA keys. It would be faster without the GridTestMixin: use
        # dn.set_node(nodemaker.create_from_cap(make_chk_file_uri())) instead
        # of dn.add_file, and use a special NodeMaker that creates fake
        # mutable files.
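        # A rough sketch of that faster variant (untested, for illustration
        # only), reusing the FakeClient2/FakeNodeMaker helpers defined earlier
        # in this module:
        #
        #   c = FakeClient2()
        #   d = c.nodemaker.create_new_mutable_directory()
        #   def _made(dn):
        #       filenode = c.nodemaker.create_from_cap(make_chk_file_uri(1234))
        #       return dn.set_node(u"file1", filenode)
        #   d.addCallback(_made)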
        self.basedir = "dirnode/Adder/test_overwrite"
        self.set_up_grid()
        c = self.g.clients[0]
        fileuri = make_chk_file_uri(1234)
        filenode = c.nodemaker.create_from_cap(fileuri)
        d = c.create_dirnode()

        def _create_directory_tree(root_node):
            # Build
            # root/file1
            # root/file2
            # root/dir1
            d = root_node.add_file(u'file1', upload.Data("Important Things",
                None))
            d.addCallback(lambda res:
                root_node.add_file(u'file2', upload.Data("Sekrit Codes", None)))
            d.addCallback(lambda res:
                root_node.create_empty_directory(u"dir1"))
            d.addCallback(lambda res: root_node)
            return d

        d.addCallback(_create_directory_tree)

        def _test_adder(root_node):
            d = root_node.set_node(u'file1', filenode)
            # We've overwritten file1. Let's try it with a directory
            d.addCallback(lambda res:
                root_node.create_empty_directory(u'dir2'))
            d.addCallback(lambda res:
                root_node.set_node(u'dir2', filenode))
            # We try overwriting a file with a child while also specifying
            # overwrite=False. We should receive an ExistingChildError
            # when we do this.
            d.addCallback(lambda res:
                self.shouldFail(ExistingChildError, "set_node",
                                "child 'file1' already exists",
                               root_node.set_node, u"file1",
                               filenode, overwrite=False))
            # If we try with a directory, we should see the same thing
            d.addCallback(lambda res:
                self.shouldFail(ExistingChildError, "set_node",
                                "child 'dir1' already exists",
                                root_node.set_node, u'dir1', filenode,
                                overwrite=False))
            d.addCallback(lambda res:
                 root_node.set_node(u'file1', filenode,
                                    overwrite="only-files"))
            d.addCallback(lambda res:
                 self.shouldFail(ExistingChildError, "set_node",
                                "child 'dir1' already exists",
                                root_node.set_node, u'dir1', filenode,
                                overwrite="only-files"))
            return d

        d.addCallback(_test_adder)
        return d