
import time
from twisted.trial import unittest
from twisted.internet import defer
from allmydata import uri, dirnode
from allmydata.client import Client
from allmydata.immutable import upload
from allmydata.interfaces import IFileNode, \
     ExistingChildError, NoSuchChildError, \
     IDeepCheckResults, IDeepCheckAndRepairResults, CannotPackUnknownNodeError
from allmydata.mutable.filenode import MutableFileNode
from allmydata.mutable.common import UncoordinatedWriteError
from allmydata.util import hashutil, base32
from allmydata.monitor import Monitor
from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
     ErrorMixin
from allmydata.test.no_network import GridTestMixin
from allmydata.unknown import UnknownNode
from allmydata.nodemaker import NodeMaker
from base64 import b32decode
import common_util as testutil

class Dirnode(GridTestMixin, unittest.TestCase,
              testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.

    def test_basic(self):
        self.basedir = "dirnode/Dirnode/test_basic"
        self.set_up_grid()
        c = self.g.clients[0]
        d = c.create_empty_dirnode()
        def _done(res):
            self.failUnless(isinstance(res, dirnode.DirectoryNode))
            rep = str(res)
            self.failUnless("RW" in rep)
        d.addCallback(_done)
        return d

    def test_check(self):
        self.basedir = "dirnode/Dirnode/test_check"
        self.set_up_grid()
        c = self.g.clients[0]
        d = c.create_empty_dirnode()
        d.addCallback(lambda dn: dn.check(Monitor()))
        def _done(res):
            self.failUnless(res.is_healthy())
        d.addCallback(_done)
        return d

    def _test_deepcheck_create(self):
        # create a small tree with a loop, and some non-directories
        #  root/
        #  root/subdir/
        #  root/subdir/file1
        #  root/subdir/link -> root
        #  root/rodir
        c = self.g.clients[0]
        d = c.create_empty_dirnode()
        def _created_root(rootnode):
            self._rootnode = rootnode
            return rootnode.create_empty_directory(u"subdir")
        d.addCallback(_created_root)
        def _created_subdir(subdir):
            self._subdir = subdir
            d = subdir.add_file(u"file1", upload.Data("data"*100, None))
            d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
            d.addCallback(lambda res: c.create_empty_dirnode())
            d.addCallback(lambda dn:
                          self._rootnode.set_uri(u"rodir",
                                                 dn.get_readonly_uri()))
            return d
        d.addCallback(_created_subdir)
        def _done(res):
            return self._rootnode
        d.addCallback(_done)
        return d

    def test_deepcheck(self):
        self.basedir = "dirnode/Dirnode/test_deepcheck"
        self.set_up_grid()
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
        def _check_results(r):
            self.failUnless(IDeepCheckResults.providedBy(r))
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 4,
                                  "count-objects-healthy": 4,
                                  "count-objects-unhealthy": 0,
                                  "count-objects-unrecoverable": 0,
                                  "count-corrupt-shares": 0,
                                  })
            self.failIf(r.get_corrupt_shares())
            self.failUnlessEqual(len(r.get_all_results()), 4)
        d.addCallback(_check_results)
        return d

    def test_deepcheck_and_repair(self):
        self.basedir = "dirnode/Dirnode/test_deepcheck_and_repair"
        self.set_up_grid()
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode:
                      rootnode.start_deep_check_and_repair().when_done())
        def _check_results(r):
            self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 4,
                                  "count-objects-healthy-pre-repair": 4,
                                  "count-objects-unhealthy-pre-repair": 0,
                                  "count-objects-unrecoverable-pre-repair": 0,
                                  "count-corrupt-shares-pre-repair": 0,
                                  "count-objects-healthy-post-repair": 4,
                                  "count-objects-unhealthy-post-repair": 0,
                                  "count-objects-unrecoverable-post-repair": 0,
                                  "count-corrupt-shares-post-repair": 0,
                                  "count-repairs-attempted": 0,
                                  "count-repairs-successful": 0,
                                  "count-repairs-unsuccessful": 0,
                                  })
            self.failIf(r.get_corrupt_shares())
            self.failIf(r.get_remaining_corrupt_shares())
            self.failUnlessEqual(len(r.get_all_results()), 4)
        d.addCallback(_check_results)
        return d

    def _mark_file_bad(self, rootnode):
        si = rootnode.get_storage_index()
        self.delete_shares_numbered(rootnode.get_uri(), [0])
        return rootnode

    def test_deepcheck_problems(self):
        self.basedir = "dirnode/Dirnode/test_deepcheck_problems"
        self.set_up_grid()
        d = self._test_deepcheck_create()
        d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
        d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
        def _check_results(r):
            c = r.get_counters()
            self.failUnlessEqual(c,
                                 {"count-objects-checked": 4,
                                  "count-objects-healthy": 3,
                                  "count-objects-unhealthy": 1,
                                  "count-objects-unrecoverable": 0,
                                  "count-corrupt-shares": 0,
                                  })
            #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
        d.addCallback(_check_results)
        return d

    def test_readonly(self):
        self.basedir = "dirnode/Dirnode/test_readonly"
        self.set_up_grid()
        c = self.g.clients[0]
        nm = c.nodemaker
        filecap = make_chk_file_uri(1234)
        filenode = nm.create_from_cap(filecap)
        uploadable = upload.Data("some data", convergence="some convergence string")

        d = c.create_empty_dirnode()
        def _created(rw_dn):
            d2 = rw_dn.set_uri(u"child", filecap)
            d2.addCallback(lambda res: rw_dn)
            return d2
        d.addCallback(_created)

        def _ready(rw_dn):
            ro_uri = rw_dn.get_readonly_uri()
            ro_dn = c.create_node_from_uri(ro_uri)
            self.failUnless(ro_dn.is_readonly())
            self.failUnless(ro_dn.is_mutable())

            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_uri, u"newchild", filecap)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.set_node, u"newchild", filenode)
            self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
                            ro_dn.set_nodes, [ (u"newchild", filenode) ])
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.add_file, u"newchild", uploadable)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.delete, u"child")
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.create_empty_directory, u"newchild")
            self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
                            ro_dn.set_metadata_for, u"child", {})
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            ro_dn.move_child_to, u"child", rw_dn)
            self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                            rw_dn.move_child_to, u"child", ro_dn)
            return ro_dn.list()
        d.addCallback(_ready)
        def _listed(children):
            self.failUnless(u"child" in children)
        d.addCallback(_listed)
        return d

    def failUnlessGreaterThan(self, a, b):
        self.failUnless(a > b, "%r should be > %r" % (a, b))

    def failUnlessGreaterOrEqualThan(self, a, b):
        self.failUnless(a >= b, "%r should be >= %r" % (a, b))

    def test_create(self):
        self.basedir = "dirnode/Dirnode/test_create"
        self.set_up_grid()
        c = self.g.clients[0]

        self.expected_manifest = []
        self.expected_verifycaps = set()
        self.expected_storage_indexes = set()

        d = c.create_empty_dirnode()
        def _then(n):
            # /
            self.rootnode = n
            self.failUnless(n.is_mutable())
            u = n.get_uri()
            self.failUnless(u)
            self.failUnless(u.startswith("URI:DIR2:"), u)
            u_ro = n.get_readonly_uri()
            self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
            u_v = n.get_verify_cap().to_string()
            self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
            u_r = n.get_repair_cap().to_string()
            self.failUnlessEqual(u_r, u)
            self.expected_manifest.append( ((), u) )
            self.expected_verifycaps.add(u_v)
            si = n.get_storage_index()
            self.expected_storage_indexes.add(base32.b2a(si))
            expected_si = n._uri._filenode_uri.storage_index
            self.failUnlessEqual(si, expected_si)

            d = n.list()
            d.addCallback(lambda res: self.failUnlessEqual(res, {}))
            d.addCallback(lambda res: n.has_child(u"missing"))
            d.addCallback(lambda res: self.failIf(res))

            fake_file_uri = make_mutable_file_uri()
            other_file_uri = make_mutable_file_uri()
            m = c.nodemaker.create_from_cap(fake_file_uri)
            ffu_v = m.get_verify_cap().to_string()
            self.expected_manifest.append( ((u"child",) , m.get_uri()) )
            self.expected_verifycaps.add(ffu_v)
            self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
            d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_uri-no",
                                          "child 'child' already exists",
                                          n.set_uri, u"child", other_file_uri,
                                          overwrite=False))
            # /
            # /child = mutable

            d.addCallback(lambda res: n.create_empty_directory(u"subdir"))

            # /
            # /child = mutable
            # /subdir = directory
            def _created(subdir):
                self.failUnless(isinstance(subdir, dirnode.DirectoryNode))
                self.subdir = subdir
                new_v = subdir.get_verify_cap().to_string()
                assert isinstance(new_v, str)
                self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
                self.expected_verifycaps.add(new_v)
                si = subdir.get_storage_index()
                self.expected_storage_indexes.add(base32.b2a(si))
            d.addCallback(_created)

            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "mkdir-no",
                                          "child 'subdir' already exists",
                                          n.create_empty_directory, u"subdir",
                                          overwrite=False))

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child", u"subdir"])))

            d.addCallback(lambda res: n.start_deep_stats().when_done())
            def _check_deepstats(stats):
                self.failUnless(isinstance(stats, dict))
                expected = {"count-immutable-files": 0,
                            "count-mutable-files": 1,
                            "count-literal-files": 0,
                            "count-files": 1,
                            "count-directories": 2,
                            "size-immutable-files": 0,
                            "size-literal-files": 0,
                            #"size-directories": 616, # varies
                            #"largest-directory": 616,
                            "largest-directory-children": 2,
                            "largest-immutable-file": 0,
                            }
                for k,v in expected.iteritems():
                    self.failUnlessEqual(stats[k], v,
                                         "stats[%s] was %s, not %s" %
                                         (k, stats[k], v))
                self.failUnless(stats["size-directories"] > 500,
                                stats["size-directories"])
                self.failUnless(stats["largest-directory"] > 500,
                                stats["largest-directory"])
                self.failUnlessEqual(stats["size-files-histogram"], [])
            d.addCallback(_check_deepstats)

            d.addCallback(lambda res: n.build_manifest().when_done())
            def _check_manifest(res):
                manifest = res["manifest"]
                self.failUnlessEqual(sorted(manifest),
                                     sorted(self.expected_manifest))
                stats = res["stats"]
                _check_deepstats(stats)
                self.failUnlessEqual(self.expected_verifycaps,
                                     res["verifycaps"])
                self.failUnlessEqual(self.expected_storage_indexes,
                                     res["storage-index"])
            d.addCallback(_check_manifest)

            def _add_subsubdir(res):
                return self.subdir.create_empty_directory(u"subsubdir")
            d.addCallback(_add_subsubdir)
            # /
            # /child = mutable
            # /subdir = directory
            # /subdir/subsubdir = directory
            d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
            d.addCallback(lambda subsubdir:
                          self.failUnless(isinstance(subsubdir,
                                                     dirnode.DirectoryNode)))
            d.addCallback(lambda res: n.get_child_at_path(u""))
            d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
                                                           n.get_uri()))

            d.addCallback(lambda res: n.get_metadata_for(u"child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))

            d.addCallback(lambda res:
                          self.shouldFail(NoSuchChildError, "gcamap-no",
                                          "nope",
                                          n.get_child_and_metadata_at_path,
                                          u"subdir/nope"))
            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u""))
            def _check_child_and_metadata1(res):
                child, metadata = res
                self.failUnless(isinstance(child, dirnode.DirectoryNode))
                # edge-metadata needs at least one path segment
                self.failUnlessEqual(sorted(metadata.keys()), [])
            d.addCallback(_check_child_and_metadata1)
            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u"child"))

            def _check_child_and_metadata2(res):
                child, metadata = res
                self.failUnlessEqual(child.get_uri(),
                                     fake_file_uri)
                self.failUnlessEqual(set(metadata.keys()),
                                     set(["tahoe", "ctime", "mtime"]))
            d.addCallback(_check_child_and_metadata2)

            d.addCallback(lambda res:
                          n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
            def _check_child_and_metadata3(res):
                child, metadata = res
                self.failUnless(isinstance(child, dirnode.DirectoryNode))
                self.failUnlessEqual(set(metadata.keys()),
                                     set(["tahoe", "ctime", "mtime"]))
            d.addCallback(_check_child_and_metadata3)

            # set_uri + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, {}))
            d.addCallback(lambda res: n.get_metadata_for(u"c2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))

            # You can't override the link timestamps.
            d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, { 'tahoe': {'linkcrtime': "bogus"}}))
            d.addCallback(lambda res: n.get_metadata_for(u"c2"))
            def _has_good_linkcrtime(metadata):
                self.failUnless(metadata.has_key('tahoe'))
                self.failUnless(metadata['tahoe'].has_key('linkcrtime'))
                self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus')
            d.addCallback(_has_good_linkcrtime)

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri))
            d.addCallback(lambda res: n.get_metadata_for(u"c3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))

            # or we can add specific metadata at set_uri() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri,
                                                {"key": "value"}))
            d.addCallback(lambda res: n.get_metadata_for(u"c4"))
            d.addCallback(lambda metadata:
                              self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                              (metadata['key'] == "value"), metadata))

            d.addCallback(lambda res: n.delete(u"c2"))
            d.addCallback(lambda res: n.delete(u"c3"))
            d.addCallback(lambda res: n.delete(u"c4"))

            # set_node + metadata
            # it should be possible to add a child without any metadata
            d.addCallback(lambda res: n.set_node(u"d2", n, {}))
            d.addCallback(lambda res: c.create_empty_dirnode())
            d.addCallback(lambda n2:
                          self.shouldFail(ExistingChildError, "set_node-no",
                                          "child 'd2' already exists",
                                          n.set_node, u"d2", n2,
                                          overwrite=False))
            d.addCallback(lambda res: n.get_metadata_for(u"d2"))
            d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))

            # if we don't set any defaults, the child should get timestamps
            d.addCallback(lambda res: n.set_node(u"d3", n))
            d.addCallback(lambda res: n.get_metadata_for(u"d3"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))

            # or we can add specific metadata at set_node() time, which
            # overrides the timestamps
            d.addCallback(lambda res: n.set_node(u"d4", n,
                                                {"key": "value"}))
            d.addCallback(lambda res: n.get_metadata_for(u"d4"))
            d.addCallback(lambda metadata:
                          self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                          (metadata['key'] == "value"), metadata))

            d.addCallback(lambda res: n.delete(u"d2"))
            d.addCallback(lambda res: n.delete(u"d3"))
            d.addCallback(lambda res: n.delete(u"d4"))

            # metadata through set_children()
            d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri),
                                                   (u"e2", fake_file_uri, {}),
                                                   (u"e3", fake_file_uri,
                                                    {"key": "value"}),
                                                   ]))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_children-no",
                                          "child 'e1' already exists",
                                          n.set_children,
                                          [ (u"e1", other_file_uri),
                                            (u"new", other_file_uri), ],
                                          overwrite=False))
            # and 'new' should not have been created
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children: self.failIf(u"new" in children))
            d.addCallback(lambda res: n.get_metadata_for(u"e1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))
            d.addCallback(lambda res: n.get_metadata_for(u"e2"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
            d.addCallback(lambda res: n.get_metadata_for(u"e3"))
            d.addCallback(lambda metadata:
                              self.failUnless((set(metadata.keys()) == set(["key", "tahoe"]))
                                              and (metadata['key'] == "value"), metadata))

            d.addCallback(lambda res: n.delete(u"e1"))
            d.addCallback(lambda res: n.delete(u"e2"))
            d.addCallback(lambda res: n.delete(u"e3"))

            # metadata through set_nodes()
            d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
                                                    (u"f2", n, {}),
                                                    (u"f3", n,
                                                     {"key": "value"}),
                                                    ]))
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "set_nodes-no",
                                          "child 'f1' already exists",
                                          n.set_nodes,
                                          [ (u"f1", n),
                                            (u"new", n), ],
                                          overwrite=False))
            # and 'new' should not have been created
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children: self.failIf(u"new" in children))
            d.addCallback(lambda res: n.get_metadata_for(u"f1"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))
            d.addCallback(lambda res: n.get_metadata_for(u"f2"))
            d.addCallback(
                lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
            d.addCallback(lambda res: n.get_metadata_for(u"f3"))
            d.addCallback(lambda metadata:
                              self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                              (metadata['key'] == "value"), metadata))

            d.addCallback(lambda res: n.delete(u"f1"))
            d.addCallback(lambda res: n.delete(u"f2"))
            d.addCallback(lambda res: n.delete(u"f3"))


            d.addCallback(lambda res:
                          n.set_metadata_for(u"child",
                                             {"tags": ["web2.0-compatible"]}))
            d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(metadata,
                                               {"tags": ["web2.0-compatible"]}))

            def _start(res):
                self._start_timestamp = time.time()
            d.addCallback(_start)
            # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
            # floats to hundredths (it uses str(num) instead of repr(num)).
            # simplejson-1.7.3 does not have this bug. To prevent this bug
            # from causing the test to fail, stall for more than a few
            # hundredths of a second.
            d.addCallback(self.stall, 0.1)
            d.addCallback(lambda res: n.add_file(u"timestamps",
                                                 upload.Data("stamp me", convergence="some convergence string")))
            d.addCallback(self.stall, 0.1)
            def _stop(res):
                self._stop_timestamp = time.time()
            d.addCallback(_stop)

            d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
            def _check_timestamp1(metadata):
                self.failUnless("ctime" in metadata)
                self.failUnless("mtime" in metadata)
                self.failUnlessGreaterOrEqualThan(metadata["ctime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                                                  metadata["ctime"])
                self.failUnlessGreaterOrEqualThan(metadata["mtime"],
                                                  self._start_timestamp)
                self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
                                                  metadata["mtime"])
                # Our current timestamp rules say that replacing an existing
                # child should preserve the 'ctime' but update the mtime
                self._old_ctime = metadata["ctime"]
                self._old_mtime = metadata["mtime"]
            d.addCallback(_check_timestamp1)
            d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
            d.addCallback(lambda res: n.set_node(u"timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
            def _check_timestamp2(metadata):
                self.failUnlessEqual(metadata["ctime"], self._old_ctime,
                                     "%s != %s" % (metadata["ctime"],
                                                   self._old_ctime))
                self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
                return n.delete(u"timestamps")
            d.addCallback(_check_timestamp2)

            # also make sure we can add/update timestamps on a
            # previously-existing child that didn't have any, since there are
            # a lot of 0.7.0-generated edges around out there
            d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
            d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
            d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))
            d.addCallback(lambda res: n.delete(u"no_timestamps"))

            d.addCallback(lambda res: n.delete(u"subdir"))
            d.addCallback(lambda old_child:
                          self.failUnlessEqual(old_child.get_uri(),
                                               self.subdir.get_uri()))

            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child"])))

            uploadable1 = upload.Data("some data", convergence="converge")
            d.addCallback(lambda res: n.add_file(u"newfile", uploadable1))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            uploadable2 = upload.Data("some data", convergence="stuff")
            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "add_file-no",
                                          "child 'newfile' already exists",
                                          n.add_file, u"newfile",
                                          uploadable2,
                                          overwrite=False))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child", u"newfile"])))
            d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
            d.addCallback(lambda metadata:
                          self.failUnlessEqual(set(metadata.keys()),
                                               set(["tahoe", "ctime", "mtime"])))

            uploadable3 = upload.Data("some data", convergence="converge")
            d.addCallback(lambda res: n.add_file(u"newfile-metadata",
                                                 uploadable3,
                                                 {"key": "value"}))
            d.addCallback(lambda newnode:
                          self.failUnless(IFileNode.providedBy(newnode)))
            d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
            d.addCallback(lambda metadata:
                              self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
                                              (metadata['key'] == "value"), metadata))
            d.addCallback(lambda res: n.delete(u"newfile-metadata"))

            d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
            def _created2(subdir2):
                self.subdir2 = subdir2
                # put something in the way, to make sure it gets overwritten
                return subdir2.add_file(u"child", upload.Data("overwrite me",
                                                              "converge"))
            d.addCallback(_created2)

            d.addCallback(lambda res:
                          n.move_child_to(u"child", self.subdir2))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"newfile", u"subdir2"])))
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"child"])))
            d.addCallback(lambda res: self.subdir2.get(u"child"))
            d.addCallback(lambda child:
                          self.failUnlessEqual(child.get_uri(),
                                               fake_file_uri))

            # move it back, using new_child_name=
            d.addCallback(lambda res:
                          self.subdir2.move_child_to(u"child", n, u"newchild"))
            d.addCallback(lambda res: n.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()),
                                               sorted([u"newchild", u"newfile",
                                                       u"subdir2"])))
            d.addCallback(lambda res: self.subdir2.list())
            d.addCallback(lambda children:
                          self.failUnlessEqual(sorted(children.keys()), []))

            # now make sure that we honor overwrite=False
            d.addCallback(lambda res:
                          self.subdir2.set_uri(u"newchild", other_file_uri))

            d.addCallback(lambda res:
                          self.shouldFail(ExistingChildError, "move_child_to-no",
                                          "child 'newchild' already exists",
                                          n.move_child_to, u"newchild",
                                          self.subdir2,
                                          overwrite=False))
            d.addCallback(lambda res: self.subdir2.get(u"newchild"))
            d.addCallback(lambda child:
                          self.failUnlessEqual(child.get_uri(),
                                               other_file_uri))

            return d

        d.addCallback(_then)

        d.addErrback(self.explain_error)
        return d

class Packing(unittest.TestCase):
    # This is a base32-encoded representation of the directory tree
    # root/file1
    # root/file2
    # root/file3
    # as represented after being fed to _pack_contents.
    # We have it here so we can decode it, feed it to
    # _unpack_contents, and verify that _unpack_contents
    # works correctly.

    known_tree = "GM4TOORVHJTGS3DFGEWDSNJ2KVJESOSDJBFTU33MPB2GS3LZNVYG6N3GGI3WU5TIORTXC3DOMJ2G4NB2MVWXUZDONBVTE5LNGRZWK2LYN55GY23XGNYXQMTOMZUWU5TENN4DG23ZG5UTO2L2NQ2DO6LFMRWDMZJWGRQTUMZ2GEYDUMJQFQYTIMZ22XZKZORX5XS7CAQCSK3URR6QOHISHRCMGER5LRFSZRNAS5ZSALCS6TWFQAE754IVOIKJVK73WZPP3VUUEDTX3WHTBBZ5YX3CEKHCPG3ZWQLYA4QM6LDRCF7TJQYWLIZHKGN5ROA3AUZPXESBNLQQ6JTC2DBJU2D47IZJTLR3PKZ4RVF57XLPWY7FX7SZV3T6IJ3ORFW37FXUPGOE3ROPFNUX5DCGMAQJ3PGGULBRGM3TU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGI3TKNRWGEWCAITUMFUG6ZJCHIQHWITMNFXGW3LPORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBSG42TMNRRFQQCE3DJNZVWG4TUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQQCE3LUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQWDGOJRHI2TUZTJNRSTELBZGQ5FKUSJHJBUQSZ2MFYGKZ3SOBSWQ43IO52WO23CNAZWU3DUGVSWSNTIOE5DK33POVTW4ZLNMNWDK6DHPA2GS2THNF2W25DEN5VGY2LQNFRGG5DKNNRHO5TZPFTWI6LNMRYGQ2LCGJTHM4J2GM5DCMB2GQWDCNBSHKVVQBGRYMACKJ27CVQ6O6B4QPR72RFVTGOZUI76XUSWAX73JRV5PYRHMIFYZIA25MXDPGUGML6M2NMRSG4YD4W4K37ZDYSXHMJ3IUVT4F64YTQQVBJFFFOUC7J7LAB2VFCL5UKKGMR2D3F4EPOYC7UYWQZNR5KXHBSNXLCNBX2SNF22DCXJIHSMEKWEWOG5XCJEVVZ7UW5IB6I64XXQSJ34B5CAYZGZIIMR6LBRGMZTU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYFQQCE5DBNBXWKIR2EB5SE3DJNZVW233UNFWWKIR2EAYTENBWGY3DGOBZG4XDIMZQGIYTQLBAEJWGS3TLMNZHI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTAMRRHB6SYIBCNV2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYPUWCYMZZGU5DKOTGNFWGKMZMHE2DUVKSJE5EGSCLHJRW25DDPBYTO2DXPB3GM6DBNYZTI6LJMV3DM2LWNB4TU4LWMNSWW3LKORXWK5DEMN3TI23NNE3WEM3SORRGY5THPA3TKNBUMNZG453BOF2GSZLXMVWWI3DJOFZW623RHIZTUMJQHI2SYMJUGI5BOSHWDPG3WKPAVXCF3XMKA7QVIWPRMWJHDTQHD27AHDCPJWDQENQ5H5ZZILTXQNIXXCIW4LKQABU2GCFRG5FHQN7CHD7HF4EKNRZFIV2ZYQIBM7IQU7F4RGB3XCX3FREPBKQ7UCICHVWPCYFGA6OLH3J45LXQ6GWWICJ3PGWJNLZ7PCRNLAPNYUGU6BENS7OXMBEOOFRIZV3PF2FFWZ5WHDPKXERYP7GNHKRMGEZTOOT3EJRXI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTGNRSGY4SYIBCORQWQ33FEI5CA6ZCNRUW423NN52GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMZTMMRWHEWCAITMNFXGWY3SORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCAITNORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCY==="

    def test_unpack_and_pack_behavior(self):
        known_tree = b32decode(self.known_tree)
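        # The positional Nones below presumably stand in for the NodeMaker's
        # collaborators (storage broker, secret holder, history, uploader,
        # downloader, download cache, key generator -- judging by how
        # UCWEingNodeMaker is constructed later in this file), none of which
        # are needed just to pack and unpack directory contents; only the
        # encoding parameters are real.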
        nodemaker = NodeMaker(None, None, None,
                              None, None, None,
                              {"k": 3, "n": 10}, None)
        writecap = "URI:SSK-RO:e3mdrzfwhoq42hy5ubcz6rp3o4:ybyibhnp3vvwuq2vaw2ckjmesgkklfs6ghxleztqidihjyofgw7q"
        filenode = nodemaker.create_from_cap(writecap)
        node = dirnode.DirectoryNode(filenode, nodemaker, None)
        children = node._unpack_contents(known_tree)
        self._check_children(children)

        packed_children = node._pack_contents(children)
        children = node._unpack_contents(packed_children)
        self._check_children(children)

    def _check_children(self, children):
        # Are all the expected child nodes there?
        self.failUnless(children.has_key(u'file1'))
        self.failUnless(children.has_key(u'file2'))
        self.failUnless(children.has_key(u'file3'))

        # Are the metadata for child 3 right?
        file3_rocap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
        file3_rwcap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
        file3_metadata = {'ctime': 1246663897.4336269, 'tahoe': {'linkmotime': 1246663897.4336269, 'linkcrtime': 1246663897.4336269}, 'mtime': 1246663897.4336269}
        self.failUnlessEqual(file3_metadata, children[u'file3'][1])
        self.failUnlessEqual(file3_rocap,
                             children[u'file3'][0].get_readonly_uri())
        self.failUnlessEqual(file3_rwcap,
                             children[u'file3'][0].get_uri())

        # Are the metadata for child 2 right?
        file2_rocap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
        file2_rwcap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
        file2_metadata = {'ctime': 1246663897.430218, 'tahoe': {'linkmotime': 1246663897.430218, 'linkcrtime': 1246663897.430218}, 'mtime': 1246663897.430218}
        self.failUnlessEqual(file2_metadata, children[u'file2'][1])
        self.failUnlessEqual(file2_rocap,
                             children[u'file2'][0].get_readonly_uri())
        self.failUnlessEqual(file2_rwcap,
                             children[u'file2'][0].get_uri())

        # Are the metadata for child 1 right?
        file1_rocap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
        file1_rwcap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
        file1_metadata = {'ctime': 1246663897.4275661, 'tahoe': {'linkmotime': 1246663897.4275661, 'linkcrtime': 1246663897.4275661}, 'mtime': 1246663897.4275661}
        self.failUnlessEqual(file1_metadata, children[u'file1'][1])
        self.failUnlessEqual(file1_rocap,
                             children[u'file1'][0].get_readonly_uri())
        self.failUnlessEqual(file1_rwcap,
                             children[u'file1'][0].get_uri())

    def test_caching_dict(self):
        d = dirnode.CachingDict()
        d.set_both_items("test", "test2", ("test3", "test4"))
        cached, value = d.get_both_items("test")

        self.failUnlessEqual(cached, "test2")
        self.failUnlessEqual(value, ("test3", "test4"))

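        # Plain item assignment, by contrast, is expected to invalidate the
        # cached serialized form, hence the None check below.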
        d['test'] = ("test3", "test2")

        cached, value = d.get_both_items("test")

        self.failUnlessEqual(cached, None)
        self.failUnlessEqual(value, ("test3", "test2"))

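# FakeMutableFile is a minimal stand-in for a mutable file node: the
# class-level counter gives each instance a distinct writekey/fingerprint,
# so every fake file gets its own SSK write cap.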
class FakeMutableFile:
    counter = 0
    def __init__(self, initial_contents=""):
        self.data = initial_contents
        counter = FakeMutableFile.counter
        FakeMutableFile.counter += 1
        writekey = hashutil.ssk_writekey_hash(str(counter))
        fingerprint = hashutil.ssk_pubkey_fingerprint_hash(str(counter))
        self.uri = uri.WriteableSSKFileURI(writekey, fingerprint)
    def get_uri(self):
        return self.uri.to_string()
    def download_best_version(self):
        return defer.succeed(self.data)
    def get_writekey(self):
        return "writekey"
    def is_readonly(self):
        return False
    def is_mutable(self):
        return True
    def modify(self, modifier):
        self.data = modifier(self.data, None, True)
        return defer.succeed(None)

class FakeNodeMaker(NodeMaker):
    def create_mutable_file(self, contents="", keysize=None):
        return defer.succeed(FakeMutableFile(contents))

class FakeClient2(Client):
    def __init__(self):
        self.nodemaker = FakeNodeMaker(None, None, None,
                                       None, None, None,
                                       {"k":3,"n":10}, None)
    def create_node_from_uri(self, rwcap, rocap):
        return self.nodemaker.create_from_cap(rwcap, rocap)

class Dirnode2(unittest.TestCase, testutil.ShouldFailMixin):
    def setUp(self):
        self.client = FakeClient2()
        self.nodemaker = self.client.nodemaker

    def test_from_future(self):
        # create a dirnode that contains unknown URI types, and make sure we
        # tolerate them properly. Since dirnodes aren't allowed to add
        # unknown node types, we have to be tricky.
        d = self.nodemaker.create_new_mutable_directory()
        future_writecap = "x-tahoe-crazy://I_am_from_the_future."
        future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future."
        future_node = UnknownNode(future_writecap, future_readcap)
        def _then(n):
            self._node = n
            return n.set_node(u"future", future_node)
        d.addCallback(_then)

        # we should be prohibited from adding an unknown URI to a directory,
        # since we don't know how to diminish the cap to a readcap (for the
        # dirnode's rocap slot), and we don't want to accidentally grant
        # write access to a holder of the dirnode's readcap.
        d.addCallback(lambda ign:
             self.shouldFail(CannotPackUnknownNodeError,
                             "copy unknown",
                             "cannot pack unknown node as child add",
                             self._node.set_uri, u"add", future_writecap))
        d.addCallback(lambda ign: self._node.list())
        def _check(children):
            self.failUnlessEqual(len(children), 1)
            (fn, metadata) = children[u"future"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
            # but we *should* be allowed to copy this node, because the
            # UnknownNode contains all the information that was in the
            # original directory (readcap and writecap), so we're preserving
            # everything.
            return self._node.set_node(u"copy", fn)
        d.addCallback(_check)
        d.addCallback(lambda ign: self._node.list())
        def _check2(children):
            self.failUnlessEqual(len(children), 2)
            (fn, metadata) = children[u"copy"]
            self.failUnless(isinstance(fn, UnknownNode), fn)
            self.failUnlessEqual(fn.get_uri(), future_writecap)
            self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
        return d

class DeepStats(unittest.TestCase):
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
    def test_stats(self):
        ds = dirnode.DeepStats(None)
        ds.add("count-files")
        ds.add("size-immutable-files", 123)
        ds.histogram("size-files-histogram", 123)
        ds.max("largest-directory", 444)

        s = ds.get_results()
        self.failUnlessEqual(s["count-files"], 1)
        self.failUnlessEqual(s["size-immutable-files"], 123)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)

        ds.add("count-files")
        ds.add("size-immutable-files", 321)
        ds.histogram("size-files-histogram", 321)
        ds.max("largest-directory", 2)

        s = ds.get_results()
        self.failUnlessEqual(s["count-files"], 2)
        self.failUnlessEqual(s["size-immutable-files"], 444)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (101, 316, 1), (317, 1000, 1) ])

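        # The histogram buckets below appear to be half-decade
        # (factor-of-sqrt(10)) ranges: 1-3, 4-10, 11-31, and so on. The 1099
        # samples from range(1, 1100) plus the single 4TB outlier should
        # therefore land as asserted (3+7+21+69+216+684+99 = 1099, plus 1).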
        ds = dirnode.DeepStats(None)
        for i in range(1, 1100):
            ds.histogram("size-files-histogram", i)
        ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
        s = ds.get_results()
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (1, 3, 3),
                               (4, 10, 7),
                               (11, 31, 21),
                               (32, 100, 69),
                               (101, 316, 216),
                               (317, 1000, 684),
                               (1001, 3162, 99),
                               (3162277660169L, 10000000000000L, 1),
                               ])

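# A MutableFileNode that can be told to raise UncoordinatedWriteError exactly
# once, right after its next upload completes, to simulate a write collision.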
class UCWEingMutableFileNode(MutableFileNode):
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        d = MutableFileNode._upload(self, new_contents, servermap)
        def _ucwe(res):
            if self.please_ucwe_after_next_upload:
                self.please_ucwe_after_next_upload = False
                raise UncoordinatedWriteError()
            return res
        d.addCallback(_ucwe)
        return d

class UCWEingNodeMaker(NodeMaker):
    def _create_mutable(self, cap):
        n = UCWEingMutableFileNode(self.storage_broker, self.secret_holder,
                                   self.default_encoding_parameters,
                                   self.history)
        return n.init_from_uri(cap)


class Deleter(GridTestMixin, unittest.TestCase):
    timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.
    def test_retry(self):
        # ticket #550: a dirnode.delete which experiences an
        # UncoordinatedWriteError will fail with an incorrect "you're
        # deleting something which isn't there" NoSuchChildError exception.

        # to trigger this, we start by creating a directory with a single
        # file in it. Then we create a special dirnode that uses a modified
        # MutableFileNode which will raise UncoordinatedWriteError once on
        # demand. We then call dirnode.delete, which ought to retry and
        # succeed.

        self.basedir = self.mktemp()
        self.set_up_grid()
        c0 = self.g.clients[0]
        d = c0.create_empty_dirnode()
        small = upload.Data("Small enough for a LIT", None)
        def _created_dir(dn):
            self.root = dn
            self.root_uri = dn.get_uri()
            return dn.add_file(u"file", small)
        d.addCallback(_created_dir)
        def _do_delete(ignored):
            nm = UCWEingNodeMaker(c0.storage_broker, c0._secret_holder,
                                  c0.get_history(), c0.getServiceNamed("uploader"),
                                  c0.downloader,
                                  c0.download_cache_dirman,
                                  c0.get_encoding_parameters(),
                                  c0._key_generator)
            n = nm.create_from_cap(self.root_uri)
            assert n._node.please_ucwe_after_next_upload == False
            n._node.please_ucwe_after_next_upload = True
            # This should succeed, not raise an exception
            return n.delete(u"file")
        d.addCallback(_do_delete)

        return d

class Adder(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):

    def test_overwrite(self):
        # note: This functionality could be tested without actually creating
        # several RSA keys. It would be faster without the GridTestMixin: use
        # dn.set_node(nodemaker.create_from_cap(make_chk_file_uri())) instead
        # of dn.add_file, and use a special NodeMaker that creates fake
        # mutable files.
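        # (FakeNodeMaker, defined earlier in this file, is a NodeMaker along
        # those lines: it hands back FakeMutableFile instances.)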
        self.basedir = "dirnode/Adder/test_overwrite"
        self.set_up_grid()
        c = self.g.clients[0]
        fileuri = make_chk_file_uri(1234)
        filenode = c.nodemaker.create_from_cap(fileuri)
        d = c.create_empty_dirnode()

        def _create_directory_tree(root_node):
            # Build
            # root/file1
            # root/file2
            # root/dir1
            d = root_node.add_file(u'file1', upload.Data("Important Things",
                None))
            d.addCallback(lambda res:
                root_node.add_file(u'file2', upload.Data("Sekrit Codes", None)))
            d.addCallback(lambda res:
                root_node.create_empty_directory(u"dir1"))
            d.addCallback(lambda res: root_node)
            return d

        d.addCallback(_create_directory_tree)

        def _test_adder(root_node):
            d = root_node.set_node(u'file1', filenode)
            # We've overwritten file1. Let's try it with a directory
            d.addCallback(lambda res:
                root_node.create_empty_directory(u'dir2'))
            d.addCallback(lambda res:
                root_node.set_node(u'dir2', filenode))
            # Now try to overwrite an existing child (the file 'file1') while
            # specifying overwrite=False. We should receive an
            # ExistingChildError when we do this.
            d.addCallback(lambda res:
                self.shouldFail(ExistingChildError, "set_node",
                                "child 'file1' already exists",
                               root_node.set_node, u"file1",
                               filenode, overwrite=False))
            # If we try with a directory, we should see the same thing
            d.addCallback(lambda res:
                self.shouldFail(ExistingChildError, "set_node",
                                "child 'dir1' already exists",
                                root_node.set_node, u'dir1', filenode,
                                overwrite=False))
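            # overwrite="only-files" is expected to permit replacing an
            # existing file but still refuse to replace an existing
            # directory; the next two calls exercise both cases.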
            d.addCallback(lambda res:
                 root_node.set_node(u'file1', filenode,
                                    overwrite="only-files"))
            d.addCallback(lambda res:
                 self.shouldFail(ExistingChildError, "set_node",
                                "child 'dir1' already exists",
                                root_node.set_node, u'dir1', filenode,
                                overwrite="only-files"))
            return d

        d.addCallback(_test_adder)
        return d