3 from zope.interface import implements
4 from twisted.trial import unittest
5 from twisted.internet import defer
6 from allmydata import uri, dirnode
7 from allmydata.client import Client
8 from allmydata.immutable import upload
9 from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
10 IDirectoryURI, IReadonlyDirectoryURI, IFileNode, \
11 ExistingChildError, NoSuchChildError, \
12 IDeepCheckResults, IDeepCheckAndRepairResults, CannotPackUnknownNodeError
13 from allmydata.mutable.filenode import MutableFileNode
14 from allmydata.mutable.common import UncoordinatedWriteError
15 from allmydata.util import hashutil, base32
16 from allmydata.monitor import Monitor
17 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
18 FakeDirectoryNode, create_chk_filenode, ErrorMixin
19 from allmydata.test.no_network import GridTestMixin
20 from allmydata.check_results import CheckResults, CheckAndRepairResults
21 from allmydata.unknown import UnknownNode
22 from base64 import b32decode
23 import common_util as testutil
25 # to test dirnode.py, we want to construct a tree of real DirectoryNodes that
26 # contain pointers to fake files. We start with a fake MutableFileNode that
27 # stores all of its data in a static table.
30 implements(IFileNode, IMutableFileNode) # sure, why not
    # NOTE(review): the enclosing "class Marker" header (orig. line 29) is not
    # visible in this excerpt; these lines are the interior of a fake filenode
    # stub used by the dirnode tests.
31 def __init__(self, nodeuri):
    # Accept either a cap string or a URI object (normalized via to_string()).
32 if not isinstance(nodeuri, str):
33 nodeuri = nodeuri.to_string()
34 self.nodeuri = nodeuri
    # Derive a deterministic fake storage index and verifier cap from the cap
    # text, so identical caps always map to identical indexes.
35 si = hashutil.tagged_hash("tag1", nodeuri)[:16]
36 self.storage_index = si
37 fp = hashutil.tagged_hash("tag2", nodeuri)
38 self.verifieruri = uri.SSKVerifierURI(storage_index=si, fingerprint=fp)
41 def get_readonly_uri(self):
    # NOTE(review): the body of get_readonly_uri (orig. line 42) is missing
    # from this excerpt; presumably it returned self.nodeuri -- confirm
    # against the full file.
43 def get_verify_cap(self):
44 return self.verifieruri
45 def get_storage_index(self):
46 return self.storage_index
48 def check(self, monitor, verify=False, add_lease=False):
    # Fake health check: always reports the node as recoverable.
49 r = CheckResults(uri.from_string(self.nodeuri), None)
    # NOTE(review): orig. line 50 is missing from this excerpt (likely
    # r.set_healthy(True)) -- confirm against the full file.
51 r.set_recoverable(True)
52 return defer.succeed(r)
54 def check_and_repair(self, monitor, verify=False, add_lease=False):
55 d = self.check(verify)
    # NOTE(review): the callback wrapper that binds `cr` (and the trailing
    # return of the Deferred) is missing from this excerpt; as shown, `cr` is
    # unbound. The intent appears to be: reuse the plain check results as both
    # the pre- and post-repair results (no actual repair is performed).
57 r = CheckAndRepairResults(None)
58 r.pre_repair_results = r.post_repair_results = cr
63 # dirnode requires three methods from the client: upload(),
64 # create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
65 # only used by the convenience composite method add_file().
70 def upload(self, uploadable):
    # Fake upload: read the uploadable completely, wrap the bytes in a fake
    # CHK filenode, and produce UploadResults carrying that node's URI.
71 d = uploadable.get_size()
72 d.addCallback(lambda size: uploadable.read(size))
    # NOTE(review): the `def _got_data(datav):` wrapper (which joins the read
    # chunks into `data`) and the `return results` / `return d` lines are
    # missing from this excerpt.
75 n = create_chk_filenode(self, data)
76 results = upload.UploadResults()
77 results.uri = n.get_uri()
79 d.addCallback(_got_data)
82 def create_node_from_uri(self, u, readcap=None):
    # Directory caps become FakeDirectoryNodes; everything else becomes a
    # Marker stub. NOTE(review): the cap-normalization lines (orig. 83-85,
    # presumably handling the string/readcap forms of `u`) are missing from
    # this excerpt.
86 if (IDirectoryURI.providedBy(u)
87 or IReadonlyDirectoryURI.providedBy(u)):
88 return FakeDirectoryNode(self).init_from_uri(u)
89 return Marker(u.to_string())
91 def create_empty_dirnode(self):
92 n = FakeDirectoryNode(self)
    # NOTE(review): the line creating the Deferred `d` (orig. 93, presumably
    # n.create()) is missing from this excerpt; the callback returns the new
    # node once it is ready.
94 d.addCallback(lambda res: n)
97 class Dirnode(unittest.TestCase,
98 testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
99 timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
    # NOTE(review): the `def setUp(self):` line (orig. 100) is missing from
    # this excerpt; the following lines are its body.
101 self.client = FakeClient()
102 # This is a base32-encoded representation of the directory tree
106 # as represented after being fed to _pack_contents.
107 # We have it here so we can decode it, feed it to
108 # _unpack_contents, and verify that _unpack_contents
111 self.known_tree = "GM4TOORVHJTGS3DFGEWDSNJ2KVJESOSDJBFTU33MPB2GS3LZNVYG6N3GGI3WU5TIORTXC3DOMJ2G4NB2MVWXUZDONBVTE5LNGRZWK2LYN55GY23XGNYXQMTOMZUWU5TENN4DG23ZG5UTO2L2NQ2DO6LFMRWDMZJWGRQTUMZ2GEYDUMJQFQYTIMZ22XZKZORX5XS7CAQCSK3URR6QOHISHRCMGER5LRFSZRNAS5ZSALCS6TWFQAE754IVOIKJVK73WZPP3VUUEDTX3WHTBBZ5YX3CEKHCPG3ZWQLYA4QM6LDRCF7TJQYWLIZHKGN5ROA3AUZPXESBNLQQ6JTC2DBJU2D47IZJTLR3PKZ4RVF57XLPWY7FX7SZV3T6IJ3ORFW37FXUPGOE3ROPFNUX5DCGMAQJ3PGGULBRGM3TU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGI3TKNRWGEWCAITUMFUG6ZJCHIQHWITMNFXGW3LPORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBSG42TMNRRFQQCE3DJNZVWG4TUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQQCE3LUNFWWKIR2EAYTENBWGY3DGOBZG4XDIMRXGU3DMML5FQWDGOJRHI2TUZTJNRSTELBZGQ5FKUSJHJBUQSZ2MFYGKZ3SOBSWQ43IO52WO23CNAZWU3DUGVSWSNTIOE5DK33POVTW4ZLNMNWDK6DHPA2GS2THNF2W25DEN5VGY2LQNFRGG5DKNNRHO5TZPFTWI6LNMRYGQ2LCGJTHM4J2GM5DCMB2GQWDCNBSHKVVQBGRYMACKJ27CVQ6O6B4QPR72RFVTGOZUI76XUSWAX73JRV5PYRHMIFYZIA25MXDPGUGML6M2NMRSG4YD4W4K37ZDYSXHMJ3IUVT4F64YTQQVBJFFFOUC7J7LAB2VFCL5UKKGMR2D3F4EPOYC7UYWQZNR5KXHBSNXLCNBX2SNF22DCXJIHSMEKWEWOG5XCJEVVZ7UW5IB6I64XXQSJ34B5CAYZGZIIMR6LBRGMZTU6ZCMN2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYFQQCE5DBNBXWKIR2EB5SE3DJNZVW233UNFWWKIR2EAYTENBWGY3DGOBZG4XDIMZQGIYTQLBAEJWGS3TLMNZHI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTAMRRHB6SYIBCNV2GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMYDEMJYPUWCYMZZGU5DKOTGNFWGKMZMHE2DUVKSJE5EGSCLHJRW25DDPBYTO2DXPB3GM6DBNYZTI6LJMV3DM2LWNB4TU4LWMNSWW3LKORXWK5DEMN3TI23NNE3WEM3SORRGY5THPA3TKNBUMNZG453BOF2GSZLXMVWWI3DJOFZW623RHIZTUMJQHI2SYMJUGI5BOSHWDPG3WKPAVXCF3XMKA7QVIWPRMWJHDTQHD27AHDCPJWDQENQ5H5ZZILTXQNIXXCIW4LKQABU2GCFRG5FHQN7CHD7HF4EKNRZFIV2ZYQIBM7IQU7F4RGB3XCX3FREPBKQ7UCICHVWPCYFGA6OLH3J45LXQ6GWWICJ3PGWJNLZ7PCRNLAPNYUGU6BENS7OXMBEOOFRIZV3PF2FFWZ5WHDPKXERYP7GNHKRMGEZTOOT3EJRXI2LNMURDUIBRGI2DMNRWGM4DSNZOGQZTGNRSGY4SYIBCORQWQ33FEI5CA6ZCNRUW423NN52GS3LFEI5CAMJSGQ3DMNRTHA4TOLRUGMZTMMRWHEWCAITMNFXGWY3SORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCAITNORUW2ZJCHIQDCMRUGY3DMMZYHE3S4NBTGM3DENRZPUWCY==="
113 def test_basic(self):
114 d = self.client.create_empty_dirnode()
    # NOTE(review): the callback definition wrapping these assertions (and the
    # final `return d`) is missing from this excerpt; `res`/`rep` are bound in
    # the missing lines.
116 self.failUnless(isinstance(res, FakeDirectoryNode))
118 self.failUnless("RW" in rep)
122 def test_check(self):
123 d = self.client.create_empty_dirnode()
124 d.addCallback(lambda dn: dn.check(Monitor()))
    # A freshly created directory should report healthy. NOTE(review): the
    # callback wrapper binding `res` and the final `return d` are missing
    # from this excerpt.
126 self.failUnless(res.is_healthy())
130 def _test_deepcheck_create(self):
131 # create a small tree with a loop, and some non-directories
135 # root/subdir/link -> root
137 d = self.client.create_empty_dirnode()
138 def _created_root(rootnode):
139 self._rootnode = rootnode
140 return rootnode.create_empty_directory(u"subdir")
141 d.addCallback(_created_root)
142 def _created_subdir(subdir):
    # Populate the subdir: a file, a link back up to the root (the loop),
    # and a read-only directory child on the root.
143 self._subdir = subdir
144 d = subdir.add_file(u"file1", upload.Data("data", None))
145 d.addCallback(lambda res: subdir.set_node(u"link", self._rootnode))
146 d.addCallback(lambda res: self.client.create_empty_dirnode())
147 d.addCallback(lambda dn:
148 self._rootnode.set_uri(u"rodir",
149 dn.get_readonly_uri()))
    # NOTE(review): the `return d` of _created_subdir (orig. 150) is missing
    # from this excerpt.
151 d.addCallback(_created_subdir)
    # NOTE(review): the callback wrapper around this return (orig. 152) is
    # missing; as shown this `return` would be a syntax error.
153 return self._rootnode
157 def test_deepcheck(self):
158 d = self._test_deepcheck_create()
159 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
160 def _check_results(r):
161 self.failUnless(IDeepCheckResults.providedBy(r))
    # NOTE(review): the line binding `c` (orig. 162, presumably
    # c = r.get_counters()) and the dict-closing line are missing from
    # this excerpt.
163 self.failUnlessEqual(c,
164 {"count-objects-checked": 4,
165 "count-objects-healthy": 4,
166 "count-objects-unhealthy": 0,
167 "count-objects-unrecoverable": 0,
168 "count-corrupt-shares": 0,
170 self.failIf(r.get_corrupt_shares())
171 self.failUnlessEqual(len(r.get_all_results()), 4)
172 d.addCallback(_check_results)
175 def test_deepcheck_and_repair(self):
176 d = self._test_deepcheck_create()
177 d.addCallback(lambda rootnode:
178 rootnode.start_deep_check_and_repair().when_done())
179 def _check_results(r):
180 self.failUnless(IDeepCheckAndRepairResults.providedBy(r))
    # NOTE(review): the line binding `c` (orig. 181, presumably
    # c = r.get_counters()) and the dict-closing line are missing from
    # this excerpt. All four objects are healthy, so no repairs should
    # even be attempted.
182 self.failUnlessEqual(c,
183 {"count-objects-checked": 4,
184 "count-objects-healthy-pre-repair": 4,
185 "count-objects-unhealthy-pre-repair": 0,
186 "count-objects-unrecoverable-pre-repair": 0,
187 "count-corrupt-shares-pre-repair": 0,
188 "count-objects-healthy-post-repair": 4,
189 "count-objects-unhealthy-post-repair": 0,
190 "count-objects-unrecoverable-post-repair": 0,
191 "count-corrupt-shares-post-repair": 0,
192 "count-repairs-attempted": 0,
193 "count-repairs-successful": 0,
194 "count-repairs-unsuccessful": 0,
196 self.failIf(r.get_corrupt_shares())
197 self.failIf(r.get_remaining_corrupt_shares())
198 self.failUnlessEqual(len(r.get_all_results()), 4)
199 d.addCallback(_check_results)
def _mark_file_bad(self, rootnode):
    """Mark the rootnode's backing share as unhealthy, then pass the node on.

    Reaches into the fake node (`rootnode._node.bad_shares`) and flags the
    share under the root's storage index, so a subsequent deep-check reports
    one unhealthy object.
    """
    si = IURI(rootnode.get_uri())._filenode_uri.storage_index
    rootnode._node.bad_shares[si] = "unhealthy"
    # The next callback in test_deepcheck_problems receives this return
    # value and calls rootnode.start_deep_check() on it, so the node must
    # be passed through (the excerpt as found returned None).
    return rootnode
207 def test_deepcheck_problems(self):
208 d = self._test_deepcheck_create()
209 d.addCallback(lambda rootnode: self._mark_file_bad(rootnode))
210 d.addCallback(lambda rootnode: rootnode.start_deep_check().when_done())
211 def _check_results(r):
    # One object was marked bad, so exactly one of the four should be
    # unhealthy. NOTE(review): the line binding `c` (orig. 212, presumably
    # c = r.get_counters()) and the dict-closing line are missing from
    # this excerpt.
213 self.failUnlessEqual(c,
214 {"count-objects-checked": 4,
215 "count-objects-healthy": 3,
216 "count-objects-unhealthy": 1,
217 "count-objects-unrecoverable": 0,
218 "count-corrupt-shares": 0,
220 #self.failUnlessEqual(len(r.get_problems()), 1) # TODO
221 d.addCallback(_check_results)
224 def test_readonly(self):
225 fileuri = make_chk_file_uri(1234)
226 filenode = self.client.create_node_from_uri(fileuri)
227 uploadable = upload.Data("some data", convergence="some convergence string")
229 d = self.client.create_empty_dirnode()
    # NOTE(review): the `def _created(rw_dn):` line (orig. 230) and its
    # `return d2` are missing from this excerpt.
231 d2 = rw_dn.set_uri(u"child", fileuri.to_string())
232 d2.addCallback(lambda res: rw_dn)
234 d.addCallback(_created)
    # NOTE(review): the `def _ready(rw_dn):` line (orig. 236) is missing;
    # the following block derives a read-only view and verifies that every
    # mutating operation on it raises NotMutableError.
237 ro_uri = rw_dn.get_readonly_uri()
238 ro_dn = self.client.create_node_from_uri(ro_uri)
239 self.failUnless(ro_dn.is_readonly())
240 self.failUnless(ro_dn.is_mutable())
242 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
243 ro_dn.set_uri, u"newchild", fileuri.to_string())
244 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
245 ro_dn.set_node, u"newchild", filenode)
246 self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
247 ro_dn.set_nodes, [ (u"newchild", filenode) ])
248 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
249 ro_dn.add_file, u"newchild", uploadable)
250 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
251 ro_dn.delete, u"child")
252 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
253 ro_dn.create_empty_directory, u"newchild")
254 self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
255 ro_dn.set_metadata_for, u"child", {})
256 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
257 ro_dn.move_child_to, u"child", rw_dn)
    # Moving INTO a read-only directory must fail too.
258 self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
259 rw_dn.move_child_to, u"child", ro_dn)
261 d.addCallback(_ready)
262 def _listed(children):
263 self.failUnless(u"child" in children)
264 d.addCallback(_listed)
def failUnlessGreaterThan(self, a, b):
    """Assert that *a* is strictly greater than *b*."""
    message = "%r should be > %r" % (a, b)
    self.failUnless(a > b, message)
def failUnlessGreaterOrEqualThan(self, a, b):
    """Assert that *a* is greater than or equal to *b*."""
    message = "%r should be >= %r" % (a, b)
    self.failUnless(a >= b, message)
273 def test_create(self):
    # End-to-end exercise of a fresh dirnode: caps, manifest/deep-stats,
    # child add/get/delete with metadata, timestamps, and move_child_to.
    # NOTE(review): this excerpt is sampled -- several callback-definition
    # lines, closing parentheses, and `return d` lines are missing
    # throughout; as shown the method is not syntactically complete.
274 self.expected_manifest = []
275 self.expected_verifycaps = set()
276 self.expected_storage_indexes = set()
278 d = self.client.create_empty_dirnode()
281 self.failUnless(n.is_mutable())
284 self.failUnless(u.startswith("URI:DIR2:"), u)
285 u_ro = n.get_readonly_uri()
286 self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
287 u_v = n.get_verify_cap().to_string()
288 self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
289 u_r = n.get_repair_cap().to_string()
290 self.failUnlessEqual(u_r, u)
291 self.expected_manifest.append( ((), u) )
292 self.expected_verifycaps.add(u_v)
293 si = n.get_storage_index()
294 self.expected_storage_indexes.add(base32.b2a(si))
295 expected_si = n._uri._filenode_uri.storage_index
296 self.failUnlessEqual(si, expected_si)
299 d.addCallback(lambda res: self.failUnlessEqual(res, {}))
300 d.addCallback(lambda res: n.has_child(u"missing"))
301 d.addCallback(lambda res: self.failIf(res))
302 fake_file_uri = make_mutable_file_uri()
303 other_file_uri = make_mutable_file_uri()
304 m = Marker(fake_file_uri)
305 ffu_v = m.get_verify_cap().to_string()
306 self.expected_manifest.append( ((u"child",) , m.get_uri()) )
307 self.expected_verifycaps.add(ffu_v)
308 self.expected_storage_indexes.add(base32.b2a(m.get_storage_index()))
309 d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri.to_string()))
310 d.addCallback(lambda res:
311 self.shouldFail(ExistingChildError, "set_uri-no",
312 "child 'child' already exists",
313 n.set_uri, u"child", other_file_uri.to_string(),
318 d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
322 # /subdir = directory
323 def _created(subdir):
324 self.failUnless(isinstance(subdir, FakeDirectoryNode))
326 new_v = subdir.get_verify_cap().to_string()
327 assert isinstance(new_v, str)
328 self.expected_manifest.append( ((u"subdir",), subdir.get_uri()) )
329 self.expected_verifycaps.add(new_v)
330 si = subdir.get_storage_index()
331 self.expected_storage_indexes.add(base32.b2a(si))
332 d.addCallback(_created)
334 d.addCallback(lambda res:
335 self.shouldFail(ExistingChildError, "mkdir-no",
336 "child 'subdir' already exists",
337 n.create_empty_directory, u"subdir",
340 d.addCallback(lambda res: n.list())
341 d.addCallback(lambda children:
342 self.failUnlessEqual(sorted(children.keys()),
343 sorted([u"child", u"subdir"])))
    # Deep-stats over the 2-directory/1-file tree built so far.
345 d.addCallback(lambda res: n.start_deep_stats().when_done())
346 def _check_deepstats(stats):
347 self.failUnless(isinstance(stats, dict))
348 expected = {"count-immutable-files": 0,
349 "count-mutable-files": 1,
350 "count-literal-files": 0,
352 "count-directories": 2,
353 "size-immutable-files": 0,
354 "size-literal-files": 0,
355 #"size-directories": 616, # varies
356 #"largest-directory": 616,
357 "largest-directory-children": 2,
358 "largest-immutable-file": 0,
360 for k,v in expected.iteritems():
361 self.failUnlessEqual(stats[k], v,
362 "stats[%s] was %s, not %s" %
364 self.failUnless(stats["size-directories"] > 500,
365 stats["size-directories"])
366 self.failUnless(stats["largest-directory"] > 500,
367 stats["largest-directory"])
368 self.failUnlessEqual(stats["size-files-histogram"], [])
369 d.addCallback(_check_deepstats)
371 d.addCallback(lambda res: n.build_manifest().when_done())
372 def _check_manifest(res):
373 manifest = res["manifest"]
374 self.failUnlessEqual(sorted(manifest),
375 sorted(self.expected_manifest))
377 _check_deepstats(stats)
378 self.failUnlessEqual(self.expected_verifycaps,
380 self.failUnlessEqual(self.expected_storage_indexes,
381 res["storage-index"])
382 d.addCallback(_check_manifest)
384 def _add_subsubdir(res):
385 return self.subdir.create_empty_directory(u"subsubdir")
386 d.addCallback(_add_subsubdir)
389 # /subdir = directory
390 # /subdir/subsubdir = directory
391 d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
392 d.addCallback(lambda subsubdir:
393 self.failUnless(isinstance(subsubdir,
395 d.addCallback(lambda res: n.get_child_at_path(u""))
396 d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
399 d.addCallback(lambda res: n.get_metadata_for(u"child"))
400 d.addCallback(lambda metadata:
401 self.failUnlessEqual(set(metadata.keys()),
402 set(["tahoe", "ctime", "mtime"])))
404 d.addCallback(lambda res:
405 self.shouldFail(NoSuchChildError, "gcamap-no",
407 n.get_child_and_metadata_at_path,
409 d.addCallback(lambda res:
410 n.get_child_and_metadata_at_path(u""))
411 def _check_child_and_metadata1(res):
412 child, metadata = res
413 self.failUnless(isinstance(child, FakeDirectoryNode))
414 # edge-metadata needs at least one path segment
415 self.failUnlessEqual(sorted(metadata.keys()), [])
416 d.addCallback(_check_child_and_metadata1)
417 d.addCallback(lambda res:
418 n.get_child_and_metadata_at_path(u"child"))
420 def _check_child_and_metadata2(res):
421 child, metadata = res
422 self.failUnlessEqual(child.get_uri(),
423 fake_file_uri.to_string())
424 self.failUnlessEqual(set(metadata.keys()),
425 set(["tahoe", "ctime", "mtime"]))
426 d.addCallback(_check_child_and_metadata2)
428 d.addCallback(lambda res:
429 n.get_child_and_metadata_at_path(u"subdir/subsubdir"))
430 def _check_child_and_metadata3(res):
431 child, metadata = res
432 self.failUnless(isinstance(child, FakeDirectoryNode))
433 self.failUnlessEqual(set(metadata.keys()),
434 set(["tahoe", "ctime", "mtime"]))
435 d.addCallback(_check_child_and_metadata3)
    # set_uri + metadata variants (children c2..c4).
438 # it should be possible to add a child without any metadata
439 d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri.to_string(), {}))
440 d.addCallback(lambda res: n.get_metadata_for(u"c2"))
441 d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
443 # You can't override the link timestamps.
444 d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri.to_string(), { 'tahoe': {'linkcrtime': "bogus"}}))
445 d.addCallback(lambda res: n.get_metadata_for(u"c2"))
446 def _has_good_linkcrtime(metadata):
447 self.failUnless(metadata.has_key('tahoe'))
448 self.failUnless(metadata['tahoe'].has_key('linkcrtime'))
449 self.failIfEqual(metadata['tahoe']['linkcrtime'], 'bogus')
450 d.addCallback(_has_good_linkcrtime)
452 # if we don't set any defaults, the child should get timestamps
453 d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri.to_string()))
454 d.addCallback(lambda res: n.get_metadata_for(u"c3"))
455 d.addCallback(lambda metadata:
456 self.failUnlessEqual(set(metadata.keys()),
457 set(["tahoe", "ctime", "mtime"])))
459 # or we can add specific metadata at set_uri() time, which
460 # overrides the timestamps
461 d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri.to_string(),
463 d.addCallback(lambda res: n.get_metadata_for(u"c4"))
464 d.addCallback(lambda metadata:
465 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
466 (metadata['key'] == "value"), metadata))
468 d.addCallback(lambda res: n.delete(u"c2"))
469 d.addCallback(lambda res: n.delete(u"c3"))
470 d.addCallback(lambda res: n.delete(u"c4"))
472 # set_node + metadata
473 # it should be possible to add a child without any metadata
474 d.addCallback(lambda res: n.set_node(u"d2", n, {}))
475 d.addCallback(lambda res: self.client.create_empty_dirnode())
476 d.addCallback(lambda n2:
477 self.shouldFail(ExistingChildError, "set_node-no",
478 "child 'd2' already exists",
479 n.set_node, u"d2", n2,
481 d.addCallback(lambda res: n.get_metadata_for(u"d2"))
482 d.addCallback(lambda metadata: self.failUnlessEqual(metadata.keys(), ['tahoe']))
484 # if we don't set any defaults, the child should get timestamps
485 d.addCallback(lambda res: n.set_node(u"d3", n))
486 d.addCallback(lambda res: n.get_metadata_for(u"d3"))
487 d.addCallback(lambda metadata:
488 self.failUnlessEqual(set(metadata.keys()),
489 set(["tahoe", "ctime", "mtime"])))
491 # or we can add specific metadata at set_node() time, which
492 # overrides the timestamps
493 d.addCallback(lambda res: n.set_node(u"d4", n,
495 d.addCallback(lambda res: n.get_metadata_for(u"d4"))
496 d.addCallback(lambda metadata:
497 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
498 (metadata['key'] == "value"), metadata))
500 d.addCallback(lambda res: n.delete(u"d2"))
501 d.addCallback(lambda res: n.delete(u"d3"))
502 d.addCallback(lambda res: n.delete(u"d4"))
504 # metadata through set_children()
505 d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri.to_string()),
506 (u"e2", fake_file_uri.to_string(), {}),
507 (u"e3", fake_file_uri.to_string(),
510 d.addCallback(lambda res:
511 self.shouldFail(ExistingChildError, "set_children-no",
512 "child 'e1' already exists",
514 [ (u"e1", other_file_uri),
515 (u"new", other_file_uri), ],
517 # and 'new' should not have been created
518 d.addCallback(lambda res: n.list())
519 d.addCallback(lambda children: self.failIf(u"new" in children))
520 d.addCallback(lambda res: n.get_metadata_for(u"e1"))
521 d.addCallback(lambda metadata:
522 self.failUnlessEqual(set(metadata.keys()),
523 set(["tahoe", "ctime", "mtime"])))
524 d.addCallback(lambda res: n.get_metadata_for(u"e2"))
525 d.addCallback(lambda metadata:
526 self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
527 d.addCallback(lambda res: n.get_metadata_for(u"e3"))
528 d.addCallback(lambda metadata:
529 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"]))
530 and (metadata['key'] == "value"), metadata))
532 d.addCallback(lambda res: n.delete(u"e1"))
533 d.addCallback(lambda res: n.delete(u"e2"))
534 d.addCallback(lambda res: n.delete(u"e3"))
536 # metadata through set_nodes()
537 d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
542 d.addCallback(lambda res:
543 self.shouldFail(ExistingChildError, "set_nodes-no",
544 "child 'f1' already exists",
549 # and 'new' should not have been created
550 d.addCallback(lambda res: n.list())
551 d.addCallback(lambda children: self.failIf(u"new" in children))
552 d.addCallback(lambda res: n.get_metadata_for(u"f1"))
553 d.addCallback(lambda metadata:
554 self.failUnlessEqual(set(metadata.keys()),
555 set(["tahoe", "ctime", "mtime"])))
556 d.addCallback(lambda res: n.get_metadata_for(u"f2"))
558 lambda metadata: self.failUnlessEqual(set(metadata.keys()), set(['tahoe'])))
559 d.addCallback(lambda res: n.get_metadata_for(u"f3"))
560 d.addCallback(lambda metadata:
561 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
562 (metadata['key'] == "value"), metadata))
564 d.addCallback(lambda res: n.delete(u"f1"))
565 d.addCallback(lambda res: n.delete(u"f2"))
566 d.addCallback(lambda res: n.delete(u"f3"))
569 d.addCallback(lambda res:
570 n.set_metadata_for(u"child",
571 {"tags": ["web2.0-compatible"]}))
572 d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
573 d.addCallback(lambda metadata:
574 self.failUnlessEqual(metadata,
575 {"tags": ["web2.0-compatible"]}))
    # NOTE(review): the `def _start(res):` line (orig. 577) is missing from
    # this excerpt.
578 self._start_timestamp = time.time()
579 d.addCallback(_start)
580 # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
581 # floats to hundredths (it uses str(num) instead of repr(num)).
582 # simplejson-1.7.3 does not have this bug. To prevent this bug
583 # from causing the test to fail, stall for more than a few
584 # hundredths of a second.
585 d.addCallback(self.stall, 0.1)
586 d.addCallback(lambda res: n.add_file(u"timestamps",
587 upload.Data("stamp me", convergence="some convergence string")))
588 d.addCallback(self.stall, 0.1)
    # NOTE(review): the `def _stop(res):` wrapper (orig. 589) is missing.
590 self._stop_timestamp = time.time()
593 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
594 def _check_timestamp1(metadata):
595 self.failUnless("ctime" in metadata)
596 self.failUnless("mtime" in metadata)
597 self.failUnlessGreaterOrEqualThan(metadata["ctime"],
598 self._start_timestamp)
599 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
601 self.failUnlessGreaterOrEqualThan(metadata["mtime"],
602 self._start_timestamp)
603 self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
605 # Our current timestamp rules say that replacing an existing
606 # child should preserve the 'ctime' but update the mtime
607 self._old_ctime = metadata["ctime"]
608 self._old_mtime = metadata["mtime"]
609 d.addCallback(_check_timestamp1)
610 d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
611 d.addCallback(lambda res: n.set_node(u"timestamps", n))
612 d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
613 def _check_timestamp2(metadata):
614 self.failUnlessEqual(metadata["ctime"], self._old_ctime,
615 "%s != %s" % (metadata["ctime"],
617 self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
618 return n.delete(u"timestamps")
619 d.addCallback(_check_timestamp2)
621 # also make sure we can add/update timestamps on a
622 # previously-existing child that didn't have any, since there are
623 # a lot of 0.7.0-generated edges around out there
624 d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
625 d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
626 d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
627 d.addCallback(lambda metadata:
628 self.failUnlessEqual(set(metadata.keys()),
629 set(["tahoe", "ctime", "mtime"])))
630 d.addCallback(lambda res: n.delete(u"no_timestamps"))
632 d.addCallback(lambda res: n.delete(u"subdir"))
633 d.addCallback(lambda old_child:
634 self.failUnlessEqual(old_child.get_uri(),
635 self.subdir.get_uri()))
637 d.addCallback(lambda res: n.list())
638 d.addCallback(lambda children:
639 self.failUnlessEqual(sorted(children.keys()),
642 uploadable = upload.Data("some data", convergence="some convergence string")
643 d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
644 d.addCallback(lambda newnode:
645 self.failUnless(IFileNode.providedBy(newnode)))
646 other_uploadable = upload.Data("some data", convergence="stuff")
647 d.addCallback(lambda res:
648 self.shouldFail(ExistingChildError, "add_file-no",
649 "child 'newfile' already exists",
650 n.add_file, u"newfile",
653 d.addCallback(lambda res: n.list())
654 d.addCallback(lambda children:
655 self.failUnlessEqual(sorted(children.keys()),
656 sorted([u"child", u"newfile"])))
657 d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
658 d.addCallback(lambda metadata:
659 self.failUnlessEqual(set(metadata.keys()),
660 set(["tahoe", "ctime", "mtime"])))
662 d.addCallback(lambda res: n.add_file(u"newfile-metadata",
665 d.addCallback(lambda newnode:
666 self.failUnless(IFileNode.providedBy(newnode)))
667 d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
668 d.addCallback(lambda metadata:
669 self.failUnless((set(metadata.keys()) == set(["key", "tahoe"])) and
670 (metadata['key'] == "value"), metadata))
671 d.addCallback(lambda res: n.delete(u"newfile-metadata"))
673 d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
674 def _created2(subdir2):
675 self.subdir2 = subdir2
676 # put something in the way, to make sure it gets overwritten
677 return subdir2.add_file(u"child", upload.Data("overwrite me",
679 d.addCallback(_created2)
681 d.addCallback(lambda res:
682 n.move_child_to(u"child", self.subdir2))
683 d.addCallback(lambda res: n.list())
684 d.addCallback(lambda children:
685 self.failUnlessEqual(sorted(children.keys()),
686 sorted([u"newfile", u"subdir2"])))
687 d.addCallback(lambda res: self.subdir2.list())
688 d.addCallback(lambda children:
689 self.failUnlessEqual(sorted(children.keys()),
691 d.addCallback(lambda res: self.subdir2.get(u"child"))
692 d.addCallback(lambda child:
693 self.failUnlessEqual(child.get_uri(),
694 fake_file_uri.to_string()))
696 # move it back, using new_child_name=
697 d.addCallback(lambda res:
698 self.subdir2.move_child_to(u"child", n, u"newchild"))
699 d.addCallback(lambda res: n.list())
700 d.addCallback(lambda children:
701 self.failUnlessEqual(sorted(children.keys()),
702 sorted([u"newchild", u"newfile",
704 d.addCallback(lambda res: self.subdir2.list())
705 d.addCallback(lambda children:
706 self.failUnlessEqual(sorted(children.keys()), []))
708 # now make sure that we honor overwrite=False
709 d.addCallback(lambda res:
710 self.subdir2.set_uri(u"newchild", other_file_uri.to_string()))
712 d.addCallback(lambda res:
713 self.shouldFail(ExistingChildError, "move_child_to-no",
714 "child 'newchild' already exists",
715 n.move_child_to, u"newchild",
718 d.addCallback(lambda res: self.subdir2.get(u"newchild"))
719 d.addCallback(lambda child:
720 self.failUnlessEqual(child.get_uri(),
721 other_file_uri.to_string()))
727 d.addErrback(self.explain_error)
730 def test_unpack_and_pack_behavior(self):
    # Round-trip a known packed directory body through _unpack_contents /
    # _pack_contents and verify the child caps and metadata are preserved.
731 known_tree = b32decode(self.known_tree)
732 d = self.client.create_empty_dirnode()
734 def _check_tree(node):
735 def check_children(children):
736 # Are all the expected child nodes there?
737 self.failUnless(children.has_key(u'file1'))
738 self.failUnless(children.has_key(u'file2'))
739 self.failUnless(children.has_key(u'file3'))
741 # Are the metadata for child 3 right?
742 file3_rocap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
743 file3_rwcap = "URI:CHK:cmtcxq7hwxvfxan34yiev6ivhy:qvcekmjtoetdcw4kmi7b3rtblvgx7544crnwaqtiewemdliqsokq:3:10:5"
744 file3_metadata = {'ctime': 1246663897.4336269, 'tahoe': {'linkmotime': 1246663897.4336269, 'linkcrtime': 1246663897.4336269}, 'mtime': 1246663897.4336269}
745 self.failUnlessEqual(file3_metadata, children[u'file3'][1])
746 self.failUnlessEqual(file3_rocap,
747 children[u'file3'][0].get_readonly_uri())
748 self.failUnlessEqual(file3_rwcap,
749 children[u'file3'][0].get_uri())
751 # Are the metadata for child 2 right?
752 file2_rocap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
753 file2_rwcap = "URI:CHK:apegrpehshwugkbh3jlt5ei6hq:5oougnemcl5xgx4ijgiumtdojlipibctjkbwvyygdymdphib2fvq:3:10:4"
754 file2_metadata = {'ctime': 1246663897.430218, 'tahoe': {'linkmotime': 1246663897.430218, 'linkcrtime': 1246663897.430218}, 'mtime': 1246663897.430218}
755 self.failUnlessEqual(file2_metadata, children[u'file2'][1])
756 self.failUnlessEqual(file2_rocap,
757 children[u'file2'][0].get_readonly_uri())
758 self.failUnlessEqual(file2_rwcap,
759 children[u'file2'][0].get_uri())
761 # Are the metadata for child 1 right?
762 file1_rocap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
763 file1_rwcap = "URI:CHK:olxtimympo7f27jvhtgqlnbtn4:emzdnhk2um4seixozlkw3qx2nfijvdkx3ky7i7izl47yedl6e64a:3:10:10"
764 file1_metadata = {'ctime': 1246663897.4275661, 'tahoe': {'linkmotime': 1246663897.4275661, 'linkcrtime': 1246663897.4275661}, 'mtime': 1246663897.4275661}
765 self.failUnlessEqual(file1_metadata, children[u'file1'][1])
766 self.failUnlessEqual(file1_rocap,
767 children[u'file1'][0].get_readonly_uri())
768 self.failUnlessEqual(file1_rwcap,
769 children[u'file1'][0].get_uri())
    # Unpack the known bytes, check, then pack/unpack again and re-check.
771 children = node._unpack_contents(known_tree)
773 check_children(children)
775 packed_children = node._pack_contents(children)
777 children = node._unpack_contents(packed_children)
779 check_children(children)
781 d.addCallback(_check_tree)
784 def test_caching_dict(self):
    # CachingDict pairs each value with a cached packed representation;
    # setting through plain __setitem__ should drop the cached copy.
785 d = dirnode.CachingDict()
786 d.set_both_items("test", "test2", ("test3", "test4"))
787 cached, value = d.get_both_items("test")
789 self.failUnlessEqual(cached, "test2")
790 self.failUnlessEqual(value, ("test3", "test4"))
792 d['test'] = ("test3", "test2")
794 cached, value = d.get_both_items("test")
    # Plain assignment must invalidate the cached item.
796 self.failUnlessEqual(cached, None)
797 self.failUnlessEqual(value, ("test3", "test2"))
799 class FakeMutableFile:
    # In-memory stand-in for a mutable file node.
    # NOTE(review): the class-level `counter` attribute (orig. line 800,
    # presumably `counter = 0`) is missing from this excerpt; __init__ reads
    # and increments it to derive a unique fake write cap per instance.
801 def __init__(self, initial_contents=""):
802 self.data = initial_contents
803 counter = FakeMutableFile.counter
804 FakeMutableFile.counter += 1
805 writekey = hashutil.ssk_writekey_hash(str(counter))
806 fingerprint = hashutil.ssk_pubkey_fingerprint_hash(str(counter))
807 self.uri = uri.WriteableSSKFileURI(writekey, fingerprint)
    # NOTE(review): the `def get_uri(self):` line (orig. 808) is missing;
    # this return is its body.
809 return self.uri.to_string()
810 def download_best_version(self):
811 return defer.succeed(self.data)
812 def get_writekey(self):
    # NOTE(review): the bodies of get_writekey, is_readonly, and is_mutable
    # (orig. 813, 815, 817) are missing from this excerpt.
814 def is_readonly(self):
816 def is_mutable(self):
818 def modify(self, modifier):
    # Apply the modifier synchronously; the (None, True) arguments mimic the
    # (servermap, first_time) signature the real node passes.
819 self.data = modifier(self.data, None, True)
820 return defer.succeed(None)
822 class FakeClient2(Client):
    # NOTE(review): the __init__ override (orig. lines 823-824) is missing
    # from this excerpt.
825 def create_mutable_file(self, initial_contents=""):
    # Hand back an in-memory FakeMutableFile instead of touching the grid.
826 return defer.succeed(FakeMutableFile(initial_contents))
828 class Dirnode2(unittest.TestCase, testutil.ShouldFailMixin):
    # NOTE(review): the `def setUp(self):` line (orig. 829) is missing from
    # this excerpt; the following line is its body.
830 self.client = FakeClient2()
832 def test_from_future(self):
833 # create a dirnode that contains unknown URI types, and make sure we
834 # tolerate them properly. Since dirnodes aren't allowed to add
835 # unknown node types, we have to be tricky.
836 d = self.client.create_empty_dirnode()
837 future_writecap = "x-tahoe-crazy://I_am_from_the_future."
838 future_readcap = "x-tahoe-crazy-readonly://I_am_from_the_future."
839 future_node = UnknownNode(future_writecap, future_readcap)
    # NOTE(review): the callback wrapper binding `n` / `self._node`
    # (orig. 840-841) and its addCallback registration are missing from
    # this excerpt.
842 return n.set_node(u"future", future_node)
845 # we should be prohibited from adding an unknown URI to a directory,
846 # since we don't know how to diminish the cap to a readcap (for the
847 # dirnode's rocap slot), and we don't want to accidentally grant
848 # write access to a holder of the dirnode's readcap.
849 d.addCallback(lambda ign:
850 self.shouldFail(CannotPackUnknownNodeError,
852 "cannot pack unknown node as child add",
853 self._node.set_uri, u"add", future_writecap))
854 d.addCallback(lambda ign: self._node.list())
855 def _check(children):
856 self.failUnlessEqual(len(children), 1)
857 (fn, metadata) = children[u"future"]
858 self.failUnless(isinstance(fn, UnknownNode), fn)
859 self.failUnlessEqual(fn.get_uri(), future_writecap)
860 self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
861 # but we *should* be allowed to copy this node, because the
862 # UnknownNode contains all the information that was in the
863 # original directory (readcap and writecap), so we're preserving
865 return self._node.set_node(u"copy", fn)
866 d.addCallback(_check)
867 d.addCallback(lambda ign: self._node.list())
868 def _check2(children):
869 self.failUnlessEqual(len(children), 2)
870 (fn, metadata) = children[u"copy"]
871 self.failUnless(isinstance(fn, UnknownNode), fn)
872 self.failUnlessEqual(fn.get_uri(), future_writecap)
873 self.failUnlessEqual(fn.get_readonly_uri(), future_readcap)
class DeepStats(unittest.TestCase):
    """Unit tests for the dirnode.DeepStats accumulator: counters,
    running sums, maxima, and the size-histogram bucketing."""
    timeout = 240 # It takes longer than 120 seconds on Francois's arm box.
    def test_stats(self):
        ds = dirnode.DeepStats(None)
        ds.add("count-files")
        ds.add("size-immutable-files", 123)
        ds.histogram("size-files-histogram", 123)
        ds.max("largest-directory", 444)
        # NOTE(review): the excerpt appears to omit the line(s) that fetch
        # the accumulated results into `s` — confirm against the full file.
        self.failUnlessEqual(s["count-files"], 1)
        self.failUnlessEqual(s["size-immutable-files"], 123)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)
        ds.add("count-files")
        ds.add("size-immutable-files", 321)
        ds.histogram("size-files-histogram", 321)
        ds.max("largest-directory", 2) # smaller than the earlier 444
        # NOTE(review): result-fetch line(s) again omitted here — confirm.
        self.failUnlessEqual(s["count-files"], 2)
        self.failUnlessEqual(s["size-immutable-files"], 444) # 123 + 321
        self.failUnlessEqual(s["largest-directory"], 444) # max unchanged by 2
        self.failUnlessEqual(s["count-literal-files"], 0)
        # the two sizes land in distinct histogram buckets
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (101, 316, 1), (317, 1000, 1) ])
        # second pass: flood the histogram with 1..1099 plus one huge value
        ds = dirnode.DeepStats(None)
        for i in range(1, 1100):
            ds.histogram("size-files-histogram", i)
        ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
        self.failUnlessEqual(s["size-files-histogram"],
                             # NOTE(review): most of the expected bucket
                             # list (and the closing bracket) is missing
                             # from this excerpt — confirm.
                             (3162277660169L, 10000000000000L, 1),
class UCWEingMutableFileNode(MutableFileNode):
    # When set, the next upload is converted into an
    # UncoordinatedWriteError (the flag clears itself first), letting
    # tests exercise dirnode retry behavior (see Deleter.test_retry).
    please_ucwe_after_next_upload = False

    def _upload(self, new_contents, servermap):
        # Perform the real upload, then optionally fake a UCWE.
        d = MutableFileNode._upload(self, new_contents, servermap)
        # NOTE(review): this excerpt appears to omit a line here — likely
        # the header of a callback wrapping the check below, since raising
        # inside a callback is what turns into a failing Deferred —
        # confirm against the full file.
        if self.please_ucwe_after_next_upload:
            self.please_ucwe_after_next_upload = False
            raise UncoordinatedWriteError()
        # NOTE(review): trailing lines (returning the result and/or the
        # Deferred) appear to be missing from this excerpt — confirm.
class UCWEingDirectoryNode(dirnode.DirectoryNode):
    """A DirectoryNode that swaps in UCWEingMutableFileNode as its
    filenode class, so a test can force one UncoordinatedWriteError
    on the next upload of the directory's backing mutable file."""
    filenode_class = UCWEingMutableFileNode
class Deleter(GridTestMixin, unittest.TestCase):
    """Regression test for ticket #550: dirnode.delete must survive an
    UncoordinatedWriteError by retrying instead of mis-reporting
    NoSuchChildError."""
    timeout = 3600 # It takes longer than 433 seconds on Zandr's ARM box.
    def test_retry(self):
        # ticket #550, a dirnode.delete which experiences an
        # UncoordinatedWriteError will fail with an incorrect "you're
        # deleting something which isn't there" NoSuchChildError exception.
        # NOTE(review): a line appears to be missing from this excerpt.
        # to trigger this, we start by creating a directory with a single
        # file in it. Then we create a special dirnode that uses a modified
        # MutableFileNode which will raise UncoordinatedWriteError once on
        # demand. We then call dirnode.delete, which ought to retry and
        # NOTE(review): comment tail and a line or two missing here —
        # confirm against the full file.
        self.basedir = self.mktemp()
        # NOTE(review): the grid-setup call (GridTestMixin) appears to be
        # omitted from this excerpt — confirm.
        c0 = self.g.clients[0]
        d = c0.create_empty_dirnode()
        small = upload.Data("Small enough for a LIT", None)
        def _created_dir(dn):
            # NOTE(review): a line appears to be missing here (possibly
            # stashing dn) — confirm.
            self.root_uri = dn.get_uri()
            return dn.add_file(u"file", small)
        d.addCallback(_created_dir)
        def _do_delete(ignored):
            # Re-open the same directory through the UCWE-raising node
            # class and arm the one-shot failure flag.
            n = UCWEingDirectoryNode(c0).init_from_uri(self.root_uri)
            assert n._node.please_ucwe_after_next_upload == False
            n._node.please_ucwe_after_next_upload = True
            # This should succeed, not raise an exception
            return n.delete(u"file")
        d.addCallback(_do_delete)
        # NOTE(review): the excerpt ends abruptly; the full file presumably
        # returns d (and may add further checks) — confirm.
969 class Adder(unittest.TestCase,
970 testutil.ShouldFailMixin, testutil.StallMixin, ErrorMixin):
973 self.client = FakeClient()
975 def test_overwrite(self):
976 fileuri = make_chk_file_uri(1234)
977 filenode = self.client.create_node_from_uri(fileuri)
978 d = self.client.create_empty_dirnode()
980 def _create_directory_tree(root_node):
985 d = root_node.add_file(u'file1', upload.Data("Important Things",
987 d.addCallback(lambda res:
988 root_node.add_file(u'file2', upload.Data("Sekrit Codes", None)))
989 d.addCallback(lambda res:
990 root_node.create_empty_directory(u"dir1"))
991 d.addCallback(lambda res: root_node)
994 d.addCallback(_create_directory_tree)
996 def _test_adder(root_node):
997 d = root_node.set_node(u'file1', filenode)
998 # We've overwritten file1. Let's try it with a directory
999 d.addCallback(lambda res:
1000 root_node.create_empty_directory(u'dir2'))
1001 d.addCallback(lambda res:
1002 root_node.set_node(u'dir2', filenode))
1003 # We try overwriting a file with a child while also specifying
1004 # overwrite=False. We should receive an ExistingChildError
1006 d.addCallback(lambda res:
1007 self.shouldFail(ExistingChildError, "set_node",
1008 "child 'file1' already exists",
1009 root_node.set_node, u"file1",
1010 filenode, overwrite=False))
1011 # If we try with a directory, we should see the same thing
1012 d.addCallback(lambda res:
1013 self.shouldFail(ExistingChildError, "set_node",
1014 "child 'dir1' already exists",
1015 root_node.set_node, u'dir1', filenode,
1017 d.addCallback(lambda res:
1018 root_node.set_node(u'file1', filenode,
1019 overwrite="only-files"))
1020 d.addCallback(lambda res:
1021 self.shouldFail(ExistingChildError, "set_node",
1022 "child 'dir1' already exists",
1023 root_node.set_node, u'dir1', filenode,
1024 overwrite="only-files"))
1027 d.addCallback(_test_adder)