3 from zope.interface import implements
4 from twisted.trial import unittest
5 from allmydata import uri, dirnode
6 from allmydata.immutable import upload
7 from allmydata.interfaces import IURI, IClient, IMutableFileNode, \
8 INewDirectoryURI, IReadonlyNewDirectoryURI, IFileNode, ExistingChildError
9 from allmydata.util import hashutil, testutil
10 from allmydata.test.common import make_chk_file_uri, make_mutable_file_uri, \
11 FakeDirectoryNode, create_chk_filenode
13 # to test dirnode.py, we want to construct a tree of real DirectoryNodes that
14 # contain pointers to fake files. We start with a fake MutableFileNode that
15 # stores all of its data in a static table.
    # Interior of the Marker class (the ``class Marker:`` header falls
    # outside this extraction).  A Marker is a fake file node that stores
    # nothing but a URI string; it implements just enough of
    # IFileNode/IMutableFileNode for the dirnode tests.
    implements(IFileNode, IMutableFileNode) # sure, why not

    def __init__(self, nodeuri):
        # Accept either a cap string or a URI object.
        if not isinstance(nodeuri, str):
            nodeuri = nodeuri.to_string()
        self.nodeuri = nodeuri
        # Derive a deterministic (fake) verifier cap from the node URI:
        # 16-byte storage index plus a full-length fingerprint.
        si = hashutil.tagged_hash("tag1", nodeuri)[:16]
        fp = hashutil.tagged_hash("tag2", nodeuri)
        self.verifieruri = uri.SSKVerifierURI(storage_index=si,
                                              fingerprint=fp).to_string()

    # NOTE(review): a ``get_uri`` method appears to be missing from this
    # extraction, and the body of get_readonly_uri below is absent —
    # presumably it returned self.nodeuri; confirm against upstream.
    def get_readonly_uri(self):

    def get_verifier(self):
        # Verifier cap computed once in __init__.
        return self.verifieruri
34 # dirnode requires three methods from the client: upload(),
35 # create_node_from_uri(), and create_empty_dirnode(). Of these, upload() is
36 # only used by the convenience composite method add_file().
    def upload(self, uploadable):
        # Fake of the client upload() interface: read the uploadable in
        # full, wrap the bytes in a fake CHK filenode, and report its URI
        # through an UploadResults.  Only dirnode.add_file() uses this.
        d = uploadable.get_size()
        d.addCallback(lambda size: uploadable.read(size))
        # NOTE(review): the ``def _got_data(datav):`` header (and the line
        # joining the data chunks) is missing from this extraction; the
        # three lines below are its body.
            n = create_chk_filenode(self, data)
            results = upload.UploadResults()
            results.uri = n.get_uri()
            # NOTE(review): ``return results`` missing here.
        d.addCallback(_got_data)
        # NOTE(review): trailing ``return d`` missing from this extraction.
    def create_node_from_uri(self, u):
        # Return a FakeDirectoryNode for directory caps; anything else
        # becomes a bare Marker holding the cap string.
        # NOTE(review): one line is missing here in this extraction
        # (presumably ``u = IURI(u)`` to normalize string caps).
        if (INewDirectoryURI.providedBy(u)
            or IReadonlyNewDirectoryURI.providedBy(u)):
            return FakeDirectoryNode(self).init_from_uri(u)
        return Marker(u.to_string())
    def create_empty_dirnode(self):
        # Build a fresh FakeDirectoryNode and eventually fire a Deferred
        # with it, mimicking the real client API.
        n = FakeDirectoryNode(self)
        # NOTE(review): the line binding ``d`` (presumably
        # ``d = n.create()``) is missing from this extraction.
        d.addCallback(lambda res: n)
        # NOTE(review): trailing ``return d`` also missing.
class Dirnode(unittest.TestCase, testutil.ShouldFailMixin, testutil.StallMixin):
    # Tests for dirnode.py, driven through the fake client / fake filenode
    # machinery defined earlier in this module.
    # NOTE(review): the ``def setUp(self):`` header is missing from this
    # extraction; the next line is its body.
        self.client = FakeClient()

    # NOTE(review): the first test method's header and its callback
    # wrapper (which binds ``res`` and ``rep``) are missing from this
    # extraction; the lines below check that a fresh dirnode is a
    # FakeDirectoryNode whose repr advertises read-write ("RW") access.
        d = self.client.create_empty_dirnode()
            self.failUnless(isinstance(res, FakeDirectoryNode))
            self.failUnless("RW" in rep)
    def test_corrupt(self):
        # Flip a bit in the stored dirnode ciphertext and verify that the
        # integrity check (an HMAC over the crypttext) detects it.
        d = self.client.create_empty_dirnode()
        # NOTE(review): a ``def _created(dn):`` wrapper header is missing
        # from this extraction; the following lines run inside it.
        u = make_mutable_file_uri()
        d = dn.set_uri(u"child", u, {})
        d.addCallback(lambda res: dn.list())
        def _check1(children):
            self.failUnless(u"child" in children)
        d.addCallback(_check1)
        d.addCallback(lambda res:
                      self.shouldFail(KeyError, "get bogus", None,
        # NOTE(review): the remaining shouldFail arguments and the
        # ``def _corrupt(res):`` header are missing from this extraction.
            si = IURI(filenode.get_uri()).storage_index
            old_contents = filenode.all_contents[si]
            # we happen to know that the writecap is encrypted near the
            # end of the string. Flip one of its bits and make sure we
            # detect the corruption.
            new_contents = testutil.flip_bit(old_contents, -10)
            # TODO: also test flipping bits in the other portions
            filenode.all_contents[si] = new_contents
        d.addCallback(_corrupt)
        # NOTE(review): the ``def _check2(res):`` header is missing here;
        # the corrupted dirnode must now fail its list() with an
        # IntegrityCheckError.
            self.shouldFail(hashutil.IntegrityCheckError, "corrupt",
                            "HMAC does not match, crypttext is corrupted",
        d.addCallback(_check2)
        d.addCallback(_created)
        # NOTE(review): trailing ``return d`` missing from this extraction.
    def test_check(self):
        # A freshly created (empty) dirnode should report as healthy.
        d = self.client.create_empty_dirnode()
        d.addCallback(lambda dn: dn.check())
        # NOTE(review): the callback header binding ``res`` is missing
        # from this extraction.
            self.failUnless(res.is_healthy())
        # NOTE(review): trailing ``return d`` also missing.
    def test_readonly(self):
        # Every mutating operation on a read-only dirnode must raise
        # dirnode.NotMutableError, and moving a child INTO a read-only
        # dirnode must fail as well; the original child must survive.
        fileuri = make_chk_file_uri(1234)
        filenode = self.client.create_node_from_uri(fileuri)
        uploadable = upload.Data("some data", convergence="some convergence string")

        d = self.client.create_empty_dirnode()
        # NOTE(review): the ``def _created(rw_dn):`` header is missing
        # from this extraction; the next two lines are its body.
        d2 = rw_dn.set_uri(u"child", fileuri)
        d2.addCallback(lambda res: rw_dn)
        d.addCallback(_created)

        # NOTE(review): the ``def _ready(rw_dn):`` header is missing here.
        ro_uri = rw_dn.get_readonly_uri()
        ro_dn = self.client.create_node_from_uri(ro_uri)
        self.failUnless(ro_dn.is_readonly())
        self.failUnless(ro_dn.is_mutable())

        # NOTE(review): several shouldFail labels below are copy-pasted
        # as "set_uri ro" even though they exercise set_node, add_file,
        # delete, create_empty_directory, and move_child_to — consider
        # renaming them so failure reports identify the right method.
        self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                        ro_dn.set_uri, u"newchild", fileuri)
        self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                        ro_dn.set_node, u"newchild", filenode)
        self.shouldFail(dirnode.NotMutableError, "set_nodes ro", None,
                        ro_dn.set_nodes, [ (u"newchild", filenode) ])
        self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                        ro_dn.add_file, u"newchild", uploadable)
        self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                        ro_dn.delete, u"child")
        self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                        ro_dn.create_empty_directory, u"newchild")
        self.shouldFail(dirnode.NotMutableError, "set_metadata_for ro", None,
                        ro_dn.set_metadata_for, u"child", {})
        self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                        ro_dn.move_child_to, u"child", rw_dn)
        self.shouldFail(dirnode.NotMutableError, "set_uri ro", None,
                        rw_dn.move_child_to, u"child", ro_dn)
        # NOTE(review): the return of the _ready callback (presumably
        # ``return ro_dn.list()``) is missing from this extraction.
        d.addCallback(_ready)
        def _listed(children):
            # the original child must still be present afterwards
            self.failUnless(u"child" in children)
        d.addCallback(_listed)
        # NOTE(review): trailing ``return d`` missing from this extraction.
163 def failUnlessGreaterThan(self, a, b):
164 self.failUnless(a > b, "%r should be > %r" % (a, b))
166 def failUnlessGreaterOrEqualThan(self, a, b):
167 self.failUnless(a >= b, "%r should be >= %r" % (a, b))
    def test_create(self):
        # End-to-end exercise of DirectoryNode: child CRUD through
        # set_uri / set_node / set_children / set_nodes / add_file,
        # metadata and timestamp rules, manifest and deep-stats, delete,
        # and move_child_to (including overwrite=False semantics).
        self.expected_manifest = []

        d = self.client.create_empty_dirnode()
        # NOTE(review): the callback header binding ``n`` and a few
        # assertions are missing from this extraction.
        self.failUnless(n.is_mutable())
        # NOTE(review): the line binding ``u`` (presumably
        # ``u = n.get_uri()``) is missing here.
        self.failUnless(u.startswith("URI:DIR2:"), u)
        u_ro = n.get_readonly_uri()
        self.failUnless(u_ro.startswith("URI:DIR2-RO:"), u_ro)
        u_v = n.get_verifier()
        self.failUnless(u_v.startswith("URI:DIR2-Verifier:"), u_v)
        self.expected_manifest.append(u_v)

        d.addCallback(lambda res: self.failUnlessEqual(res, {}))
        d.addCallback(lambda res: n.has_child(u"missing"))
        d.addCallback(lambda res: self.failIf(res))
        fake_file_uri = make_mutable_file_uri()
        other_file_uri = make_mutable_file_uri()
        m = Marker(fake_file_uri)
        ffu_v = m.get_verifier()
        assert isinstance(ffu_v, str)
        self.expected_manifest.append(ffu_v)
        d.addCallback(lambda res: n.set_uri(u"child", fake_file_uri))
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "set_uri-no",
                                      "child 'child' already exists",
                                      n.set_uri, u"child", other_file_uri,
        # NOTE(review): the overwrite=False argument and closing parens
        # are missing from this extraction.

        d.addCallback(lambda res: n.create_empty_directory(u"subdir"))
        # /subdir = directory
        def _created(subdir):
            self.failUnless(isinstance(subdir, FakeDirectoryNode))
            # NOTE(review): a line (presumably ``self.subdir = subdir``)
            # is missing here — self.subdir is used below.
            new_v = subdir.get_verifier()
            assert isinstance(new_v, str)
            self.expected_manifest.append(new_v)
        d.addCallback(_created)

        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "mkdir-no",
                                      "child 'subdir' already exists",
                                      n.create_empty_directory, u"subdir",
        # NOTE(review): trailing overwrite=False argument missing.

        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
                                           sorted([u"child", u"subdir"])))

        d.addCallback(lambda res: n.build_manifest())
        def _check_manifest(manifest):
            self.failUnlessEqual(sorted(manifest),
                                 sorted(self.expected_manifest))
        d.addCallback(_check_manifest)

        d.addCallback(lambda res: n.deep_stats())
        def _check_deepstats(stats):
            self.failUnless(isinstance(stats, dict))
            expected = {"count-immutable-files": 0,
                        "count-mutable-files": 1,
                        "count-literal-files": 0,
                        # NOTE(review): a "count-files" entry appears to
                        # be missing from this extraction.
                        "count-directories": 2,
                        "size-immutable-files": 0,
                        "size-literal-files": 0,
                        #"size-directories": 616, # varies
                        #"largest-directory": 616,
                        "largest-directory-children": 2,
                        "largest-immutable-file": 0,
            # NOTE(review): the closing brace of ``expected`` is missing.
            for k,v in expected.iteritems():
                self.failUnlessEqual(stats[k], v,
                                     "stats[%s] was %s, not %s" %
            # NOTE(review): the %-format argument tuple is missing here.
            self.failUnless(stats["size-directories"] > 500,
                            stats["size-directories"])
            self.failUnless(stats["largest-directory"] > 500,
                            stats["largest-directory"])
            self.failUnlessEqual(stats["size-files-histogram"], [])
        d.addCallback(_check_deepstats)

        def _add_subsubdir(res):
            return self.subdir.create_empty_directory(u"subsubdir")
        d.addCallback(_add_subsubdir)
        d.addCallback(lambda res: n.get_child_at_path(u"subdir/subsubdir"))
        d.addCallback(lambda subsubdir:
                      self.failUnless(isinstance(subsubdir,
        # NOTE(review): closing of the isinstance check missing.
        d.addCallback(lambda res: n.get_child_at_path(u""))
        d.addCallback(lambda res: self.failUnlessEqual(res.get_uri(),
        # NOTE(review): expected-URI argument missing.

        d.addCallback(lambda res: n.get_metadata_for(u"child"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(sorted(metadata.keys()),
        # NOTE(review): expected key list missing.

        # it should be possible to add a child without any metadata
        d.addCallback(lambda res: n.set_uri(u"c2", fake_file_uri, {}))
        d.addCallback(lambda res: n.get_metadata_for(u"c2"))
        d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))

        # if we don't set any defaults, the child should get timestamps
        d.addCallback(lambda res: n.set_uri(u"c3", fake_file_uri))
        d.addCallback(lambda res: n.get_metadata_for(u"c3"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(sorted(metadata.keys()),
        # NOTE(review): expected ["ctime", "mtime"] argument missing.

        # or we can add specific metadata at set_uri() time, which
        # overrides the timestamps
        d.addCallback(lambda res: n.set_uri(u"c4", fake_file_uri,
        # NOTE(review): metadata dict argument missing.
        d.addCallback(lambda res: n.get_metadata_for(u"c4"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(metadata, {"key": "value"}))

        d.addCallback(lambda res: n.delete(u"c2"))
        d.addCallback(lambda res: n.delete(u"c3"))
        d.addCallback(lambda res: n.delete(u"c4"))

        # set_node + metadata
        # it should be possible to add a child without any metadata
        d.addCallback(lambda res: n.set_node(u"d2", n, {}))
        d.addCallback(lambda res: self.client.create_empty_dirnode())
        d.addCallback(lambda n2:
                      self.shouldFail(ExistingChildError, "set_node-no",
                                      "child 'd2' already exists",
                                      n.set_node, u"d2", n2,
        # NOTE(review): overwrite=False argument missing.
        d.addCallback(lambda res: n.get_metadata_for(u"d2"))
        d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))

        # if we don't set any defaults, the child should get timestamps
        d.addCallback(lambda res: n.set_node(u"d3", n))
        d.addCallback(lambda res: n.get_metadata_for(u"d3"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(sorted(metadata.keys()),
        # NOTE(review): expected ["ctime", "mtime"] argument missing.

        # or we can add specific metadata at set_node() time, which
        # overrides the timestamps
        d.addCallback(lambda res: n.set_node(u"d4", n,
        # NOTE(review): metadata dict argument missing.
        d.addCallback(lambda res: n.get_metadata_for(u"d4"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(metadata, {"key": "value"}))

        d.addCallback(lambda res: n.delete(u"d2"))
        d.addCallback(lambda res: n.delete(u"d3"))
        d.addCallback(lambda res: n.delete(u"d4"))

        # metadata through set_children()
        d.addCallback(lambda res: n.set_children([ (u"e1", fake_file_uri),
                                                   (u"e2", fake_file_uri, {}),
                                                   (u"e3", fake_file_uri,
        # NOTE(review): trailing metadata dict and closing brackets
        # are missing from this extraction.
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "set_children-no",
                                      "child 'e1' already exists",
        # NOTE(review): the ``n.set_children,`` callable line is missing.
                                      [ (u"e1", other_file_uri),
                                        (u"new", other_file_uri), ],
        # NOTE(review): overwrite=False and closing parens missing.
        # and 'new' should not have been created
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children: self.failIf(u"new" in children))
        d.addCallback(lambda res: n.get_metadata_for(u"e1"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(sorted(metadata.keys()),
        # NOTE(review): expected key list missing.
        d.addCallback(lambda res: n.get_metadata_for(u"e2"))
        d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
        d.addCallback(lambda res: n.get_metadata_for(u"e3"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(metadata, {"key": "value"}))

        d.addCallback(lambda res: n.delete(u"e1"))
        d.addCallback(lambda res: n.delete(u"e2"))
        d.addCallback(lambda res: n.delete(u"e3"))

        # metadata through set_nodes()
        d.addCallback(lambda res: n.set_nodes([ (u"f1", n),
        # NOTE(review): the remaining (f2, f3) tuples and closing
        # brackets are missing from this extraction.
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "set_nodes-no",
                                      "child 'f1' already exists",
        # NOTE(review): the callable and argument lines are missing.
        # and 'new' should not have been created
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children: self.failIf(u"new" in children))
        d.addCallback(lambda res: n.get_metadata_for(u"f1"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(sorted(metadata.keys()),
        # NOTE(review): expected key list missing.
        d.addCallback(lambda res: n.get_metadata_for(u"f2"))
        d.addCallback(lambda metadata: self.failUnlessEqual(metadata, {}))
        d.addCallback(lambda res: n.get_metadata_for(u"f3"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(metadata, {"key": "value"}))

        d.addCallback(lambda res: n.delete(u"f1"))
        d.addCallback(lambda res: n.delete(u"f2"))
        d.addCallback(lambda res: n.delete(u"f3"))

        d.addCallback(lambda res:
                      n.set_metadata_for(u"child",
                                         {"tags": ["web2.0-compatible"]}))
        d.addCallback(lambda n1: n1.get_metadata_for(u"child"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(metadata,
                                           {"tags": ["web2.0-compatible"]}))

        # NOTE(review): the ``def _start(res):`` header is missing from
        # this extraction; also note ``time`` is used here, so the import
        # presumably sits in the (unseen) import block.
            self._start_timestamp = time.time()
        d.addCallback(_start)
        # simplejson-1.7.1 (as shipped on Ubuntu 'gutsy') rounds all
        # floats to hundredths (it uses str(num) instead of repr(num)).
        # simplejson-1.7.3 does not have this bug. To prevent this bug
        # from causing the test to fail, stall for more than a few
        # hundredths of a second.
        d.addCallback(self.stall, 0.1)
        d.addCallback(lambda res: n.add_file(u"timestamps",
                                             upload.Data("stamp me", convergence="some convergence string")))
        d.addCallback(self.stall, 0.1)
        # NOTE(review): the ``def _stop(res):`` header and its
        # ``d.addCallback(_stop)`` registration are missing here.
            self._stop_timestamp = time.time()

        d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
        def _check_timestamp1(metadata):
            # both timestamps must fall inside the [start, stop] window
            self.failUnless("ctime" in metadata)
            self.failUnless("mtime" in metadata)
            self.failUnlessGreaterOrEqualThan(metadata["ctime"],
                                              self._start_timestamp)
            self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
            # NOTE(review): the metadata["ctime"] argument is missing.
            self.failUnlessGreaterOrEqualThan(metadata["mtime"],
                                              self._start_timestamp)
            self.failUnlessGreaterOrEqualThan(self._stop_timestamp,
            # NOTE(review): the metadata["mtime"] argument is missing.
            # Our current timestamp rules say that replacing an existing
            # child should preserve the 'ctime' but update the mtime
            self._old_ctime = metadata["ctime"]
            self._old_mtime = metadata["mtime"]
        d.addCallback(_check_timestamp1)
        d.addCallback(self.stall, 2.0) # accommodate low-res timestamps
        d.addCallback(lambda res: n.set_node(u"timestamps", n))
        d.addCallback(lambda res: n.get_metadata_for(u"timestamps"))
        def _check_timestamp2(metadata):
            # replacement preserved ctime but advanced mtime
            self.failUnlessEqual(metadata["ctime"], self._old_ctime,
                                 "%s != %s" % (metadata["ctime"],
            # NOTE(review): the second %-format argument is missing.
            self.failUnlessGreaterThan(metadata["mtime"], self._old_mtime)
            return n.delete(u"timestamps")
        d.addCallback(_check_timestamp2)

        # also make sure we can add/update timestamps on a
        # previously-existing child that didn't have any, since there are
        # a lot of 0.7.0-generated edges around out there
        d.addCallback(lambda res: n.set_node(u"no_timestamps", n, {}))
        d.addCallback(lambda res: n.set_node(u"no_timestamps", n))
        d.addCallback(lambda res: n.get_metadata_for(u"no_timestamps"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(sorted(metadata.keys()),
        # NOTE(review): expected ["ctime", "mtime"] argument missing.
        d.addCallback(lambda res: n.delete(u"no_timestamps"))

        d.addCallback(lambda res: n.delete(u"subdir"))
        d.addCallback(lambda old_child:
                      self.failUnlessEqual(old_child.get_uri(),
                                           self.subdir.get_uri()))

        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
        # NOTE(review): expected child-name list missing.
        uploadable = upload.Data("some data", convergence="some convergence string")
        d.addCallback(lambda res: n.add_file(u"newfile", uploadable))
        d.addCallback(lambda newnode:
                      self.failUnless(IFileNode.providedBy(newnode)))
        other_uploadable = upload.Data("some data", convergence="stuff")
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "add_file-no",
                                      "child 'newfile' already exists",
                                      n.add_file, u"newfile",
        # NOTE(review): other_uploadable / overwrite=False args missing.
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
                                           sorted([u"child", u"newfile"])))
        d.addCallback(lambda res: n.get_metadata_for(u"newfile"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(sorted(metadata.keys()),
        # NOTE(review): expected key list missing.

        d.addCallback(lambda res: n.add_file(u"newfile-metadata",
        # NOTE(review): uploadable and metadata arguments missing.
        d.addCallback(lambda newnode:
                      self.failUnless(IFileNode.providedBy(newnode)))
        d.addCallback(lambda res: n.get_metadata_for(u"newfile-metadata"))
        d.addCallback(lambda metadata:
                      self.failUnlessEqual(metadata, {"key": "value"}))
        d.addCallback(lambda res: n.delete(u"newfile-metadata"))

        d.addCallback(lambda res: n.create_empty_directory(u"subdir2"))
        def _created2(subdir2):
            self.subdir2 = subdir2
            # put something in the way, to make sure it gets overwritten
            return subdir2.add_file(u"child", upload.Data("overwrite me",
        # NOTE(review): the convergence argument / closing parens missing.
        d.addCallback(_created2)

        d.addCallback(lambda res:
                      n.move_child_to(u"child", self.subdir2))
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
                                           sorted([u"newfile", u"subdir2"])))
        d.addCallback(lambda res: self.subdir2.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
        # NOTE(review): expected [u"child"] argument missing.
        d.addCallback(lambda res: self.subdir2.get(u"child"))
        d.addCallback(lambda child:
                      self.failUnlessEqual(child.get_uri(),
                                           fake_file_uri.to_string()))

        # move it back, using new_child_name=
        d.addCallback(lambda res:
                      self.subdir2.move_child_to(u"child", n, u"newchild"))
        d.addCallback(lambda res: n.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()),
                                           sorted([u"newchild", u"newfile",
        # NOTE(review): remaining names and closing brackets missing.
        d.addCallback(lambda res: self.subdir2.list())
        d.addCallback(lambda children:
                      self.failUnlessEqual(sorted(children.keys()), []))

        # now make sure that we honor overwrite=False
        d.addCallback(lambda res:
                      self.subdir2.set_uri(u"newchild", other_file_uri))
        d.addCallback(lambda res:
                      self.shouldFail(ExistingChildError, "move_child_to-no",
                                      "child 'newchild' already exists",
                                      n.move_child_to, u"newchild",
        # NOTE(review): destination / overwrite=False args missing.
        d.addCallback(lambda res: self.subdir2.get(u"newchild"))
        d.addCallback(lambda child:
                      self.failUnlessEqual(child.get_uri(),
                                           other_file_uri.to_string()))
        # NOTE(review): trailing ``return d`` missing from this extraction.
class DeepStats(unittest.TestCase):
    # Unit tests for the dirnode.DeepStats accumulator:
    # add() sums counters, histogram() buckets sizes, max() keeps maxima.
    def test_stats(self):
        ds = dirnode.DeepStats()
        ds.add("count-files")
        ds.add("size-immutable-files", 123)
        ds.histogram("size-files-histogram", 123)
        ds.max("largest-directory", 444)
        # NOTE(review): a line binding ``s`` (presumably
        # ``s = ds.get_results()``) is missing from this extraction.
        self.failUnlessEqual(s["count-files"], 1)
        self.failUnlessEqual(s["size-immutable-files"], 123)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)
        # second round: counters accumulate, max() keeps the larger value
        ds.add("count-files")
        ds.add("size-immutable-files", 321)
        ds.histogram("size-files-histogram", 321)
        ds.max("largest-directory", 2)
        # NOTE(review): another ``s = ds.get_results()`` line missing here.
        self.failUnlessEqual(s["count-files"], 2)
        self.failUnlessEqual(s["size-immutable-files"], 444)
        self.failUnlessEqual(s["largest-directory"], 444)
        self.failUnlessEqual(s["count-literal-files"], 0)
        self.failUnlessEqual(s["size-files-histogram"],
                             [ (101, 316, 1), (317, 1000, 1) ])

        # fill every bucket up to 1099 bytes, plus one 4TB outlier
        ds = dirnode.DeepStats()
        for i in range(1, 1100):
            ds.histogram("size-files-histogram", i)
        ds.histogram("size-files-histogram", 4*1000*1000*1000*1000) # 4TB
        # NOTE(review): the ``s = ds.get_results()`` line and most of the
        # expected histogram buckets are missing from this extraction;
        # only one mid-list bucket tuple survives below.
        self.failUnlessEqual(s["size-files-histogram"],
                             (3162277660169L, 10000000000000L, 1),
# Convenience aliases for the netstring helpers exercised by the
# Netstring tests below.
netstring = hashutil.netstring
split_netstring = dirnode.split_netstring
class Netstring(unittest.TestCase):
    """Tests for the netstring encoder and split_netstring parser."""

    def test_split(self):
        # Two back-to-back netstrings split cleanly into their payloads.
        pair = netstring("hello") + netstring("world")
        self.failUnlessEqual(split_netstring(pair, 2), ("hello", "world"))
        self.failUnlessEqual(split_netstring(pair, 2, False),
                             ("hello", "world"))
        # with leftovers allowed, the (empty) remainder is appended
        self.failUnlessEqual(split_netstring(pair, 2, True),
                             ("hello", "world", ""))
        # too many pieces requested, or trailing junk with leftovers
        # disallowed, must raise ValueError
        self.failUnlessRaises(ValueError, split_netstring, pair, 3)
        self.failUnlessRaises(ValueError, split_netstring,
                              pair + " extra", 2)
        self.failUnlessRaises(ValueError, split_netstring,
                              pair + " extra", 2, False)

    def test_extra(self):
        # with leftovers allowed, the remainder comes back verbatim
        lone = netstring("hello")
        self.failUnlessEqual(split_netstring(lone, 1, True), ("hello", ""))
        trailing = netstring("hello") + "extra stuff"
        self.failUnlessEqual(split_netstring(trailing, 1, True),
                             ("hello", "extra stuff"))

    def test_nested(self):
        # a netstring may itself contain netstrings; splitting the outer
        # layer must not disturb the embedded encoding
        inner = netstring("hello") + netstring("world") + "extra stuff"
        outer = (netstring("a") + netstring("is")
                 + netstring(inner) + netstring("."))
        top = split_netstring(outer, 4)
        self.failUnlessEqual(len(top), 4)
        self.failUnlessEqual(top[0], "a")
        self.failUnlessEqual(top[1], "is")
        self.failUnlessEqual(top[2], inner)
        self.failUnlessEqual(top[3], ".")
        # the inner blob has trailing junk, so strict splits fail ...
        self.failUnlessRaises(ValueError, split_netstring, inner, 2)
        self.failUnlessRaises(ValueError, split_netstring, inner, 2, False)
        # ... but a lenient split recovers both payloads plus the junk
        bottom = split_netstring(inner, 2, True)
        self.failUnlessEqual(bottom, ("hello", "world", "extra stuff"))