2 import os, re, sys, time, simplejson
3 from cStringIO import StringIO
5 from twisted.trial import unittest
6 from twisted.internet import defer
7 from twisted.internet import threads # CLI tests use deferToThread
10 from allmydata import uri
11 from allmydata.storage.mutable import MutableShareFile
12 from allmydata.storage.server import si_a2b
13 from allmydata.immutable import offloaded, upload
14 from allmydata.immutable.literal import LiteralFileNode
15 from allmydata.immutable.filenode import ImmutableFileNode
16 from allmydata.util import idlib, mathutil
17 from allmydata.util import log, base32
18 from allmydata.util.verlib import NormalizedVersion
19 from allmydata.util.encodingutil import quote_output, unicode_to_argv, get_filesystem_encoding
20 from allmydata.util.fileutil import abspath_expanduser_unicode
21 from allmydata.util.consumer import MemoryConsumer, download_to_data
22 from allmydata.scripts import runner
23 from allmydata.interfaces import IDirectoryNode, IFileNode, \
24 NoSuchChildError, NoSharesError
25 from allmydata.monitor import Monitor
26 from allmydata.mutable.common import NotWriteableError
27 from allmydata.mutable import layout as mutable_layout
28 from allmydata.mutable.publish import MutableData
31 from foolscap.api import DeadReferenceError, fireEventually
32 from twisted.python.failure import Failure
33 from twisted.web.client import getPage
34 from twisted.web.error import Error
36 from allmydata.test.common import SystemTestMixin
38 # TODO: move this to common or common_util
39 from allmydata.test.test_runner import RunBinTahoeMixin
42 This is some data to publish to the remote grid.., which needs to be large
43 enough to not fit inside a LIT uri.
class CountingDataUploadable(upload.Data):
    """upload.Data variant that counts how many bytes have been read,
    and can fire a Deferred once a byte threshold is crossed — used by
    the resumable-upload tests to interrupt an upload partway through.
    """
    # running total of bytes handed out by read(); must start at 0 or
    # the first `self.bytes_read += length` raises AttributeError
    bytes_read = 0
    # once bytes_read exceeds this threshold, interrupt_after_d fires
    interrupt_after = None
    interrupt_after_d = None

    def read(self, length):
        """Count `length` bytes, fire interrupt_after_d at most once when
        the threshold is crossed, then delegate to upload.Data.read."""
        self.bytes_read += length
        if self.interrupt_after is not None:
            if self.bytes_read > self.interrupt_after:
                # clear the threshold first so the Deferred fires only once
                self.interrupt_after = None
                self.interrupt_after_d.callback(self)
        return upload.Data.read(self, length)
class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
    """End-to-end system tests that spin up a small grid of nodes and
    exercise upload, download, mutable files, the helper, and the CLI."""
    timeout = 3600 # It takes longer than 960 seconds on Zandr's ARM box.
def test_connections(self):
    """Bring up the grid plus one extra node and check that every client
    sees all servers in its storage broker.

    NOTE(review): several original lines are elided in this excerpt
    (including the `sb` binding, `d.addCallback(_check)` and `return d`).
    """
    self.basedir = "system/SystemTest/test_connections"
    d = self.set_up_nodes()
    self.extra_node = None
    d.addCallback(lambda res: self.add_extra_node(self.numclients))
    def _check(extra_node):
        self.extra_node = extra_node
        for c in self.clients:
            all_peerids = c.get_storage_broker().get_all_serverids()
            self.failUnlessEqual(len(all_peerids), self.numclients+1)
            # NOTE(review): the `sb = c.storage_broker` binding appears
            # to be elided just above this line
            permuted_peers = sb.get_servers_for_psi("a")
            self.failUnlessEqual(len(permuted_peers), self.numclients+1)
    def _shutdown_extra_node(res):
        return self.extra_node.stopService()
    d.addBoth(_shutdown_extra_node)
# test_connections is subsumed by test_upload_and_download, and takes
# quite a while to run on a slow machine (because of all the TLS
# connections that must be established). If we ever rework the introducer
# code to such an extent that we're not sure if it works anymore, we can
# reinstate this test until it does.
def test_upload_and_download_random_key(self):
    """Run the upload/download exercise with a random (non-convergent) key."""
    self.basedir = "system/SystemTest/test_upload_and_download_random_key"
    # convergence=None selects a freshly generated key for each upload
    d = self._test_upload_and_download(convergence=None)
    return d
def test_upload_and_download_convergent(self):
    """Run the upload/download exercise with convergent encryption enabled."""
    self.basedir = "system/SystemTest/test_upload_and_download_convergent"
    # a non-None convergence string enables convergent encryption
    d = self._test_upload_and_download(convergence="some convergence string")
    return d
def _test_upload_and_download(self, convergence):
    """Upload DATA to the grid, then exercise downloads, partial reads,
    bad-URI reads, helper-assisted uploads, resumable (interrupted)
    uploads, and the remote stats interface.

    NOTE(review): this excerpt elides a number of original lines
    (several inner 'def' headers and 'return d' statements); only the
    lines present in the excerpt are reproduced below.
    """
    # we use 4000 bytes of data, which will result in about 400k written
    # to disk among all our simulated nodes
    DATA = "Some data to upload\n" * 200
    d = self.set_up_nodes()
    def _check_connections(res):
        for c in self.clients:
            c.DEFAULT_ENCODING_PARAMETERS['happy'] = 5
            all_peerids = c.get_storage_broker().get_all_serverids()
            self.failUnlessEqual(len(all_peerids), self.numclients)
            sb = c.storage_broker
            permuted_peers = sb.get_servers_for_psi("a")
            self.failUnlessEqual(len(permuted_peers), self.numclients)
    d.addCallback(_check_connections)
    # NOTE(review): the enclosing 'def _do_upload(res):' header appears
    # to be elided here
    u = self.clients[0].getServiceNamed("uploader")
    # we crank the max segsize down to 1024b for the duration of this
    # test, so we can exercise multiple segments. It is important
    # that this is not a multiple of the segment size, so that the
    # tail segment is not the same length as the others. This actualy
    # gets rounded up to 1025 to be a multiple of the number of
    # required shares (since we use 25 out of 100 FEC).
    up = upload.Data(DATA, convergence=convergence)
    up.max_segment_size = 1024
    d.addCallback(_do_upload)
    def _upload_done(results):
        theuri = results.get_uri()
        log.msg("upload finished: uri is %s" % (theuri,))
        # NOTE(review): 'self.uri = theuri' appears to be elided here
        assert isinstance(self.uri, str), self.uri
        self.cap = uri.from_string(self.uri)
        self.n = self.clients[1].create_node_from_uri(self.uri)
    d.addCallback(_upload_done)
    def _upload_again(res):
        # Upload again. If using convergent encryption then this ought to be
        # short-circuited, however with the way we currently generate URIs
        # (i.e. because they include the roothash), we have to do all of the
        # encoding work, and only get to save on the upload part.
        log.msg("UPLOADING AGAIN")
        up = upload.Data(DATA, convergence=convergence)
        up.max_segment_size = 1024
        return self.uploader.upload(up)
    d.addCallback(_upload_again)
    def _download_to_data(res):
        log.msg("DOWNLOADING")
        return download_to_data(self.n)
    d.addCallback(_download_to_data)
    def _download_to_data_done(data):
        log.msg("download finished")
        self.failUnlessEqual(data, DATA)
    d.addCallback(_download_to_data_done)
    # NOTE(review): the enclosing 'def _test_read(res):' header appears
    # to be elided here
    n = self.clients[1].create_node_from_uri(self.uri)
    d = download_to_data(n)
    def _read_done(data):
        self.failUnlessEqual(data, DATA)
    d.addCallback(_read_done)
    d.addCallback(lambda ign:
                  n.read(MemoryConsumer(), offset=1, size=4))
    def _read_portion_done(mc):
        self.failUnlessEqual("".join(mc.chunks), DATA[1:1+4])
    d.addCallback(_read_portion_done)
    d.addCallback(lambda ign:
                  n.read(MemoryConsumer(), offset=2, size=None))
    def _read_tail_done(mc):
        self.failUnlessEqual("".join(mc.chunks), DATA[2:])
    d.addCallback(_read_tail_done)
    d.addCallback(lambda ign:
                  n.read(MemoryConsumer(), size=len(DATA)+1000))
    def _read_too_much(mc):
        self.failUnlessEqual("".join(mc.chunks), DATA)
    d.addCallback(_read_too_much)
    d.addCallback(_test_read)
    def _test_bad_read(res):
        bad_u = uri.from_string_filenode(self.uri)
        bad_u.key = self.flip_bit(bad_u.key)
        bad_n = self.clients[1].create_node_from_uri(bad_u.to_string())
        # this should cause an error during download
        d = self.shouldFail2(NoSharesError, "'download bad node'",
                             bad_n.read, MemoryConsumer(), offset=2)
    d.addCallback(_test_bad_read)
    def _download_nonexistent_uri(res):
        baduri = self.mangle_uri(self.uri)
        badnode = self.clients[1].create_node_from_uri(baduri)
        log.msg("about to download non-existent URI", level=log.UNUSUAL,
                facility="tahoe.tests")
        d1 = download_to_data(badnode)
        def _baduri_should_fail(res):
            log.msg("finished downloading non-existent URI",
                    level=log.UNUSUAL, facility="tahoe.tests")
            self.failUnless(isinstance(res, Failure))
            self.failUnless(res.check(NoSharesError),
                            "expected NoSharesError, got %s" % res)
        d1.addBoth(_baduri_should_fail)
    d.addCallback(_download_nonexistent_uri)
    # add a new node, which doesn't accept shares, and only uses the
    d.addCallback(lambda res: self.add_extra_node(self.numclients,
                                                  add_to_sparent=True))
    def _added(extra_node):
        self.extra_node = extra_node
        self.extra_node.DEFAULT_ENCODING_PARAMETERS['happy'] = 5
    d.addCallback(_added)
    # NOTE(review): the enclosing 'def _has_helper():' header appears to
    # be elided here
    uploader = self.extra_node.getServiceNamed("uploader")
    furl, connected = uploader.get_helper_info()
    d.addCallback(lambda ign: self.poll(_has_helper))
    HELPER_DATA = "Data that needs help to upload" * 1000
    def _upload_with_helper(res):
        u = upload.Data(HELPER_DATA, convergence=convergence)
        d = self.extra_node.upload(u)
        def _uploaded(results):
            n = self.clients[1].create_node_from_uri(results.get_uri())
            return download_to_data(n)
        d.addCallback(_uploaded)
        # NOTE(review): 'def _check(newdata):' header elided here
        self.failUnlessEqual(newdata, HELPER_DATA)
        d.addCallback(_check)
    d.addCallback(_upload_with_helper)
    def _upload_duplicate_with_helper(res):
        u = upload.Data(HELPER_DATA, convergence=convergence)
        u.debug_stash_RemoteEncryptedUploadable = True
        d = self.extra_node.upload(u)
        def _uploaded(results):
            n = self.clients[1].create_node_from_uri(results.get_uri())
            return download_to_data(n)
        d.addCallback(_uploaded)
        # NOTE(review): 'def _check(newdata):' header elided here
        self.failUnlessEqual(newdata, HELPER_DATA)
        self.failIf(hasattr(u, "debug_RemoteEncryptedUploadable"),
                    "uploadable started uploading, should have been avoided")
        d.addCallback(_check)
    if convergence is not None:
        d.addCallback(_upload_duplicate_with_helper)
    d.addCallback(fireEventually)
    def _upload_resumable(res):
        DATA = "Data that needs help to upload and gets interrupted" * 1000
        u1 = CountingDataUploadable(DATA, convergence=convergence)
        u2 = CountingDataUploadable(DATA, convergence=convergence)
        # we interrupt the connection after about 5kB by shutting down
        # the helper, then restarting it.
        u1.interrupt_after = 5000
        u1.interrupt_after_d = defer.Deferred()
        bounced_d = defer.Deferred()
        # NOTE(review): 'def _do_bounce(res):' header elided here
        d = self.bounce_client(0)
        d.addBoth(bounced_d.callback)
        u1.interrupt_after_d.addCallback(_do_bounce)
        # sneak into the helper and reduce its chunk size, so that our
        # debug_interrupt will sever the connection on about the fifth
        # chunk fetched. This makes sure that we've started to write the
        # new shares before we abandon them, which exercises the
        # abort/delete-partial-share code. TODO: find a cleaner way to do
        # this. I know that this will affect later uses of the helper in
        # this same test run, but I'm not currently worried about it.
        offloaded.CHKCiphertextFetcher.CHUNK_SIZE = 1000
        upload_d = self.extra_node.upload(u1)
        # The upload will start, and bounce_client() will be called after
        # about 5kB. bounced_d will fire after bounce_client() finishes
        # shutting down and restarting the node.
        # NOTE(review): 'def _bounced(res):' header elided here
        # By this point, the upload should have failed because of the
        # interruption. upload_d will fire in a moment
        def _should_not_finish(res):
            self.fail("interrupted upload should have failed, not"
                      " finished with result %s" % (res,))
        # NOTE(review): 'def _interrupted(f):' header elided here
        f.trap(DeadReferenceError)
        # make sure we actually interrupted it before finishing
        self.failUnless(u1.bytes_read < len(DATA),
                        "read %d out of %d total" %
                        (u1.bytes_read, len(DATA)))
        upload_d.addCallbacks(_should_not_finish, _interrupted)
        d.addCallback(_bounced)
        def _disconnected(res):
            # check to make sure the storage servers aren't still hanging
            # on to the partial share: their incoming/ directories should
            log.msg("disconnected", level=log.NOISY,
                    facility="tahoe.test.test_system")
            for i in range(self.numclients):
                incdir = os.path.join(self.getdir("client%d" % i),
                                      "storage", "shares", "incoming")
                self.failIf(os.path.exists(incdir) and os.listdir(incdir))
        d.addCallback(_disconnected)
        d.addCallback(lambda res:
                      log.msg("wait_for_helper", level=log.NOISY,
                              facility="tahoe.test.test_system"))
        # then we need to wait for the extra node to reestablish its
        # connection to the helper.
        d.addCallback(lambda ign: self.poll(_has_helper))
        d.addCallback(lambda res:
                      log.msg("uploading again", level=log.NOISY,
                              facility="tahoe.test.test_system"))
        d.addCallback(lambda res: self.extra_node.upload(u2))
        def _uploaded(results):
            cap = results.get_uri()
            log.msg("Second upload complete", level=log.NOISY,
                    facility="tahoe.test.test_system")
            # this is really bytes received rather than sent, but it's
            # convenient and basically measures the same thing
            bytes_sent = results.get_ciphertext_fetched()
            self.failUnless(isinstance(bytes_sent, (int, long)), bytes_sent)
            # We currently don't support resumption of upload if the data is
            # encrypted with a random key. (Because that would require us
            # to store the key locally and re-use it on the next upload of
            # this file, which isn't a bad thing to do, but we currently
            if convergence is not None:
                # Make sure we did not have to read the whole file the
                # second time around .
                self.failUnless(bytes_sent < len(DATA),
                                "resumption didn't save us any work:"
                                " read %r bytes out of %r total" %
                                (bytes_sent, len(DATA)))
            # NOTE(review): the 'else:' branch header appears to be
            # elided here
            # Make sure we did have to read the whole file the second
            # time around -- because the one that we partially uploaded
            # earlier was encrypted with a different random key.
            self.failIf(bytes_sent < len(DATA),
                        "resumption saved us some work even though we were using random keys:"
                        " read %r bytes out of %r total" %
                        (bytes_sent, len(DATA)))
            n = self.clients[1].create_node_from_uri(cap)
            return download_to_data(n)
        d.addCallback(_uploaded)
        # NOTE(review): 'def _check(newdata):' header elided here
        self.failUnlessEqual(newdata, DATA)
        # If using convergent encryption, then also check that the
        # helper has removed the temp file from its directories.
        if convergence is not None:
            basedir = os.path.join(self.getdir("client0"), "helper")
            files = os.listdir(os.path.join(basedir, "CHK_encoding"))
            self.failUnlessEqual(files, [])
            files = os.listdir(os.path.join(basedir, "CHK_incoming"))
            self.failUnlessEqual(files, [])
        d.addCallback(_check)
    d.addCallback(_upload_resumable)
    def _grab_stats(ignored):
        # the StatsProvider doesn't normally publish a FURL:
        # instead it passes a live reference to the StatsGatherer
        # (if and when it connects). To exercise the remote stats
        # interface, we manually publish client0's StatsProvider
        # and use client1 to query it.
        sp = self.clients[0].stats_provider
        sp_furl = self.clients[0].tub.registerReference(sp)
        d = self.clients[1].tub.getReference(sp_furl)
        d.addCallback(lambda sp_rref: sp_rref.callRemote("get_stats"))
        def _got_stats(stats):
            #from pprint import pprint
            # NOTE(review): the binding of `s` (presumably from `stats`)
            # appears to be elided here
            self.failUnlessEqual(s["storage_server.accepting_immutable_shares"], 1)
            c = stats["counters"]
            self.failUnless("storage_server.allocate" in c)
        d.addCallback(_got_stats)
    d.addCallback(_grab_stats)
def _find_all_shares(self, basedir):
    """Walk `basedir` and collect (client_num, storage_index, filename,
    shnum) tuples for each share file found under .../storage/shares/.

    NOTE(review): several original lines are elided in this excerpt
    (the accumulator initialization, parts of the directory filter,
    the append, and the final return).
    """
    for (dirpath, dirnames, filenames) in os.walk(basedir):
        if "storage" not in dirpath:
            # NOTE(review): the loop-skip body is elided here
        pieces = dirpath.split(os.sep)
        # NOTE(review): the start of the 'if (...' condition is elided
        and pieces[-4] == "storage"
        and pieces[-3] == "shares"):
            # we're sitting in .../storage/shares/$START/$SINDEX , and there
            # are sharefiles here
            assert pieces[-5].startswith("client")
            client_num = int(pieces[-5][-1])
            storage_index_s = pieces[-1]
            storage_index = si_a2b(storage_index_s)
            for sharename in filenames:
                shnum = int(sharename)
                filename = os.path.join(dirpath, sharename)
                data = (client_num, storage_index, filename, shnum)
    # reached only when no share files were found
    self.fail("unable to find any share files in %s" % basedir)
def _corrupt_mutable_share(self, filename, which):
    """Flip bits in (or otherwise mangle) one field of the mutable share
    stored at `filename`. `which` names the field to corrupt.

    NOTE(review): some 'if/elif' headers and the trailing pack/write
    argument lines are elided in this excerpt.
    """
    msf = MutableShareFile(filename)
    datav = msf.readv([ (0, 1000000) ])
    final_share = datav[0]
    assert len(final_share) < 1000000 # ought to be truncated
    pieces = mutable_layout.unpack_share(final_share)
    (seqnum, root_hash, IV, k, N, segsize, datalen,
     verification_key, signature, share_hash_chain, block_hash_tree,
     share_data, enc_privkey) = pieces
    if which == "seqnum":
        # NOTE(review): the 'seqnum' mutation line is elided here
        root_hash = self.flip_bit(root_hash)
        IV = self.flip_bit(IV)
    elif which == "segsize":
        segsize = segsize + 15
    elif which == "pubkey":
        verification_key = self.flip_bit(verification_key)
    elif which == "signature":
        signature = self.flip_bit(signature)
    elif which == "share_hash_chain":
        nodenum = share_hash_chain.keys()[0]
        share_hash_chain[nodenum] = self.flip_bit(share_hash_chain[nodenum])
    elif which == "block_hash_tree":
        block_hash_tree[-1] = self.flip_bit(block_hash_tree[-1])
    elif which == "share_data":
        share_data = self.flip_bit(share_data)
    elif which == "encprivkey":
        enc_privkey = self.flip_bit(enc_privkey)
    # re-pack the (possibly corrupted) fields and write them back
    prefix = mutable_layout.pack_prefix(seqnum, root_hash, IV, k, N,
    # NOTE(review): remaining pack_prefix arguments elided here
    final_share = mutable_layout.pack_share(prefix,
    # NOTE(review): remaining pack_share arguments elided here
    msf.writev( [(0, final_share)], None)
def test_mutable(self):
    """End-to-end exercise of SDMF mutable files: create one, dump it with
    the CLI debugger, retrieve it from several clients, overwrite it,
    corrupt shares and re-download, handle empty files, and exercise
    dirnodes plus the key-generator pool.

    NOTE(review): this excerpt elides several original lines (inner
    'def' headers, 'if/elif' headers, continuation lines and 'return'
    statements); only the lines present in the excerpt appear below.
    """
    self.basedir = "system/SystemTest/test_mutable"
    DATA = "initial contents go here." # 25 bytes % 3 != 0
    DATA_uploadable = MutableData(DATA)
    NEWDATA = "new contents yay"
    NEWDATA_uploadable = MutableData(NEWDATA)
    NEWERDATA = "this is getting old"
    NEWERDATA_uploadable = MutableData(NEWERDATA)
    d = self.set_up_nodes(use_key_generator=True)
    def _create_mutable(res):
        # NOTE(review): the binding of `c` (presumably self.clients[0])
        # appears to be elided here
        log.msg("starting create_mutable_file")
        d1 = c.create_mutable_file(DATA_uploadable)
        # NOTE(review): 'def _done(res):' header elided here
        log.msg("DONE: %s" % (res,))
        self._mutable_node_1 = res
        d1.addCallback(_done)
    d.addCallback(_create_mutable)
    def _test_debug(res):
        # find a share. It is important to run this while there is only
        # one slot in the grid.
        shares = self._find_all_shares(self.basedir)
        (client_num, storage_index, filename, shnum) = shares[0]
        log.msg("test_system.SystemTest.test_mutable._test_debug using %s"
        # NOTE(review): the '% filename)' continuation is elided here
        log.msg(" for clients[%d]" % client_num)
        out,err = StringIO(), StringIO()
        rc = runner.runner(["debug", "dump-share", "--offsets",
        # NOTE(review): the filename argument line is elided here
                           stdout=out, stderr=err)
        output = out.getvalue()
        self.failUnlessEqual(rc, 0)
        # NOTE(review): the 'try:' header appears to be elided here
        self.failUnless("Mutable slot found:\n" in output)
        self.failUnless("share_type: SDMF\n" in output)
        peerid = idlib.nodeid_b2a(self.clients[client_num].nodeid)
        self.failUnless(" WE for nodeid: %s\n" % peerid in output)
        self.failUnless(" num_extra_leases: 0\n" in output)
        self.failUnless(" secrets are for nodeid: %s\n" % peerid
        # NOTE(review): the 'in output)' continuation is elided here
        self.failUnless(" SDMF contents:\n" in output)
        self.failUnless(" seqnum: 1\n" in output)
        self.failUnless(" required_shares: 3\n" in output)
        self.failUnless(" total_shares: 10\n" in output)
        self.failUnless(" segsize: 27\n" in output, (output, filename))
        self.failUnless(" datalen: 25\n" in output)
        # the exact share_hash_chain nodes depends upon the sharenum,
        # and is more of a hassle to compute than I want to deal with
        self.failUnless(" share_hash_chain: " in output)
        self.failUnless(" block_hash_tree: 1 nodes\n" in output)
        expected = (" verify-cap: URI:SSK-Verifier:%s:" %
                    base32.b2a(storage_index))
        self.failUnless(expected in output)
        except unittest.FailTest:
            print "dump-share output was:"
    d.addCallback(_test_debug)
    # first, let's see if we can use the existing node to retrieve the
    # contents. This allows it to use the cached pubkey and maybe the
    # latest-known sharemap.
    d.addCallback(lambda res: self._mutable_node_1.download_best_version())
    def _check_download_1(res):
        self.failUnlessEqual(res, DATA)
        # now we see if we can retrieve the data from a new node,
        # constructed using the URI of the original one. We do this test
        # on the same client that uploaded the data.
        uri = self._mutable_node_1.get_uri()
        log.msg("starting retrieve1")
        newnode = self.clients[0].create_node_from_uri(uri)
        newnode_2 = self.clients[0].create_node_from_uri(uri)
        self.failUnlessIdentical(newnode, newnode_2)
        return newnode.download_best_version()
    d.addCallback(_check_download_1)
    def _check_download_2(res):
        self.failUnlessEqual(res, DATA)
        # same thing, but with a different client
        uri = self._mutable_node_1.get_uri()
        newnode = self.clients[1].create_node_from_uri(uri)
        log.msg("starting retrieve2")
        d1 = newnode.download_best_version()
        d1.addCallback(lambda res: (res, newnode))
    d.addCallback(_check_download_2)
    def _check_download_3((res, newnode)):
        self.failUnlessEqual(res, DATA)
        log.msg("starting replace1")
        d1 = newnode.overwrite(NEWDATA_uploadable)
        d1.addCallback(lambda res: newnode.download_best_version())
    d.addCallback(_check_download_3)
    def _check_download_4(res):
        self.failUnlessEqual(res, NEWDATA)
        # now create an even newer node and replace the data on it. This
        # new node has never been used for download before.
        uri = self._mutable_node_1.get_uri()
        newnode1 = self.clients[2].create_node_from_uri(uri)
        newnode2 = self.clients[3].create_node_from_uri(uri)
        self._newnode3 = self.clients[3].create_node_from_uri(uri)
        log.msg("starting replace2")
        d1 = newnode1.overwrite(NEWERDATA_uploadable)
        d1.addCallback(lambda res: newnode2.download_best_version())
    d.addCallback(_check_download_4)
    def _check_download_5(res):
        log.msg("finished replace2")
        self.failUnlessEqual(res, NEWERDATA)
    d.addCallback(_check_download_5)
    def _corrupt_shares(res):
        # run around and flip bits in all but k of the shares, to test
        shares = self._find_all_shares(self.basedir)
        ## sort by share number
        #shares.sort( lambda a,b: cmp(a[3], b[3]) )
        where = dict([ (shnum, filename)
                       for (client_num, storage_index, filename, shnum)
        # NOTE(review): the 'in shares ])' continuation is elided here
        assert len(where) == 10 # this test is designed for 3-of-10
        for shnum, filename in where.items():
            # shares 7,8,9 are left alone. read will check
            # (share_hash_chain, block_hash_tree, share_data). New
            # seqnum+R pairs will trigger a check of (seqnum, R, IV,
            # segsize, signature).
            # NOTE(review): the per-shnum 'if/elif' headers are elided
            # throughout the ladder below
            # read: this will trigger "pubkey doesn't match
            self._corrupt_mutable_share(filename, "pubkey")
            self._corrupt_mutable_share(filename, "encprivkey")
            # triggers "signature is invalid"
            self._corrupt_mutable_share(filename, "seqnum")
            # triggers "signature is invalid"
            self._corrupt_mutable_share(filename, "R")
            # triggers "signature is invalid"
            self._corrupt_mutable_share(filename, "segsize")
            self._corrupt_mutable_share(filename, "share_hash_chain")
            self._corrupt_mutable_share(filename, "block_hash_tree")
            self._corrupt_mutable_share(filename, "share_data")
            # other things to correct: IV, signature
            # 7,8,9 are left alone
        # note that initial_query_count=5 means that we'll hit the
        # first 5 servers in effectively random order (based upon
        # response time), so we won't necessarily ever get a "pubkey
        # doesn't match fingerprint" error (if we hit shnum>=1 before
        # shnum=0, we pull the pubkey from there). To get repeatable
        # specific failures, we need to set initial_query_count=1,
        # but of course that will change the sequencing behavior of
        # the retrieval process. TODO: find a reasonable way to make
        # this a parameter, probably when we expand this test to test
        # for one failure mode at a time.
        # when we retrieve this, we should get three signature
        # failures (where we've mangled seqnum, R, and segsize). The
    d.addCallback(_corrupt_shares)
    d.addCallback(lambda res: self._newnode3.download_best_version())
    d.addCallback(_check_download_5)
    def _check_empty_file(res):
        # make sure we can create empty files, this usually screws up the
        d1 = self.clients[2].create_mutable_file(MutableData(""))
        d1.addCallback(lambda newnode: newnode.download_best_version())
        d1.addCallback(lambda res: self.failUnlessEqual("", res))
    d.addCallback(_check_empty_file)
    d.addCallback(lambda res: self.clients[0].create_dirnode())
    def _created_dirnode(dnode):
        log.msg("_created_dirnode(%s)" % (dnode,))
        # NOTE(review): the binding of `d1` (presumably dnode.list())
        # appears to be elided here
        d1.addCallback(lambda children: self.failUnlessEqual(children, {}))
        d1.addCallback(lambda res: dnode.has_child(u"edgar"))
        d1.addCallback(lambda answer: self.failUnlessEqual(answer, False))
        d1.addCallback(lambda res: dnode.set_node(u"see recursive", dnode))
        d1.addCallback(lambda res: dnode.has_child(u"see recursive"))
        d1.addCallback(lambda answer: self.failUnlessEqual(answer, True))
        d1.addCallback(lambda res: dnode.build_manifest().when_done())
        d1.addCallback(lambda res:
                       self.failUnlessEqual(len(res["manifest"]), 1))
    d.addCallback(_created_dirnode)
    def wait_for_c3_kg_conn():
        return self.clients[3]._key_generator is not None
    d.addCallback(lambda junk: self.poll(wait_for_c3_kg_conn))
    def check_kg_poolsize(junk, size_delta):
        self.failUnlessEqual(len(self.key_generator_svc.key_generator.keypool),
                             self.key_generator_svc.key_generator.pool_size + size_delta)
    d.addCallback(check_kg_poolsize, 0)
    d.addCallback(lambda junk:
                  self.clients[3].create_mutable_file(MutableData('hello, world')))
    d.addCallback(check_kg_poolsize, -1)
    d.addCallback(lambda junk: self.clients[3].create_dirnode())
    d.addCallback(check_kg_poolsize, -2)
    # use_helper induces use of clients[3], which is the using-key_gen client
    d.addCallback(lambda junk:
                  self.POST("uri?t=mkdir&name=george", use_helper=True))
    d.addCallback(check_kg_poolsize, -3)
def flip_bit(self, good):
    """Return `good` with the lowest bit of its final byte inverted."""
    prefix, last = good[:-1], good[-1]
    flipped = chr(ord(last) ^ 0x01)
    return prefix + flipped
def mangle_uri(self, gooduri):
    """Return a CHK URI string whose encryption key has one bit flipped,
    yielding a valid-looking URI that refers to a nonexistent file.

    NOTE(review): one original continuation line (presumably the
    `size=` argument and closing paren) is elided in this excerpt.
    """
    # change the key, which changes the storage index, which means we'll
    # be asking about the wrong file, so nobody will have any shares
    u = uri.from_string(gooduri)
    u2 = uri.CHKFileURI(key=self.flip_bit(u.key),
                        uri_extension_hash=u.uri_extension_hash,
                        needed_shares=u.needed_shares,
                        total_shares=u.total_shares,
    return u2.to_string()
# TODO: add a test which mangles the uri_extension_hash instead, and
# should fail due to not being able to get a valid uri_extension block.
# Also a test which sneakily mangles the uri_extension block to change
# some of the validation data, so it will fail in the post-download phase
# when the file's crypttext integrity check fails. Do the same thing for
# the key, which should cause the download to fail the post-download
# plaintext_hash check.
def test_filesystem(self):
    """Build a small directory tree on the grid and run the web, control,
    CLI and checker sub-tests against it (each delegated to a helper
    method on this class).

    NOTE(review): the trailing 'return d' appears to be elided in this
    excerpt.
    """
    self.basedir = "system/SystemTest/test_filesystem"
    self.data = LARGE_DATA
    d = self.set_up_nodes(use_stats_gatherer=True)
    def _new_happy_semantics(ign):
        for c in self.clients:
            c.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
    d.addCallback(_new_happy_semantics)
    d.addCallback(self._test_introweb)
    d.addCallback(self.log, "starting publish")
    d.addCallback(self._do_publish1)
    d.addCallback(self._test_runner)
    d.addCallback(self._do_publish2)
    # at this point, we have the following filesystem (where "R" denotes
    # self._root_directory_uri):
    # R/subdir1/mydata567
    # R/subdir1/subdir2/mydata992
    d.addCallback(lambda res: self.bounce_client(0))
    d.addCallback(self.log, "bounced client0")
    d.addCallback(self._check_publish1)
    d.addCallback(self.log, "did _check_publish1")
    d.addCallback(self._check_publish2)
    d.addCallback(self.log, "did _check_publish2")
    d.addCallback(self._do_publish_private)
    d.addCallback(self.log, "did _do_publish_private")
    # now we also have (where "P" denotes a new dir):
    # P/personal/sekrit data
    # P/s2-rw -> /subdir1/subdir2/
    # P/s2-ro -> /subdir1/subdir2/ (read-only)
    d.addCallback(self._check_publish_private)
    d.addCallback(self.log, "did _check_publish_private")
    d.addCallback(self._test_web)
    d.addCallback(self._test_control)
    d.addCallback(self._test_cli)
    # P now has four top-level children:
    # P/personal/sekrit data
    # P/test_put/ (empty)
    d.addCallback(self._test_checker)
def _test_introweb(self, res):
    """Fetch the introducer's web page (and its ?t=json form) and check
    version/announcement/subscription contents.

    NOTE(review): several original lines are elided in this excerpt
    (the '_check' def/try headers, 'if/else' branch headers and the
    trailing 'return d').
    """
    d = getPage(self.introweb_url, method="GET", followRedirect=True)
    # NOTE(review): 'def _check(res):' and 'try:' headers elided here
    self.failUnless("%s: %s" % (allmydata.__appname__, allmydata.__version__) in res)
    verstr = str(allmydata.__version__)
    # The Python "rational version numbering" convention
    # disallows "-r$REV" but allows ".post$REV"
    # instead. Eventually we'll probably move to
    # that. When we do, this test won't go red:
    ix = verstr.rfind('-r')
    altverstr = verstr[:ix] + '.post' + verstr[ix+2:]
    ix = verstr.rfind('.post')
    altverstr = verstr[:ix] + '-r' + verstr[ix+5:]
    appverstr = "%s: %s" % (allmydata.__appname__, verstr)
    newappverstr = "%s: %s" % (allmydata.__appname__, altverstr)
    self.failUnless((appverstr in res) or (newappverstr in res), (appverstr, newappverstr, res))
    self.failUnless("Announcement Summary: storage: 5" in res)
    self.failUnless("Subscription Summary: storage: 5" in res)
    self.failUnless("tahoe.css" in res)
    except unittest.FailTest:
        print "GET %s output was:" % self.introweb_url
    d.addCallback(_check)
    # make sure it serves the CSS too
    d.addCallback(lambda res:
                  getPage(self.introweb_url+"tahoe.css", method="GET"))
    d.addCallback(lambda res:
                  getPage(self.introweb_url + "?t=json",
                          method="GET", followRedirect=True))
    def _check_json(res):
        data = simplejson.loads(res)
        # NOTE(review): the 'try:' header and the expected-value
        # continuation lines are elided below
        self.failUnlessEqual(data["subscription_summary"],
        self.failUnlessEqual(data["announcement_summary"],
        self.failUnlessEqual(data["announcement_distinct_hosts"],
        except unittest.FailTest:
            print "GET %s?t=json output was:" % self.introweb_url
    d.addCallback(_check_json)
def _do_publish1(self, res):
    """Create the root directory R, a subdir1 beneath it, and publish
    self.data as R/subdir1/mydata567, stashing its URI on self.uri.

    NOTE(review): the `c0` binding and two 'return' statements appear
    to be elided in this excerpt.
    """
    ut = upload.Data(self.data, convergence=None)
    d = c0.create_dirnode()
    def _made_root(new_dirnode):
        self._root_directory_uri = new_dirnode.get_uri()
        return c0.create_node_from_uri(self._root_directory_uri)
    d.addCallback(_made_root)
    d.addCallback(lambda root: root.create_subdirectory(u"subdir1"))
    def _made_subdir1(subdir1_node):
        self._subdir1_node = subdir1_node
        d1 = subdir1_node.add_file(u"mydata567", ut)
        d1.addCallback(self.log, "publish finished")
        def _stash_uri(filenode):
            self.uri = filenode.get_uri()
            assert isinstance(self.uri, str), (self.uri, filenode)
        d1.addCallback(_stash_uri)
    d.addCallback(_made_subdir1)
def _do_publish2(self, res):
    """Publish self.data again as R/subdir1/subdir2/mydata992.

    NOTE(review): the trailing 'return d' appears to be elided in this
    excerpt.
    """
    ut = upload.Data(self.data, convergence=None)
    d = self._subdir1_node.create_subdirectory(u"subdir2")
    d.addCallback(lambda subdir2: subdir2.add_file(u"mydata992", ut))
def log(self, res, *args, **kwargs):
    """Log a message via allmydata.util.log and pass `res` through
    unchanged, so this method can be inserted into a Deferred callback
    chain (as `d.addCallback(self.log, "message")` is used throughout
    this class) without disturbing the chained value."""
    # print "MSG: %s RES: %s" % (msg, args)
    log.msg(*args, **kwargs)
    # pass the callback value through so the Deferred chain is not broken
    return res
def _do_publish_private(self, res):
    """Create a new private directory P with P/personal/sekrit data and
    two links (s2-rw, s2-ro) to /subdir1/subdir2.

    NOTE(review): the '_got_s2' def header and several 'return'
    statements appear to be elided in this excerpt.
    """
    self.smalldata = "sssh, very secret stuff"
    ut = upload.Data(self.smalldata, convergence=None)
    d = self.clients[0].create_dirnode()
    d.addCallback(self.log, "GOT private directory")
    def _got_new_dir(privnode):
        rootnode = self.clients[0].create_node_from_uri(self._root_directory_uri)
        d1 = privnode.create_subdirectory(u"personal")
        d1.addCallback(self.log, "made P/personal")
        d1.addCallback(lambda node: node.add_file(u"sekrit data", ut))
        d1.addCallback(self.log, "made P/personal/sekrit data")
        d1.addCallback(lambda res: rootnode.get_child_at_path([u"subdir1", u"subdir2"]))
        # NOTE(review): 'def _got_s2(s2node):' header elided here
        d2 = privnode.set_uri(u"s2-rw", s2node.get_uri(),
                              s2node.get_readonly_uri())
        d2.addCallback(lambda node:
                       privnode.set_uri(u"s2-ro",
                                        s2node.get_readonly_uri(),
                                        s2node.get_readonly_uri()))
        d1.addCallback(_got_s2)
        d1.addCallback(lambda res: privnode)
    d.addCallback(_got_new_dir)
def _check_publish1(self, res):
    """Verify R/subdir1/mydata567 via the iterative (node.get) API.

    NOTE(review): the `c1` binding, the '_get_done' def header and the
    trailing 'return d' appear to be elided in this excerpt.
    """
    # this one uses the iterative API
    d = defer.succeed(c1.create_node_from_uri(self._root_directory_uri))
    d.addCallback(self.log, "check_publish1 got /")
    d.addCallback(lambda root: root.get(u"subdir1"))
    d.addCallback(lambda subdir1: subdir1.get(u"mydata567"))
    d.addCallback(lambda filenode: download_to_data(filenode))
    d.addCallback(self.log, "get finished")
    self.failUnlessEqual(data, self.data)
    d.addCallback(_get_done)
def _check_publish2(self, res):
    """Verify R/subdir1/mydata567 via the path-based
    (get_child_at_path) API.

    NOTE(review): the trailing 'return d' appears to be elided in this
    excerpt.
    """
    # this one uses the path-based API
    rootnode = self.clients[1].create_node_from_uri(self._root_directory_uri)
    d = rootnode.get_child_at_path(u"subdir1")
    d.addCallback(lambda dirnode:
                  self.failUnless(IDirectoryNode.providedBy(dirnode)))
    d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
    d.addCallback(lambda filenode: download_to_data(filenode))
    d.addCallback(lambda data: self.failUnlessEqual(data, self.data))
    d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
    def _got_filenode(filenode):
        fnode = self.clients[1].create_node_from_uri(filenode.get_uri())
        assert fnode == filenode
    d.addCallback(_got_filenode)
914 def _check_publish_private(self, resnode):
915 # this one uses the path-based API
# Given the private dirnode produced by _do_publish_private, verify:
#  - P/personal/"sekrit data" downloads correctly,
#  - P/s2-rw is mutable/writeable while P/s2-ro is mutable/read-only and
#    rejects every mutating operation (mkdir/add_file/delete/set_uri/mv),
#  - move_child_to works between P and P/personal, and
#  - a deep manifest/stats traversal reports the expected counts.
# NOTE(review): several lines are elided from this extract, including
# the 'def get_path(path):' header (whose body appears at line 927),
# parts of _got_personal, and the 'return d' tail.
916 self._private_node = resnode
918 d = self._private_node.get_child_at_path(u"personal")
919 def _got_personal(personal):
920 self._personal_node = personal
922 d.addCallback(_got_personal)
924 d.addCallback(lambda dirnode:
925 self.failUnless(IDirectoryNode.providedBy(dirnode), dirnode))
927 return self._private_node.get_child_at_path(path)
929 d.addCallback(lambda res: get_path(u"personal/sekrit data"))
930 d.addCallback(lambda filenode: download_to_data(filenode))
931 d.addCallback(lambda data: self.failUnlessEqual(data, self.smalldata))
932 d.addCallback(lambda res: get_path(u"s2-rw"))
933 d.addCallback(lambda dirnode: self.failUnless(dirnode.is_mutable()))
934 d.addCallback(lambda res: get_path(u"s2-ro"))
935 def _got_s2ro(dirnode):
# The read-only link must still be mutable (it is a dircap) but marked
# readonly; each mutating call below must raise NotWriteableError.
936 self.failUnless(dirnode.is_mutable(), dirnode)
937 self.failUnless(dirnode.is_readonly(), dirnode)
938 d1 = defer.succeed(None)
939 d1.addCallback(lambda res: dirnode.list())
940 d1.addCallback(self.log, "dirnode.list")
942 d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mkdir(nope)", None, dirnode.create_subdirectory, u"nope"))
944 d1.addCallback(self.log, "doing add_file(ro)")
945 ut = upload.Data("I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence="99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)")
946 d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut))
948 d1.addCallback(self.log, "doing get(ro)")
949 d1.addCallback(lambda res: dirnode.get(u"mydata992"))
950 d1.addCallback(lambda filenode:
951 self.failUnless(IFileNode.providedBy(filenode)))
953 d1.addCallback(self.log, "doing delete(ro)")
954 d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "delete(nope)", None, dirnode.delete, u"mydata992"))
956 d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "set_uri(nope)", None, dirnode.set_uri, u"hopeless", self.uri, self.uri))
958 d1.addCallback(lambda res: self.shouldFail2(NoSuchChildError, "get(missing)", "missing", dirnode.get, u"missing"))
960 personal = self._personal_node
961 d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv from readonly", None, dirnode.move_child_to, u"mydata992", personal, u"nope"))
963 d1.addCallback(self.log, "doing move_child_to(ro)2")
964 d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv to readonly", None, personal.move_child_to, u"sekrit data", dirnode, u"nope"))
966 d1.addCallback(self.log, "finished with _got_s2ro")
968 d.addCallback(_got_s2ro)
969 def _got_home(dummy):
# Exercise move_child_to in both directions between P and P/personal,
# then run deep-manifest and deep-stats over the resulting tree.
970 home = self._private_node
971 personal = self._personal_node
972 d1 = defer.succeed(None)
973 d1.addCallback(self.log, "mv 'P/personal/sekrit data' to P/sekrit")
974 d1.addCallback(lambda res:
975 personal.move_child_to(u"sekrit data",home,u"sekrit"))
977 d1.addCallback(self.log, "mv P/sekrit 'P/sekrit data'")
978 d1.addCallback(lambda res:
979 home.move_child_to(u"sekrit", home, u"sekrit data"))
981 d1.addCallback(self.log, "mv 'P/sekret data' P/personal/")
982 d1.addCallback(lambda res:
983 home.move_child_to(u"sekrit data", personal))
985 d1.addCallback(lambda res: home.build_manifest().when_done())
986 d1.addCallback(self.log, "manifest")
990 # P/personal/sekrit data
991 # P/s2-rw (same as P/s2-ro)
992 # P/s2-rw/mydata992 (same as P/s2-rw/mydata992)
993 d1.addCallback(lambda res:
994 self.failUnlessEqual(len(res["manifest"]), 5))
995 d1.addCallback(lambda res: home.start_deep_stats().when_done())
996 def _check_stats(stats):
# Exact counts are asserted; directory sizes vary between runs, so
# those are only bounded (see the commented-out entries below).
997 expected = {"count-immutable-files": 1,
998 "count-mutable-files": 0,
999 "count-literal-files": 1,
1001 "count-directories": 3,
1002 "size-immutable-files": 112,
1003 "size-literal-files": 23,
1004 #"size-directories": 616, # varies
1005 #"largest-directory": 616,
1006 "largest-directory-children": 3,
1007 "largest-immutable-file": 112,
1009 for k,v in expected.iteritems():
1010 self.failUnlessEqual(stats[k], v,
1011 "stats[%s] was %s, not %s" %
1013 self.failUnless(stats["size-directories"] > 1300,
1014 stats["size-directories"])
1015 self.failUnless(stats["largest-directory"] > 800,
1016 stats["largest-directory"])
1017 self.failUnlessEqual(stats["size-files-histogram"],
1018 [ (11, 31, 1), (101, 316, 1) ])
1019 d1.addCallback(_check_stats)
1021 d.addCallback(_got_home)
1024 def shouldFail(self, res, expected_failure, which, substring=None):
# addBoth-style checker: 'res' must be a Failure wrapping
# expected_failure (optionally containing 'substring' in its str());
# anything else fails the test with a message naming 'which'.
# NOTE(review): the 'if substring:' guard and the 'else:' branch lines
# appear elided from this extract.
1025 if isinstance(res, Failure):
1026 res.trap(expected_failure)
1028 self.failUnless(substring in str(res),
1029 "substring '%s' not in '%s'"
1030 % (substring, str(res)))
1032 self.fail("%s was supposed to raise %s, not get '%s'" %
1033 (which, expected_failure, res))
1035 def shouldFail2(self, expected_failure, which, substring, callable, *args, **kwargs):
# Invoke 'callable(*args, **kwargs)' via maybeDeferred and require that
# it fails with expected_failure (optionally containing 'substring').
# NOTE(review): the inner 'def _done(res):' header, its 'else:' branch,
# the 'd.addBoth(_done)' line, and the 'return d' tail appear elided
# from this extract.
1036 assert substring is None or isinstance(substring, str)
1037 d = defer.maybeDeferred(callable, *args, **kwargs)
1039 if isinstance(res, Failure):
1040 res.trap(expected_failure)
1042 self.failUnless(substring in str(res),
1043 "substring '%s' not in '%s'"
1044 % (substring, str(res)))
1046 self.fail("%s was supposed to raise %s, not get '%s'" %
1047 (which, expected_failure, res))
def PUT(self, urlpath, data):
    """Issue an HTTP PUT of 'data' to the node's web API.

    'urlpath' is appended to self.webish_url; returns the Deferred from
    twisted.web.client.getPage, firing with the response body.
    """
    return getPage(self.webish_url + urlpath, method="PUT", postdata=data)
def GET(self, urlpath, followRedirect=False):
    """Issue an HTTP GET against the node's web API.

    'urlpath' is appended to self.webish_url; 'followRedirect' is passed
    straight through to twisted.web.client.getPage. Returns a Deferred
    firing with the response body.
    """
    return getPage(self.webish_url + urlpath, method="GET",
                   followRedirect=followRedirect)
1059 def POST(self, urlpath, followRedirect=False, use_helper=False, **fields):
# Build a multipart/form-data body from '**fields' (a (filename, value)
# tuple becomes a file part; anything else a plain field) and hand it to
# POST2. NOTE(review): the 'form'/'headers' initialization lines, the
# per-field separator appends, the 'else:' branch header, and the
# closing "--sep--" lines appear elided from this extract.
1060 sepbase = "boogabooga"
1061 sep = "--" + sepbase
1064 form.append('Content-Disposition: form-data; name="_charset"')
1066 form.append('UTF-8')
1068 for name, value in fields.iteritems():
1069 if isinstance(value, tuple):
1070 filename, value = value
1071 form.append('Content-Disposition: form-data; name="%s"; '
1072 'filename="%s"' % (name, filename.encode("utf-8")))
1074 form.append('Content-Disposition: form-data; name="%s"' % name)
1076 form.append(str(value))
1082 body = "\r\n".join(form) + "\r\n"
1083 headers["content-type"] = "multipart/form-data; boundary=%s" % sepbase
1084 return self.POST2(urlpath, body, headers, followRedirect, use_helper)
1086 def POST2(self, urlpath, body="", headers={}, followRedirect=False,
# POST 'body' with 'headers' to either the helper-using node's web API
# or the primary one, selected by the (elided) use_helper parameter.
# NOTE(review): 'headers={}' is a mutable default argument — POST above
# mutates it, so entries can leak between calls; consider headers=None.
# The signature continuation, 'if use_helper:' and 'else:' lines appear
# elided from this extract.
1089 url = self.helper_webish_url + urlpath
1091 url = self.webish_url + urlpath
1092 return getPage(url, method="POST", postdata=body, headers=headers,
1093 followRedirect=followRedirect)
1095 def _test_web(self, res):
# End-to-end exercise of the node's web API: welcome pages (direct and
# via the helper-using node), directory listings, downloads by path and
# by URI (including a mangled/bogus URI), PUT uploads (small and 1.5MB
# multi-segment), unlinked POST uploads (with and without the helper),
# the various status pages (up/down/mapupdate/publish/retrieve), helper
# status (HTML and JSON), and the statistics pages.
# NOTE(review): this extract is elided in many places — the initial
# 'd = getPage(base)' setup, several inner 'def' headers (_got_up,
# _got_down), 'f.close()' / 'now = time.time()' lines, expected-value
# continuation lines in the JSON assertions, and the 'return d' tail
# are not visible; confirm against the full file before editing.
1096 base = self.webish_url
1097 public = "uri/" + self._root_directory_uri
1099 def _got_welcome(page):
1100 html = page.replace('\n', ' ')
1101 connected_re = r'Connected to <span>%d</span>\s*of <span>%d</span> known storage servers' % (self.numclients, self.numclients)
1102 self.failUnless(re.search(connected_re, html),
1103 "I didn't see the right '%s' message in:\n%s" % (connected_re, page))
1104 # nodeids/tubids don't have any regexp-special characters
1105 nodeid_re = r'<th>Node ID:</th>\s*<td title="TubID: %s">%s</td>' % (
1106 self.clients[0].get_long_tubid(), self.clients[0].get_long_nodeid())
1107 self.failUnless(re.search(nodeid_re, html),
1108 "I didn't see the right '%s' message in:\n%s" % (nodeid_re, page))
1109 self.failUnless("Helper: 0 active uploads" in page)
1110 d.addCallback(_got_welcome)
1111 d.addCallback(self.log, "done with _got_welcome")
1113 # get the welcome page from the node that uses the helper too
1114 d.addCallback(lambda res: getPage(self.helper_webish_url))
1115 def _got_welcome_helper(page):
1116 html = page.replace('\n', ' ')
1117 self.failUnless(re.search(r'<div class="status-indicator connected-yes"></div>\s*<div>Helper</div>', html), page)
1118 self.failUnlessIn("Not running helper", page)
1119 d.addCallback(_got_welcome_helper)
1121 d.addCallback(lambda res: getPage(base + public))
1122 d.addCallback(lambda res: getPage(base + public + "/subdir1"))
1123 def _got_subdir1(page):
1124 # there ought to be an href for our file
1125 self.failUnlessIn('<td align="right">%d</td>' % len(self.data), page)
1126 self.failUnless(">mydata567</a>" in page)
1127 d.addCallback(_got_subdir1)
1128 d.addCallback(self.log, "done with _got_subdir1")
1129 d.addCallback(lambda res:
1130 getPage(base + public + "/subdir1/mydata567"))
1131 def _got_data(page):
1132 self.failUnlessEqual(page, self.data)
1133 d.addCallback(_got_data)
1135 # download from a URI embedded in a URL
1136 d.addCallback(self.log, "_get_from_uri")
1137 def _get_from_uri(res):
1138 return getPage(base + "uri/%s?filename=%s"
1139 % (self.uri, "mydata567"))
1140 d.addCallback(_get_from_uri)
1141 def _got_from_uri(page):
1142 self.failUnlessEqual(page, self.data)
1143 d.addCallback(_got_from_uri)
1145 # download from a URI embedded in a URL, second form
1146 d.addCallback(self.log, "_get_from_uri2")
1147 def _get_from_uri2(res):
1148 return getPage(base + "uri?uri=%s" % (self.uri,))
1149 d.addCallback(_get_from_uri2)
1150 d.addCallback(_got_from_uri)
1152 # download from a bogus URI, make sure we get a reasonable error
1153 d.addCallback(self.log, "_get_from_bogus_uri", level=log.UNUSUAL)
1154 def _get_from_bogus_uri(res):
1155 d1 = getPage(base + "uri/%s?filename=%s"
1156 % (self.mangle_uri(self.uri), "mydata567"))
1157 d1.addBoth(self.shouldFail, Error, "downloading bogus URI",
1160 d.addCallback(_get_from_bogus_uri)
1161 d.addCallback(self.log, "_got_from_bogus_uri", level=log.UNUSUAL)
1163 # upload a file with PUT
1164 d.addCallback(self.log, "about to try PUT")
1165 d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt",
1166 "new.txt contents"))
1167 d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt"))
1168 d.addCallback(self.failUnlessEqual, "new.txt contents")
1169 # and again with something large enough to use multiple segments,
1170 # and hopefully trigger pauseProducing too
1171 def _new_happy_semantics(ign):
1172 for c in self.clients:
1173 # these get reset somewhere? Whatever.
1174 c.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
1175 d.addCallback(_new_happy_semantics)
1176 d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt",
1177 "big" * 500000)) # 1.5MB
1178 d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt"))
1179 d.addCallback(lambda res: self.failUnlessEqual(len(res), 1500000))
1181 # can we replace files in place?
1182 d.addCallback(lambda res: self.PUT(public + "/subdir3/new.txt",
1184 d.addCallback(lambda res: self.GET(public + "/subdir3/new.txt"))
1185 d.addCallback(self.failUnlessEqual, "NEWER contents")
1187 # test unlinked POST
1188 d.addCallback(lambda res: self.POST("uri", t="upload",
1189 file=("new.txt", "data" * 10000)))
1190 # and again using the helper, which exercises different upload-status
1192 d.addCallback(lambda res: self.POST("uri", use_helper=True, t="upload",
1193 file=("foo.txt", "data2" * 10000)))
1195 # check that the status page exists
1196 d.addCallback(lambda res: self.GET("status", followRedirect=True))
1197 def _got_status(res):
1198 # find an interesting upload and download to look at. LIT files
1199 # are not interesting.
1200 h = self.clients[0].get_history()
1201 for ds in h.list_all_download_statuses():
1202 if ds.get_size() > 200:
1203 self._down_status = ds.get_counter()
1204 for us in h.list_all_upload_statuses():
1205 if us.get_size() > 200:
1206 self._up_status = us.get_counter()
1207 rs = list(h.list_all_retrieve_statuses())[0]
1208 self._retrieve_status = rs.get_counter()
1209 ps = list(h.list_all_publish_statuses())[0]
1210 self._publish_status = ps.get_counter()
1211 us = list(h.list_all_mapupdate_statuses())[0]
1212 self._update_status = us.get_counter()
1214 # and that there are some upload- and download- status pages
1215 return self.GET("status/up-%d" % self._up_status)
1216 d.addCallback(_got_status)
1218 return self.GET("status/down-%d" % self._down_status)
1219 d.addCallback(_got_up)
1221 return self.GET("status/mapupdate-%d" % self._update_status)
1222 d.addCallback(_got_down)
1223 def _got_update(res):
1224 return self.GET("status/publish-%d" % self._publish_status)
1225 d.addCallback(_got_update)
1226 def _got_publish(res):
1227 self.failUnlessIn("Publish Results", res)
1228 return self.GET("status/retrieve-%d" % self._retrieve_status)
1229 d.addCallback(_got_publish)
1230 def _got_retrieve(res):
1231 self.failUnlessIn("Retrieve Results", res)
1232 d.addCallback(_got_retrieve)
1234 # check that the helper status page exists
1235 d.addCallback(lambda res:
1236 self.GET("helper_status", followRedirect=True))
1237 def _got_helper_status(res):
1238 self.failUnless("Bytes Fetched:" in res)
1239 # touch a couple of files in the helper's working directory to
1240 # exercise more code paths
1241 workdir = os.path.join(self.getdir("client0"), "helper")
1242 incfile = os.path.join(workdir, "CHK_incoming", "spurious")
1243 f = open(incfile, "wb")
1244 f.write("small file")
1246 then = time.time() - 86400*3
1248 os.utime(incfile, (now, then))
1249 encfile = os.path.join(workdir, "CHK_encoding", "spurious")
1250 f = open(encfile, "wb")
1251 f.write("less small file")
1253 os.utime(encfile, (now, then))
1254 d.addCallback(_got_helper_status)
1255 # and that the json form exists
1256 d.addCallback(lambda res:
1257 self.GET("helper_status?t=json", followRedirect=True))
1258 def _got_helper_status_json(res):
1259 data = simplejson.loads(res)
1260 self.failUnlessEqual(data["chk_upload_helper.upload_need_upload"],
1262 self.failUnlessEqual(data["chk_upload_helper.incoming_count"], 1)
1263 self.failUnlessEqual(data["chk_upload_helper.incoming_size"], 10)
1264 self.failUnlessEqual(data["chk_upload_helper.incoming_size_old"],
1266 self.failUnlessEqual(data["chk_upload_helper.encoding_count"], 1)
1267 self.failUnlessEqual(data["chk_upload_helper.encoding_size"], 15)
1268 self.failUnlessEqual(data["chk_upload_helper.encoding_size_old"],
1270 d.addCallback(_got_helper_status_json)
1272 # and check that client[3] (which uses a helper but does not run one
1273 # itself) doesn't explode when you ask for its status
1274 d.addCallback(lambda res: getPage(self.helper_webish_url + "status/"))
1275 def _got_non_helper_status(res):
1276 self.failUnlessIn("Recent and Active Operations", res)
1277 d.addCallback(_got_non_helper_status)
1279 # or for helper status with t=json
1280 d.addCallback(lambda res:
1281 getPage(self.helper_webish_url + "helper_status?t=json"))
1282 def _got_non_helper_status_json(res):
1283 data = simplejson.loads(res)
1284 self.failUnlessEqual(data, {})
1285 d.addCallback(_got_non_helper_status_json)
1287 # see if the statistics page exists
1288 d.addCallback(lambda res: self.GET("statistics"))
1289 def _got_stats(res):
1290 self.failUnlessIn("Operational Statistics", res)
1291 self.failUnlessIn(" 'downloader.files_downloaded': 5,", res)
1292 d.addCallback(_got_stats)
1293 d.addCallback(lambda res: self.GET("statistics?t=json"))
1294 def _got_stats_json(res):
1295 data = simplejson.loads(res)
1296 self.failUnlessEqual(data["counters"]["uploader.files_uploaded"], 5)
1297 self.failUnlessEqual(data["stats"]["chk_upload_helper.upload_need_upload"], 1)
1298 d.addCallback(_got_stats_json)
1300 # TODO: mangle the second segment of a file, to test errors that
1301 # occur after we've already sent some good data, which uses a
1302 # different error path.
1304 # TODO: download a URI with a form
1305 # TODO: create a directory by using a form
1306 # TODO: upload by using a form on the directory page
1307 # url = base + "somedir/subdir1/freeform_post!!upload"
1308 # TODO: delete a file by using a button on the directory page
1312 def _test_runner(self, res):
# Exercise the CLI debug tools against the shares written by earlier
# steps: locate one CHK sharefile on disk, run 'debug dump-share' on it
# and assert its fields, then use 'debug find-shares' and
# 'debug catalog-shares' across all client node directories.
# NOTE(review): several lines are elided from this extract — the
# 'filename = None' initializer, 'continue'/'break' loop control, the
# fail-message continuation, and blank separators; confirm against the
# full file before editing.
1313 # exercise some of the diagnostic tools in runner.py
1316 for (dirpath, dirnames, filenames) in os.walk(unicode(self.basedir)):
1317 if "storage" not in dirpath:
1321 pieces = dirpath.split(os.sep)
1322 if (len(pieces) >= 4
1323 and pieces[-4] == "storage"
1324 and pieces[-3] == "shares"):
1325 # we're sitting in .../storage/shares/$START/$SINDEX , and there
1326 # are sharefiles here
1327 filename = os.path.join(dirpath, filenames[0])
1328 # peek at the magic to see if it is a chk share
1329 magic = open(filename, "rb").read(4)
1330 if magic == '\x00\x00\x00\x01':
1333 self.fail("unable to find any uri_extension files in %r"
1335 log.msg("test_system.SystemTest._test_runner using %r" % filename)
1337 out,err = StringIO(), StringIO()
1338 rc = runner.runner(["debug", "dump-share", "--offsets",
1339 unicode_to_argv(filename)],
1340 stdout=out, stderr=err)
1341 output = out.getvalue()
1342 self.failUnlessEqual(rc, 0)
1344 # we only upload a single file, so we can assert some things about
1345 # its size and shares.
1346 self.failUnlessIn("share filename: %s" % quote_output(abspath_expanduser_unicode(filename)), output)
1347 self.failUnlessIn("size: %d\n" % len(self.data), output)
1348 self.failUnlessIn("num_segments: 1\n", output)
1349 # segment_size is always a multiple of needed_shares
1350 self.failUnlessIn("segment_size: %d\n" % mathutil.next_multiple(len(self.data), 3), output)
1351 self.failUnlessIn("total_shares: 10\n", output)
1352 # keys which are supposed to be present
1353 for key in ("size", "num_segments", "segment_size",
1354 "needed_shares", "total_shares",
1355 "codec_name", "codec_params", "tail_codec_params",
1356 #"plaintext_hash", "plaintext_root_hash",
1357 "crypttext_hash", "crypttext_root_hash",
1358 "share_root_hash", "UEB_hash"):
1359 self.failUnlessIn("%s: " % key, output)
1360 self.failUnlessIn(" verify-cap: URI:CHK-Verifier:", output)
1362 # now use its storage index to find the other shares using the
1363 # 'find-shares' tool
1364 sharedir, shnum = os.path.split(filename)
1365 storagedir, storage_index_s = os.path.split(sharedir)
1366 storage_index_s = str(storage_index_s)
1367 out,err = StringIO(), StringIO()
1368 nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
1369 cmd = ["debug", "find-shares", storage_index_s] + nodedirs
1370 rc = runner.runner(cmd, stdout=out, stderr=err)
1371 self.failUnlessEqual(rc, 0)
1373 sharefiles = [sfn.strip() for sfn in out.readlines()]
1374 self.failUnlessEqual(len(sharefiles), 10)
1376 # also exercise the 'catalog-shares' tool
1377 out,err = StringIO(), StringIO()
1378 nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
1379 cmd = ["debug", "catalog-shares"] + nodedirs
1380 rc = runner.runner(cmd, stdout=out, stderr=err)
1381 self.failUnlessEqual(rc, 0)
1383 descriptions = [sfn.strip() for sfn in out.readlines()]
1384 self.failUnlessEqual(len(descriptions), 30)
1386 for line in descriptions
1387 if line.startswith("CHK %s " % storage_index_s)]
1388 self.failUnlessEqual(len(matching), 10)
1390 def _test_control(self, res):
# Connect to client 0's control-port FURL through the introducer's Tub
# and chain into _test_control2 with the FURL file's path.
# NOTE(review): the trailing 'return d' line appears to be elided from
# this extract.
1391 # exercise the remote-control-the-client foolscap interfaces in
1392 # allmydata.control (mostly used for performance tests)
1393 c0 = self.clients[0]
1394 control_furl_file = os.path.join(c0.basedir, "private", "control.furl")
1395 control_furl = open(control_furl_file, "r").read().strip()
1396 # it doesn't really matter which Tub we use to connect to the client,
1397 # so let's just use our IntroducerNode's
1398 d = self.introducer.tub.getReference(control_furl)
1399 d.addCallback(self._test_control2, control_furl_file)
1401 def _test_control2(self, rref, filename):
# Drive the foolscap control interface on remote reference 'rref':
# upload 'filename', download it back to control.downfile, compare the
# bytes, then run speed_test and (on Linux) the memory/response-time
# probes. NOTE(review): lines are elided from this extract — the
# download call's remaining arguments, the 'def _check(res):' header,
# and the 'return d' tail are not visible.
1402 d = rref.callRemote("upload_from_file_to_uri",
1403 filename.encode(get_filesystem_encoding()), convergence=None)
1404 downfile = os.path.join(self.basedir, "control.downfile").encode(get_filesystem_encoding())
1405 d.addCallback(lambda uri:
1406 rref.callRemote("download_from_uri_to_file",
1409 self.failUnlessEqual(res, downfile)
1410 data = open(downfile, "r").read()
1411 expected_data = open(filename, "r").read()
1412 self.failUnlessEqual(data, expected_data)
1413 d.addCallback(_check)
1414 d.addCallback(lambda res: rref.callRemote("speed_test", 1, 200, False))
1415 if sys.platform in ("linux2", "linux3"):
1416 d.addCallback(lambda res: rref.callRemote("get_memory_usage"))
1417 d.addCallback(lambda res: rref.callRemote("measure_peer_response_time"))
1420 def _test_cli(self, res):
# Drive the in-process CLI (runner) against client 0's node directory:
# legacy root_dir.cap-as-"tahoe:" alias, list-aliases/add-alias, ls in
# its -l/--uri/--readonly-uri variants, put (file/stdin, SDMF mutable),
# get (stdout and file targets), rm, mv, ln, and the full cp matrix
# (tahoe->tahoe, disk->tahoe, tahoe->disk, disk->disk, overwriting
# immutable and mutable targets, and recursive -r / --caps-only).
# NOTE(review): this extract is elided in many places — 'nodeargs'
# initialization, 'f.close()' lines, the files/datas fixture loop
# header, several 'def'/'else:' headers inside _check_ls_l and friends,
# 'os.mkdir' calls in the recursive-copy setup, the 'def _cmp(name):'
# header in _check_cp_r_out, and the 'return d' tail are not visible;
# confirm against the full file before editing. Also note line 1572
# writes outfile1 to the path named "outfile0" — verify whether that is
# intentional in the original.
1421 # run various CLI commands (in a thread, since they use blocking
1424 private_uri = self._private_node.get_uri()
1425 client0_basedir = self.getdir("client0")
1428 "--node-directory", client0_basedir,
1431 d = defer.succeed(None)
1433 # for compatibility with earlier versions, private/root_dir.cap is
1434 # supposed to be treated as an alias named "tahoe:". Start by making
1435 # sure that works, before we add other aliases.
1437 root_file = os.path.join(client0_basedir, "private", "root_dir.cap")
1438 f = open(root_file, "w")
1439 f.write(private_uri)
1442 def run(ignored, verb, *args, **kwargs):
1443 stdin = kwargs.get("stdin", "")
1444 newargs = nodeargs + [verb] + list(args)
1445 return self._run_cli(newargs, stdin=stdin)
1447 def _check_ls((out,err), expected_children, unexpected_children=[]):
1448 self.failUnlessEqual(err, "")
1449 for s in expected_children:
1450 self.failUnless(s in out, (s,out))
1451 for s in unexpected_children:
1452 self.failIf(s in out, (s,out))
1454 def _check_ls_root((out,err)):
1455 self.failUnless("personal" in out)
1456 self.failUnless("s2-ro" in out)
1457 self.failUnless("s2-rw" in out)
1458 self.failUnlessEqual(err, "")
1460 # this should reference private_uri
1461 d.addCallback(run, "ls")
1462 d.addCallback(_check_ls, ["personal", "s2-ro", "s2-rw"])
1464 d.addCallback(run, "list-aliases")
1465 def _check_aliases_1((out,err)):
1466 self.failUnlessEqual(err, "")
1467 self.failUnlessEqual(out.strip(" \n"), "tahoe: %s" % private_uri)
1468 d.addCallback(_check_aliases_1)
1470 # now that that's out of the way, remove root_dir.cap and work with
1472 d.addCallback(lambda res: os.unlink(root_file))
1473 d.addCallback(run, "list-aliases")
1474 def _check_aliases_2((out,err)):
1475 self.failUnlessEqual(err, "")
1476 self.failUnlessEqual(out, "")
1477 d.addCallback(_check_aliases_2)
1479 d.addCallback(run, "mkdir")
1480 def _got_dir( (out,err) ):
1481 self.failUnless(uri.from_string_dirnode(out.strip()))
1483 d.addCallback(_got_dir)
1484 d.addCallback(lambda newcap: run(None, "add-alias", "tahoe", newcap))
1486 d.addCallback(run, "list-aliases")
1487 def _check_aliases_3((out,err)):
1488 self.failUnlessEqual(err, "")
1489 self.failUnless("tahoe: " in out)
1490 d.addCallback(_check_aliases_3)
1492 def _check_empty_dir((out,err)):
1493 self.failUnlessEqual(out, "")
1494 self.failUnlessEqual(err, "")
1495 d.addCallback(run, "ls")
1496 d.addCallback(_check_empty_dir)
1498 def _check_missing_dir((out,err)):
1499 # TODO: check that rc==2
1500 self.failUnlessEqual(out, "")
1501 self.failUnlessEqual(err, "No such file or directory\n")
1502 d.addCallback(run, "ls", "bogus")
1503 d.addCallback(_check_missing_dir)
1508 fn = os.path.join(self.basedir, "file%d" % i)
1510 data = "data to be uploaded: file%d\n" % i
1512 open(fn,"wb").write(data)
1514 def _check_stdout_against((out,err), filenum=None, data=None):
1515 self.failUnlessEqual(err, "")
1516 if filenum is not None:
1517 self.failUnlessEqual(out, datas[filenum])
1518 if data is not None:
1519 self.failUnlessEqual(out, data)
1521 # test all both forms of put: from a file, and from stdin
1523 d.addCallback(run, "put", files[0], "tahoe-file0")
1524 def _put_out((out,err)):
1525 self.failUnless("URI:LIT:" in out, out)
1526 self.failUnless("201 Created" in err, err)
1528 return run(None, "get", uri0)
1529 d.addCallback(_put_out)
1530 d.addCallback(lambda (out,err): self.failUnlessEqual(out, datas[0]))
1532 d.addCallback(run, "put", files[1], "subdir/tahoe-file1")
1533 # tahoe put bar tahoe:FOO
1534 d.addCallback(run, "put", files[2], "tahoe:file2")
1535 d.addCallback(run, "put", "--format=SDMF", files[3], "tahoe:file3")
1536 def _check_put_mutable((out,err)):
1537 self._mutable_file3_uri = out.strip()
1538 d.addCallback(_check_put_mutable)
1539 d.addCallback(run, "get", "tahoe:file3")
1540 d.addCallback(_check_stdout_against, 3)
1543 STDIN_DATA = "This is the file to upload from stdin."
1544 d.addCallback(run, "put", "-", "tahoe-file-stdin", stdin=STDIN_DATA)
1545 # tahoe put tahoe:FOO
1546 d.addCallback(run, "put", "-", "tahoe:from-stdin",
1547 stdin="Other file from stdin.")
1549 d.addCallback(run, "ls")
1550 d.addCallback(_check_ls, ["tahoe-file0", "file2", "file3", "subdir",
1551 "tahoe-file-stdin", "from-stdin"])
1552 d.addCallback(run, "ls", "subdir")
1553 d.addCallback(_check_ls, ["tahoe-file1"])
1556 d.addCallback(run, "mkdir", "subdir2")
1557 d.addCallback(run, "ls")
1558 # TODO: extract the URI, set an alias with it
1559 d.addCallback(_check_ls, ["subdir2"])
1561 # tahoe get: (to stdin and to a file)
1562 d.addCallback(run, "get", "tahoe-file0")
1563 d.addCallback(_check_stdout_against, 0)
1564 d.addCallback(run, "get", "tahoe:subdir/tahoe-file1")
1565 d.addCallback(_check_stdout_against, 1)
1566 outfile0 = os.path.join(self.basedir, "outfile0")
1567 d.addCallback(run, "get", "file2", outfile0)
1568 def _check_outfile0((out,err)):
1569 data = open(outfile0,"rb").read()
1570 self.failUnlessEqual(data, "data to be uploaded: file2\n")
1571 d.addCallback(_check_outfile0)
1572 outfile1 = os.path.join(self.basedir, "outfile0")
1573 d.addCallback(run, "get", "tahoe:subdir/tahoe-file1", outfile1)
1574 def _check_outfile1((out,err)):
1575 data = open(outfile1,"rb").read()
1576 self.failUnlessEqual(data, "data to be uploaded: file1\n")
1577 d.addCallback(_check_outfile1)
1579 d.addCallback(run, "rm", "tahoe-file0")
1580 d.addCallback(run, "rm", "tahoe:file2")
1581 d.addCallback(run, "ls")
1582 d.addCallback(_check_ls, [], ["tahoe-file0", "file2"])
1584 d.addCallback(run, "ls", "-l")
1585 def _check_ls_l((out,err)):
1586 lines = out.split("\n")
1588 if "tahoe-file-stdin" in l:
1589 self.failUnless(l.startswith("-r-- "), l)
1590 self.failUnless(" %d " % len(STDIN_DATA) in l)
1592 self.failUnless(l.startswith("-rw- "), l) # mutable
1593 d.addCallback(_check_ls_l)
1595 d.addCallback(run, "ls", "--uri")
1596 def _check_ls_uri((out,err)):
1597 lines = out.split("\n")
1600 self.failUnless(self._mutable_file3_uri in l)
1601 d.addCallback(_check_ls_uri)
1603 d.addCallback(run, "ls", "--readonly-uri")
1604 def _check_ls_rouri((out,err)):
1605 lines = out.split("\n")
1608 rw_uri = self._mutable_file3_uri
1609 u = uri.from_string_mutable_filenode(rw_uri)
1610 ro_uri = u.get_readonly().to_string()
1611 self.failUnless(ro_uri in l)
1612 d.addCallback(_check_ls_rouri)
1615 d.addCallback(run, "mv", "tahoe-file-stdin", "tahoe-moved")
1616 d.addCallback(run, "ls")
1617 d.addCallback(_check_ls, ["tahoe-moved"], ["tahoe-file-stdin"])
1619 d.addCallback(run, "ln", "tahoe-moved", "newlink")
1620 d.addCallback(run, "ls")
1621 d.addCallback(_check_ls, ["tahoe-moved", "newlink"])
1623 d.addCallback(run, "cp", "tahoe:file3", "tahoe:file3-copy")
1624 d.addCallback(run, "ls")
1625 d.addCallback(_check_ls, ["file3", "file3-copy"])
1626 d.addCallback(run, "get", "tahoe:file3-copy")
1627 d.addCallback(_check_stdout_against, 3)
1629 # copy from disk into tahoe
1630 d.addCallback(run, "cp", files[4], "tahoe:file4")
1631 d.addCallback(run, "ls")
1632 d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
1633 d.addCallback(run, "get", "tahoe:file4")
1634 d.addCallback(_check_stdout_against, 4)
1636 # copy from tahoe into disk
1637 target_filename = os.path.join(self.basedir, "file-out")
1638 d.addCallback(run, "cp", "tahoe:file4", target_filename)
1639 def _check_cp_out((out,err)):
1640 self.failUnless(os.path.exists(target_filename))
1641 got = open(target_filename,"rb").read()
1642 self.failUnlessEqual(got, datas[4])
1643 d.addCallback(_check_cp_out)
1645 # copy from disk to disk (silly case)
1646 target2_filename = os.path.join(self.basedir, "file-out-copy")
1647 d.addCallback(run, "cp", target_filename, target2_filename)
1648 def _check_cp_out2((out,err)):
1649 self.failUnless(os.path.exists(target2_filename))
1650 got = open(target2_filename,"rb").read()
1651 self.failUnlessEqual(got, datas[4])
1652 d.addCallback(_check_cp_out2)
1654 # copy from tahoe into disk, overwriting an existing file
1655 d.addCallback(run, "cp", "tahoe:file3", target_filename)
1656 def _check_cp_out3((out,err)):
1657 self.failUnless(os.path.exists(target_filename))
1658 got = open(target_filename,"rb").read()
1659 self.failUnlessEqual(got, datas[3])
1660 d.addCallback(_check_cp_out3)
1662 # copy from disk into tahoe, overwriting an existing immutable file
1663 d.addCallback(run, "cp", files[5], "tahoe:file4")
1664 d.addCallback(run, "ls")
1665 d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
1666 d.addCallback(run, "get", "tahoe:file4")
1667 d.addCallback(_check_stdout_against, 5)
1669 # copy from disk into tahoe, overwriting an existing mutable file
1670 d.addCallback(run, "cp", files[5], "tahoe:file3")
1671 d.addCallback(run, "ls")
1672 d.addCallback(_check_ls, ["file3", "file3-copy", "file4"])
1673 d.addCallback(run, "get", "tahoe:file3")
1674 d.addCallback(_check_stdout_against, 5)
1676 # recursive copy: setup
1677 dn = os.path.join(self.basedir, "dir1")
1679 open(os.path.join(dn, "rfile1"), "wb").write("rfile1")
1680 open(os.path.join(dn, "rfile2"), "wb").write("rfile2")
1681 open(os.path.join(dn, "rfile3"), "wb").write("rfile3")
1682 sdn2 = os.path.join(dn, "subdir2")
1684 open(os.path.join(sdn2, "rfile4"), "wb").write("rfile4")
1685 open(os.path.join(sdn2, "rfile5"), "wb").write("rfile5")
1687 # from disk into tahoe
1688 d.addCallback(run, "cp", "-r", dn, "tahoe:dir1")
1689 d.addCallback(run, "ls")
1690 d.addCallback(_check_ls, ["dir1"])
1691 d.addCallback(run, "ls", "dir1")
1692 d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"],
1693 ["rfile4", "rfile5"])
1694 d.addCallback(run, "ls", "tahoe:dir1/subdir2")
1695 d.addCallback(_check_ls, ["rfile4", "rfile5"],
1696 ["rfile1", "rfile2", "rfile3"])
1697 d.addCallback(run, "get", "dir1/subdir2/rfile4")
1698 d.addCallback(_check_stdout_against, data="rfile4")
1700 # and back out again
1701 dn_copy = os.path.join(self.basedir, "dir1-copy")
1702 d.addCallback(run, "cp", "--verbose", "-r", "tahoe:dir1", dn_copy)
1703 def _check_cp_r_out((out,err)):
1705 old = open(os.path.join(dn, name), "rb").read()
1706 newfn = os.path.join(dn_copy, name)
1707 self.failUnless(os.path.exists(newfn))
1708 new = open(newfn, "rb").read()
1709 self.failUnlessEqual(old, new)
1713 _cmp(os.path.join("subdir2", "rfile4"))
1714 _cmp(os.path.join("subdir2", "rfile5"))
1715 d.addCallback(_check_cp_r_out)
1717 # and copy it a second time, which ought to overwrite the same files
1718 d.addCallback(run, "cp", "-r", "tahoe:dir1", dn_copy)
1720 # and again, only writing filecaps
1721 dn_copy2 = os.path.join(self.basedir, "dir1-copy-capsonly")
1722 d.addCallback(run, "cp", "-r", "--caps-only", "tahoe:dir1", dn_copy2)
1723 def _check_capsonly((out,err)):
1724 # these should all be LITs
1725 x = open(os.path.join(dn_copy2, "subdir2", "rfile4")).read()
1726 y = uri.from_string_filenode(x)
1727 self.failUnlessEqual(y.data, "rfile4")
1728 d.addCallback(_check_capsonly)
1730 # and tahoe-to-tahoe
1731 d.addCallback(run, "cp", "-r", "tahoe:dir1", "tahoe:dir1-copy")
1732 d.addCallback(run, "ls")
1733 d.addCallback(_check_ls, ["dir1", "dir1-copy"])
1734 d.addCallback(run, "ls", "dir1-copy")
1735 d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"],
1736 ["rfile4", "rfile5"])
1737 d.addCallback(run, "ls", "tahoe:dir1-copy/subdir2")
1738 d.addCallback(_check_ls, ["rfile4", "rfile5"],
1739 ["rfile1", "rfile2", "rfile3"])
1740 d.addCallback(run, "get", "dir1-copy/subdir2/rfile4")
1741 d.addCallback(_check_stdout_against, data="rfile4")
1743 # and copy it a second time, which ought to overwrite the same files
1744 d.addCallback(run, "cp", "-r", "tahoe:dir1", "tahoe:dir1-copy")
1746 # tahoe_ls doesn't currently handle the error correctly: it tries to
1747 # JSON-parse a traceback.
1748 ## def _ls_missing(res):
1749 ##     argv = nodeargs + ["ls", "bogus"]
1750 ##     return self._run_cli(argv)
1751 ## d.addCallback(_ls_missing)
1752 ## def _check_ls_missing((out,err)):
1755 ##     self.failUnlessEqual(err, "")
1756 ## d.addCallback(_check_ls_missing)
# Exercise basic CLI verbs (create-alias, put, mv, ls) by spawning bin/tahoe
# in a child process against client0's node directory, instead of calling the
# runner in-process.
# NOTE(review): this excerpt has extraction gaps -- e.g. the
# `env = os.environ.copy()` line and the `def _check_ls(res):` header are
# missing between the visible lines; confirm against the full file before
# editing code here.
1760 def test_filesystem_with_cli_in_subprocess(self):
1761 # We do this in a separate test so that test_filesystem doesn't skip if we can't run bin/tahoe.
1763 self.basedir = "system/SystemTest/test_filesystem_with_cli_in_subprocess"
1764 d = self.set_up_nodes()
# Lower the servers-of-happiness requirement so uploads succeed on this
# small test grid.
1765 def _new_happy_semantics(ign):
1766 for c in self.clients:
1767 c.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
1768 d.addCallback(_new_happy_semantics)
# Helper: run `bin/tahoe <verb> <args...>` as a subprocess pointed at
# client0's node directory; returns run_bintahoe's result (out, err,
# rc_or_sig), consumed by _check_succeeded below.
1770 def _run_in_subprocess(ignored, verb, *args, **kwargs):
1771 stdin = kwargs.get("stdin")
1772 env = kwargs.get("env")
1773 newargs = ["--node-directory", self.getdir("client0"), verb] + list(args)
1774 return self.run_bintahoe(newargs, stdin=stdin, env=env)
1776 def _check_succeeded(res, check_stderr=True):
1777 out, err, rc_or_sig = res
1778 self.failUnlessEqual(rc_or_sig, 0, str(res))
# presumably guarded by `if check_stderr:` in the full file -- the guard
# line is not visible in this excerpt; TODO confirm.
1780 self.failUnlessEqual(err, "")
1782 d.addCallback(_run_in_subprocess, "create-alias", "newalias")
1783 d.addCallback(_check_succeeded)
1785 STDIN_DATA = "This is the file to upload from stdin."
1786 d.addCallback(_run_in_subprocess, "put", "-", "newalias:tahoe-file", stdin=STDIN_DATA)
# `put` may write progress noise to stderr, so don't require it empty.
1787 d.addCallback(_check_succeeded, check_stderr=False)
# Regression check: CLI operations must work even when http_proxy /
# HTTP_PROXY point at an unreachable address (the CLI talks to the local
# node and must not route through the proxy).
1789 def _mv_with_http_proxy(ign):
1791 env['http_proxy'] = env['HTTP_PROXY'] = "http://127.0.0.0:12345" # invalid address
1792 return _run_in_subprocess(None, "mv", "newalias:tahoe-file", "newalias:tahoe-moved", env=env)
1793 d.addCallback(_mv_with_http_proxy)
1794 d.addCallback(_check_succeeded)
1796 d.addCallback(_run_in_subprocess, "ls", "newalias:")
# After the mv, the new name must be listed and the old name gone.
# (The `def _check_ls(res):` header for these four assertions is one of the
# lines missing from this excerpt.)
1798 out, err, rc_or_sig = res
1799 self.failUnlessEqual(rc_or_sig, 0, str(res))
1800 self.failUnlessEqual(err, "", str(res))
1801 self.failUnlessIn("tahoe-moved", out)
1802 self.failIfIn("tahoe-file", out)
1803 d.addCallback(_check_ls)
# Run `tahoe debug trial` as a subprocess against the fixture test module
# allmydata.test.trialtest and check the verbose reporter's output, first for
# a failing run (rc=1, FAILED) and then for a passing subset (rc=0, PASSED).
# NOTE(review): extraction gaps here -- the `for l in lines:` header and the
# early `return` of _check_for_line (original lines ~1808/1810) are missing,
# as is the trailing `return d`; confirm against the full file.
1806 def test_debug_trial(self):
# Assert that some output line mentions both the result tag (e.g.
# "[FAIL]") and the test name; otherwise fail with the captured output.
1807 def _check_for_line(lines, result, test):
1809 if result in l and test in l:
1811 self.fail("output (prefixed with '##') does not have a line containing both %r and %r:\n## %s"
1812 % (result, test, "\n## ".join(lines)))
# Assert the overall outcome marker ("PASSED"/"FAILED") appears in stdout.
1814 def _check_for_outcome(lines, out, outcome):
1815 self.failUnlessIn(outcome, out, "output (prefixed with '##') does not contain %r:\n## %s"
1816 % (outcome, "\n## ".join(lines)))
1818 d = self.run_bintahoe(['debug', 'trial', '--reporter=verbose',
1819 'allmydata.test.trialtest'])
# The fixture module contains deliberate skip/todo/fail/error tests, so
# the whole run must report FAILED with exit code 1.
1820 def _check_failure( (out, err, rc) ):
1821 self.failUnlessEqual(rc, 1)
1822 lines = out.split('\n')
1823 _check_for_line(lines, "[SKIPPED]", "test_skip")
1824 _check_for_line(lines, "[TODO]", "test_todo")
1825 _check_for_line(lines, "[FAIL]", "test_fail")
1826 _check_for_line(lines, "[ERROR]", "test_deferred_error")
1827 _check_for_line(lines, "[ERROR]", "test_error")
1828 _check_for_outcome(lines, out, "FAILED")
1829 d.addCallback(_check_failure)
1831 # the --quiet argument regression-tests a problem in finding which arguments to pass to trial
1832 d.addCallback(lambda ign: self.run_bintahoe(['--quiet', 'debug', 'trial', '--reporter=verbose',
1833 'allmydata.test.trialtest.Success']))
# The Success subset still contains skip/todo cases but must PASS overall.
1834 def _check_success( (out, err, rc) ):
1835 self.failUnlessEqual(rc, 0)
1836 lines = out.split('\n')
1837 _check_for_line(lines, "[SKIPPED]", "test_skip")
1838 _check_for_line(lines, "[TODO]", "test_todo")
1839 _check_for_outcome(lines, out, "PASSED")
1840 d.addCallback(_check_success)
# Run the in-process CLI runner in a worker thread (it does blocking I/O),
# capturing stdout/stderr; the Deferred fires with (stdout_text, stderr_text).
# NOTE(review): the `def _done(res):` header (original ~1849) and the final
# `return d` are missing from this excerpt; confirm against the full file.
1843 def _run_cli(self, argv, stdin=""):
1845 stdout, stderr = StringIO(), StringIO()
# run_by_human=False makes the runner raise/return instead of sys.exit-ing.
1846 d = threads.deferToThread(runner.runner, argv, run_by_human=False,
1847 stdin=StringIO(stdin),
1848 stdout=stdout, stderr=stderr)
1850 return stdout.getvalue(), stderr.getvalue()
1851 d.addCallback(_done)
# Exercise the checker on three node kinds reachable from self._personal_node:
# the directory node itself, a CHK immutable file (large enough not to be a
# LIT), and a LIT file (checking a LIT yields None since there is nothing on
# the servers to check).
# NOTE(review): extraction gaps -- the inner `return d` lines of the two
# nested helpers (original ~1874, ~1886) and the method's trailing `return d`
# are missing from this excerpt; confirm against the full file.
1854 def _test_checker(self, res):
# "too big to be literal" * 200 exceeds the LIT threshold, forcing a CHK
# upload so there are real shares to check/verify.
1855 ut = upload.Data("too big to be literal" * 200, convergence=None)
1856 d = self._personal_node.add_file(u"big file", ut)
1858 d.addCallback(lambda res: self._personal_node.check(Monitor()))
1859 def _check_dirnode_results(r):
1860 self.failUnless(r.is_healthy())
1861 d.addCallback(_check_dirnode_results)
# Re-check with verify=True to also download-and-verify share contents.
1862 d.addCallback(lambda res: self._personal_node.check(Monitor(), verify=True))
1863 d.addCallback(_check_dirnode_results)
1865 d.addCallback(lambda res: self._personal_node.get(u"big file"))
# CHK file: must come back as an ImmutableFileNode and check healthy.
1866 def _got_chk_filenode(n):
1867 self.failUnless(isinstance(n, ImmutableFileNode))
1868 d = n.check(Monitor())
1869 def _check_filenode_results(r):
1870 self.failUnless(r.is_healthy())
1871 d.addCallback(_check_filenode_results)
1872 d.addCallback(lambda res: n.check(Monitor(), verify=True))
1873 d.addCallback(_check_filenode_results)
1875 d.addCallback(_got_chk_filenode)
# LIT file ("sekrit data" was presumably stored earlier in the test run --
# not visible in this excerpt): check() returns None for LIT nodes.
1877 d.addCallback(lambda res: self._personal_node.get(u"sekrit data"))
1878 def _got_lit_filenode(n):
1879 self.failUnless(isinstance(n, LiteralFileNode))
1880 d = n.check(Monitor())
1881 def _check_lit_filenode_results(r):
1882 self.failUnlessEqual(r, None)
1883 d.addCallback(_check_lit_filenode_results)
1884 d.addCallback(lambda res: n.check(Monitor(), verify=True))
1885 d.addCallback(_check_lit_filenode_results)
1887 d.addCallback(_got_lit_filenode)
1891 class Connections(SystemTestMixin, unittest.TestCase):
1892 def test_rref(self):
1893 if NormalizedVersion(foolscap.__version__) < NormalizedVersion('0.6.4'):
1894 raise unittest.SkipTest("skipped due to http://foolscap.lothar.com/trac/ticket/196 "
1895 "(which does not affect normal usage of Tahoe-LAFS)")
1897 self.basedir = "system/Connections/rref"
1898 d = self.set_up_nodes(2)
1900 self.c0 = self.clients[0]
1901 nonclients = [s for s in self.c0.storage_broker.get_connected_servers()
1902 if s.get_serverid() != self.c0.nodeid]
1903 self.failUnlessEqual(len(nonclients), 1)
1905 self.s1 = nonclients[0] # s1 is the server, not c0
1906 self.s1_rref = self.s1.get_rref()
1907 self.failIfEqual(self.s1_rref, None)
1908 self.failUnless(self.s1.is_connected())
1909 d.addCallback(_start)
1911 # now shut down the server
1912 d.addCallback(lambda ign: self.clients[1].disownServiceParent())
1913 # and wait for the client to notice
1915 return len(self.c0.storage_broker.get_connected_servers()) < 2
1916 d.addCallback(lambda ign: self.poll(_poll))
1919 self.failIf(self.s1.is_connected())
1920 rref = self.s1.get_rref()
1921 self.failUnless(rref)
1922 self.failUnlessIdentical(rref, self.s1_rref)
1923 d.addCallback(_down)