-from base64 import b32encode
-import os, sys, time, re, simplejson, urllib
+
+import os, re, sys, time, simplejson
from cStringIO import StringIO
-from zope.interface import implements
+
from twisted.trial import unittest
from twisted.internet import defer
from twisted.internet import threads # CLI tests use deferToThread
-from twisted.internet.error import ConnectionDone, ConnectionLost
-from twisted.internet.interfaces import IConsumer, IPushProducer
+
import allmydata
-from allmydata import uri, storage
-from allmydata.immutable import download, filenode, offloaded, upload
+from allmydata import uri
+from allmydata.storage.mutable import MutableShareFile
+from allmydata.storage.server import si_a2b
+from allmydata.immutable import offloaded, upload
+from allmydata.immutable.literal import LiteralFileNode
+from allmydata.immutable.filenode import ImmutableFileNode
from allmydata.util import idlib, mathutil
from allmydata.util import log, base32
+from allmydata.util.verlib import NormalizedVersion
+from allmydata.util.encodingutil import quote_output, unicode_to_argv
+from allmydata.util.fileutil import abspath_expanduser_unicode
+from allmydata.util.consumer import MemoryConsumer, download_to_data
from allmydata.scripts import runner
-from allmydata.interfaces import IDirectoryNode, IFileNode, IFileURI, \
- ICheckResults, ICheckAndRepairResults, IDeepCheckResults, \
- IDeepCheckAndRepairResults, NoSuchChildError, NotEnoughSharesError
-from allmydata.monitor import Monitor, OperationCancelledError
-from allmydata.mutable.common import NotMutableError
+from allmydata.interfaces import IDirectoryNode, IFileNode, \
+ NoSuchChildError, NoSharesError
+from allmydata.monitor import Monitor
+from allmydata.mutable.common import NotWriteableError
from allmydata.mutable import layout as mutable_layout
-from foolscap import DeadReferenceError
+from allmydata.mutable.publish import MutableData
+
+import foolscap
+from foolscap.api import DeadReferenceError, fireEventually
from twisted.python.failure import Failure
from twisted.web.client import getPage
from twisted.web.error import Error
-from allmydata.test.common import SystemTestMixin, ErrorMixin, \
- MemoryConsumer, download_to_data
+from allmydata.test.common import SystemTestMixin
+
+# TODO: move this to common or common_util
+from allmydata.test.test_runner import RunBinTahoeMixin
LARGE_DATA = """
-This is some data to publish to the virtual drive, which needs to be large
+This is some data to publish to the remote grid, which needs to be large
enough to not fit inside a LIT uri.
"""
self.interrupt_after_d.callback(self)
return upload.Data.read(self, length)
-class GrabEverythingConsumer:
- implements(IConsumer)
-
- def __init__(self):
- self.contents = ""
-
- def registerProducer(self, producer, streaming):
- assert streaming
- assert IPushProducer.providedBy(producer)
-
- def write(self, data):
- self.contents += data
-
- def unregisterProducer(self):
- pass
-
-class SystemTest(SystemTestMixin, unittest.TestCase):
+class SystemTest(SystemTestMixin, RunBinTahoeMixin, unittest.TestCase):
+ timeout = 3600 # It takes longer than 960 seconds on Zandr's ARM box.
def test_connections(self):
self.basedir = "system/SystemTest/test_connections"
def _check(extra_node):
self.extra_node = extra_node
for c in self.clients:
- all_peerids = list(c.get_all_peerids())
+ all_peerids = c.get_storage_broker().get_all_serverids()
self.failUnlessEqual(len(all_peerids), self.numclients+1)
- permuted_peers = list(c.get_permuted_peers("storage", "a"))
+ sb = c.storage_broker
+ permuted_peers = sb.get_servers_for_psi("a")
self.failUnlessEqual(len(permuted_peers), self.numclients+1)
d.addCallback(_check)
return res
d.addBoth(_shutdown_extra_node)
return d
- test_connections.timeout = 300
# test_connections is subsumed by test_upload_and_download, and takes
# quite a while to run on a slow machine (because of all the TLS
# connections that must be established). If we ever rework the introducer
def test_upload_and_download_random_key(self):
self.basedir = "system/SystemTest/test_upload_and_download_random_key"
return self._test_upload_and_download(convergence=None)
- test_upload_and_download_random_key.timeout = 4800
def test_upload_and_download_convergent(self):
self.basedir = "system/SystemTest/test_upload_and_download_convergent"
return self._test_upload_and_download(convergence="some convergence string")
- test_upload_and_download_convergent.timeout = 4800
def _test_upload_and_download(self, convergence):
# we use 4000 bytes of data, which will result in about 400k written
d = self.set_up_nodes()
def _check_connections(res):
for c in self.clients:
- all_peerids = list(c.get_all_peerids())
+ c.encoding_params['happy'] = 5
+ all_peerids = c.get_storage_broker().get_all_serverids()
self.failUnlessEqual(len(all_peerids), self.numclients)
- permuted_peers = list(c.get_permuted_peers("storage", "a"))
+ sb = c.storage_broker
+ permuted_peers = sb.get_servers_for_psi("a")
self.failUnlessEqual(len(permuted_peers), self.numclients)
d.addCallback(_check_connections)
return d1
d.addCallback(_do_upload)
def _upload_done(results):
- theuri = results.uri
+ theuri = results.get_uri()
log.msg("upload finished: uri is %s" % (theuri,))
self.uri = theuri
assert isinstance(self.uri, str), self.uri
- dl = self.clients[1].getServiceNamed("downloader")
- self.downloader = dl
+ self.cap = uri.from_string(self.uri)
+ self.n = self.clients[1].create_node_from_uri(self.uri)
d.addCallback(_upload_done)
def _upload_again(res):
log.msg("UPLOADING AGAIN")
up = upload.Data(DATA, convergence=convergence)
up.max_segment_size = 1024
- d1 = self.uploader.upload(up)
+ return self.uploader.upload(up)
d.addCallback(_upload_again)
def _download_to_data(res):
log.msg("DOWNLOADING")
- return self.downloader.download_to_data(self.uri)
+ return download_to_data(self.n)
d.addCallback(_download_to_data)
def _download_to_data_done(data):
log.msg("download finished")
self.failUnlessEqual(data, DATA)
d.addCallback(_download_to_data_done)
- target_filename = os.path.join(self.basedir, "download.target")
- def _download_to_filename(res):
- return self.downloader.download_to_filename(self.uri,
- target_filename)
- d.addCallback(_download_to_filename)
- def _download_to_filename_done(res):
- newdata = open(target_filename, "rb").read()
- self.failUnlessEqual(newdata, DATA)
- d.addCallback(_download_to_filename_done)
-
- target_filename2 = os.path.join(self.basedir, "download.target2")
- def _download_to_filehandle(res):
- fh = open(target_filename2, "wb")
- return self.downloader.download_to_filehandle(self.uri, fh)
- d.addCallback(_download_to_filehandle)
- def _download_to_filehandle_done(fh):
- fh.close()
- newdata = open(target_filename2, "rb").read()
- self.failUnlessEqual(newdata, DATA)
- d.addCallback(_download_to_filehandle_done)
-
- consumer = GrabEverythingConsumer()
- ct = download.ConsumerAdapter(consumer)
- d.addCallback(lambda res:
- self.downloader.download(self.uri, ct))
- def _download_to_consumer_done(ign):
- self.failUnlessEqual(consumer.contents, DATA)
- d.addCallback(_download_to_consumer_done)
-
def _test_read(res):
n = self.clients[1].create_node_from_uri(self.uri)
d = download_to_data(n)
bad_n = self.clients[1].create_node_from_uri(bad_u.to_string())
# this should cause an error during download
- d = self.shouldFail2(NotEnoughSharesError, "'download bad node'",
+ d = self.shouldFail2(NoSharesError, "'download bad node'",
None,
bad_n.read, MemoryConsumer(), offset=2)
return d
def _download_nonexistent_uri(res):
baduri = self.mangle_uri(self.uri)
+ badnode = self.clients[1].create_node_from_uri(baduri)
log.msg("about to download non-existent URI", level=log.UNUSUAL,
facility="tahoe.tests")
- d1 = self.downloader.download_to_data(baduri)
+ d1 = download_to_data(badnode)
def _baduri_should_fail(res):
- log.msg("finished downloading non-existend URI",
+ log.msg("finished downloading non-existent URI",
level=log.UNUSUAL, facility="tahoe.tests")
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(NotEnoughSharesError),
- "expected NotEnoughSharesError, got %s" % res)
- # TODO: files that have zero peers should get a special kind
- # of NotEnoughSharesError, which can be used to suggest that
- # the URI might be wrong or that they've never uploaded the
- # file in the first place.
+ self.failUnless(res.check(NoSharesError),
+ "expected NoSharesError, got %s" % res)
d1.addBoth(_baduri_should_fail)
return d1
d.addCallback(_download_nonexistent_uri)
add_to_sparent=True))
def _added(extra_node):
self.extra_node = extra_node
+ self.extra_node.encoding_params['happy'] = 5
d.addCallback(_added)
+ def _has_helper():
+ uploader = self.extra_node.getServiceNamed("uploader")
+ furl, connected = uploader.get_helper_info()
+ return connected
+ d.addCallback(lambda ign: self.poll(_has_helper))
+
HELPER_DATA = "Data that needs help to upload" * 1000
def _upload_with_helper(res):
u = upload.Data(HELPER_DATA, convergence=convergence)
d = self.extra_node.upload(u)
def _uploaded(results):
- uri = results.uri
- return self.downloader.download_to_data(uri)
+ n = self.clients[1].create_node_from_uri(results.get_uri())
+ return download_to_data(n)
d.addCallback(_uploaded)
def _check(newdata):
self.failUnlessEqual(newdata, HELPER_DATA)
u.debug_stash_RemoteEncryptedUploadable = True
d = self.extra_node.upload(u)
def _uploaded(results):
- uri = results.uri
- return self.downloader.download_to_data(uri)
+ n = self.clients[1].create_node_from_uri(results.get_uri())
+ return download_to_data(n)
d.addCallback(_uploaded)
def _check(newdata):
self.failUnlessEqual(newdata, HELPER_DATA)
if convergence is not None:
d.addCallback(_upload_duplicate_with_helper)
+ d.addCallback(fireEventually)
+
def _upload_resumable(res):
DATA = "Data that needs help to upload and gets interrupted" * 1000
u1 = CountingDataUploadable(DATA, convergence=convergence)
u2 = CountingDataUploadable(DATA, convergence=convergence)
# we interrupt the connection after about 5kB by shutting down
- # the helper, then restartingit.
+ # the helper, then restarting it.
u1.interrupt_after = 5000
u1.interrupt_after_d = defer.Deferred()
- u1.interrupt_after_d.addCallback(lambda res:
- self.bounce_client(0))
+ bounced_d = defer.Deferred()
+ def _do_bounce(res):
+ d = self.bounce_client(0)
+ d.addBoth(bounced_d.callback)
+ u1.interrupt_after_d.addCallback(_do_bounce)
# sneak into the helper and reduce its chunk size, so that our
# debug_interrupt will sever the connection on about the fifth
# this same test run, but I'm not currently worried about it.
offloaded.CHKCiphertextFetcher.CHUNK_SIZE = 1000
- d = self.extra_node.upload(u1)
-
- def _should_not_finish(res):
- self.fail("interrupted upload should have failed, not finished"
- " with result %s" % (res,))
- def _interrupted(f):
- f.trap(ConnectionLost, ConnectionDone, DeadReferenceError)
-
- # make sure we actually interrupted it before finishing the
- # file
- self.failUnless(u1.bytes_read < len(DATA),
- "read %d out of %d total" % (u1.bytes_read,
- len(DATA)))
-
- log.msg("waiting for reconnect", level=log.NOISY,
- facility="tahoe.test.test_system")
- # now, we need to give the nodes a chance to notice that this
- # connection has gone away. When this happens, the storage
- # servers will be told to abort their uploads, removing the
- # partial shares. Unfortunately this involves TCP messages
- # going through the loopback interface, and we can't easily
- # predict how long that will take. If it were all local, we
- # could use fireEventually() to stall. Since we don't have
- # the right introduction hooks, the best we can do is use a
- # fixed delay. TODO: this is fragile.
- u1.interrupt_after_d.addCallback(self.stall, 2.0)
- return u1.interrupt_after_d
- d.addCallbacks(_should_not_finish, _interrupted)
+ upload_d = self.extra_node.upload(u1)
+ # The upload will start, and bounce_client() will be called after
+ # about 5kB. bounced_d will fire after bounce_client() finishes
+ # shutting down and restarting the node.
+ d = bounced_d
+ def _bounced(ign):
+ # By this point, the upload should have failed because of the
+ # interruption. upload_d will fire in a moment
+ def _should_not_finish(res):
+ self.fail("interrupted upload should have failed, not"
+ " finished with result %s" % (res,))
+ def _interrupted(f):
+ f.trap(DeadReferenceError)
+ # make sure we actually interrupted it before finishing
+ # the file
+ self.failUnless(u1.bytes_read < len(DATA),
+ "read %d out of %d total" %
+ (u1.bytes_read, len(DATA)))
+ upload_d.addCallbacks(_should_not_finish, _interrupted)
+ return upload_d
+ d.addCallback(_bounced)
def _disconnected(res):
# check to make sure the storage servers aren't still hanging
self.failIf(os.path.exists(incdir) and os.listdir(incdir))
d.addCallback(_disconnected)
- # then we need to give the reconnector a chance to
- # reestablish the connection to the helper.
d.addCallback(lambda res:
- log.msg("wait_for_connections", level=log.NOISY,
+ log.msg("wait_for_helper", level=log.NOISY,
facility="tahoe.test.test_system"))
- d.addCallback(lambda res: self.wait_for_connections())
-
+ # then we need to wait for the extra node to reestablish its
+ # connection to the helper.
+ d.addCallback(lambda ign: self.poll(_has_helper))
d.addCallback(lambda res:
log.msg("uploading again", level=log.NOISY,
d.addCallback(lambda res: self.extra_node.upload(u2))
def _uploaded(results):
- uri = results.uri
+ cap = results.get_uri()
log.msg("Second upload complete", level=log.NOISY,
facility="tahoe.test.test_system")
# this is really bytes received rather than sent, but it's
# convenient and basically measures the same thing
- bytes_sent = results.ciphertext_fetched
+ bytes_sent = results.get_ciphertext_fetched()
+ self.failUnless(isinstance(bytes_sent, (int, long)), bytes_sent)
# We currently don't support resumption of upload if the data is
# encrypted with a random key. (Because that would require us
# Make sure we did not have to read the whole file the
# second time around .
self.failUnless(bytes_sent < len(DATA),
- "resumption didn't save us any work:"
- " read %d bytes out of %d total" %
- (bytes_sent, len(DATA)))
+ "resumption didn't save us any work:"
+ " read %r bytes out of %r total" %
+ (bytes_sent, len(DATA)))
else:
# Make sure we did have to read the whole file the second
# time around -- because the one that we partially uploaded
# earlier was encrypted with a different random key.
self.failIf(bytes_sent < len(DATA),
"resumption saved us some work even though we were using random keys:"
- " read %d bytes out of %d total" %
+ " read %r bytes out of %r total" %
(bytes_sent, len(DATA)))
- return self.downloader.download_to_data(uri)
+ n = self.clients[1].create_node_from_uri(cap)
+ return download_to_data(n)
d.addCallback(_uploaded)
def _check(newdata):
return d
- def _find_shares(self, basedir):
+ def _find_all_shares(self, basedir):
shares = []
for (dirpath, dirnames, filenames) in os.walk(basedir):
if "storage" not in dirpath:
if not filenames:
continue
pieces = dirpath.split(os.sep)
- if pieces[-4] == "storage" and pieces[-3] == "shares":
+ if (len(pieces) >= 5
+ and pieces[-4] == "storage"
+ and pieces[-3] == "shares"):
# we're sitting in .../storage/shares/$START/$SINDEX , and there
# are sharefiles here
assert pieces[-5].startswith("client")
client_num = int(pieces[-5][-1])
storage_index_s = pieces[-1]
- storage_index = storage.si_a2b(storage_index_s)
+ storage_index = si_a2b(storage_index_s)
for sharename in filenames:
shnum = int(sharename)
filename = os.path.join(dirpath, sharename)
return shares
def _corrupt_mutable_share(self, filename, which):
- msf = storage.MutableShareFile(filename)
+ msf = MutableShareFile(filename)
datav = msf.readv([ (0, 1000000) ])
final_share = datav[0]
assert len(final_share) < 1000000 # ought to be truncated
def test_mutable(self):
self.basedir = "system/SystemTest/test_mutable"
DATA = "initial contents go here." # 25 bytes % 3 != 0
+ DATA_uploadable = MutableData(DATA)
NEWDATA = "new contents yay"
+ NEWDATA_uploadable = MutableData(NEWDATA)
NEWERDATA = "this is getting old"
+ NEWERDATA_uploadable = MutableData(NEWERDATA)
d = self.set_up_nodes(use_key_generator=True)
def _create_mutable(res):
c = self.clients[0]
log.msg("starting create_mutable_file")
- d1 = c.create_mutable_file(DATA)
+ d1 = c.create_mutable_file(DATA_uploadable)
def _done(res):
log.msg("DONE: %s" % (res,))
self._mutable_node_1 = res
- uri = res.get_uri()
d1.addCallback(_done)
return d1
d.addCallback(_create_mutable)
def _test_debug(res):
# find a share. It is important to run this while there is only
# one slot in the grid.
- shares = self._find_shares(self.basedir)
+ shares = self._find_all_shares(self.basedir)
(client_num, storage_index, filename, shnum) = shares[0]
log.msg("test_system.SystemTest.test_mutable._test_debug using %s"
% filename)
peerid = idlib.nodeid_b2a(self.clients[client_num].nodeid)
self.failUnless(" WE for nodeid: %s\n" % peerid in output)
self.failUnless(" num_extra_leases: 0\n" in output)
- # the pubkey size can vary by a byte, so the container might
- # be a bit larger on some runs.
- m = re.search(r'^ container_size: (\d+)$', output, re.M)
- self.failUnless(m)
- container_size = int(m.group(1))
- self.failUnless(2037 <= container_size <= 2049, container_size)
- m = re.search(r'^ data_length: (\d+)$', output, re.M)
- self.failUnless(m)
- data_length = int(m.group(1))
- self.failUnless(2037 <= data_length <= 2049, data_length)
self.failUnless(" secrets are for nodeid: %s\n" % peerid
in output)
self.failUnless(" SDMF contents:\n" in output)
self.failUnlessEqual(res, DATA)
# replace the data
log.msg("starting replace1")
- d1 = newnode.overwrite(NEWDATA)
+ d1 = newnode.overwrite(NEWDATA_uploadable)
d1.addCallback(lambda res: newnode.download_best_version())
return d1
d.addCallback(_check_download_3)
newnode2 = self.clients[3].create_node_from_uri(uri)
self._newnode3 = self.clients[3].create_node_from_uri(uri)
log.msg("starting replace2")
- d1 = newnode1.overwrite(NEWERDATA)
+ d1 = newnode1.overwrite(NEWERDATA_uploadable)
d1.addCallback(lambda res: newnode2.download_best_version())
return d1
d.addCallback(_check_download_4)
def _corrupt_shares(res):
# run around and flip bits in all but k of the shares, to test
# the hash checks
- shares = self._find_shares(self.basedir)
+ shares = self._find_all_shares(self.basedir)
## sort by share number
#shares.sort( lambda a,b: cmp(a[3], b[3]) )
where = dict([ (shnum, filename)
def _check_empty_file(res):
# make sure we can create empty files, this usually screws up the
# segsize math
- d1 = self.clients[2].create_mutable_file("")
+ d1 = self.clients[2].create_mutable_file(MutableData(""))
d1.addCallback(lambda newnode: newnode.download_best_version())
d1.addCallback(lambda res: self.failUnlessEqual("", res))
return d1
d.addCallback(_check_empty_file)
- d.addCallback(lambda res: self.clients[0].create_empty_dirnode())
+ d.addCallback(lambda res: self.clients[0].create_dirnode())
def _created_dirnode(dnode):
log.msg("_created_dirnode(%s)" % (dnode,))
d1 = dnode.list()
self.key_generator_svc.key_generator.pool_size + size_delta)
d.addCallback(check_kg_poolsize, 0)
- d.addCallback(lambda junk: self.clients[3].create_mutable_file('hello, world'))
+ d.addCallback(lambda junk:
+ self.clients[3].create_mutable_file(MutableData('hello, world')))
d.addCallback(check_kg_poolsize, -1)
- d.addCallback(lambda junk: self.clients[3].create_empty_dirnode())
+ d.addCallback(lambda junk: self.clients[3].create_dirnode())
d.addCallback(check_kg_poolsize, -2)
# use_helper induces use of clients[3], which is the using-key_gen client
- d.addCallback(lambda junk: self.POST("uri", use_helper=True, t="mkdir", name='george'))
+ d.addCallback(lambda junk:
+ self.POST("uri?t=mkdir&name=george", use_helper=True))
d.addCallback(check_kg_poolsize, -3)
return d
- # The default 120 second timeout went off when running it under valgrind
- # on my old Windows laptop, so I'm bumping up the timeout.
- test_mutable.timeout = 240
def flip_bit(self, good):
return good[:-1] + chr(ord(good[-1]) ^ 0x01)
def mangle_uri(self, gooduri):
# change the key, which changes the storage index, which means we'll
# be asking about the wrong file, so nobody will have any shares
- u = IFileURI(gooduri)
+ u = uri.from_string(gooduri)
u2 = uri.CHKFileURI(key=self.flip_bit(u.key),
uri_extension_hash=u.uri_extension_hash,
needed_shares=u.needed_shares,
# the key, which should cause the download to fail the post-download
# plaintext_hash check.
- def test_vdrive(self):
- self.basedir = "system/SystemTest/test_vdrive"
+ def test_filesystem(self):
+ self.basedir = "system/SystemTest/test_filesystem"
self.data = LARGE_DATA
d = self.set_up_nodes(use_stats_gatherer=True)
+ def _new_happy_semantics(ign):
+ for c in self.clients:
+ c.encoding_params['happy'] = 1
+ d.addCallback(_new_happy_semantics)
d.addCallback(self._test_introweb)
d.addCallback(self.log, "starting publish")
d.addCallback(self._do_publish1)
# P/test_put/ (empty)
d.addCallback(self._test_checker)
return d
- test_vdrive.timeout = 1100
def _test_introweb(self, res):
d = getPage(self.introweb_url, method="GET", followRedirect=True)
def _check(res):
try:
- self.failUnless("allmydata: %s" % str(allmydata.__version__)
- in res)
- self.failUnless("Announcement Summary: storage: 5, stub_client: 5" in res)
+ self.failUnless("%s: %s" % (allmydata.__appname__, allmydata.__version__) in res)
+ verstr = str(allmydata.__version__)
+
+ # The Python "rational version numbering" convention
+ # disallows "-r$REV" but allows ".post$REV"
+ # instead. Eventually we'll probably move to
+ # that. When we do, this test won't go red:
+ ix = verstr.rfind('-r')
+ if ix != -1:
+ altverstr = verstr[:ix] + '.post' + verstr[ix+2:]
+ else:
+ ix = verstr.rfind('.post')
+ if ix != -1:
+ altverstr = verstr[:ix] + '-r' + verstr[ix+5:]
+ else:
+ altverstr = verstr
+
+ appverstr = "%s: %s" % (allmydata.__appname__, verstr)
+ newappverstr = "%s: %s" % (allmydata.__appname__, altverstr)
+
+ self.failUnless((appverstr in res) or (newappverstr in res), (appverstr, newappverstr, res))
+ self.failUnless("Announcement Summary: storage: 5" in res)
self.failUnless("Subscription Summary: storage: 5" in res)
+ self.failUnless("tahoe.css" in res)
except unittest.FailTest:
print
print "GET %s output was:" % self.introweb_url
print res
raise
d.addCallback(_check)
+ # make sure it serves the CSS too
+ d.addCallback(lambda res:
+ getPage(self.introweb_url+"tahoe.css", method="GET"))
d.addCallback(lambda res:
getPage(self.introweb_url + "?t=json",
method="GET", followRedirect=True))
self.failUnlessEqual(data["subscription_summary"],
{"storage": 5})
self.failUnlessEqual(data["announcement_summary"],
- {"storage": 5, "stub_client": 5})
- self.failUnlessEqual(data["announcement_distinct_hosts"],
- {"storage": 1, "stub_client": 1})
+ {"storage": 5})
except unittest.FailTest:
print
print "GET %s?t=json output was:" % self.introweb_url
def _do_publish1(self, res):
ut = upload.Data(self.data, convergence=None)
c0 = self.clients[0]
- d = c0.create_empty_dirnode()
+ d = c0.create_dirnode()
def _made_root(new_dirnode):
self._root_directory_uri = new_dirnode.get_uri()
return c0.create_node_from_uri(self._root_directory_uri)
d.addCallback(_made_root)
- d.addCallback(lambda root: root.create_empty_directory(u"subdir1"))
+ d.addCallback(lambda root: root.create_subdirectory(u"subdir1"))
def _made_subdir1(subdir1_node):
self._subdir1_node = subdir1_node
d1 = subdir1_node.add_file(u"mydata567", ut)
def _do_publish2(self, res):
ut = upload.Data(self.data, convergence=None)
- d = self._subdir1_node.create_empty_directory(u"subdir2")
+ d = self._subdir1_node.create_subdirectory(u"subdir2")
d.addCallback(lambda subdir2: subdir2.add_file(u"mydata992", ut))
return d
def _do_publish_private(self, res):
self.smalldata = "sssh, very secret stuff"
ut = upload.Data(self.smalldata, convergence=None)
- d = self.clients[0].create_empty_dirnode()
+ d = self.clients[0].create_dirnode()
d.addCallback(self.log, "GOT private directory")
def _got_new_dir(privnode):
rootnode = self.clients[0].create_node_from_uri(self._root_directory_uri)
- d1 = privnode.create_empty_directory(u"personal")
+ d1 = privnode.create_subdirectory(u"personal")
d1.addCallback(self.log, "made P/personal")
d1.addCallback(lambda node: node.add_file(u"sekrit data", ut))
d1.addCallback(self.log, "made P/personal/sekrit data")
d1.addCallback(lambda res: rootnode.get_child_at_path([u"subdir1", u"subdir2"]))
def _got_s2(s2node):
- d2 = privnode.set_uri(u"s2-rw", s2node.get_uri())
- d2.addCallback(lambda node: privnode.set_uri(u"s2-ro", s2node.get_readonly_uri()))
+ d2 = privnode.set_uri(u"s2-rw", s2node.get_uri(),
+ s2node.get_readonly_uri())
+ d2.addCallback(lambda node:
+ privnode.set_uri(u"s2-ro",
+ s2node.get_readonly_uri(),
+ s2node.get_readonly_uri()))
return d2
d1.addCallback(_got_s2)
d1.addCallback(lambda res: privnode)
d.addCallback(self.log, "check_publish1 got /")
d.addCallback(lambda root: root.get(u"subdir1"))
d.addCallback(lambda subdir1: subdir1.get(u"mydata567"))
- d.addCallback(lambda filenode: filenode.download_to_data())
+ d.addCallback(lambda filenode: download_to_data(filenode))
d.addCallback(self.log, "get finished")
def _get_done(data):
self.failUnlessEqual(data, self.data)
d.addCallback(lambda dirnode:
self.failUnless(IDirectoryNode.providedBy(dirnode)))
d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
- d.addCallback(lambda filenode: filenode.download_to_data())
+ d.addCallback(lambda filenode: download_to_data(filenode))
d.addCallback(lambda data: self.failUnlessEqual(data, self.data))
d.addCallback(lambda res: rootnode.get_child_at_path(u"subdir1/mydata567"))
return self._private_node.get_child_at_path(path)
d.addCallback(lambda res: get_path(u"personal/sekrit data"))
- d.addCallback(lambda filenode: filenode.download_to_data())
+ d.addCallback(lambda filenode: download_to_data(filenode))
d.addCallback(lambda data: self.failUnlessEqual(data, self.smalldata))
d.addCallback(lambda res: get_path(u"s2-rw"))
d.addCallback(lambda dirnode: self.failUnless(dirnode.is_mutable()))
d1.addCallback(lambda res: dirnode.list())
d1.addCallback(self.log, "dirnode.list")
- d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "mkdir(nope)", None, dirnode.create_empty_directory, u"nope"))
+ d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mkdir(nope)", None, dirnode.create_subdirectory, u"nope"))
d1.addCallback(self.log, "doing add_file(ro)")
ut = upload.Data("I will disappear, unrecorded and unobserved. The tragedy of my demise is made more poignant by its silence, but this beauty is not for you to ever know.", convergence="99i-p1x4-xd4-18yc-ywt-87uu-msu-zo -- completely and totally unguessable string (unless you read this)")
- d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut))
+ d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "add_file(nope)", None, dirnode.add_file, u"hope", ut))
d1.addCallback(self.log, "doing get(ro)")
d1.addCallback(lambda res: dirnode.get(u"mydata992"))
self.failUnless(IFileNode.providedBy(filenode)))
d1.addCallback(self.log, "doing delete(ro)")
- d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "delete(nope)", None, dirnode.delete, u"mydata992"))
+ d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "delete(nope)", None, dirnode.delete, u"mydata992"))
- d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "set_uri(nope)", None, dirnode.set_uri, u"hopeless", self.uri))
+ d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "set_uri(nope)", None, dirnode.set_uri, u"hopeless", self.uri, self.uri))
d1.addCallback(lambda res: self.shouldFail2(NoSuchChildError, "get(missing)", "missing", dirnode.get, u"missing"))
personal = self._personal_node
- d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "mv from readonly", None, dirnode.move_child_to, u"mydata992", personal, u"nope"))
+ d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv from readonly", None, dirnode.move_child_to, u"mydata992", personal, u"nope"))
d1.addCallback(self.log, "doing move_child_to(ro)2")
- d1.addCallback(lambda res: self.shouldFail2(NotMutableError, "mv to readonly", None, personal.move_child_to, u"sekrit data", dirnode, u"nope"))
+ d1.addCallback(lambda res: self.shouldFail2(NotWriteableError, "mv to readonly", None, personal.move_child_to, u"sekrit data", dirnode, u"nope"))
d1.addCallback(self.log, "finished with _got_s2ro")
return d1
return getPage(url, method="GET", followRedirect=followRedirect)
def POST(self, urlpath, followRedirect=False, use_helper=False, **fields):
- if use_helper:
- url = self.helper_webish_url + urlpath
- else:
- url = self.webish_url + urlpath
sepbase = "boogabooga"
sep = "--" + sepbase
form = []
form.append(str(value))
form.append(sep)
form[-1] += "--"
- body = "\r\n".join(form) + "\r\n"
- headers = {"content-type": "multipart/form-data; boundary=%s" % sepbase,
- }
- return getPage(url, method="POST", postdata=body,
- headers=headers, followRedirect=followRedirect)
+ body = ""
+ headers = {}
+ if fields:
+ body = "\r\n".join(form) + "\r\n"
+ headers["content-type"] = "multipart/form-data; boundary=%s" % sepbase
+ return self.POST2(urlpath, body, headers, followRedirect, use_helper)
+
+ def POST2(self, urlpath, body="", headers={}, followRedirect=False,
+ use_helper=False):
+ if use_helper:
+ url = self.helper_webish_url + urlpath
+ else:
+ url = self.webish_url + urlpath
+ return getPage(url, method="POST", postdata=body, headers=headers,
+ followRedirect=followRedirect)
def _test_web(self, res):
base = self.webish_url
public = "uri/" + self._root_directory_uri
d = getPage(base)
def _got_welcome(page):
- expected = "Connected Storage Servers: <span>%d</span>" % (self.numclients)
- self.failUnless(expected in page,
- "I didn't see the right 'connected storage servers'"
- " message in: %s" % page
- )
- expected = "My nodeid: <span>%s</span>" % (b32encode(self.clients[0].nodeid).lower(),)
- self.failUnless(expected in page,
- "I didn't see the right 'My nodeid' message "
- "in: %s" % page)
+ html = page.replace('\n', ' ')
+ connected_re = r'Connected to <span>%d</span>\s*of <span>%d</span> known storage servers' % (self.numclients, self.numclients)
+ self.failUnless(re.search(connected_re, html),
+ "I didn't see the right '%s' message in:\n%s" % (connected_re, page))
+ # nodeids/tubids don't have any regexp-special characters
+ nodeid_re = r'<th>Node ID:</th>\s*<td title="TubID: %s">%s</td>' % (
+ self.clients[0].get_long_tubid(), self.clients[0].get_long_nodeid())
+ self.failUnless(re.search(nodeid_re, html),
+ "I didn't see the right '%s' message in:\n%s" % (nodeid_re, page))
self.failUnless("Helper: 0 active uploads" in page)
d.addCallback(_got_welcome)
d.addCallback(self.log, "done with _got_welcome")
# get the welcome page from the node that uses the helper too
d.addCallback(lambda res: getPage(self.helper_webish_url))
def _got_welcome_helper(page):
- self.failUnless("Connected to helper?: <span>yes</span>" in page,
- page)
- self.failUnless("Not running helper" in page)
+ html = page.replace('\n', ' ')
+ self.failUnless(re.search('<img (src="img/connected-yes.png" |alt="Connected" ){2}/>', html), page)
+ self.failUnlessIn("Not running helper", page)
d.addCallback(_got_welcome_helper)
d.addCallback(lambda res: getPage(base + public))
d.addCallback(lambda res: getPage(base + public + "/subdir1"))
def _got_subdir1(page):
# there ought to be an href for our file
- self.failUnless(("<td>%d</td>" % len(self.data)) in page)
+ self.failUnlessIn('<td align="right">%d</td>' % len(self.data), page)
self.failUnless(">mydata567</a>" in page)
d.addCallback(_got_subdir1)
d.addCallback(self.log, "done with _got_subdir1")
d.addCallback(self.failUnlessEqual, "new.txt contents")
# and again with something large enough to use multiple segments,
# and hopefully trigger pauseProducing too
+ def _new_happy_semantics(ign):
+ for c in self.clients:
+ # these get reset somewhere? Whatever.
+ c.encoding_params['happy'] = 1
+ d.addCallback(_new_happy_semantics)
d.addCallback(lambda res: self.PUT(public + "/subdir3/big.txt",
"big" * 500000)) # 1.5MB
d.addCallback(lambda res: self.GET(public + "/subdir3/big.txt"))
def _got_status(res):
# find an interesting upload and download to look at. LIT files
# are not interesting.
- for ds in self.clients[0].list_all_download_statuses():
+ h = self.clients[0].get_history()
+ for ds in h.list_all_download_statuses():
if ds.get_size() > 200:
self._down_status = ds.get_counter()
- for us in self.clients[0].list_all_upload_statuses():
+ for us in h.list_all_upload_statuses():
if us.get_size() > 200:
self._up_status = us.get_counter()
- rs = list(self.clients[0].list_all_retrieve_statuses())[0]
+ rs = list(h.list_all_retrieve_statuses())[0]
self._retrieve_status = rs.get_counter()
- ps = list(self.clients[0].list_all_publish_statuses())[0]
+ ps = list(h.list_all_publish_statuses())[0]
self._publish_status = ps.get_counter()
- us = list(self.clients[0].list_all_mapupdate_statuses())[0]
+ us = list(h.list_all_mapupdate_statuses())[0]
self._update_status = us.get_counter()
# and that there are some upload- and download- status pages
return self.GET("status/publish-%d" % self._publish_status)
d.addCallback(_got_update)
def _got_publish(res):
+ self.failUnlessIn("Publish Results", res)
return self.GET("status/retrieve-%d" % self._retrieve_status)
d.addCallback(_got_publish)
+ def _got_retrieve(res):
+ self.failUnlessIn("Retrieve Results", res)
+ d.addCallback(_got_retrieve)
# check that the helper status page exists
d.addCallback(lambda res:
# itself) doesn't explode when you ask for its status
d.addCallback(lambda res: getPage(self.helper_webish_url + "status/"))
def _got_non_helper_status(res):
- self.failUnless("Upload and Download Status" in res)
+ self.failUnlessIn("Recent and Active Operations", res)
d.addCallback(_got_non_helper_status)
# or for helper status with t=json
# see if the statistics page exists
d.addCallback(lambda res: self.GET("statistics"))
def _got_stats(res):
- self.failUnless("Node Statistics" in res)
- self.failUnless(" 'downloader.files_downloaded': 5," in res, res)
+ self.failUnlessIn("Operational Statistics", res)
+ self.failUnlessIn(" 'downloader.files_downloaded': 5,", res)
d.addCallback(_got_stats)
d.addCallback(lambda res: self.GET("statistics?t=json"))
def _got_stats_json(res):
# exercise some of the diagnostic tools in runner.py
# find a share
- for (dirpath, dirnames, filenames) in os.walk(self.basedir):
+ for (dirpath, dirnames, filenames) in os.walk(unicode(self.basedir)):
if "storage" not in dirpath:
continue
if not filenames:
continue
pieces = dirpath.split(os.sep)
- if pieces[-4] == "storage" and pieces[-3] == "shares":
+ if (len(pieces) >= 4
+ and pieces[-4] == "storage"
+ and pieces[-3] == "shares"):
# we're sitting in .../storage/shares/$START/$SINDEX , and there
# are sharefiles here
filename = os.path.join(dirpath, filenames[0])
if magic == '\x00\x00\x00\x01':
break
else:
- self.fail("unable to find any uri_extension files in %s"
+ self.fail("unable to find any uri_extension files in %r"
% self.basedir)
- log.msg("test_system.SystemTest._test_runner using %s" % filename)
+ log.msg("test_system.SystemTest._test_runner using %r" % filename)
out,err = StringIO(), StringIO()
rc = runner.runner(["debug", "dump-share", "--offsets",
- filename],
+ unicode_to_argv(filename)],
stdout=out, stderr=err)
output = out.getvalue()
self.failUnlessEqual(rc, 0)
# we only upload a single file, so we can assert some things about
# its size and shares.
- self.failUnless(("share filename: %s" % filename) in output)
- self.failUnless("size: %d\n" % len(self.data) in output)
- self.failUnless("num_segments: 1\n" in output)
+ self.failUnlessIn("share filename: %s" % quote_output(abspath_expanduser_unicode(filename)), output)
+ self.failUnlessIn("size: %d\n" % len(self.data), output)
+ self.failUnlessIn("num_segments: 1\n", output)
# segment_size is always a multiple of needed_shares
- self.failUnless("segment_size: %d\n" % mathutil.next_multiple(len(self.data), 3) in output)
- self.failUnless("total_shares: 10\n" in output)
+ self.failUnlessIn("segment_size: %d\n" % mathutil.next_multiple(len(self.data), 3), output)
+ self.failUnlessIn("total_shares: 10\n", output)
# keys which are supposed to be present
for key in ("size", "num_segments", "segment_size",
"needed_shares", "total_shares",
#"plaintext_hash", "plaintext_root_hash",
"crypttext_hash", "crypttext_root_hash",
"share_root_hash", "UEB_hash"):
- self.failUnless("%s: " % key in output, key)
- self.failUnless(" verify-cap: URI:CHK-Verifier:" in output)
+ self.failUnlessIn("%s: " % key, output)
+ self.failUnlessIn(" verify-cap: URI:CHK-Verifier:", output)
# now use its storage index to find the other shares using the
# 'find-shares' tool
sharedir, shnum = os.path.split(filename)
storagedir, storage_index_s = os.path.split(sharedir)
+ storage_index_s = str(storage_index_s)
out,err = StringIO(), StringIO()
nodedirs = [self.getdir("client%d" % i) for i in range(self.numclients)]
cmd = ["debug", "find-shares", storage_index_s] + nodedirs
d.addCallback(self._test_control2, control_furl_file)
return d
def _test_control2(self, rref, filename):
- d = rref.callRemote("upload_from_file_to_uri", filename, convergence=None)
- downfile = os.path.join(self.basedir, "control.downfile")
- d.addCallback(lambda uri:
- rref.callRemote("download_from_uri_to_file",
- uri, downfile))
- def _check(res):
- self.failUnlessEqual(res, downfile)
- data = open(downfile, "r").read()
- expected_data = open(filename, "r").read()
- self.failUnlessEqual(data, expected_data)
- d.addCallback(_check)
+ d = defer.succeed(None)
d.addCallback(lambda res: rref.callRemote("speed_test", 1, 200, False))
- if sys.platform == "linux2":
+ if sys.platform in ("linux2", "linux3"):
d.addCallback(lambda res: rref.callRemote("get_memory_usage"))
d.addCallback(lambda res: rref.callRemote("measure_peer_response_time"))
return d
# network calls)
private_uri = self._private_node.get_uri()
- some_uri = self._root_directory_uri
client0_basedir = self.getdir("client0")
nodeargs = [
"--node-directory", client0_basedir,
]
- TESTDATA = "I will not write the same thing over and over.\n" * 100
d = defer.succeed(None)
def run(ignored, verb, *args, **kwargs):
stdin = kwargs.get("stdin", "")
- newargs = [verb] + nodeargs + list(args)
+ newargs = nodeargs + [verb] + list(args)
return self._run_cli(newargs, stdin=stdin)
def _check_ls((out,err), expected_children, unexpected_children=[]):
d.addCallback(run, "list-aliases")
def _check_aliases_1((out,err)):
self.failUnlessEqual(err, "")
- self.failUnlessEqual(out, "tahoe: %s\n" % private_uri)
+ self.failUnlessEqual(out.strip(" \n"), "tahoe: %s" % private_uri)
d.addCallback(_check_aliases_1)
# now that that's out of the way, remove root_dir.cap and work with
d.addCallback(run, "put", files[1], "subdir/tahoe-file1")
# tahoe put bar tahoe:FOO
d.addCallback(run, "put", files[2], "tahoe:file2")
- d.addCallback(run, "put", "--mutable", files[3], "tahoe:file3")
+ d.addCallback(run, "put", "--format=SDMF", files[3], "tahoe:file3")
def _check_put_mutable((out,err)):
self._mutable_file3_uri = out.strip()
d.addCallback(_check_put_mutable)
open(os.path.join(sdn2, "rfile5"), "wb").write("rfile5")
# from disk into tahoe
- d.addCallback(run, "cp", "-r", dn, "tahoe:dir1")
+ d.addCallback(run, "cp", "-r", dn, "tahoe:")
d.addCallback(run, "ls")
d.addCallback(_check_ls, ["dir1"])
d.addCallback(run, "ls", "dir1")
def _check_cp_r_out((out,err)):
def _cmp(name):
old = open(os.path.join(dn, name), "rb").read()
- newfn = os.path.join(dn_copy, name)
+ newfn = os.path.join(dn_copy, "dir1", name)
self.failUnless(os.path.exists(newfn))
new = open(newfn, "rb").read()
self.failUnlessEqual(old, new)
# and copy it a second time, which ought to overwrite the same files
d.addCallback(run, "cp", "-r", "tahoe:dir1", dn_copy)
+ # and again, only writing filecaps
+ dn_copy2 = os.path.join(self.basedir, "dir1-copy-capsonly")
+ d.addCallback(run, "cp", "-r", "--caps-only", "tahoe:dir1", dn_copy2)
+ def _check_capsonly((out,err)):
+ # these should all be LITs
+ x = open(os.path.join(dn_copy2, "dir1", "subdir2", "rfile4")).read()
+ y = uri.from_string_filenode(x)
+ self.failUnlessEqual(y.data, "rfile4")
+ d.addCallback(_check_capsonly)
+
# and tahoe-to-tahoe
d.addCallback(run, "cp", "-r", "tahoe:dir1", "tahoe:dir1-copy")
d.addCallback(run, "ls")
d.addCallback(_check_ls, ["dir1", "dir1-copy"])
- d.addCallback(run, "ls", "dir1-copy")
+ d.addCallback(run, "ls", "dir1-copy/dir1")
d.addCallback(_check_ls, ["rfile1", "rfile2", "rfile3", "subdir2"],
["rfile4", "rfile5"])
- d.addCallback(run, "ls", "tahoe:dir1-copy/subdir2")
+ d.addCallback(run, "ls", "tahoe:dir1-copy/dir1/subdir2")
d.addCallback(_check_ls, ["rfile4", "rfile5"],
["rfile1", "rfile2", "rfile3"])
- d.addCallback(run, "get", "dir1-copy/subdir2/rfile4")
+ d.addCallback(run, "get", "dir1-copy/dir1/subdir2/rfile4")
d.addCallback(_check_stdout_against, data="rfile4")
# and copy it a second time, which ought to overwrite the same files
# tahoe_ls doesn't currently handle the error correctly: it tries to
# JSON-parse a traceback.
## def _ls_missing(res):
-## argv = ["ls"] + nodeargs + ["bogus"]
+## argv = nodeargs + ["ls", "bogus"]
## return self._run_cli(argv)
## d.addCallback(_ls_missing)
## def _check_ls_missing((out,err)):
return d
+    def test_filesystem_with_cli_in_subprocess(self):
+        # We do this in a separate test so that test_filesystem doesn't skip if we can't run bin/tahoe.
+
+        self.basedir = "system/SystemTest/test_filesystem_with_cli_in_subprocess"
+        d = self.set_up_nodes()
+        def _new_happy_semantics(ign):
+            # relax servers-of-happiness so a single local grid suffices
+            for c in self.clients:
+                c.encoding_params['happy'] = 1
+        d.addCallback(_new_happy_semantics)
+
+        def _run_in_subprocess(ignored, verb, *args, **kwargs):
+            # run 'bin/tahoe <verb> ...' against client0's node directory
+            stdin = kwargs.get("stdin")
+            env = kwargs.get("env")
+            newargs = ["--node-directory", self.getdir("client0"), verb] + list(args)
+            return self.run_bintahoe(newargs, stdin=stdin, env=env)
+
+        def _check_succeeded(res, check_stderr=True):
+            out, err, rc_or_sig = res
+            self.failUnlessEqual(rc_or_sig, 0, str(res))
+            if check_stderr:
+                self.failUnlessEqual(err, "")
+
+        d.addCallback(_run_in_subprocess, "create-alias", "newalias")
+        d.addCallback(_check_succeeded)
+
+        STDIN_DATA = "This is the file to upload from stdin."
+        d.addCallback(_run_in_subprocess, "put", "-", "newalias:tahoe-file", stdin=STDIN_DATA)
+        d.addCallback(_check_succeeded, check_stderr=False)
+
+        def _mv_with_http_proxy(ign):
+            # Use a *copy* of the environment: mutating os.environ directly
+            # would leak the bogus proxy setting into this test process and
+            # every test that runs after it. The copy still reaches the
+            # subprocess because it is passed explicitly via env=.
+            env = os.environ.copy()
+            env['http_proxy'] = env['HTTP_PROXY'] = "http://127.0.0.0:12345" # invalid address
+            return _run_in_subprocess(None, "mv", "newalias:tahoe-file", "newalias:tahoe-moved", env=env)
+        d.addCallback(_mv_with_http_proxy)
+        d.addCallback(_check_succeeded)
+
+        d.addCallback(_run_in_subprocess, "ls", "newalias:")
+        def _check_ls(res):
+            out, err, rc_or_sig = res
+            self.failUnlessEqual(rc_or_sig, 0, str(res))
+            self.failUnlessEqual(err, "", str(res))
+            self.failUnlessIn("tahoe-moved", out)
+            self.failIfIn("tahoe-file", out)
+        d.addCallback(_check_ls)
+        return d
+
+
+    def test_debug_trial(self):
+        # Exercise 'bin/tahoe debug trial' against allmydata.test.trialtest,
+        # a module of tests with known outcomes, and verify that skips,
+        # todos, failures, and errors all show up with the right labels.
+        def _check_for_line(lines, result, test):
+            # pass iff some output line mentions both the result tag
+            # (e.g. "[FAIL]") and the test name
+            for l in lines:
+                if result in l and test in l:
+                    return
+            self.fail("output (prefixed with '##') does not have a line containing both %r and %r:\n## %s"
+                      % (result, test, "\n## ".join(lines)))
+
+        def _check_for_outcome(lines, out, outcome):
+            # pass iff the overall trial verdict ("PASSED"/"FAILED") appears
+            self.failUnlessIn(outcome, out, "output (prefixed with '##') does not contain %r:\n## %s"
+                                            % (outcome, "\n## ".join(lines)))
+
+        d = self.run_bintahoe(['debug', 'trial', '--reporter=verbose',
+                               'allmydata.test.trialtest'])
+        def _check_failure( (out, err, rc) ):
+            # the module deliberately contains failing tests, so rc must be 1
+            self.failUnlessEqual(rc, 1)
+            lines = out.split('\n')
+            _check_for_line(lines, "[SKIPPED]", "test_skip")
+            _check_for_line(lines, "[TODO]", "test_todo")
+            _check_for_line(lines, "[FAIL]", "test_fail")
+            _check_for_line(lines, "[ERROR]", "test_deferred_error")
+            _check_for_line(lines, "[ERROR]", "test_error")
+            _check_for_outcome(lines, out, "FAILED")
+        d.addCallback(_check_failure)
+
+        # the --quiet argument regression-tests a problem in finding which arguments to pass to trial
+        d.addCallback(lambda ign: self.run_bintahoe(['--quiet', 'debug', 'trial', '--reporter=verbose',
+                                                     'allmydata.test.trialtest.Success']))
+        def _check_success( (out, err, rc) ):
+            # only the Success class is run here, so rc must be 0
+            self.failUnlessEqual(rc, 0)
+            lines = out.split('\n')
+            _check_for_line(lines, "[SKIPPED]", "test_skip")
+            _check_for_line(lines, "[TODO]", "test_todo")
+            _check_for_outcome(lines, out, "PASSED")
+        d.addCallback(_check_success)
+        return d
+
+
def _run_cli(self, argv, stdin=""):
#print "CLI:", argv
stdout, stderr = StringIO(), StringIO()
d.addCallback(lambda res: self._personal_node.get(u"big file"))
def _got_chk_filenode(n):
- self.failUnless(isinstance(n, filenode.FileNode))
+ self.failUnless(isinstance(n, ImmutableFileNode))
d = n.check(Monitor())
def _check_filenode_results(r):
self.failUnless(r.is_healthy())
d.addCallback(lambda res: self._personal_node.get(u"sekrit data"))
def _got_lit_filenode(n):
- self.failUnless(isinstance(n, filenode.LiteralFileNode))
+ self.failUnless(isinstance(n, LiteralFileNode))
d = n.check(Monitor())
def _check_lit_filenode_results(r):
self.failUnlessEqual(r, None)
return d
-class MutableChecker(SystemTestMixin, unittest.TestCase, ErrorMixin):
-
- def _run_cli(self, argv):
- stdout, stderr = StringIO(), StringIO()
- # this can only do synchronous operations
- assert argv[0] == "debug"
- runner.runner(argv, run_by_human=False, stdout=stdout, stderr=stderr)
- return stdout.getvalue()
-
- def test_good(self):
- self.basedir = self.mktemp()
- d = self.set_up_nodes()
- CONTENTS = "a little bit of data"
- d.addCallback(lambda res: self.clients[0].create_mutable_file(CONTENTS))
- def _created(node):
- self.node = node
- si = self.node.get_storage_index()
- d.addCallback(_created)
- # now make sure the webapi verifier sees no problems
- def _do_check(res):
- url = (self.webish_url +
- "uri/%s" % urllib.quote(self.node.get_uri()) +
- "?t=check&verify=true")
- return getPage(url, method="POST")
- d.addCallback(_do_check)
- def _got_results(out):
- self.failUnless("<span>Healthy : Healthy</span>" in out, out)
- self.failUnless("Recoverable Versions: 10*seq1-" in out, out)
- self.failIf("Not Healthy!" in out, out)
- self.failIf("Unhealthy" in out, out)
- self.failIf("Corrupt Shares" in out, out)
- d.addCallback(_got_results)
- d.addErrback(self.explain_web_error)
- return d
-
- def test_corrupt(self):
- self.basedir = self.mktemp()
- d = self.set_up_nodes()
- CONTENTS = "a little bit of data"
- d.addCallback(lambda res: self.clients[0].create_mutable_file(CONTENTS))
- def _created(node):
- self.node = node
- si = self.node.get_storage_index()
- out = self._run_cli(["debug", "find-shares", base32.b2a(si),
- self.clients[1].basedir])
- files = out.split("\n")
- # corrupt one of them, using the CLI debug command
- f = files[0]
- shnum = os.path.basename(f)
- nodeid = self.clients[1].nodeid
- nodeid_prefix = idlib.shortnodeid_b2a(nodeid)
- self.corrupt_shareid = "%s-sh%s" % (nodeid_prefix, shnum)
- out = self._run_cli(["debug", "corrupt-share", files[0]])
- d.addCallback(_created)
- # now make sure the webapi verifier notices it
- def _do_check(res):
- url = (self.webish_url +
- "uri/%s" % urllib.quote(self.node.get_uri()) +
- "?t=check&verify=true")
- return getPage(url, method="POST")
- d.addCallback(_do_check)
- def _got_results(out):
- self.failUnless("Not Healthy!" in out, out)
- self.failUnless("Unhealthy: best version has only 9 shares (encoding is 3-of-10)" in out, out)
- self.failUnless("Corrupt Shares:" in out, out)
- d.addCallback(_got_results)
-
- # now make sure the webapi repairer can fix it
- def _do_repair(res):
- url = (self.webish_url +
- "uri/%s" % urllib.quote(self.node.get_uri()) +
- "?t=check&verify=true&repair=true")
- return getPage(url, method="POST")
- d.addCallback(_do_repair)
- def _got_repair_results(out):
- self.failUnless("<div>Repair successful</div>" in out, out)
- d.addCallback(_got_repair_results)
- d.addCallback(_do_check)
- def _got_postrepair_results(out):
- self.failIf("Not Healthy!" in out, out)
- self.failUnless("Recoverable Versions: 10*seq" in out, out)
- d.addCallback(_got_postrepair_results)
- d.addErrback(self.explain_web_error)
-
- return d
-
- def test_delete_share(self):
- self.basedir = self.mktemp()
- d = self.set_up_nodes()
- CONTENTS = "a little bit of data"
- d.addCallback(lambda res: self.clients[0].create_mutable_file(CONTENTS))
- def _created(node):
- self.node = node
- si = self.node.get_storage_index()
- out = self._run_cli(["debug", "find-shares", base32.b2a(si),
- self.clients[1].basedir])
- files = out.split("\n")
- # corrupt one of them, using the CLI debug command
- f = files[0]
- shnum = os.path.basename(f)
- nodeid = self.clients[1].nodeid
- nodeid_prefix = idlib.shortnodeid_b2a(nodeid)
- self.corrupt_shareid = "%s-sh%s" % (nodeid_prefix, shnum)
- os.unlink(files[0])
- d.addCallback(_created)
- # now make sure the webapi checker notices it
- def _do_check(res):
- url = (self.webish_url +
- "uri/%s" % urllib.quote(self.node.get_uri()) +
- "?t=check&verify=false")
- return getPage(url, method="POST")
- d.addCallback(_do_check)
- def _got_results(out):
- self.failUnless("Not Healthy!" in out, out)
- self.failUnless("Unhealthy: best version has only 9 shares (encoding is 3-of-10)" in out, out)
- self.failIf("Corrupt Shares" in out, out)
- d.addCallback(_got_results)
-
- # now make sure the webapi repairer can fix it
- def _do_repair(res):
- url = (self.webish_url +
- "uri/%s" % urllib.quote(self.node.get_uri()) +
- "?t=check&verify=false&repair=true")
- return getPage(url, method="POST")
- d.addCallback(_do_repair)
- def _got_repair_results(out):
- self.failUnless("Repair successful" in out)
- d.addCallback(_got_repair_results)
- d.addCallback(_do_check)
- def _got_postrepair_results(out):
- self.failIf("Not Healthy!" in out, out)
- self.failUnless("Recoverable Versions: 10*seq" in out)
- d.addCallback(_got_postrepair_results)
- d.addErrback(self.explain_web_error)
-
- return d
-
-
-class DeepCheckBase(SystemTestMixin, ErrorMixin):
-
- def web_json(self, n, **kwargs):
- kwargs["output"] = "json"
- d = self.web(n, "POST", **kwargs)
- d.addCallback(self.decode_json)
- return d
-
- def decode_json(self, (s,url)):
- try:
- data = simplejson.loads(s)
- except ValueError:
- self.fail("%s: not JSON: '%s'" % (url, s))
- return data
-
- def web(self, n, method="GET", **kwargs):
- # returns (data, url)
- url = (self.webish_url + "uri/%s" % urllib.quote(n.get_uri())
- + "?" + "&".join(["%s=%s" % (k,v) for (k,v) in kwargs.items()]))
- d = getPage(url, method=method)
- d.addCallback(lambda data: (data,url))
- return d
-
- def wait_for_operation(self, ignored, ophandle):
- url = self.webish_url + "operations/" + ophandle
- url += "?t=status&output=JSON"
- d = getPage(url)
- def _got(res):
- try:
- data = simplejson.loads(res)
- except ValueError:
- self.fail("%s: not JSON: '%s'" % (url, res))
- if not data["finished"]:
- d = self.stall(delay=1.0)
- d.addCallback(self.wait_for_operation, ophandle)
- return d
- return data
- d.addCallback(_got)
- return d
-
- def get_operation_results(self, ignored, ophandle, output=None):
- url = self.webish_url + "operations/" + ophandle
- url += "?t=status"
- if output:
- url += "&output=" + output
- d = getPage(url)
- def _got(res):
- if output and output.lower() == "json":
- try:
- return simplejson.loads(res)
- except ValueError:
- self.fail("%s: not JSON: '%s'" % (url, res))
- return res
- d.addCallback(_got)
- return d
-
- def slow_web(self, n, output=None, **kwargs):
- # use ophandle=
- handle = base32.b2a(os.urandom(4))
- d = self.web(n, "POST", ophandle=handle, **kwargs)
- d.addCallback(self.wait_for_operation, handle)
- d.addCallback(self.get_operation_results, handle, output=output)
- return d
-
-
-class DeepCheckWebGood(DeepCheckBase, unittest.TestCase):
- # construct a small directory tree (with one dir, one immutable file, one
- # mutable file, one LIT file, and a loop), and then check/examine it in
- # various ways.
-
- def set_up_tree(self, ignored):
- # 2.9s
-
- # root
- # mutable
- # large
- # small
- # small2
- # loop -> root
- c0 = self.clients[0]
- d = c0.create_empty_dirnode()
- def _created_root(n):
- self.root = n
- self.root_uri = n.get_uri()
- d.addCallback(_created_root)
- d.addCallback(lambda ign: c0.create_mutable_file("mutable file contents"))
- d.addCallback(lambda n: self.root.set_node(u"mutable", n))
- def _created_mutable(n):
- self.mutable = n
- self.mutable_uri = n.get_uri()
- d.addCallback(_created_mutable)
-
- large = upload.Data("Lots of data\n" * 1000, None)
- d.addCallback(lambda ign: self.root.add_file(u"large", large))
- def _created_large(n):
- self.large = n
- self.large_uri = n.get_uri()
- d.addCallback(_created_large)
-
- small = upload.Data("Small enough for a LIT", None)
- d.addCallback(lambda ign: self.root.add_file(u"small", small))
- def _created_small(n):
- self.small = n
- self.small_uri = n.get_uri()
- d.addCallback(_created_small)
-
- small2 = upload.Data("Small enough for a LIT too", None)
- d.addCallback(lambda ign: self.root.add_file(u"small2", small2))
- def _created_small2(n):
- self.small2 = n
- self.small2_uri = n.get_uri()
- d.addCallback(_created_small2)
-
- d.addCallback(lambda ign: self.root.set_node(u"loop", self.root))
- return d
-
- def check_is_healthy(self, cr, n, where, incomplete=False):
- self.failUnless(ICheckResults.providedBy(cr), where)
- self.failUnless(cr.is_healthy(), where)
- self.failUnlessEqual(cr.get_storage_index(), n.get_storage_index(),
- where)
- self.failUnlessEqual(cr.get_storage_index_string(),
- base32.b2a(n.get_storage_index()), where)
- needs_rebalancing = bool( len(self.clients) < 10 )
- if not incomplete:
- self.failUnlessEqual(cr.needs_rebalancing(), needs_rebalancing, str((where, cr, cr.get_data())))
- d = cr.get_data()
- self.failUnlessEqual(d["count-shares-good"], 10, where)
- self.failUnlessEqual(d["count-shares-needed"], 3, where)
- self.failUnlessEqual(d["count-shares-expected"], 10, where)
- if not incomplete:
- self.failUnlessEqual(d["count-good-share-hosts"], len(self.clients), where)
- self.failUnlessEqual(d["count-corrupt-shares"], 0, where)
- self.failUnlessEqual(d["list-corrupt-shares"], [], where)
- if not incomplete:
- self.failUnlessEqual(sorted(d["servers-responding"]),
- sorted([c.nodeid for c in self.clients]),
- where)
- self.failUnless("sharemap" in d, str((where, d)))
- all_serverids = set()
- for (shareid, serverids) in d["sharemap"].items():
- all_serverids.update(serverids)
- self.failUnlessEqual(sorted(all_serverids),
- sorted([c.nodeid for c in self.clients]),
- where)
-
- self.failUnlessEqual(d["count-wrong-shares"], 0, where)
- self.failUnlessEqual(d["count-recoverable-versions"], 1, where)
- self.failUnlessEqual(d["count-unrecoverable-versions"], 0, where)
-
-
- def check_and_repair_is_healthy(self, cr, n, where, incomplete=False):
- self.failUnless(ICheckAndRepairResults.providedBy(cr), (where, cr))
- self.failUnless(cr.get_pre_repair_results().is_healthy(), where)
- self.check_is_healthy(cr.get_pre_repair_results(), n, where, incomplete)
- self.failUnless(cr.get_post_repair_results().is_healthy(), where)
- self.check_is_healthy(cr.get_post_repair_results(), n, where, incomplete)
- self.failIf(cr.get_repair_attempted(), where)
-
- def deep_check_is_healthy(self, cr, num_healthy, where):
- self.failUnless(IDeepCheckResults.providedBy(cr))
- self.failUnlessEqual(cr.get_counters()["count-objects-healthy"],
- num_healthy, where)
-
- def deep_check_and_repair_is_healthy(self, cr, num_healthy, where):
- self.failUnless(IDeepCheckAndRepairResults.providedBy(cr), where)
- c = cr.get_counters()
- self.failUnlessEqual(c["count-objects-healthy-pre-repair"],
- num_healthy, where)
- self.failUnlessEqual(c["count-objects-healthy-post-repair"],
- num_healthy, where)
- self.failUnlessEqual(c["count-repairs-attempted"], 0, where)
-
- def test_good(self):
- self.basedir = self.mktemp()
- d = self.set_up_nodes()
- d.addCallback(self.set_up_tree)
- d.addCallback(self.do_stats)
- d.addCallback(self.do_test_check_good)
- d.addCallback(self.do_test_web_good)
- d.addCallback(self.do_test_cli_good)
- d.addErrback(self.explain_web_error)
- d.addErrback(self.explain_error)
- return d
-
- def do_stats(self, ignored):
- d = defer.succeed(None)
- d.addCallback(lambda ign: self.root.start_deep_stats().when_done())
- d.addCallback(self.check_stats_good)
- return d
-
- def check_stats_good(self, s):
- self.failUnlessEqual(s["count-directories"], 1)
- self.failUnlessEqual(s["count-files"], 4)
- self.failUnlessEqual(s["count-immutable-files"], 1)
- self.failUnlessEqual(s["count-literal-files"], 2)
- self.failUnlessEqual(s["count-mutable-files"], 1)
- # don't check directories: their size will vary
- # s["largest-directory"]
- # s["size-directories"]
- self.failUnlessEqual(s["largest-directory-children"], 5)
- self.failUnlessEqual(s["largest-immutable-file"], 13000)
- # to re-use this function for both the local
- # dirnode.start_deep_stats() and the webapi t=start-deep-stats, we
- # coerce the result into a list of tuples. dirnode.start_deep_stats()
- # returns a list of tuples, but JSON only knows about lists., so
- # t=start-deep-stats returns a list of lists.
- histogram = [tuple(stuff) for stuff in s["size-files-histogram"]]
- self.failUnlessEqual(histogram, [(11, 31, 2),
- (10001, 31622, 1),
- ])
- self.failUnlessEqual(s["size-immutable-files"], 13000)
- self.failUnlessEqual(s["size-literal-files"], 48)
-
- def do_test_check_good(self, ignored):
- d = defer.succeed(None)
- # check the individual items
- d.addCallback(lambda ign: self.root.check(Monitor()))
- d.addCallback(self.check_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.mutable.check(Monitor()))
- d.addCallback(self.check_is_healthy, self.mutable, "mutable")
- d.addCallback(lambda ign: self.large.check(Monitor()))
- d.addCallback(self.check_is_healthy, self.large, "large")
- d.addCallback(lambda ign: self.small.check(Monitor()))
- d.addCallback(self.failUnlessEqual, None, "small")
- d.addCallback(lambda ign: self.small2.check(Monitor()))
- d.addCallback(self.failUnlessEqual, None, "small2")
-
- # and again with verify=True
- d.addCallback(lambda ign: self.root.check(Monitor(), verify=True))
- d.addCallback(self.check_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.mutable.check(Monitor(), verify=True))
- d.addCallback(self.check_is_healthy, self.mutable, "mutable")
- d.addCallback(lambda ign: self.large.check(Monitor(), verify=True))
- d.addCallback(self.check_is_healthy, self.large, "large", incomplete=True)
- d.addCallback(lambda ign: self.small.check(Monitor(), verify=True))
- d.addCallback(self.failUnlessEqual, None, "small")
- d.addCallback(lambda ign: self.small2.check(Monitor(), verify=True))
- d.addCallback(self.failUnlessEqual, None, "small2")
-
- # and check_and_repair(), which should be a nop
- d.addCallback(lambda ign: self.root.check_and_repair(Monitor()))
- d.addCallback(self.check_and_repair_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.mutable.check_and_repair(Monitor()))
- d.addCallback(self.check_and_repair_is_healthy, self.mutable, "mutable")
- #TODO d.addCallback(lambda ign: self.large.check_and_repair(Monitor()))
- #TODO d.addCallback(self.check_and_repair_is_healthy, self.large, "large")
- #TODO d.addCallback(lambda ign: self.small.check_and_repair(Monitor()))
- #TODO d.addCallback(self.failUnlessEqual, None, "small")
- #TODO d.addCallback(lambda ign: self.small2.check_and_repair(Monitor()))
- #TODO d.addCallback(self.failUnlessEqual, None, "small2")
-
- # check_and_repair(verify=True)
- d.addCallback(lambda ign: self.root.check_and_repair(Monitor(), verify=True))
- d.addCallback(self.check_and_repair_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.mutable.check_and_repair(Monitor(), verify=True))
- d.addCallback(self.check_and_repair_is_healthy, self.mutable, "mutable")
- #TODO d.addCallback(lambda ign: self.large.check_and_repair(Monitor(), verify=True))
- #TODO d.addCallback(self.check_and_repair_is_healthy, self.large, "large",
- #TODO incomplete=True)
- #TODO d.addCallback(lambda ign: self.small.check_and_repair(Monitor(), verify=True))
- #TODO d.addCallback(self.failUnlessEqual, None, "small")
- #TODO d.addCallback(lambda ign: self.small2.check_and_repair(Monitor(), verify=True))
- #TODO d.addCallback(self.failUnlessEqual, None, "small2")
-
-
- # now deep-check the root, with various verify= and repair= options
- d.addCallback(lambda ign:
- self.root.start_deep_check().when_done())
- d.addCallback(self.deep_check_is_healthy, 3, "root")
- d.addCallback(lambda ign:
- self.root.start_deep_check(verify=True).when_done())
- d.addCallback(self.deep_check_is_healthy, 3, "root")
- d.addCallback(lambda ign:
- self.root.start_deep_check_and_repair().when_done())
- d.addCallback(self.deep_check_and_repair_is_healthy, 3, "root")
- d.addCallback(lambda ign:
- self.root.start_deep_check_and_repair(verify=True).when_done())
- d.addCallback(self.deep_check_and_repair_is_healthy, 3, "root")
-
- # and finally, start a deep-check, but then cancel it.
- d.addCallback(lambda ign: self.root.start_deep_check())
- def _checking(monitor):
- monitor.cancel()
- d = monitor.when_done()
- # this should fire as soon as the next dirnode.list finishes.
- # TODO: add a counter to measure how many list() calls are made,
- # assert that no more than one gets to run before the cancel()
- # takes effect.
- def _finished_normally(res):
- self.fail("this was supposed to fail, not finish normally")
- def _cancelled(f):
- f.trap(OperationCancelledError)
- d.addCallbacks(_finished_normally, _cancelled)
- return d
- d.addCallback(_checking)
-
- return d
-
- def json_check_is_healthy(self, data, n, where, incomplete=False):
-
- self.failUnlessEqual(data["storage-index"],
- base32.b2a(n.get_storage_index()), where)
- self.failUnless("summary" in data, (where, data))
- self.failUnlessEqual(data["summary"].lower(), "healthy",
- "%s: '%s'" % (where, data["summary"]))
- r = data["results"]
- self.failUnlessEqual(r["healthy"], True, where)
- needs_rebalancing = bool( len(self.clients) < 10 )
- if not incomplete:
- self.failUnlessEqual(r["needs-rebalancing"], needs_rebalancing, where)
- self.failUnlessEqual(r["count-shares-good"], 10, where)
- self.failUnlessEqual(r["count-shares-needed"], 3, where)
- self.failUnlessEqual(r["count-shares-expected"], 10, where)
- if not incomplete:
- self.failUnlessEqual(r["count-good-share-hosts"], len(self.clients), where)
- self.failUnlessEqual(r["count-corrupt-shares"], 0, where)
- self.failUnlessEqual(r["list-corrupt-shares"], [], where)
- if not incomplete:
- self.failUnlessEqual(sorted(r["servers-responding"]),
- sorted([idlib.nodeid_b2a(c.nodeid)
- for c in self.clients]), where)
- self.failUnless("sharemap" in r, where)
- all_serverids = set()
- for (shareid, serverids_s) in r["sharemap"].items():
- all_serverids.update(serverids_s)
- self.failUnlessEqual(sorted(all_serverids),
- sorted([idlib.nodeid_b2a(c.nodeid)
- for c in self.clients]), where)
- self.failUnlessEqual(r["count-wrong-shares"], 0, where)
- self.failUnlessEqual(r["count-recoverable-versions"], 1, where)
- self.failUnlessEqual(r["count-unrecoverable-versions"], 0, where)
-
- def json_check_and_repair_is_healthy(self, data, n, where, incomplete=False):
- self.failUnlessEqual(data["storage-index"],
- base32.b2a(n.get_storage_index()), where)
- self.failUnlessEqual(data["repair-attempted"], False, where)
- self.json_check_is_healthy(data["pre-repair-results"],
- n, where, incomplete)
- self.json_check_is_healthy(data["post-repair-results"],
- n, where, incomplete)
-
- def json_full_deepcheck_is_healthy(self, data, n, where):
- self.failUnlessEqual(data["root-storage-index"],
- base32.b2a(n.get_storage_index()), where)
- self.failUnlessEqual(data["count-objects-checked"], 3, where)
- self.failUnlessEqual(data["count-objects-healthy"], 3, where)
- self.failUnlessEqual(data["count-objects-unhealthy"], 0, where)
- self.failUnlessEqual(data["count-corrupt-shares"], 0, where)
- self.failUnlessEqual(data["list-corrupt-shares"], [], where)
- self.failUnlessEqual(data["list-unhealthy-files"], [], where)
- self.json_check_stats_good(data["stats"], where)
-
- def json_full_deepcheck_and_repair_is_healthy(self, data, n, where):
- self.failUnlessEqual(data["root-storage-index"],
- base32.b2a(n.get_storage_index()), where)
- self.failUnlessEqual(data["count-objects-checked"], 3, where)
-
- self.failUnlessEqual(data["count-objects-healthy-pre-repair"], 3, where)
- self.failUnlessEqual(data["count-objects-unhealthy-pre-repair"], 0, where)
- self.failUnlessEqual(data["count-corrupt-shares-pre-repair"], 0, where)
-
- self.failUnlessEqual(data["count-objects-healthy-post-repair"], 3, where)
- self.failUnlessEqual(data["count-objects-unhealthy-post-repair"], 0, where)
- self.failUnlessEqual(data["count-corrupt-shares-post-repair"], 0, where)
-
- self.failUnlessEqual(data["list-corrupt-shares"], [], where)
- self.failUnlessEqual(data["list-remaining-corrupt-shares"], [], where)
- self.failUnlessEqual(data["list-unhealthy-files"], [], where)
-
- self.failUnlessEqual(data["count-repairs-attempted"], 0, where)
- self.failUnlessEqual(data["count-repairs-successful"], 0, where)
- self.failUnlessEqual(data["count-repairs-unsuccessful"], 0, where)
-
-
- def json_check_lit(self, data, n, where):
- self.failUnlessEqual(data["storage-index"], "", where)
- self.failUnlessEqual(data["results"]["healthy"], True, where)
-
- def json_check_stats_good(self, data, where):
- self.check_stats_good(data)
-
- def do_test_web_good(self, ignored):
- d = defer.succeed(None)
-
- # stats
- d.addCallback(lambda ign:
- self.slow_web(self.root,
- t="start-deep-stats", output="json"))
- d.addCallback(self.json_check_stats_good, "deep-stats")
-
- # check, no verify
- d.addCallback(lambda ign: self.web_json(self.root, t="check"))
- d.addCallback(self.json_check_is_healthy, self.root, "root")
- d.addCallback(lambda ign: self.web_json(self.mutable, t="check"))
- d.addCallback(self.json_check_is_healthy, self.mutable, "mutable")
- d.addCallback(lambda ign: self.web_json(self.large, t="check"))
- d.addCallback(self.json_check_is_healthy, self.large, "large")
- d.addCallback(lambda ign: self.web_json(self.small, t="check"))
- d.addCallback(self.json_check_lit, self.small, "small")
- d.addCallback(lambda ign: self.web_json(self.small2, t="check"))
- d.addCallback(self.json_check_lit, self.small2, "small2")
-
- # check and verify
- d.addCallback(lambda ign:
- self.web_json(self.root, t="check", verify="true"))
- d.addCallback(self.json_check_is_healthy, self.root, "root+v")
- d.addCallback(lambda ign:
- self.web_json(self.mutable, t="check", verify="true"))
- d.addCallback(self.json_check_is_healthy, self.mutable, "mutable+v")
- d.addCallback(lambda ign:
- self.web_json(self.large, t="check", verify="true"))
- d.addCallback(self.json_check_is_healthy, self.large, "large+v",
- incomplete=True)
- d.addCallback(lambda ign:
- self.web_json(self.small, t="check", verify="true"))
- d.addCallback(self.json_check_lit, self.small, "small+v")
- d.addCallback(lambda ign:
- self.web_json(self.small2, t="check", verify="true"))
- d.addCallback(self.json_check_lit, self.small2, "small2+v")
-
- # check and repair, no verify
- d.addCallback(lambda ign:
- self.web_json(self.root, t="check", repair="true"))
- d.addCallback(self.json_check_and_repair_is_healthy, self.root, "root+r")
- d.addCallback(lambda ign:
- self.web_json(self.mutable, t="check", repair="true"))
- d.addCallback(self.json_check_and_repair_is_healthy, self.mutable, "mutable+r")
- d.addCallback(lambda ign:
- self.web_json(self.large, t="check", repair="true"))
- d.addCallback(self.json_check_and_repair_is_healthy, self.large, "large+r")
- d.addCallback(lambda ign:
- self.web_json(self.small, t="check", repair="true"))
- d.addCallback(self.json_check_lit, self.small, "small+r")
- d.addCallback(lambda ign:
- self.web_json(self.small2, t="check", repair="true"))
- d.addCallback(self.json_check_lit, self.small2, "small2+r")
-
- # check+verify+repair
- d.addCallback(lambda ign:
- self.web_json(self.root, t="check", repair="true", verify="true"))
- d.addCallback(self.json_check_and_repair_is_healthy, self.root, "root+vr")
- d.addCallback(lambda ign:
- self.web_json(self.mutable, t="check", repair="true", verify="true"))
- d.addCallback(self.json_check_and_repair_is_healthy, self.mutable, "mutable+vr")
- d.addCallback(lambda ign:
- self.web_json(self.large, t="check", repair="true", verify="true"))
- d.addCallback(self.json_check_and_repair_is_healthy, self.large, "large+vr", incomplete=True)
- d.addCallback(lambda ign:
- self.web_json(self.small, t="check", repair="true", verify="true"))
- d.addCallback(self.json_check_lit, self.small, "small+vr")
- d.addCallback(lambda ign:
- self.web_json(self.small2, t="check", repair="true", verify="true"))
- d.addCallback(self.json_check_lit, self.small2, "small2+vr")
-
- # now run a deep-check, with various verify= and repair= flags
- d.addCallback(lambda ign:
- self.slow_web(self.root, t="start-deep-check", output="json"))
- d.addCallback(self.json_full_deepcheck_is_healthy, self.root, "root+d")
- d.addCallback(lambda ign:
- self.slow_web(self.root, t="start-deep-check", verify="true",
- output="json"))
- d.addCallback(self.json_full_deepcheck_is_healthy, self.root, "root+dv")
- d.addCallback(lambda ign:
- self.slow_web(self.root, t="start-deep-check", repair="true",
- output="json"))
- d.addCallback(self.json_full_deepcheck_and_repair_is_healthy, self.root, "root+dr")
- d.addCallback(lambda ign:
- self.slow_web(self.root, t="start-deep-check", verify="true", repair="true", output="json"))
- d.addCallback(self.json_full_deepcheck_and_repair_is_healthy, self.root, "root+dvr")
-
- # now look at t=info
- d.addCallback(lambda ign: self.web(self.root, t="info"))
- # TODO: examine the output
- d.addCallback(lambda ign: self.web(self.mutable, t="info"))
- d.addCallback(lambda ign: self.web(self.large, t="info"))
- d.addCallback(lambda ign: self.web(self.small, t="info"))
- d.addCallback(lambda ign: self.web(self.small2, t="info"))
-
- return d
-
- def _run_cli(self, argv, stdin=""):
- #print "CLI:", argv
- stdout, stderr = StringIO(), StringIO()
- d = threads.deferToThread(runner.runner, argv, run_by_human=False,
- stdin=StringIO(stdin),
- stdout=stdout, stderr=stderr)
- def _done(res):
- return stdout.getvalue(), stderr.getvalue()
- d.addCallback(_done)
- return d
-
- def do_test_cli_good(self, ignored):
- basedir = self.getdir("client0")
- d = self._run_cli(["manifest",
- "--node-directory", basedir,
- self.root_uri])
- def _check((out,err)):
- self.failUnlessEqual(err, "")
- lines = [l for l in out.split("\n") if l]
- self.failUnlessEqual(len(lines), 5)
- caps = {}
- for l in lines:
- try:
- cap, path = l.split(None, 1)
- except ValueError:
- cap = l.strip()
- path = ""
- caps[cap] = path
- self.failUnless(self.root.get_uri() in caps)
- self.failUnlessEqual(caps[self.root.get_uri()], "")
- self.failUnlessEqual(caps[self.mutable.get_uri()], "mutable")
- self.failUnlessEqual(caps[self.large.get_uri()], "large")
- self.failUnlessEqual(caps[self.small.get_uri()], "small")
- self.failUnlessEqual(caps[self.small2.get_uri()], "small2")
- d.addCallback(_check)
-
- d.addCallback(lambda res:
- self._run_cli(["manifest",
- "--node-directory", basedir,
- "--storage-index", self.root_uri]))
- def _check2((out,err)):
- self.failUnlessEqual(err, "")
- lines = [l for l in out.split("\n") if l]
- self.failUnlessEqual(len(lines), 3)
- self.failUnless(base32.b2a(self.root.get_storage_index()) in lines)
- self.failUnless(base32.b2a(self.mutable.get_storage_index()) in lines)
- self.failUnless(base32.b2a(self.large.get_storage_index()) in lines)
- d.addCallback(_check2)
-
- d.addCallback(lambda res:
- self._run_cli(["manifest",
- "--node-directory", basedir,
- "--raw", self.root_uri]))
- def _check2r((out,err)):
- self.failUnlessEqual(err, "")
- data = simplejson.loads(out)
- sis = data["storage-index"]
- self.failUnlessEqual(len(sis), 3)
- self.failUnless(base32.b2a(self.root.get_storage_index()) in sis)
- self.failUnless(base32.b2a(self.mutable.get_storage_index()) in sis)
- self.failUnless(base32.b2a(self.large.get_storage_index()) in sis)
- self.failUnlessEqual(data["stats"]["count-files"], 4)
- self.failUnlessEqual(data["origin"],
- base32.b2a(self.root.get_storage_index()))
- verifycaps = data["verifycaps"]
- self.failUnlessEqual(len(verifycaps), 3)
- self.failUnless(self.root.get_verify_cap().to_string() in verifycaps)
- self.failUnless(self.mutable.get_verify_cap().to_string() in verifycaps)
- self.failUnless(self.large.get_verify_cap().to_string() in verifycaps)
- d.addCallback(_check2r)
-
- d.addCallback(lambda res:
- self._run_cli(["stats",
- "--node-directory", basedir,
- self.root_uri]))
- def _check3((out,err)):
- lines = [l.strip() for l in out.split("\n") if l]
- self.failUnless("count-immutable-files: 1" in lines)
- self.failUnless("count-mutable-files: 1" in lines)
- self.failUnless("count-literal-files: 2" in lines)
- self.failUnless("count-files: 4" in lines)
- self.failUnless("count-directories: 1" in lines)
- self.failUnless("size-immutable-files: 13000 (13.00 kB, 12.70 kiB)" in lines, lines)
- self.failUnless("size-literal-files: 48" in lines)
- self.failUnless(" 11-31 : 2 (31 B, 31 B)".strip() in lines)
- self.failUnless("10001-31622 : 1 (31.62 kB, 30.88 kiB)".strip() in lines)
- d.addCallback(_check3)
-
- d.addCallback(lambda res:
- self._run_cli(["stats",
- "--node-directory", basedir,
- "--raw",
- self.root_uri]))
- def _check4((out,err)):
- data = simplejson.loads(out)
- self.failUnlessEqual(data["count-immutable-files"], 1)
- self.failUnlessEqual(data["count-immutable-files"], 1)
- self.failUnlessEqual(data["count-mutable-files"], 1)
- self.failUnlessEqual(data["count-literal-files"], 2)
- self.failUnlessEqual(data["count-files"], 4)
- self.failUnlessEqual(data["count-directories"], 1)
- self.failUnlessEqual(data["size-immutable-files"], 13000)
- self.failUnlessEqual(data["size-literal-files"], 48)
- self.failUnless([11,31,2] in data["size-files-histogram"])
- self.failUnless([10001,31622,1] in data["size-files-histogram"])
- d.addCallback(_check4)
-
- return d
-
-
-class DeepCheckWebBad(DeepCheckBase, unittest.TestCase):
-
- def test_bad(self):
- self.basedir = self.mktemp()
- d = self.set_up_nodes()
- d.addCallback(self.set_up_damaged_tree)
- d.addCallback(self.do_check)
- d.addCallback(self.do_deepcheck)
- d.addCallback(self.do_test_web_bad)
- d.addErrback(self.explain_web_error)
- d.addErrback(self.explain_error)
- return d
-
-
-
- def set_up_damaged_tree(self, ignored):
- # 6.4s
-
- # root
- # mutable-good
- # mutable-missing-shares
- # mutable-corrupt-shares
- # mutable-unrecoverable
- # large-good
- # large-missing-shares
- # large-corrupt-shares
- # large-unrecoverable
-
- self.nodes = {}
-
- c0 = self.clients[0]
- d = c0.create_empty_dirnode()
- def _created_root(n):
- self.root = n
- self.root_uri = n.get_uri()
- d.addCallback(_created_root)
- d.addCallback(self.create_mangled, "mutable-good")
- d.addCallback(self.create_mangled, "mutable-missing-shares")
- d.addCallback(self.create_mangled, "mutable-corrupt-shares")
- d.addCallback(self.create_mangled, "mutable-unrecoverable")
- d.addCallback(self.create_mangled, "large-good")
- d.addCallback(self.create_mangled, "large-missing-shares")
- d.addCallback(self.create_mangled, "large-corrupt-shares")
- d.addCallback(self.create_mangled, "large-unrecoverable")
-
- return d
-
-
- def create_mangled(self, ignored, name):
- nodetype, mangletype = name.split("-", 1)
- if nodetype == "mutable":
- d = self.clients[0].create_mutable_file("mutable file contents")
- d.addCallback(lambda n: self.root.set_node(unicode(name), n))
- elif nodetype == "large":
- large = upload.Data("Lots of data\n" * 1000 + name + "\n", None)
- d = self.root.add_file(unicode(name), large)
- elif nodetype == "small":
- small = upload.Data("Small enough for a LIT", None)
- d = self.root.add_file(unicode(name), small)
-
- def _stash_node(node):
- self.nodes[name] = node
- return node
- d.addCallback(_stash_node)
-
- if mangletype == "good":
- pass
- elif mangletype == "missing-shares":
- d.addCallback(self._delete_some_shares)
- elif mangletype == "corrupt-shares":
- d.addCallback(self._corrupt_some_shares)
- else:
- assert mangletype == "unrecoverable"
- d.addCallback(self._delete_most_shares)
-
- return d
-
- def _run_cli(self, argv):
- stdout, stderr = StringIO(), StringIO()
- # this can only do synchronous operations
- assert argv[0] == "debug"
- runner.runner(argv, run_by_human=False, stdout=stdout, stderr=stderr)
- return stdout.getvalue()
-
- def _find_shares(self, node):
- si = node.get_storage_index()
- out = self._run_cli(["debug", "find-shares", base32.b2a(si)] +
- [c.basedir for c in self.clients])
- files = out.split("\n")
- return [f for f in files if f]
-
- def _delete_some_shares(self, node):
- shares = self._find_shares(node)
- os.unlink(shares[0])
- os.unlink(shares[1])
-
- def _corrupt_some_shares(self, node):
- shares = self._find_shares(node)
- self._run_cli(["debug", "corrupt-share", shares[0]])
- self._run_cli(["debug", "corrupt-share", shares[1]])
-
- def _delete_most_shares(self, node):
- shares = self._find_shares(node)
- for share in shares[1:]:
- os.unlink(share)
-
-
- def check_is_healthy(self, cr, where):
- try:
- self.failUnless(ICheckResults.providedBy(cr), (cr, type(cr), where))
- self.failUnless(cr.is_healthy(), (cr.get_report(), cr.is_healthy(), cr.get_summary(), where))
- self.failUnless(cr.is_recoverable(), where)
- d = cr.get_data()
- self.failUnlessEqual(d["count-recoverable-versions"], 1, where)
- self.failUnlessEqual(d["count-unrecoverable-versions"], 0, where)
- return cr
- except Exception, le:
- le.args = tuple(le.args + (where,))
- raise
-
- def check_is_missing_shares(self, cr, where):
- self.failUnless(ICheckResults.providedBy(cr), where)
- self.failIf(cr.is_healthy(), where)
- self.failUnless(cr.is_recoverable(), where)
- d = cr.get_data()
- self.failUnlessEqual(d["count-recoverable-versions"], 1, where)
- self.failUnlessEqual(d["count-unrecoverable-versions"], 0, where)
- return cr
-
- def check_has_corrupt_shares(self, cr, where):
- # by "corrupt-shares" we mean the file is still recoverable
- self.failUnless(ICheckResults.providedBy(cr), where)
- d = cr.get_data()
- self.failIf(cr.is_healthy(), (where, cr))
- self.failUnless(cr.is_recoverable(), where)
- d = cr.get_data()
- self.failUnless(d["count-shares-good"] < 10, where)
- self.failUnless(d["count-corrupt-shares"], where)
- self.failUnless(d["list-corrupt-shares"], where)
- return cr
-
- def check_is_unrecoverable(self, cr, where):
- self.failUnless(ICheckResults.providedBy(cr), where)
- d = cr.get_data()
- self.failIf(cr.is_healthy(), where)
- self.failIf(cr.is_recoverable(), where)
- self.failUnless(d["count-shares-good"] < d["count-shares-needed"], (d["count-shares-good"], d["count-shares-needed"], where))
- self.failUnlessEqual(d["count-recoverable-versions"], 0, where)
- self.failUnlessEqual(d["count-unrecoverable-versions"], 1, where)
- return cr
-
- def do_check(self, ignored):
- d = defer.succeed(None)
-
- # check the individual items, without verification. This will not
- # detect corrupt shares.
- def _check(which, checker):
- d = self.nodes[which].check(Monitor())
- d.addCallback(checker, which + "--check")
- return d
-
- d.addCallback(lambda ign: _check("mutable-good", self.check_is_healthy))
- d.addCallback(lambda ign: _check("mutable-missing-shares",
- self.check_is_missing_shares))
- d.addCallback(lambda ign: _check("mutable-corrupt-shares",
- self.check_is_healthy))
- d.addCallback(lambda ign: _check("mutable-unrecoverable",
- self.check_is_unrecoverable))
- d.addCallback(lambda ign: _check("large-good", self.check_is_healthy))
- d.addCallback(lambda ign: _check("large-missing-shares",
- self.check_is_missing_shares))
- d.addCallback(lambda ign: _check("large-corrupt-shares",
- self.check_is_healthy))
- d.addCallback(lambda ign: _check("large-unrecoverable",
- self.check_is_unrecoverable))
-
- # and again with verify=True, which *does* detect corrupt shares.
- def _checkv(which, checker):
- d = self.nodes[which].check(Monitor(), verify=True)
- d.addCallback(checker, which + "--check-and-verify")
- return d
-
- d.addCallback(lambda ign: _checkv("mutable-good", self.check_is_healthy))
- d.addCallback(lambda ign: _checkv("mutable-missing-shares",
- self.check_is_missing_shares))
- d.addCallback(lambda ign: _checkv("mutable-corrupt-shares",
- self.check_has_corrupt_shares))
- d.addCallback(lambda ign: _checkv("mutable-unrecoverable",
- self.check_is_unrecoverable))
- d.addCallback(lambda ign: _checkv("large-good", self.check_is_healthy))
- d.addCallback(lambda ign: _checkv("large-missing-shares", self.check_is_missing_shares))
- d.addCallback(lambda ign: _checkv("large-corrupt-shares", self.check_has_corrupt_shares))
- d.addCallback(lambda ign: _checkv("large-unrecoverable",
- self.check_is_unrecoverable))
-
- return d
-
- def do_deepcheck(self, ignored):
- d = defer.succeed(None)
-
- # now deep-check the root, with various verify= and repair= options
- d.addCallback(lambda ign:
- self.root.start_deep_check().when_done())
- def _check1(cr):
- self.failUnless(IDeepCheckResults.providedBy(cr))
- c = cr.get_counters()
- self.failUnlessEqual(c["count-objects-checked"], 9)
- self.failUnlessEqual(c["count-objects-healthy"], 5)
- self.failUnlessEqual(c["count-objects-unhealthy"], 4)
- self.failUnlessEqual(c["count-objects-unrecoverable"], 2)
- d.addCallback(_check1)
-
- d.addCallback(lambda ign:
- self.root.start_deep_check(verify=True).when_done())
- def _check2(cr):
- self.failUnless(IDeepCheckResults.providedBy(cr))
- c = cr.get_counters()
- self.failUnlessEqual(c["count-objects-checked"], 9)
- self.failUnlessEqual(c["count-objects-healthy"], 3)
- self.failUnlessEqual(c["count-objects-unhealthy"], 6)
- self.failUnlessEqual(c["count-objects-healthy"], 3) # root, mutable good, large good
- self.failUnlessEqual(c["count-objects-unrecoverable"], 2) # mutable unrecoverable, large unrecoverable
- d.addCallback(_check2)
-
- return d
-
- def json_is_healthy(self, data, where):
- r = data["results"]
- self.failUnless(r["healthy"], where)
- self.failUnless(r["recoverable"], where)
- self.failUnlessEqual(r["count-recoverable-versions"], 1, where)
- self.failUnlessEqual(r["count-unrecoverable-versions"], 0, where)
-
- def json_is_missing_shares(self, data, where):
- r = data["results"]
- self.failIf(r["healthy"], where)
- self.failUnless(r["recoverable"], where)
- self.failUnlessEqual(r["count-recoverable-versions"], 1, where)
- self.failUnlessEqual(r["count-unrecoverable-versions"], 0, where)
-
- def json_has_corrupt_shares(self, data, where):
- # by "corrupt-shares" we mean the file is still recoverable
- r = data["results"]
- self.failIf(r["healthy"], where)
- self.failUnless(r["recoverable"], where)
- self.failUnless(r["count-shares-good"] < 10, where)
- self.failUnless(r["count-corrupt-shares"], where)
- self.failUnless(r["list-corrupt-shares"], where)
-
- def json_is_unrecoverable(self, data, where):
- r = data["results"]
- self.failIf(r["healthy"], where)
- self.failIf(r["recoverable"], where)
- self.failUnless(r["count-shares-good"] < r["count-shares-needed"],
- where)
- self.failUnlessEqual(r["count-recoverable-versions"], 0, where)
- self.failUnlessEqual(r["count-unrecoverable-versions"], 1, where)
-
- def do_test_web_bad(self, ignored):
- d = defer.succeed(None)
-
- # check, no verify
- def _check(which, checker):
- d = self.web_json(self.nodes[which], t="check")
- d.addCallback(checker, which + "--webcheck")
- return d
-
- d.addCallback(lambda ign: _check("mutable-good",
- self.json_is_healthy))
- d.addCallback(lambda ign: _check("mutable-missing-shares",
- self.json_is_missing_shares))
- d.addCallback(lambda ign: _check("mutable-corrupt-shares",
- self.json_is_healthy))
- d.addCallback(lambda ign: _check("mutable-unrecoverable",
- self.json_is_unrecoverable))
- d.addCallback(lambda ign: _check("large-good",
- self.json_is_healthy))
- d.addCallback(lambda ign: _check("large-missing-shares",
- self.json_is_missing_shares))
- d.addCallback(lambda ign: _check("large-corrupt-shares",
- self.json_is_healthy))
- d.addCallback(lambda ign: _check("large-unrecoverable",
- self.json_is_unrecoverable))
-
- # check and verify
- def _checkv(which, checker):
- d = self.web_json(self.nodes[which], t="check", verify="true")
- d.addCallback(checker, which + "--webcheck-and-verify")
- return d
-
- d.addCallback(lambda ign: _checkv("mutable-good",
- self.json_is_healthy))
- d.addCallback(lambda ign: _checkv("mutable-missing-shares",
- self.json_is_missing_shares))
- d.addCallback(lambda ign: _checkv("mutable-corrupt-shares",
- self.json_has_corrupt_shares))
- d.addCallback(lambda ign: _checkv("mutable-unrecoverable",
- self.json_is_unrecoverable))
- d.addCallback(lambda ign: _checkv("large-good",
- self.json_is_healthy))
- d.addCallback(lambda ign: _checkv("large-missing-shares", self.json_is_missing_shares))
- d.addCallback(lambda ign: _checkv("large-corrupt-shares", self.json_has_corrupt_shares))
- d.addCallback(lambda ign: _checkv("large-unrecoverable",
- self.json_is_unrecoverable))
-
+class Connections(SystemTestMixin, unittest.TestCase):
+ def test_rref(self):
+ if NormalizedVersion(foolscap.__version__) < NormalizedVersion('0.6.4'):
+ raise unittest.SkipTest("skipped due to http://foolscap.lothar.com/trac/ticket/196 "
+ "(which does not affect normal usage of Tahoe-LAFS)")
+
+ self.basedir = "system/Connections/rref"
+ d = self.set_up_nodes(2)
+ def _start(ign):
+ self.c0 = self.clients[0]
+ nonclients = [s for s in self.c0.storage_broker.get_connected_servers()
+ if s.get_serverid() != self.c0.nodeid]
+ self.failUnlessEqual(len(nonclients), 1)
+
+ self.s1 = nonclients[0] # s1 is the server, not c0
+ self.s1_rref = self.s1.get_rref()
+ self.failIfEqual(self.s1_rref, None)
+ self.failUnless(self.s1.is_connected())
+ d.addCallback(_start)
+
+ # now shut down the server
+ d.addCallback(lambda ign: self.clients[1].disownServiceParent())
+ # and wait for the client to notice
+ def _poll():
+ return len(self.c0.storage_broker.get_connected_servers()) < 2
+ d.addCallback(lambda ign: self.poll(_poll))
+
+ def _down(ign):
+ self.failIf(self.s1.is_connected())
+ rref = self.s1.get_rref()
+ self.failUnless(rref)
+ self.failUnlessIdentical(rref, self.s1_rref)
+ d.addCallback(_down)
return d