-
import os, re, base64
from cStringIO import StringIO
+
from twisted.trial import unittest
from twisted.internet import defer, reactor
+
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32, consumer, fileutil, mathutil
+from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
ssk_pubkey_fingerprint_hash
from allmydata.util.consumer import MemoryConsumer
from allmydata.scripts import debug
from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
-from allmydata.mutable.common import ResponseCache, \
+from allmydata.mutable.common import \
MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
NotEnoughServersError, CorruptShareError
PausingAndStoppingConsumer, StoppingConsumer, \
ImmediatelyStoppingConsumer
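+
+# fire the Deferred only after several reactor turns, so that callers of
+# FakeStorage.read() can never rely on getting a synchronous answer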
+def eventuaaaaaly(res=None):
+ d = fireEventually(res)
+ d.addCallback(fireEventually)
+ d.addCallback(fireEventually)
+ return d
+
# this "FakeStorage" exists to put the share data in RAM and avoid using real
# network connections, both to speed up the tests and to reduce the amount of
# non-testing code that gets run.
def read(self, peerid, storage_index):
shares = self._peers.get(peerid, {})
if self._sequence is None:
- return defer.succeed(shares)
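+            # answer with the shares, but on a later reactor turn, never
+            # synchronously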
+ return eventuaaaaaly(shares)
d = defer.Deferred()
if not self._pending:
self._pending_timer = reactor.callLater(1.0, self._fire_readers)
storage_broker = StorageFarmBroker(None, True)
for peerid in peerids:
fss = FakeStorageServer(peerid, s)
- storage_broker.test_add_rref(peerid, fss)
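+        # introduce each fake server with a minimal announcement dict: a
+        # fake storage FURL plus the permutation seed used for server
+        # selection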
+ ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
+ "permutation-seed-base32": base32.b2a(peerid) }
+ storage_broker.test_add_rref(peerid, fss, ann)
return storage_broker
-def make_nodemaker(s=None, num_peers=10):
+def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
storage_broker = make_storagebroker(s, num_peers)
sh = client.SecretHolder("lease secret", "convergence secret")
keygen = client.KeyGenerator()
- keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
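+    # a keysize of None skips this call, leaving the KeyGenerator's own
+    # (larger) default in place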
+ if keysize:
+ keygen.set_default_keysize(keysize)
nodemaker = NodeMaker(storage_broker, sh, None,
None, None,
{"k": 3, "n": 10}, SDMF_VERSION, keygen)
dumped = servermap.dump(StringIO())
self.failUnlessIn("3-of-10", dumped.getvalue())
d.addCallback(_then)
- # Now overwrite the contents with some new contents. We want
+ # Now overwrite the contents with some new contents. We want
# to make them big enough to force the file to be uploaded
# in more than one segment.
big_contents = "contents1" * 100000 # about 900 KiB
# before, they need to be big enough to force multiple
# segments, so that we make the downloader deal with
# multiple segments.
- bigger_contents = "contents2" * 1000000 # about 9MiB
+ bigger_contents = "contents2" * 1000000 # about 9MiB
bigger_contents_uploadable = MutableData(bigger_contents)
d.addCallback(lambda ignored:
n.overwrite(bigger_contents_uploadable))
d.addCallback(_created)
return d
-
- def test_response_cache_memory_leak(self):
- d = self.nodemaker.create_mutable_file("contents")
- def _created(n):
- d = n.download_best_version()
- d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
- d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
-
- def _check_cache(expected):
- # The total size of cache entries should not increase on the second download;
- # in fact the cache contents should be identical.
- d2 = n.download_best_version()
- d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
- return d2
- d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
- return d
- d.addCallback(_created)
- return d
-
def test_create_with_initial_contents_function(self):
data = "initial contents"
def _make_contents(n):
d.addCallback(_created)
return d
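+    # publish an empty SDMF file; self._fn2 is a second filenode created
+    # fresh from the write cap, so unlike self._fn it starts without a
+    # cached RSA private key (test_repair_empty relies on this)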
+ def publish_empty_sdmf(self):
+ self.CONTENTS = ""
+ self.uploadable = MutableData(self.CONTENTS)
+ self._storage = FakeStorage()
+ self._nodemaker = make_nodemaker(self._storage, keysize=None)
+ self._storage_broker = self._nodemaker.storage_broker
+ d = self._nodemaker.create_mutable_file(self.uploadable,
+ version=SDMF_VERSION)
+ def _created(node):
+ self._fn = node
+ self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
+ d.addCallback(_created)
+ return d
+
def publish_multiple(self, version=0):
self.CONTENTS = ["Contents 0",
d.addCallback(_remove_shares)
return d
+ def test_all_but_two_shares_vanished_updated_servermap(self):
+ # tests error reporting for ticket #1742
+ d = self.make_servermap()
+ def _remove_shares(servermap):
+ self._version = servermap.best_recoverable_version()
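+            # wipe the shares held by all but the first two servers; fewer
+            # than k=3 shares remain, so the download below has to fail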
+ for shares in self._storage._peers.values()[2:]:
+ shares.clear()
+ return self.make_servermap(servermap)
+ d.addCallback(_remove_shares)
+ def _check(updated_servermap):
+ d1 = self.shouldFail(NotEnoughSharesError,
+ "test_all_but_two_shares_vanished_updated_servermap",
+ "ran out of servers",
+ self.do_download, updated_servermap, version=self._version)
+ return d1
+ d.addCallback(_check)
+ return d
+
def test_no_servers(self):
sb2 = make_storagebroker(num_peers=0)
# if there are no servers, then a MODE_READ servermap should come
def test_corrupt_all_encprivkey_late(self):
- # this should work for the same reason as above, but we corrupt
+ # this should work for the same reason as above, but we corrupt
# after the servermap update to exercise the error handling
# code.
# We need to remove the privkey from the node, or the retrieve
corrupt_early=False,
failure_checker=_check)
- def test_corrupt_all_block_hash_tree_late(self):
- def _check(res):
- f = res[0]
- self.failUnless(f.check(NotEnoughSharesError))
- return self._test_corrupt_all("block_hash_tree",
- "block hash tree failure",
- corrupt_early=False,
- failure_checker=_check)
-
def test_corrupt_all_block_late(self):
def _check(res):
d.addCallback(lambda ignored:
self._test_corrupt_all(("block_hash_tree", 12 * 32),
"block hash tree failure",
- corrupt_early=False,
+ corrupt_early=True,
should_succeed=False))
return d
def test_corrupt_mdmf_block_hash_tree_late(self):
+ # Note - there is no SDMF counterpart to this test, as the SDMF
+ # files are guaranteed to have exactly one block, and therefore
+ # the block hash tree fits within the initial read (#1240).
d = self.publish_mdmf()
d.addCallback(lambda ignored:
self._test_corrupt_all(("block_hash_tree", 12 * 32),
"block hash tree failure",
- corrupt_early=True,
+ corrupt_early=False,
should_succeed=False))
return d
return r
def check_expected_failure(self, r, expected_exception, substring, where):
- for (peerid, storage_index, shnum, f) in r.problems:
+ for (peerid, storage_index, shnum, f) in r.get_share_problems():
if f.check(expected_exception):
self.failUnless(substring in str(f),
"%s: substring '%s' not in '%s'" %
(where, substring, str(f)))
return
self.fail("%s: didn't see expected exception %s in problems %s" %
- (where, expected_exception, r.problems))
+ (where, expected_exception, r.get_share_problems()))
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
return d
+ def test_verify_mdmf_all_bad_sharedata(self):
+ d = self.publish_mdmf()
+ # On 8 of the shares, corrupt the beginning of the share data.
+ # The signature check during the servermap update won't catch this.
+ d.addCallback(lambda ignored:
+ corrupt(None, self._storage, "share_data", range(8)))
+ # On 2 of the shares, corrupt the end of the share data.
+ # The signature check during the servermap update won't catch
+ # this either, and the retrieval process will have to process
+ # all of the segments before it notices.
+ d.addCallback(lambda ignored:
+ # the block hash tree comes right after the share data, so if we
+ # corrupt a little before the block hash tree, we'll corrupt in the
+ # last block of each share.
+ corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
+ d.addCallback(lambda ignored:
+ self._fn.check(Monitor(), verify=True))
+ # The verifier should flag the file as unhealthy, and should
+ # list all 10 shares as bad.
+ d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
+ def _check_num_bad(r):
+ self.failIf(r.is_recoverable())
+ smap = r.get_servermap()
+ self.failUnlessEqual(len(smap.get_bad_shares()), 10)
+ d.addCallback(_check_num_bad)
+ return d
+
def test_check_all_bad_blocks(self):
d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
# the Checker won't notice this.. it doesn't look at actual data
self.failUnlessEqual(old_shares, current_shares)
- def test_unrepairable_0shares(self):
- d = self.publish_one()
- def _delete_all_shares(ign):
+ def _test_whether_repairable(self, publisher, nshares, expected_result):
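+        """
+        Publish a file, delete every share numbered nshares or higher,
+        then check and repair the file. Both the recoverability reported
+        by the check and the repair's success should equal expected_result.
+        """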
+ d = publisher()
+ def _delete_some_shares(ign):
shares = self._storage._peers
for peerid in shares:
- shares[peerid] = {}
- d.addCallback(_delete_all_shares)
+ for shnum in list(shares[peerid]):
+ if shnum >= nshares:
+ del shares[peerid][shnum]
+ d.addCallback(_delete_some_shares)
d.addCallback(lambda ign: self._fn.check(Monitor()))
- d.addCallback(lambda check_results: self._fn.repair(check_results))
- def _check(crr):
- self.failUnlessEqual(crr.get_successful(), False)
+ def _check(cr):
+ self.failIf(cr.is_healthy())
+ self.failUnlessEqual(cr.is_recoverable(), expected_result)
+ return cr
d.addCallback(_check)
- return d
-
- def test_mdmf_unrepairable_0shares(self):
- d = self.publish_mdmf()
- def _delete_all_shares(ign):
- shares = self._storage._peers
- for peerid in shares:
- shares[peerid] = {}
- d.addCallback(_delete_all_shares)
- d.addCallback(lambda ign: self._fn.check(Monitor()))
d.addCallback(lambda check_results: self._fn.repair(check_results))
- d.addCallback(lambda crr: self.failIf(crr.get_successful()))
+ d.addCallback(lambda crr: self.failUnlessEqual(crr.get_successful(), expected_result))
return d
+ def test_unrepairable_0shares(self):
+ return self._test_whether_repairable(self.publish_one, 0, False)
+
+ def test_mdmf_unrepairable_0shares(self):
+ return self._test_whether_repairable(self.publish_mdmf, 0, False)
def test_unrepairable_1share(self):
- d = self.publish_one()
- def _delete_all_shares(ign):
- shares = self._storage._peers
- for peerid in shares:
- for shnum in list(shares[peerid]):
- if shnum > 0:
- del shares[peerid][shnum]
- d.addCallback(_delete_all_shares)
- d.addCallback(lambda ign: self._fn.check(Monitor()))
- d.addCallback(lambda check_results: self._fn.repair(check_results))
- def _check(crr):
- self.failUnlessEqual(crr.get_successful(), False)
- d.addCallback(_check)
- return d
+ return self._test_whether_repairable(self.publish_one, 1, False)
def test_mdmf_unrepairable_1share(self):
- d = self.publish_mdmf()
- def _delete_all_shares(ign):
- shares = self._storage._peers
- for peerid in shares:
- for shnum in list(shares[peerid]):
- if shnum > 0:
- del shares[peerid][shnum]
- d.addCallback(_delete_all_shares)
- d.addCallback(lambda ign: self._fn.check(Monitor()))
- d.addCallback(lambda check_results: self._fn.repair(check_results))
- def _check(crr):
- self.failUnlessEqual(crr.get_successful(), False)
- d.addCallback(_check)
- return d
+ return self._test_whether_repairable(self.publish_mdmf, 1, False)
def test_repairable_5shares(self):
- d = self.publish_mdmf()
- def _delete_all_shares(ign):
- shares = self._storage._peers
- for peerid in shares:
- for shnum in list(shares[peerid]):
- if shnum > 4:
- del shares[peerid][shnum]
- d.addCallback(_delete_all_shares)
- d.addCallback(lambda ign: self._fn.check(Monitor()))
- d.addCallback(lambda check_results: self._fn.repair(check_results))
- def _check(crr):
- self.failUnlessEqual(crr.get_successful(), True)
- d.addCallback(_check)
- return d
+ return self._test_whether_repairable(self.publish_one, 5, True)
def test_mdmf_repairable_5shares(self):
- d = self.publish_mdmf()
+ return self._test_whether_repairable(self.publish_mdmf, 5, True)
+
+ def _test_whether_checkandrepairable(self, publisher, nshares, expected_result):
+ """
+        Like _test_whether_repairable, but invoking check_and_repair
+        instead of check followed by repair.
+ """
+ d = publisher()
def _delete_some_shares(ign):
shares = self._storage._peers
for peerid in shares:
for shnum in list(shares[peerid]):
- if shnum > 5:
+ if shnum >= nshares:
del shares[peerid][shnum]
d.addCallback(_delete_some_shares)
- d.addCallback(lambda ign: self._fn.check(Monitor()))
- def _check(cr):
- self.failIf(cr.is_healthy())
- self.failUnless(cr.is_recoverable())
- return cr
- d.addCallback(_check)
- d.addCallback(lambda check_results: self._fn.repair(check_results))
- def _check1(crr):
- self.failUnlessEqual(crr.get_successful(), True)
- d.addCallback(_check1)
+ d.addCallback(lambda ign: self._fn.check_and_repair(Monitor()))
+ d.addCallback(lambda crr: self.failUnlessEqual(crr.get_repair_successful(), expected_result))
return d
+ def test_unrepairable_0shares_checkandrepair(self):
+ return self._test_whether_checkandrepairable(self.publish_one, 0, False)
+
+ def test_mdmf_unrepairable_0shares_checkandrepair(self):
+ return self._test_whether_checkandrepairable(self.publish_mdmf, 0, False)
+
+ def test_unrepairable_1share_checkandrepair(self):
+ return self._test_whether_checkandrepairable(self.publish_one, 1, False)
+
+ def test_mdmf_unrepairable_1share_checkandrepair(self):
+ return self._test_whether_checkandrepairable(self.publish_mdmf, 1, False)
+
+ def test_repairable_5shares_checkandrepair(self):
+ return self._test_whether_checkandrepairable(self.publish_one, 5, True)
+
+ def test_mdmf_repairable_5shares_checkandrepair(self):
+ return self._test_whether_checkandrepairable(self.publish_mdmf, 5, True)
+
def test_merge(self):
self.old_shares = []
d.addCallback(_check_results)
return d
+ def test_repair_empty(self):
+ # bug 1689: delete one share of an empty mutable file, then repair.
+ # In the buggy version, the check that precedes the retrieve+publish
+        # cycle uses MODE_READ instead of MODE_REPAIR, and fails to get the
+ # privkey that repair needs.
+ d = self.publish_empty_sdmf()
+ def _delete_one_share(ign):
+ shares = self._storage._peers
+ for peerid in shares:
+ for shnum in list(shares[peerid]):
+ if shnum == 0:
+ del shares[peerid][shnum]
+ d.addCallback(_delete_one_share)
+ d.addCallback(lambda ign: self._fn2.check(Monitor()))
+ d.addCallback(lambda check_results: self._fn2.repair(check_results))
+ def _check(crr):
+ self.failUnlessEqual(crr.get_successful(), True)
+ d.addCallback(_check)
+ return d
+
class DevNullDictionary(dict):
def __setitem__(self, key, value):
return
# then mix up the shares, to make sure that download survives seeing
# a variety of encodings. This is actually kind of tricky to set up.
- contents1 = "Contents for encoding 1 (3-of-10) go here"
- contents2 = "Contents for encoding 2 (4-of-9) go here"
- contents3 = "Contents for encoding 3 (4-of-7) go here"
+ contents1 = "Contents for encoding 1 (3-of-10) go here"*1000
+ contents2 = "Contents for encoding 2 (4-of-9) go here"*1000
+ contents3 = "Contents for encoding 3 (4-of-7) go here"*1000
# we make a retrieval object that doesn't know what encoding
# parameters to use
return d
-class Utils(unittest.TestCase):
- def test_cache(self):
- c = ResponseCache()
- # xdata = base62.b2a(os.urandom(100))[:100]
- xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
- ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
- c.add("v1", 1, 0, xdata)
- c.add("v1", 1, 2000, ydata)
- self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
- self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
- self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
- self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
- self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
- self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
- self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
- self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
- self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
- self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
- self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
- self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
- self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
- self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
- self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
- self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
- self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
- self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
-
- # test joining fragments
- c = ResponseCache()
- c.add("v1", 1, 0, xdata[:10])
- c.add("v1", 1, 10, xdata[10:20])
- self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
-
class Exceptions(unittest.TestCase):
def test_repr(self):
nmde = NeedMoreDataError(100, 50, 100)
ucwe = UncoordinatedWriteError()
self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
+
class SameKeyGenerator:
def __init__(self, pubkey, privkey):
self.pubkey = pubkey
self.basedir = "mutable/Problems/test_retrieve_surprise"
self.set_up_grid()
nm = self.g.clients[0].nodemaker
- d = nm.create_mutable_file(MutableData("contents 1"))
+ d = nm.create_mutable_file(MutableData("contents 1"*4000))
def _created(n):
d = defer.succeed(None)
d.addCallback(lambda res: n.get_servermap(MODE_READ))
# now attempt to retrieve the old version with the old servermap.
# This will look like someone has changed the file since we
# updated the servermap.
- d.addCallback(lambda res: n._cache._clear())
d.addCallback(lambda res: log.msg("starting doomed read"))
d.addCallback(lambda res:
self.shouldFail(NotEnoughSharesError,
def test_multiply_placed_shares(self):
self.basedir = "mutable/Problems/test_multiply_placed_shares"
self.set_up_grid()
- self.g.clients[0].DEFAULT_ENCODING_PARAMETERS['n'] = 75
nm = self.g.clients[0].nodemaker
d = nm.create_mutable_file(MutableData("contents 1"))
# remove one of the servers and reupload the file.
self.failUnlessEqual(data, CONTENTS))
return d
+ def test_1654(self):
+ # test that the Retrieve object unconditionally verifies the block
+ # hash tree root for mutable shares. The failure mode is that
+ # carefully crafted shares can cause undetected corruption (the
+ # retrieve appears to finish successfully, but the result is
+ # corrupted). When fixed, these shares always cause a
+ # CorruptShareError, which results in NotEnoughSharesError in this
+ # 2-of-2 file.
+ self.basedir = "mutable/Problems/test_1654"
+ self.set_up_grid(num_servers=2)
+ cap = uri.from_string(TEST_1654_CAP)
+ si = cap.get_storage_index()
+
+ for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
+ sharedata = base64.b64decode(share)
+ storedir = self.get_serverdir(shnum)
+ storage_path = os.path.join(storedir, "shares",
+ storage_index_to_dir(si))
+ fileutil.make_dirs(storage_path)
+ fileutil.write(os.path.join(storage_path, "%d" % shnum),
+ sharedata)
+
+ nm = self.g.clients[0].nodemaker
+ n = nm.create_from_cap(TEST_1654_CAP)
+ # to exercise the problem correctly, we must ensure that sh0 is
+ # processed first, and sh1 second. NoNetworkGrid has facilities to
+ # stall the first request from a single server, but it's not
+ # currently easy to extend that to stall the second request (mutable
+ # retrievals will see two: first the mapupdate, then the fetch).
+        # However, repeated runs of this test without the #1654 fix suggest
+        # that it fails reliably even without explicit stalls,
+ # probably because the servers are queried in a fixed order. So I'm
+ # ok with relying upon that.
+ d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
+ "ran out of servers",
+ n.download_best_version)
+ return d
+
+
+TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"
+
+TEST_1654_SH0 = """\
+VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
+AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
+AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
+UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
+uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
+AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
+ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
+vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
+CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
+Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
+FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
+DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
+AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
+Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
+/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
+73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
+GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
+ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
++QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
+bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
+z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
+eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
+d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
+dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
+2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
+wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
+sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
+eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
+PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
+CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
+Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
+Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
+tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
+Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
+LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
+ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
+jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
+fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
+DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
+tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
+7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
+jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
+TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
+4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
+bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
+72mXGlqyLyWYuAAAAAA="""
+
+TEST_1654_SH1 = """\
+VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
+9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
+AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
+MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
+uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
+AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
+ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
+vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
+CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
+Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
+FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
+DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
+AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
+Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
+/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
+73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
+GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
+ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
++QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
+98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
+z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
+eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
+d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
+dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
+2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
+wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
+sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
+eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
+PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
+CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
+Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
+Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
+tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
+Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
+LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
+ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
+jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
+fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
+DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
+tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
+7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
+jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
+TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
+4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
+bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
+72mXGlqyLyWYuAAAAAA="""
+
class FileHandle(unittest.TestCase):
def setUp(self):
self.c = self.g.clients[0]
self.nm = self.c.nodemaker
self.data = "test data" * 100000 # about 900 KiB; MDMF
- self.small_data = "test data" * 10 # about 90 B; SDMF
+ self.small_data = "test data" * 10 # 90 B; SDMF
def do_upload_mdmf(self):
d.addCallback(_then)
return d
- def do_upload_sdmf(self):
- d = self.nm.create_mutable_file(MutableData(self.small_data))
+ def do_upload_sdmf(self, data=None):
+ if data is None:
+ data = self.small_data
+ d = self.nm.create_mutable_file(MutableData(data))
def _then(n):
assert isinstance(n, MutableFileNode)
assert n._protocol_version == SDMF_VERSION
fso = debug.FindSharesOptions()
storage_index = base32.b2a(n.get_storage_index())
fso.si_s = storage_index
- fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
+ fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(unicode(storedir)))
for (i,ss,storedir)
in self.iterate_servers()]
fso.stdout = StringIO()
return d
- def test_partial_read(self):
- d = self.do_upload_mdmf()
- d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
- modes = [("start_on_segment_boundary",
- mathutil.next_multiple(128 * 1024, 3), 50),
- ("ending_one_byte_after_segment_boundary",
- mathutil.next_multiple(128 * 1024, 3)-50, 51),
- ("zero_length_at_start", 0, 0),
- ("zero_length_in_middle", 50, 0),
- ("zero_length_at_segment_boundary",
- mathutil.next_multiple(128 * 1024, 3), 0),
- ]
+ def _test_partial_read(self, node, expected, modes, step):
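+        """
+        Read each named (offset, length) range in modes from node's best
+        readable version and compare it with the matching slice of
+        expected; then re-read the whole file step bytes at a time and
+        check the reassembled result.
+        """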
+ d = node.get_best_readable_version()
for (name, offset, length) in modes:
- d.addCallback(self._do_partial_read, name, offset, length)
+ d.addCallback(self._do_partial_read, name, expected, offset, length)
# then read only a few bytes at a time, and see that the results are
# what we expect.
def _read_data(version):
c = consumer.MemoryConsumer()
d2 = defer.succeed(None)
- for i in xrange(0, len(self.data), 10000):
- d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
+ for i in xrange(0, len(expected), step):
+ d2.addCallback(lambda ignored, i=i: version.read(c, i, step))
d2.addCallback(lambda ignored:
- self.failUnlessEqual(self.data, "".join(c.chunks)))
+ self.failUnlessEqual(expected, "".join(c.chunks)))
return d2
d.addCallback(_read_data)
return d
- def _do_partial_read(self, version, name, offset, length):
+
+ def _do_partial_read(self, version, name, expected, offset, length):
c = consumer.MemoryConsumer()
d = version.read(c, offset, length)
- expected = self.data[offset:offset+length]
+ expected_range = expected[offset:offset+length]
d.addCallback(lambda ignored: "".join(c.chunks))
def _check(results):
- if results != expected:
- print
+ if results != expected_range:
print "got: %s ... %s" % (results[:20], results[-20:])
- print "exp: %s ... %s" % (expected[:20], expected[-20:])
- self.fail("results[%s] != expected" % name)
+ print "exp: %s ... %s" % (expected_range[:20], expected_range[-20:])
+ self.fail("results[%s] != expected_range" % name)
return version # daisy-chained to next call
d.addCallback(_check)
return d
+ def test_partial_read_mdmf(self):
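+        # the offsets below are based on the MDMF segment size: 128 KiB
+        # rounded up to the next multiple of k=3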
+ segment_boundary = mathutil.next_multiple(128 * 1024, 3)
+ modes = [("start_on_segment_boundary", segment_boundary, 50),
+ ("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
+ ("zero_length_at_start", 0, 0),
+ ("zero_length_in_middle", 50, 0),
+ ("zero_length_at_segment_boundary", segment_boundary, 0),
+ ("complete_file", 0, len(self.data)),
+ ("complete_file_past_end", 0, len(self.data)+1),
+ ]
+ d = self.do_upload_mdmf()
+ d.addCallback(self._test_partial_read, self.data, modes, 10000)
+ return d
+
+ def test_partial_read_sdmf_90(self):
+ modes = [("start_at_middle", 50, 40),
+ ("zero_length_at_start", 0, 0),
+ ("zero_length_in_middle", 50, 0),
+ ("complete_file", 0, 90),
+ ]
+ d = self.do_upload_sdmf()
+ d.addCallback(self._test_partial_read, self.small_data, modes, 10)
+ return d
+
+ def test_partial_read_sdmf_100(self):
+ data = "test data "*10
+ modes = [("start_at_middle", 50, 50),
+ ("zero_length_at_start", 0, 0),
+ ("zero_length_in_middle", 50, 0),
+ ("complete_file", 0, 100),
+ ]
+ d = self.do_upload_sdmf(data=data)
+ d.addCallback(self._test_partial_read, data, modes, 10)
+ return d
+
+ def test_partial_read_sdmf_2(self):
+ data = "hi"
+ modes = [("one_byte", 0, 1),
+ ("last_byte", 1, 1),
+ ("complete_file", 0, 2),
+ ]
+ d = self.do_upload_sdmf(data=data)
+ d.addCallback(self._test_partial_read, data, modes, 1)
+ return d
+
def _test_read_and_download(self, node, expected):
d = node.get_best_readable_version()
def setUp(self):
GridTestMixin.setUp(self)
self.basedir = self.mktemp()
- self.set_up_grid()
+ self.set_up_grid(num_servers=13)
self.c = self.g.clients[0]
self.nm = self.c.nodemaker
self.data = "testdata " * 100000 # about 900 KiB; MDMF
- self.small_data = "test data" * 10 # about 90 B; SDMF
+ self.small_data = "test data" * 10 # 90 B; SDMF
def do_upload_sdmf(self):