from zope.interface import implements
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
-from allmydata.util import base32, consumer, fileutil
+from allmydata.util import base32, consumer, fileutil, mathutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
ssk_pubkey_fingerprint_hash
from allmydata.util.deferredutil import gatherResults
from allmydata.mutable.repairer import MustForceRepairError
import allmydata.test.common_util as testutil
+from allmydata.test.common import TEST_RSA_KEY_SIZE
+
# this "FakeStorage" exists to put the share data in RAM and avoid using real
# network connections, both to speed up the tests and to reduce the amount of
storage_broker = make_storagebroker(s, num_peers)
sh = client.SecretHolder("lease secret", "convergence secret")
keygen = client.KeyGenerator()
- keygen.set_default_keysize(522)
+ keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
nodemaker = NodeMaker(storage_broker, sh, None,
None, None,
{"k": 3, "n": 10}, keygen)
self.failUnlessEqual(len(shnums), 1)
d.addCallback(_created)
return d
- test_create.timeout = 15
def test_create_mdmf(self):
def _created(n):
self.failUnless(isinstance(n, MutableFileNode))
cap = n.get_cap()
- self.failUnless(isinstance(cap, uri.WritableMDMFFileURI))
+ self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
rcap = n.get_readcap()
self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
vcap = n.get_verify_cap()
d.addCallback(lambda ignored:
self.failUnlessEqual(self.c.data, "contents1" * 100000))
return d
- test_retrieve_pause.timeout = 25
def test_download_from_mdmf_cap(self):
return d
d.addCallback(_created)
return d
- test_create_with_initial_contents.timeout = 15
def test_create_mdmf_with_initial_contents(self):
return d
d.addCallback(_created)
return d
- test_create_mdmf_with_initial_contents.timeout = 20
def test_response_cache_memory_leak(self):
return d
d.addCallback(_created)
return d
- test_modify.timeout = 15
def test_modify_backoffer(self):
self.failIf(servermap.all_peers())
d.addCallback(_check_servermap)
return d
- test_no_servers.timeout = 15
def test_no_servers_download(self):
sb2 = make_storagebroker(num_peers=0)
d.addCallback(_restore)
d.addCallback(_retrieved)
return d
- test_no_servers_download.timeout = 15
def _test_corrupt_all(self, offset, substring,
d = self._fn.check(Monitor(), verify=True)
d.addCallback(self.check_good, "test_verify_good")
return d
- test_verify_good.timeout = 15
def test_verify_all_bad_sig(self):
d = corrupt(None, self._storage, 1) # bad sig
return d
d.addCallback(_created)
return d
- test_unexpected_shares.timeout = 15
def test_bad_server(self):
# Break one server, then create the file: the initial publish should
# use #467 static-server-selection to disable permutation and force
# the choice of server for share[0].
- d = nm.key_generator.generate(522)
+ d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
def _got_key( (pubkey, privkey) ):
nm.key_generator = SameKeyGenerator(pubkey, privkey)
pubkey_s = pubkey.serialize()
"Ran out of non-bad servers",
nm.create_mutable_file, MutableData("contents"))
return d
- test_publish_no_servers.timeout = 30
def test_privkey_query_error(self):
# That's essentially what MDMF node is, so just check that.
mdmf_uri = self.mdmf_node.get_uri()
cap = uri.from_string(mdmf_uri)
- self.failUnless(isinstance(cap, uri.WritableMDMFFileURI))
+ self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
cap = uri.from_string(readonly_mdmf_uri)
self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
d.addCallback(_read_data)
return d
+    def test_partial_read_starting_on_segment_boundary(self):
+        # Partial read whose start offset falls exactly on a segment
+        # boundary of the MDMF file: fetch the best readable version,
+        # read 50 bytes from that offset, and check them against the
+        # same slice of the plaintext kept in self.data.
+        # NOTE(review): presumably next_multiple(128 * 1024, 3) lands on
+        # a segment boundary of the test file laid out in setUp, and
+        # self.data extends at least offset+50 bytes -- confirm there.
+        d = self.mdmf_node.get_best_readable_version()
+        c = consumer.MemoryConsumer()
+        offset = mathutil.next_multiple(128 * 1024, 3)
+        d.addCallback(lambda version:
+                      version.read(c, offset, 50))
+        expected = self.data[offset:offset+50]
+        d.addCallback(lambda ignored:
+                      self.failUnlessEqual(expected, "".join(c.chunks)))
+        return d
+
+    def test_partial_read_ending_on_segment_boundary(self):
+        # Partial read that straddles a segment boundary: start 50 bytes
+        # before the boundary and read 51 bytes, so the read ends one
+        # byte past it, then compare against the matching slice of the
+        # plaintext in self.data.
+        # NOTE(review): assumes the same segment layout as the
+        # companion "starting on segment boundary" test -- verify in
+        # setUp that self.data covers offset+1 bytes.
+        d = self.mdmf_node.get_best_readable_version()
+        c = consumer.MemoryConsumer()
+        offset = mathutil.next_multiple(128 * 1024, 3)
+        start = offset - 50
+        d.addCallback(lambda version:
+                      version.read(c, start, 51))
+        expected = self.data[offset-50:offset+1]
+        d.addCallback(lambda ignored:
+                      self.failUnlessEqual(expected, "".join(c.chunks)))
+        return d
def test_read(self):
d = self.mdmf_node.get_best_readable_version()
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
+ timeout = 400 # these tests are too big, 120s is not enough on slow
+ # platforms
def setUp(self):
GridTestMixin.setUp(self)
self.basedir = self.mktemp()
d.addCallback(lambda results:
self.failUnlessEqual(results, new_data))
return d
- test_append_power_of_two.timeout = 15
def test_update_sdmf(self):