import os, re, base64
from cStringIO import StringIO
+
from twisted.trial import unittest
from twisted.internet import defer, reactor
+
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32, consumer, fileutil, mathutil
+from allmydata.util.fileutil import abspath_expanduser_unicode
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
ssk_pubkey_fingerprint_hash
from allmydata.util.consumer import MemoryConsumer
dumped = servermap.dump(StringIO())
self.failUnlessIn("3-of-10", dumped.getvalue())
d.addCallback(_then)
- # Now overwrite the contents with some new contents. We want
+ # Now overwrite the contents with some new contents. We want
# to make them big enough to force the file to be uploaded
# in more than one segment.
big_contents = "contents1" * 100000 # about 900 KiB
# before, they need to be big enough to force multiple
# segments, so that we make the downloader deal with
# multiple segments.
- bigger_contents = "contents2" * 1000000 # about 9MiB
+ bigger_contents = "contents2" * 1000000 # about 9MiB
bigger_contents_uploadable = MutableData(bigger_contents)
d.addCallback(lambda ignored:
n.overwrite(bigger_contents_uploadable))
d.addCallback(_remove_shares)
return d
+ def test_all_but_two_shares_vanished_updated_servermap(self):
+        # tests error reporting for ticket #1742: most shares vanish after the first
+        # mapupdate, so the download must fail even with a refreshed servermap
+ d = self.make_servermap()
+ def _remove_shares(servermap):
+ self._version = servermap.best_recoverable_version()
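+            # wipe the shares held by all but the first two servers, leaving
+            # too few shares to recover the file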
+ for shares in self._storage._peers.values()[2:]:
+ shares.clear()
+ return self.make_servermap(servermap)
+ d.addCallback(_remove_shares)
+ def _check(updated_servermap):
+ d1 = self.shouldFail(NotEnoughSharesError,
+ "test_all_but_two_shares_vanished_updated_servermap",
+ "ran out of servers",
+ self.do_download, updated_servermap, version=self._version)
+ return d1
+ d.addCallback(_check)
+ return d
+
def test_no_servers(self):
sb2 = make_storagebroker(num_peers=0)
# if there are no servers, then a MODE_READ servermap should come
def test_corrupt_all_encprivkey_late(self):
- # this should work for the same reason as above, but we corrupt
+ # this should work for the same reason as above, but we corrupt
# after the servermap update to exercise the error handling
# code.
# We need to remove the privkey from the node, or the retrieve
self.c = self.g.clients[0]
self.nm = self.c.nodemaker
self.data = "test data" * 100000 # about 900 KiB; MDMF
- self.small_data = "test data" * 10 # about 90 B; SDMF
+ self.small_data = "test data" * 10 # 90 B; SDMF
def do_upload_mdmf(self):
d.addCallback(_then)
return d
- def do_upload_sdmf(self):
- d = self.nm.create_mutable_file(MutableData(self.small_data))
+ def do_upload_sdmf(self, data=None):
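+        # callers may pass their own contents; default to the 90-byte SDMF
+        # payload from setUp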
+ if data is None:
+ data = self.small_data
+ d = self.nm.create_mutable_file(MutableData(data))
def _then(n):
assert isinstance(n, MutableFileNode)
assert n._protocol_version == SDMF_VERSION
fso = debug.FindSharesOptions()
storage_index = base32.b2a(n.get_storage_index())
fso.si_s = storage_index
- fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
+ fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(unicode(storedir)))
for (i,ss,storedir)
in self.iterate_servers()]
fso.stdout = StringIO()
return d
- def test_partial_read(self):
- d = self.do_upload_mdmf()
- d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
- modes = [("start_on_segment_boundary",
- mathutil.next_multiple(128 * 1024, 3), 50),
- ("ending_one_byte_after_segment_boundary",
- mathutil.next_multiple(128 * 1024, 3)-50, 51),
- ("zero_length_at_start", 0, 0),
- ("zero_length_in_middle", 50, 0),
- ("zero_length_at_segment_boundary",
- mathutil.next_multiple(128 * 1024, 3), 0),
- ]
+ def _test_partial_read(self, node, expected, modes, step):
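+        # for each (name, offset, length) in modes, read that range from the
+        # best readable version and compare it against the same slice of
+        # 'expected'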
+ d = node.get_best_readable_version()
for (name, offset, length) in modes:
- d.addCallback(self._do_partial_read, name, offset, length)
+ d.addCallback(self._do_partial_read, name, expected, offset, length)
# then read only a few bytes at a time, and see that the results are
# what we expect.
def _read_data(version):
c = consumer.MemoryConsumer()
d2 = defer.succeed(None)
- for i in xrange(0, len(self.data), 10000):
- d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
+ for i in xrange(0, len(expected), step):
+ d2.addCallback(lambda ignored, i=i: version.read(c, i, step))
d2.addCallback(lambda ignored:
- self.failUnlessEqual(self.data, "".join(c.chunks)))
+ self.failUnlessEqual(expected, "".join(c.chunks)))
return d2
d.addCallback(_read_data)
return d
- def _do_partial_read(self, version, name, offset, length):
+
+ def _do_partial_read(self, version, name, expected, offset, length):
c = consumer.MemoryConsumer()
d = version.read(c, offset, length)
- expected = self.data[offset:offset+length]
+ expected_range = expected[offset:offset+length]
d.addCallback(lambda ignored: "".join(c.chunks))
def _check(results):
- if results != expected:
- print
+ if results != expected_range:
print "got: %s ... %s" % (results[:20], results[-20:])
- print "exp: %s ... %s" % (expected[:20], expected[-20:])
- self.fail("results[%s] != expected" % name)
+ print "exp: %s ... %s" % (expected_range[:20], expected_range[-20:])
+ self.fail("results[%s] != expected_range" % name)
return version # daisy-chained to next call
d.addCallback(_check)
return d
+ def test_partial_read_mdmf(self):
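+        # offset of the first MDMF segment boundary: the default 128 KiB
+        # segment size, rounded up to a multiple of k=3 for the 3-of-10
+        # encoding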
+ segment_boundary = mathutil.next_multiple(128 * 1024, 3)
+ modes = [("start_on_segment_boundary", segment_boundary, 50),
+ ("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
+ ("zero_length_at_start", 0, 0),
+ ("zero_length_in_middle", 50, 0),
+ ("zero_length_at_segment_boundary", segment_boundary, 0),
+ ("complete_file", 0, len(self.data)),
+ ("complete_file_past_end", 0, len(self.data)+1),
+ ]
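+        # "complete_file_past_end" expects a read extending past EOF to be
+        # truncated to the actual file contents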
+ d = self.do_upload_mdmf()
+ d.addCallback(self._test_partial_read, self.data, modes, 10000)
+ return d
+
+ def test_partial_read_sdmf_90(self):
+ modes = [("start_at_middle", 50, 40),
+ ("zero_length_at_start", 0, 0),
+ ("zero_length_in_middle", 50, 0),
+ ("complete_file", 0, 90),
+ ]
+ d = self.do_upload_sdmf()
+ d.addCallback(self._test_partial_read, self.small_data, modes, 10)
+ return d
+
+ def test_partial_read_sdmf_100(self):
+        data = "test data " * 10 # 100 B
+ modes = [("start_at_middle", 50, 50),
+ ("zero_length_at_start", 0, 0),
+ ("zero_length_in_middle", 50, 0),
+ ("complete_file", 0, 100),
+ ]
+ d = self.do_upload_sdmf(data=data)
+ d.addCallback(self._test_partial_read, data, modes, 10)
+ return d
+
+ def test_partial_read_sdmf_2(self):
+ data = "hi"
+ modes = [("one_byte", 0, 1),
+ ("last_byte", 1, 1),
+ ("complete_file", 0, 2),
+ ]
+ d = self.do_upload_sdmf(data=data)
+ d.addCallback(self._test_partial_read, data, modes, 1)
+ return d
+
def _test_read_and_download(self, node, expected):
d = node.get_best_readable_version()
def setUp(self):
GridTestMixin.setUp(self)
self.basedir = self.mktemp()
- self.set_up_grid()
+ self.set_up_grid(num_servers=13)
self.c = self.g.clients[0]
self.nm = self.c.nodemaker
self.data = "testdata " * 100000 # about 900 KiB; MDMF
- self.small_data = "test data" * 10 # about 90 B; SDMF
+ self.small_data = "test data" * 10 # 90 B; SDMF
def do_upload_sdmf(self):