result_of_read = br.remote_read(0, len(share_data)+1)
self.failUnlessEqual(result_of_read, share_data)
-class RemoteBucket:
+class RemoteBucket:
def __init__(self):
    """Start with zeroed traffic counters."""
    # A fresh bucket has seen no reads or writes yet.
    self.read_count = self.write_count = 0
0x44, WriteBucketProxy_v2, ReadBucketProxy)
class Server(unittest.TestCase):
-
def setUp(self):
    """Create and start the logging service parent used by each test.

    Also initializes a counter that hands out a fresh integer whenever a
    lease secret is needed.
    """
    self.sparent = LoggingServiceParent()
    # Must be started before tests attach child services to it.
    self.sparent.startService()
    self._lease_secret = itertools.count()
+
def tearDown(self):
    """Stop the service parent started in setUp.

    The result of stopService() is returned so the test runner can wait
    for shutdown to finish (presumably a Deferred -- TODO confirm).
    """
    return self.sparent.stopService()
+
def workdir(self, name):
    """Return the per-test working directory path under storage/Server/."""
    return os.path.join("storage", "Server", name)
server.setServiceParent(self.sparent)
return server
+
def test_create(self):
    """Smoke test: constructing a server via the create() helper must not raise."""
    self.create("test_create")
self.failUnlessIn("This share tastes like dust.", report)
-
class MutableServer(unittest.TestCase):
-
def setUp(self):
    """Create the logging service parent and the lease-secret counter.

    NOTE(review): unlike Server.setUp, the service parent is not
    startService()'d here -- confirm whether that is intentional.
    """
    self.sparent = LoggingServiceParent()
    self._lease_secret = itertools.count()
+
def tearDown(self):
    """Stop the service parent, returning its result so the runner can wait."""
    return self.sparent.stopService()
+
def workdir(self, name):
    """Per-test working directory under storage/MutableServer/."""
    return os.path.join("storage", "MutableServer", name)
def test_create(self):
    """Smoke test: server construction via create() must not raise."""
    self.create("test_create")
+
def write_enabler(self, we_tag):
    """Derive the deterministic write-enabler secret for *we_tag*."""
    secret = hashutil.tagged_hash("we_blah", we_tag)
    return secret
self.failUnless(isinstance(readv_data, dict))
self.failUnlessEqual(len(readv_data), 0)
+
def test_bad_magic(self):
ss = self.create("test_bad_magic")
self.allocate(ss, "si1", "we1", self._lease_secret.next(), set([0]), 10)
))
self.failUnlessEqual(read("si1", [0], [(0,100)]), {0: [data]})
-
def test_operators(self):
# test operators, the data we're comparing is '11111' in all cases.
# test both fail+pass, reset data after each one.
# header.
self.salt_hash_tree_s = self.serialize_blockhashes(self.salt_hash_tree[1:])
-
def tearDown(self):
    """Stop the service parent and delete this test's storage directory.

    NOTE(review): stopService()'s result is not returned here, so any
    asynchronous part of shutdown would overlap the rmtree below --
    confirm whether that matters for these tests.
    """
    self.sparent.stopService()
    shutil.rmtree(self.workdir("MDMFProxies storage test server"))
def write_enabler(self, we_tag):
    """Return the write-enabler secret derived from *we_tag*."""
    tagged = hashutil.tagged_hash("we_blah", we_tag)
    return tagged
-
def renew_secret(self, tag):
    """Deterministic lease-renewal secret for *tag* (stringified first)."""
    tag_str = str(tag)
    return hashutil.tagged_hash("renew_blah", tag_str)
-
def cancel_secret(self, tag):
    """Deterministic lease-cancellation secret for *tag* (stringified first)."""
    tag_str = str(tag)
    return hashutil.tagged_hash("cancel_blah", tag_str)
-
def workdir(self, name):
    """Working directory for this test, under storage/MutableServer/."""
    base = os.path.join("storage", "MutableServer", name)
    return base
-
def create(self, name):
    """Build a StorageServer rooted in this test's workdir and attach it
    to the running service parent."""
    server = StorageServer(self.workdir(name), "\x00" * 20)
    server.setServiceParent(self.sparent)
    return server
data += self.block_hash_tree_s
return data
-
def write_test_share_to_server(self,
storage_index,
tail_segment=False,
self.offsets['EOF'] = eof_offset
return final_share
-
def write_sdmf_share_to_server(self,
storage_index,
empty=False):
self.failUnlessEqual(checkstring, checkstring))
return d
-
def test_read_with_different_tail_segment_size(self):
self.write_test_share_to_server("si1", tail_segment=True)
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
d.addCallback(_check_tail_segment)
return d
-
def test_get_block_with_invalid_segnum(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr.get_block_and_salt, 7))
return d
-
def test_get_encoding_parameters_first(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
d.addCallback(_check_encoding_parameters)
return d
-
def test_get_seqnum_first(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
self.failUnlessEqual(seqnum, 0))
return d
-
def test_get_root_hash_first(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
self.failUnlessEqual(root_hash, self.root_hash))
return d
-
def test_get_checkstring_first(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
self.failUnlessEqual(checkstring, self.checkstring))
return d
-
def test_write_read_vectors(self):
# When writing for us, the storage server will return to us a
# read vector, along with its result. If a write fails because
# The checkstring remains the same for the rest of the process.
return d
-
def test_private_key_after_share_hash_chain(self):
mw = self._make_new_mw("si1", 0)
d = defer.succeed(None)
mw.put_encprivkey, self.encprivkey))
return d
-
def test_signature_after_verification_key(self):
mw = self._make_new_mw("si1", 0)
d = defer.succeed(None)
mw.put_signature, self.signature))
return d
-
def test_uncoordinated_write(self):
# Make two mutable writers, both pointing to the same storage
# server, both at the same storage index, and try writing to the
d.addCallback(_check_failure)
return d
-
def test_invalid_salt_size(self):
# Salts need to be 16 bytes in size. Writes that attempt to
# write more or less than this should be rejected.
another_invalid_salt))
return d
-
def test_write_test_vectors(self):
# If we give the write proxy a bogus test vector at
# any point during the process, it should fail to write when we
def serialize_blockhashes(self, blockhashes):
    """Concatenate the block hashes into one contiguous string."""
    joined = "".join(blockhashes)
    return joined
-
def serialize_sharehashes(self, sharehashes):
ret = "".join([struct.pack(">H32s", i, sharehashes[i])
for i in sorted(sharehashes.keys())])
d.addCallback(_check_publish)
return d
+
def _make_new_mw(self, si, share, datalength=36):
# This is a file of size 36 bytes. Since it has a segment
# size of 6, we know that it has 6 byte segments, which will
6, datalength)
return mw
-
def test_write_rejected_with_too_many_blocks(self):
mw = self._make_new_mw("si0", 0)
mw.put_block, self.block, 7, self.salt))
return d
-
def test_write_rejected_with_invalid_salt(self):
# Try writing an invalid salt. Salts are 16 bytes -- any more or
# less should cause an error.
None, mw.put_block, self.block, 7, bad_salt))
return d
-
def test_write_rejected_with_invalid_root_hash(self):
# Try writing an invalid root hash. This should be SHA256d, and
# 32 bytes long as a result.
None, mw.put_root_hash, invalid_root_hash))
return d
-
def test_write_rejected_with_invalid_blocksize(self):
# The blocksize implied by the writer that we get from
# _make_new_mw is 2bytes -- any more or any less than this
mw.put_block(valid_block, 5, self.salt))
return d
-
def test_write_enforces_order_constraints(self):
# We require that the MDMFSlotWriteProxy be interacted with in a
# specific way.
mw0.put_verification_key(self.verification_key))
return d
-
def test_end_to_end(self):
mw = self._make_new_mw("si1", 0)
# Write a share using the mutable writer, and make sure that the
self.failUnlessEqual(checkstring, mw.get_checkstring()))
return d
-
def test_is_sdmf(self):
# The MDMFSlotReadProxy should also know how to read SDMF files,
# since it will encounter them on the grid. Callers use the
self.failUnless(issdmf))
return d
-
def test_reads_sdmf(self):
# The slot read proxy should, naturally, know how to tell us
# about data in the SDMF format
self.failUnlessEqual(root_hash, self.root_hash, root_hash))
return d
-
def test_only_reads_one_segment_sdmf(self):
# SDMF shares have only one segment, so it doesn't make sense to
# read more segments than that. The reader should know this and
mr.get_block_and_salt, 1))
return d
-
def test_read_with_prefetched_mdmf_data(self):
# The MDMFSlotReadProxy will prefill certain fields if you pass
# it data that you have already fetched. This is useful for
d.addCallback(_check_block_and_salt)
return d
-
def test_read_with_prefetched_sdmf_data(self):
sdmf_data = self.build_test_sdmf_share()
self.write_sdmf_share_to_server("si1")
d.addCallback(_check_block_and_salt)
return d
-
def test_read_with_empty_mdmf_file(self):
# Some tests upload a file with no contents to test things
# unrelated to the actual handling of the content of the file.
mr.get_block_and_salt, 0))
return d
-
def test_read_with_empty_sdmf_file(self):
self.write_sdmf_share_to_server("si1", empty=True)
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
mr.get_block_and_salt, 0))
return d
-
def test_verinfo_with_sdmf_file(self):
self.write_sdmf_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
d.addCallback(_check_verinfo)
return d
-
def test_verinfo_with_mdmf_file(self):
self.write_test_share_to_server("si1")
mr = MDMFSlotReadProxy(self.rref, "si1", 0)
d.addCallback(_check_verinfo)
return d
-
def test_sdmf_writer(self):
# Go through the motions of writing an SDMF share to the storage
# server. Then read the storage server to see that the share got
d.addCallback(_then)
return d
-
def test_sdmf_writer_preexisting_share(self):
data = self.build_test_sdmf_share()
self.write_sdmf_share_to_server("si1")
class Stats(unittest.TestCase):
-
def setUp(self):
    """Create the logging service parent and the lease-secret counter.

    NOTE(review): the service parent is not startService()'d here,
    unlike Server.setUp -- confirm whether that is intentional.
    """
    self.sparent = LoggingServiceParent()
    self._lease_secret = itertools.count()
+
def tearDown(self):
    """Stop the service parent, returning its result so the runner can wait."""
    return self.sparent.stopService()
+
def workdir(self, name):
    """Per-test working directory path under storage/Server/."""
    path = os.path.join("storage", "Server", name)
    return path
server.setServiceParent(self.sparent)
return server
+
def test_latencies(self):
server = self.create("test_latencies")
for i in range(10000):
self.failUnless(output["get"]["99_0_percentile"] is None, output)
self.failUnless(output["get"]["99_9_percentile"] is None, output)
+
def remove_tags(s):
    """Strip HTML tags from *s* and collapse whitespace.

    Each tag is replaced by a single space, then runs of whitespace are
    collapsed to one space, so adjacent words separated only by tags do
    not get glued together.
    """
    s = re.sub(r'<[^>]*>', ' ', s)
    s = re.sub(r'\s+', ' ', s)
    # Bug fix: the visible original fell off the end and returned None,
    # making the stripped text unusable by callers.
    return s
class BucketCounterTest(unittest.TestCase, CrawlerTestMixin, ReallyEqualMixin):
-
def setUp(self):
    """Create and start a plain MultiService to parent per-test services."""
    self.s = service.MultiService()
    self.s.startService()
+
def tearDown(self):
    """Stop the MultiService, returning its result so the runner can wait."""
    return self.s.stopService()
+
def test_bucket_counter(self):
    """Set up a fresh basedir for the bucket-counter test.

    NOTE(review): only directory creation is visible here; the rest of
    the test body appears to be elided from this view.
    """
    basedir = "storage/BucketCounter/bucket_counter"
    fileutil.make_dirs(basedir)
def setUp(self):
    """Create and start a plain MultiService to parent per-test services."""
    self.s = service.MultiService()
    self.s.startService()
+
def tearDown(self):
    """Stop the MultiService, returning its result so the runner can wait."""
    return self.s.stopService()
+
def make_shares(self, ss):
def make(si):
return (si, hashutil.tagged_hash("renew", si),
def setUp(self):
    """Create and start a plain MultiService to parent per-test services."""
    self.s = service.MultiService()
    self.s.startService()
+
def tearDown(self):
    """Stop the MultiService, returning its result so the runner can wait."""
    return self.s.stopService()
+
def test_no_server(self):
w = StorageStatus(None)
html = w.renderSynchronously()