From: David-Sarah Hopwood
Date: Sun, 18 Nov 2012 05:04:48 +0000 (+0000)
Subject: Cosmetics.
X-Git-Url: https://git.rkrishnan.org/(%5B%5E?a=commitdiff_plain;h=4445ba17dfdc9698007929e239d445be5de78413;p=tahoe-lafs%2Ftahoe-lafs.git

Cosmetics.

Signed-off-by: David-Sarah Hopwood
---

diff --git a/src/allmydata/storage/crawler.py b/src/allmydata/storage/crawler.py
index 05b0ef40..571734cf 100644
--- a/src/allmydata/storage/crawler.py
+++ b/src/allmydata/storage/crawler.py
@@ -131,17 +131,17 @@ class ShareCrawler(HookMixin, service.MultiService):
                                   cycle
         """
-        d = {}
+        p = {}
         if self.state["current-cycle"] is None:
-            d["cycle-in-progress"] = False
-            d["next-crawl-time"] = self.next_wake_time
-            d["remaining-wait-time"] = self.minus_or_none(self.next_wake_time,
+            p["cycle-in-progress"] = False
+            p["next-crawl-time"] = self.next_wake_time
+            p["remaining-wait-time"] = self.minus_or_none(self.next_wake_time,
                                                           time.time())
         else:
-            d["cycle-in-progress"] = True
+            p["cycle-in-progress"] = True
             pct = 100.0 * self.last_complete_prefix_index / len(self.prefixes)
-            d["cycle-complete-percentage"] = pct
+            p["cycle-complete-percentage"] = pct
             remaining = None
             if self.last_prefix_elapsed_time is not None:
                 left = len(self.prefixes) - self.last_complete_prefix_index
@@ -150,18 +150,18 @@ class ShareCrawler(HookMixin, service.MultiService):
                 # per-bucket time, probably by measuring the time spent on
                 # this prefix so far, divided by the number of buckets we've
                 # processed.
-            d["estimated-cycle-complete-time-left"] = remaining
+            p["estimated-cycle-complete-time-left"] = remaining
             # it's possible to call get_progress() from inside a crawler's
             # finished_prefix() function
-            d["remaining-sleep-time"] = self.minus_or_none(self.next_wake_time,
+            p["remaining-sleep-time"] = self.minus_or_none(self.next_wake_time,
                                                            time.time())
         per_cycle = None
         if self.last_cycle_elapsed_time is not None:
             per_cycle = self.last_cycle_elapsed_time
         elif self.last_prefix_elapsed_time is not None:
             per_cycle = len(self.prefixes) * self.last_prefix_elapsed_time
-        d["estimated-time-per-cycle"] = per_cycle
-        return d
+        p["estimated-time-per-cycle"] = per_cycle
+        return p
 
     def get_state(self):
         """I return the current state of the crawler.
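The crawler.py hunks above only rename the local result dict inside ShareCrawler.get_progress(); the keys it returns are unchanged. As a rough sketch (not part of this patch; `crawler` and `describe_crawler_progress` are placeholder names for any started ShareCrawler instance and a hypothetical helper), a caller might read that dict like this:

    def describe_crawler_progress(crawler):
        # get_progress() returns a plain dict; the keys below are the ones
        # visible in the hunks above. Time values may be None when unknown.
        p = crawler.get_progress()
        if p["cycle-in-progress"]:
            return "cycle %s%% complete, about %s s of this cycle left" % (
                p["cycle-complete-percentage"],
                p["estimated-cycle-complete-time-left"])
        return "between cycles; next crawl at %s (in %s s)" % (
            p["next-crawl-time"], p["remaining-wait-time"])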
diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index 86c20f1a..7013b391 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -159,8 +159,8 @@ class Bucket(unittest.TestCase):
         result_of_read = br.remote_read(0, len(share_data)+1)
         self.failUnlessEqual(result_of_read, share_data)
 
-class RemoteBucket:
 
+class RemoteBucket:
     def __init__(self):
         self.read_count = 0
         self.write_count = 0
@@ -302,14 +302,15 @@ class BucketProxy(unittest.TestCase):
                              0x44, WriteBucketProxy_v2, ReadBucketProxy)
 
 class Server(unittest.TestCase):
-
     def setUp(self):
         self.sparent = LoggingServiceParent()
         self.sparent.startService()
         self._lease_secret = itertools.count()
+
     def tearDown(self):
         return self.sparent.stopService()
+
     def workdir(self, name):
         basedir = os.path.join("storage", "Server", name)
         return basedir
@@ -321,6 +322,7 @@ class Server(unittest.TestCase):
         server.setServiceParent(self.sparent)
         return server
 
+
     def test_create(self):
         self.create("test_create")
 
@@ -757,15 +759,15 @@ class Server(unittest.TestCase):
         self.failUnlessIn("This share tastes like dust.", report)
 
 
-
 class MutableServer(unittest.TestCase):
-
     def setUp(self):
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
+
     def tearDown(self):
         return self.sparent.stopService()
+
     def workdir(self, name):
         basedir = os.path.join("storage", "MutableServer", name)
         return basedir
@@ -779,6 +781,7 @@ class MutableServer(unittest.TestCase):
     def test_create(self):
         self.create("test_create")
 
+
     def write_enabler(self, we_tag):
         return hashutil.tagged_hash("we_blah", we_tag)
 
@@ -805,6 +808,7 @@ class MutableServer(unittest.TestCase):
         self.failUnless(isinstance(readv_data, dict))
         self.failUnlessEqual(len(readv_data), 0)
 
+
     def test_bad_magic(self):
         ss = self.create("test_bad_magic")
         self.allocate(ss, "si1", "we1", self._lease_secret.next(), set([0]), 10)
@@ -986,7 +990,6 @@ class MutableServer(unittest.TestCase):
                                          ))
         self.failUnlessEqual(read("si1", [0], [(0,100)]), {0: [data]})
 
-
     def test_operators(self):
         # test operators, the data we're comparing is '11111' in all cases.
         # test both fail+pass, reset data after each one.
@@ -1379,7 +1382,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         # header.
         self.salt_hash_tree_s = self.serialize_blockhashes(self.salt_hash_tree[1:])
 
-
     def tearDown(self):
         self.sparent.stopService()
         shutil.rmtree(self.workdir("MDMFProxies storage test server"))
@@ -1388,23 +1390,18 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
     def write_enabler(self, we_tag):
         return hashutil.tagged_hash("we_blah", we_tag)
 
-
     def renew_secret(self, tag):
         return hashutil.tagged_hash("renew_blah", str(tag))
 
-
     def cancel_secret(self, tag):
         return hashutil.tagged_hash("cancel_blah", str(tag))
 
-
     def workdir(self, name):
         basedir = os.path.join("storage", "MutableServer", name)
         return basedir
 
-
     def create(self, name):
         workdir = self.workdir(name)
-
         server = StorageServer(workdir, "\x00" * 20)
         server.setServiceParent(self.sparent)
         return server
@@ -1506,7 +1503,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         data += self.block_hash_tree_s
         return data
 
-
     def write_test_share_to_server(self,
                                    storage_index,
                                    tail_segment=False,
@@ -1579,7 +1575,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         self.offsets['EOF'] = eof_offset
         return final_share
 
-
     def write_sdmf_share_to_server(self,
                                    storage_index,
                                    empty=False):
@@ -1665,7 +1660,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                           self.failUnlessEqual(checkstring, checkstring))
         return d
 
-
     def test_read_with_different_tail_segment_size(self):
         self.write_test_share_to_server("si1", tail_segment=True)
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1677,7 +1671,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_tail_segment)
         return d
 
-
     def test_get_block_with_invalid_segnum(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1688,7 +1681,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mr.get_block_and_salt, 7))
         return d
 
-
     def test_get_encoding_parameters_first(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1701,7 +1693,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_encoding_parameters)
         return d
 
-
     def test_get_seqnum_first(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1710,7 +1701,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                           self.failUnlessEqual(seqnum, 0))
         return d
 
-
     def test_get_root_hash_first(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1719,7 +1709,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                           self.failUnlessEqual(root_hash, self.root_hash))
         return d
 
-
     def test_get_checkstring_first(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1728,7 +1717,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                           self.failUnlessEqual(checkstring, self.checkstring))
         return d
 
-
     def test_write_read_vectors(self):
         # When writing for us, the storage server will return to us a
         # read vector, along with its result. If a write fails because
@@ -1767,7 +1755,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         # The checkstring remains the same for the rest of the process.
         return d
 
-
     def test_private_key_after_share_hash_chain(self):
         mw = self._make_new_mw("si1", 0)
         d = defer.succeed(None)
@@ -1786,7 +1773,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mw.put_encprivkey, self.encprivkey))
         return d
 
-
     def test_signature_after_verification_key(self):
         mw = self._make_new_mw("si1", 0)
         d = defer.succeed(None)
@@ -1813,7 +1799,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mw.put_signature, self.signature))
         return d
 
-
     def test_uncoordinated_write(self):
         # Make two mutable writers, both pointing to the same storage
         # server, both at the same storage index, and try writing to the
@@ -1846,7 +1831,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_failure)
         return d
 
-
     def test_invalid_salt_size(self):
         # Salts need to be 16 bytes in size. Writes that attempt to
         # write more or less than this should be rejected.
@@ -1865,7 +1849,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             another_invalid_salt))
         return d
 
-
     def test_write_test_vectors(self):
         # If we give the write proxy a bogus test vector at
         # any point during the process, it should fail to write when we
@@ -1903,7 +1886,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
     def serialize_blockhashes(self, blockhashes):
         return "".join(blockhashes)
 
-
     def serialize_sharehashes(self, sharehashes):
         ret = "".join([struct.pack(">H32s", i, sharehashes[i])
                        for i in sorted(sharehashes.keys())])
@@ -2032,6 +2014,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_publish)
         return d
 
+
     def _make_new_mw(self, si, share, datalength=36):
         # This is a file of size 36 bytes. Since it has a segment
         # size of 6, we know that it has 6 byte segments, which will
@@ -2041,7 +2024,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                                 6, datalength)
         return mw
 
-
     def test_write_rejected_with_too_many_blocks(self):
         mw = self._make_new_mw("si0", 0)
 
@@ -2058,7 +2040,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mw.put_block, self.block, 7, self.salt))
         return d
 
-
     def test_write_rejected_with_invalid_salt(self):
         # Try writing an invalid salt. Salts are 16 bytes -- any more or
         # less should cause an error.
@@ -2070,7 +2051,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             None, mw.put_block, self.block, 7, bad_salt))
         return d
 
-
     def test_write_rejected_with_invalid_root_hash(self):
         # Try writing an invalid root hash. This should be SHA256d, and
         # 32 bytes long as a result.
@@ -2096,7 +2076,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             None, mw.put_root_hash, invalid_root_hash))
         return d
 
-
     def test_write_rejected_with_invalid_blocksize(self):
         # The blocksize implied by the writer that we get from
         # _make_new_mw is 2bytes -- any more or any less than this
@@ -2130,7 +2109,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       mw.put_block(valid_block, 5, self.salt))
         return d
 
-
     def test_write_enforces_order_constraints(self):
         # We require that the MDMFSlotWriteProxy be interacted with in a
         # specific way.
@@ -2216,7 +2194,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       mw0.put_verification_key(self.verification_key))
         return d
 
-
     def test_end_to_end(self):
         mw = self._make_new_mw("si1", 0)
         # Write a share using the mutable writer, and make sure that the
@@ -2300,7 +2277,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                           self.failUnlessEqual(checkstring, mw.get_checkstring()))
         return d
 
-
     def test_is_sdmf(self):
         # The MDMFSlotReadProxy should also know how to read SDMF files,
         # since it will encounter them on the grid. Callers use the
@@ -2312,7 +2288,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                           self.failUnless(issdmf))
         return d
 
-
     def test_reads_sdmf(self):
         # The slot read proxy should, naturally, know how to tell us
         # about data in the SDMF format
@@ -2382,7 +2357,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                           self.failUnlessEqual(root_hash, self.root_hash, root_hash))
         return d
 
-
     def test_only_reads_one_segment_sdmf(self):
         # SDMF shares have only one segment, so it doesn't make sense to
         # read more segments than that. The reader should know this and
@@ -2400,7 +2374,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mr.get_block_and_salt, 1))
         return d
 
-
     def test_read_with_prefetched_mdmf_data(self):
         # The MDMFSlotReadProxy will prefill certain fields if you pass
         # it data that you have already fetched. This is useful for
@@ -2465,7 +2438,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_block_and_salt)
         return d
 
-
     def test_read_with_prefetched_sdmf_data(self):
         sdmf_data = self.build_test_sdmf_share()
         self.write_sdmf_share_to_server("si1")
@@ -2529,7 +2501,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_block_and_salt)
         return d
 
-
     def test_read_with_empty_mdmf_file(self):
         # Some tests upload a file with no contents to test things
         # unrelated to the actual handling of the content of the file.
@@ -2558,7 +2529,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mr.get_block_and_salt, 0))
         return d
 
-
     def test_read_with_empty_sdmf_file(self):
         self.write_sdmf_share_to_server("si1", empty=True)
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -2584,7 +2554,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mr.get_block_and_salt, 0))
         return d
 
-
     def test_verinfo_with_sdmf_file(self):
         self.write_sdmf_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -2625,7 +2594,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_verinfo)
         return d
 
-
     def test_verinfo_with_mdmf_file(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -2664,7 +2632,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_verinfo)
         return d
 
-
     def test_sdmf_writer(self):
         # Go through the motions of writing an SDMF share to the storage
        # server. Then read the storage server to see that the share got
@@ -2708,7 +2675,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_then)
         return d
 
-
     def test_sdmf_writer_preexisting_share(self):
         data = self.build_test_sdmf_share()
         self.write_sdmf_share_to_server("si1")
@@ -2769,13 +2735,14 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
 
 class Stats(unittest.TestCase):
-
     def setUp(self):
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
+
     def tearDown(self):
         return self.sparent.stopService()
+
     def workdir(self, name):
         basedir = os.path.join("storage", "Server", name)
         return basedir
@@ -2786,6 +2753,7 @@ class Stats(unittest.TestCase):
         server.setServiceParent(self.sparent)
         return server
 
+
     def test_latencies(self):
         server = self.create("test_latencies")
         for i in range(10000):
@@ -2852,6 +2820,7 @@ class Stats(unittest.TestCase):
         self.failUnless(output["get"]["99_0_percentile"] is None, output)
         self.failUnless(output["get"]["99_9_percentile"] is None, output)
 
+
 def remove_tags(s):
     s = re.sub(r'<[^>]*>', ' ', s)
     s = re.sub(r'\s+', ' ', s)
@@ -2859,13 +2828,14 @@ def remove_tags(s):
 
 class BucketCounterTest(unittest.TestCase, CrawlerTestMixin, ReallyEqualMixin):
-
     def setUp(self):
         self.s = service.MultiService()
         self.s.startService()
+
     def tearDown(self):
         return self.s.stopService()
+
     def test_bucket_counter(self):
         basedir = "storage/BucketCounter/bucket_counter"
         fileutil.make_dirs(basedir)
@@ -3031,9 +3001,11 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
     def setUp(self):
         self.s = service.MultiService()
         self.s.startService()
+
     def tearDown(self):
         return self.s.stopService()
+
     def make_shares(self, ss):
         def make(si):
             return (si, hashutil.tagged_hash("renew", si),
@@ -3820,9 +3792,11 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
     def setUp(self):
         self.s = service.MultiService()
         self.s.startService()
+
     def tearDown(self):
         return self.s.stopService()
+
     def test_no_server(self):
         w = StorageStatus(None)
         html = w.renderSynchronously()