From: Daira Hopwood
Date: Sun, 18 Nov 2012 05:04:48 +0000 (+0000)
Subject: Cosmetics.
X-Git-Url: https://git.rkrishnan.org/frontends/reliability?a=commitdiff_plain;h=148a6d82455590b0bfd73ec5f45e39fd72eceda3;p=tahoe-lafs%2Ftahoe-lafs.git

Cosmetics.

Signed-off-by: David-Sarah Hopwood
---

diff --git a/src/allmydata/storage/crawler.py b/src/allmydata/storage/crawler.py
index 05b0ef40..571734cf 100644
--- a/src/allmydata/storage/crawler.py
+++ b/src/allmydata/storage/crawler.py
@@ -131,17 +131,17 @@ class ShareCrawler(HookMixin, service.MultiService):
             cycle
         """
 
-        d = {}
+        p = {}
 
         if self.state["current-cycle"] is None:
-            d["cycle-in-progress"] = False
-            d["next-crawl-time"] = self.next_wake_time
-            d["remaining-wait-time"] = self.minus_or_none(self.next_wake_time,
+            p["cycle-in-progress"] = False
+            p["next-crawl-time"] = self.next_wake_time
+            p["remaining-wait-time"] = self.minus_or_none(self.next_wake_time,
                                                           time.time())
         else:
-            d["cycle-in-progress"] = True
+            p["cycle-in-progress"] = True
             pct = 100.0 * self.last_complete_prefix_index / len(self.prefixes)
-            d["cycle-complete-percentage"] = pct
+            p["cycle-complete-percentage"] = pct
             remaining = None
             if self.last_prefix_elapsed_time is not None:
                 left = len(self.prefixes) - self.last_complete_prefix_index
@@ -150,18 +150,18 @@ class ShareCrawler(HookMixin, service.MultiService):
                 # per-bucket time, probably by measuring the time spent on
                 # this prefix so far, divided by the number of buckets we've
                 # processed.
-            d["estimated-cycle-complete-time-left"] = remaining
+            p["estimated-cycle-complete-time-left"] = remaining
             # it's possible to call get_progress() from inside a crawler's
             # finished_prefix() function
-            d["remaining-sleep-time"] = self.minus_or_none(self.next_wake_time,
+            p["remaining-sleep-time"] = self.minus_or_none(self.next_wake_time,
                                                            time.time())
 
         per_cycle = None
         if self.last_cycle_elapsed_time is not None:
             per_cycle = self.last_cycle_elapsed_time
         elif self.last_prefix_elapsed_time is not None:
            per_cycle = len(self.prefixes) * self.last_prefix_elapsed_time
-        d["estimated-time-per-cycle"] = per_cycle
-        return d
+        p["estimated-time-per-cycle"] = per_cycle
+        return p
 
     def get_state(self):
         """I return the current state of the crawler.
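The crawler.py hunk above only renames the local dict in ShareCrawler.get_progress() from d to p; the keys and values are unchanged. As a rough, hypothetical sketch of how a caller might consume that dict (the function name render_progress and the formatting are illustrative and not part of this patch; "remaining-wait-time" may be None via minus_or_none):

    def render_progress(crawler):
        # get_progress() returns a plain dict; the keys used here appear in the hunk above.
        p = crawler.get_progress()
        if p["cycle-in-progress"]:
            return "crawl cycle %.1f%% complete" % p["cycle-complete-percentage"]
        return "idle; next crawl at %s (in %s seconds)" % (
            p["next-crawl-time"], p["remaining-wait-time"])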
diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index 7b812680..f45b21cb 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -154,8 +154,8 @@ class Bucket(unittest.TestCase):
         result_of_read = br.remote_read(0, len(share_data)+1)
         self.failUnlessEqual(result_of_read, share_data)
 
-class RemoteBucket:
+class RemoteBucket:
 
     def __init__(self):
         self.read_count = 0
         self.write_count = 0
@@ -297,14 +297,15 @@ class BucketProxy(unittest.TestCase):
                            0x44, WriteBucketProxy_v2, ReadBucketProxy)
 
 
 class Server(unittest.TestCase):
-
     def setUp(self):
         self.sparent = LoggingServiceParent()
         self.sparent.startService()
         self._lease_secret = itertools.count()
+
     def tearDown(self):
         return self.sparent.stopService()
+
     def workdir(self, name):
         basedir = os.path.join("storage", "Server", name)
         return basedir
@@ -316,6 +317,7 @@ class Server(unittest.TestCase):
         server.setServiceParent(self.sparent)
         return server
 
+
     def test_create(self):
         self.create("test_create")
 
@@ -754,15 +756,15 @@ class Server(unittest.TestCase):
         self.failUnlessIn("This share tastes like dust.", report)
 
-
 class MutableServer(unittest.TestCase):
-
     def setUp(self):
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
+
     def tearDown(self):
         return self.sparent.stopService()
+
     def workdir(self, name):
         basedir = os.path.join("storage", "MutableServer", name)
         return basedir
@@ -776,6 +778,7 @@ class MutableServer(unittest.TestCase):
     def test_create(self):
         self.create("test_create")
 
+
     def write_enabler(self, we_tag):
         return hashutil.tagged_hash("we_blah", we_tag)
 
@@ -802,6 +805,7 @@ class MutableServer(unittest.TestCase):
         self.failUnless(isinstance(readv_data, dict))
         self.failUnlessEqual(len(readv_data), 0)
 
+
     def test_bad_magic(self):
         ss = self.create("test_bad_magic")
         self.allocate(ss, "si1", "we1", self._lease_secret.next(), set([0]), 10)
@@ -983,7 +987,6 @@ class MutableServer(unittest.TestCase):
                                 ))
         self.failUnlessEqual(read("si1", [0], [(0,100)]), {0: [data]})
 
-
     def test_operators(self):
         # test operators, the data we're comparing is '11111' in all cases.
         # test both fail+pass, reset data after each one.
@@ -1376,7 +1379,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         # header.
         self.salt_hash_tree_s = self.serialize_blockhashes(self.salt_hash_tree[1:])
 
-
     def tearDown(self):
         self.sparent.stopService()
         shutil.rmtree(self.workdir("MDMFProxies storage test server"))
@@ -1385,23 +1387,18 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
     def write_enabler(self, we_tag):
         return hashutil.tagged_hash("we_blah", we_tag)
 
-
     def renew_secret(self, tag):
         return hashutil.tagged_hash("renew_blah", str(tag))
 
-
     def cancel_secret(self, tag):
         return hashutil.tagged_hash("cancel_blah", str(tag))
 
-
     def workdir(self, name):
         basedir = os.path.join("storage", "MutableServer", name)
         return basedir
 
-
     def create(self, name):
         workdir = self.workdir(name)
-
         server = StorageServer(workdir, "\x00" * 20)
         server.setServiceParent(self.sparent)
         return server
@@ -1503,7 +1500,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         data += self.block_hash_tree_s
         return data
 
-
     def write_test_share_to_server(self,
                                    storage_index,
                                    tail_segment=False,
@@ -1576,7 +1572,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         self.offsets['EOF'] = eof_offset
         return final_share
 
-
     def write_sdmf_share_to_server(self,
                                    storage_index,
                                    empty=False):
@@ -1662,7 +1657,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       self.failUnlessEqual(checkstring, checkstring))
         return d
 
-
     def test_read_with_different_tail_segment_size(self):
         self.write_test_share_to_server("si1", tail_segment=True)
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1674,7 +1668,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_tail_segment)
         return d
 
-
     def test_get_block_with_invalid_segnum(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1685,7 +1678,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mr.get_block_and_salt, 7))
         return d
 
-
     def test_get_encoding_parameters_first(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1698,7 +1690,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_encoding_parameters)
         return d
 
-
     def test_get_seqnum_first(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1707,7 +1698,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       self.failUnlessEqual(seqnum, 0))
         return d
 
-
     def test_get_root_hash_first(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1716,7 +1706,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       self.failUnlessEqual(root_hash, self.root_hash))
         return d
 
-
     def test_get_checkstring_first(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -1725,7 +1714,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       self.failUnlessEqual(checkstring, self.checkstring))
         return d
 
-
     def test_write_read_vectors(self):
         # When writing for us, the storage server will return to us a
         # read vector, along with its result. If a write fails because
@@ -1764,7 +1752,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         # The checkstring remains the same for the rest of the process.
         return d
 
-
     def test_private_key_after_share_hash_chain(self):
         mw = self._make_new_mw("si1", 0)
         d = defer.succeed(None)
@@ -1783,7 +1770,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mw.put_encprivkey, self.encprivkey))
         return d
 
-
     def test_signature_after_verification_key(self):
         mw = self._make_new_mw("si1", 0)
         d = defer.succeed(None)
@@ -1810,7 +1796,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mw.put_signature, self.signature))
         return d
 
-
     def test_uncoordinated_write(self):
         # Make two mutable writers, both pointing to the same storage
         # server, both at the same storage index, and try writing to the
@@ -1843,7 +1828,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_failure)
         return d
 
-
     def test_invalid_salt_size(self):
         # Salts need to be 16 bytes in size. Writes that attempt to
         # write more or less than this should be rejected.
@@ -1862,7 +1846,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             another_invalid_salt))
         return d
 
-
     def test_write_test_vectors(self):
         # If we give the write proxy a bogus test vector at
         # any point during the process, it should fail to write when we
@@ -1900,7 +1883,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
     def serialize_blockhashes(self, blockhashes):
         return "".join(blockhashes)
 
-
     def serialize_sharehashes(self, sharehashes):
         ret = "".join([struct.pack(">H32s", i, sharehashes[i])
                        for i in sorted(sharehashes.keys())])
@@ -2029,6 +2011,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_publish)
         return d
 
+
     def _make_new_mw(self, si, share, datalength=36):
         # This is a file of size 36 bytes. Since it has a segment
         # size of 6, we know that it has 6 byte segments, which will
@@ -2038,7 +2021,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                                 6, datalength)
         return mw
 
-
     def test_write_rejected_with_too_many_blocks(self):
         mw = self._make_new_mw("si0", 0)
 
@@ -2055,7 +2037,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mw.put_block, self.block, 7, self.salt))
         return d
 
-
     def test_write_rejected_with_invalid_salt(self):
         # Try writing an invalid salt. Salts are 16 bytes -- any more or
         # less should cause an error.
@@ -2067,7 +2048,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             None, mw.put_block, self.block, 7, bad_salt))
         return d
 
-
     def test_write_rejected_with_invalid_root_hash(self):
         # Try writing an invalid root hash. This should be SHA256d, and
         # 32 bytes long as a result.
@@ -2093,7 +2073,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             None, mw.put_root_hash, invalid_root_hash))
         return d
 
-
     def test_write_rejected_with_invalid_blocksize(self):
         # The blocksize implied by the writer that we get from
         # _make_new_mw is 2bytes -- any more or any less than this
@@ -2127,7 +2106,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       mw.put_block(valid_block, 5, self.salt))
         return d
 
-
     def test_write_enforces_order_constraints(self):
         # We require that the MDMFSlotWriteProxy be interacted with in a
         # specific way.
@@ -2213,7 +2191,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       mw0.put_verification_key(self.verification_key))
         return d
 
-
     def test_end_to_end(self):
         mw = self._make_new_mw("si1", 0)
         # Write a share using the mutable writer, and make sure that the
@@ -2297,7 +2274,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       self.failUnlessEqual(checkstring, mw.get_checkstring()))
         return d
 
-
     def test_is_sdmf(self):
         # The MDMFSlotReadProxy should also know how to read SDMF files,
         # since it will encounter them on the grid. Callers use the
@@ -2309,7 +2285,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       self.failUnless(issdmf))
         return d
 
-
     def test_reads_sdmf(self):
         # The slot read proxy should, naturally, know how to tell us
         # about data in the SDMF format
@@ -2379,7 +2354,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                       self.failUnlessEqual(root_hash, self.root_hash, root_hash))
         return d
 
-
     def test_only_reads_one_segment_sdmf(self):
         # SDMF shares have only one segment, so it doesn't make sense to
         # read more segments than that. The reader should know this and
@@ -2397,7 +2371,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mr.get_block_and_salt, 1))
         return d
 
-
     def test_read_with_prefetched_mdmf_data(self):
         # The MDMFSlotReadProxy will prefill certain fields if you pass
         # it data that you have already fetched. This is useful for
@@ -2462,7 +2435,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_block_and_salt)
         return d
 
-
     def test_read_with_prefetched_sdmf_data(self):
         sdmf_data = self.build_test_sdmf_share()
         self.write_sdmf_share_to_server("si1")
@@ -2526,7 +2498,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_block_and_salt)
         return d
 
-
     def test_read_with_empty_mdmf_file(self):
         # Some tests upload a file with no contents to test things
         # unrelated to the actual handling of the content of the file.
@@ -2555,7 +2526,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mr.get_block_and_salt, 0))
         return d
 
-
     def test_read_with_empty_sdmf_file(self):
         self.write_sdmf_share_to_server("si1", empty=True)
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -2581,7 +2551,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
                             mr.get_block_and_salt, 0))
         return d
 
-
     def test_verinfo_with_sdmf_file(self):
         self.write_sdmf_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -2622,7 +2591,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_verinfo)
         return d
 
-
     def test_verinfo_with_mdmf_file(self):
         self.write_test_share_to_server("si1")
         mr = MDMFSlotReadProxy(self.rref, "si1", 0)
@@ -2661,7 +2629,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_check_verinfo)
         return d
 
-
     def test_sdmf_writer(self):
         # Go through the motions of writing an SDMF share to the storage
         # server. Then read the storage server to see that the share got
@@ -2705,7 +2672,6 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         d.addCallback(_then)
         return d
 
-
     def test_sdmf_writer_preexisting_share(self):
         data = self.build_test_sdmf_share()
         self.write_sdmf_share_to_server("si1")
@@ -2766,13 +2732,14 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
 
 
 class Stats(unittest.TestCase):
-
     def setUp(self):
         self.sparent = LoggingServiceParent()
         self._lease_secret = itertools.count()
+
     def tearDown(self):
         return self.sparent.stopService()
+
     def workdir(self, name):
         basedir = os.path.join("storage", "Server", name)
         return basedir
@@ -2783,6 +2750,7 @@ class Stats(unittest.TestCase):
         server.setServiceParent(self.sparent)
         return server
 
+
     def test_latencies(self):
         server = self.create("test_latencies")
         for i in range(10000):
@@ -2849,6 +2817,7 @@ class Stats(unittest.TestCase):
         self.failUnless(output["get"]["99_0_percentile"] is None, output)
         self.failUnless(output["get"]["99_9_percentile"] is None, output)
 
+
 def remove_tags(s):
     s = re.sub(r'<[^>]*>', ' ', s)
     s = re.sub(r'\s+', ' ', s)
@@ -2856,13 +2825,14 @@ def remove_tags(s):
 
 
 class BucketCounterTest(unittest.TestCase, CrawlerTestMixin, ReallyEqualMixin):
-
     def setUp(self):
         self.s = service.MultiService()
         self.s.startService()
+
     def tearDown(self):
         return self.s.stopService()
+
     def test_bucket_counter(self):
         basedir = "storage/BucketCounter/bucket_counter"
         fileutil.make_dirs(basedir)
@@ -3028,9 +2998,11 @@ class LeaseCrawler(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
     def setUp(self):
         self.s = service.MultiService()
         self.s.startService()
+
     def tearDown(self):
         return self.s.stopService()
+
     def make_shares(self, ss):
         def make(si):
             return (si, hashutil.tagged_hash("renew", si),
@@ -3817,9 +3789,11 @@ class WebStatus(unittest.TestCase, pollmixin.PollMixin, WebRenderingMixin):
     def setUp(self):
         self.s = service.MultiService()
         self.s.startService()
+
     def tearDown(self):
         return self.s.stopService()
+
     def test_no_server(self):
         w = StorageStatus(None)
         html = w.renderSynchronously()
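All of the test_storage.py hunks above are vertical-whitespace cleanup: blank lines directly after a class statement are dropped, doubled blank lines between methods collapse to one, and a single blank line is added between adjacent methods such as setUp and tearDown. A minimal sketch of the resulting layout, using illustrative names (ExampleTest is not a class in this patch):

    from twisted.trial import unittest

    class ExampleTest(unittest.TestCase):
        def setUp(self):
            # illustrative fixture, standing in for LoggingServiceParent etc.
            self.items = []

        def tearDown(self):
            del self.items

        def test_append(self):
            self.items.append(1)
            self.failUnlessEqual(len(self.items), 1)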