From: Brian Warner
Date: Tue, 9 Apr 2013 19:04:12 +0000 (+0100)
Subject: Remove the whitespace reported by find-trailing-spaces. No code changes.
X-Git-Tag: allmydata-tahoe-1.10b1~13
X-Git-Url: https://git.rkrishnan.org/%5B/%5D%20/uri/frontends/rgr-080307.php?a=commitdiff_plain;h=d8c536847b1ea577f9ac5b6aa98c7ce5d1961c8c;p=tahoe-lafs%2Ftahoe-lafs.git

Remove the whitespace reported by find-trailing-spaces. No code changes.
---

diff --git a/src/allmydata/mutable/layout.py b/src/allmydata/mutable/layout.py
index 75598a4d..b938794f 100644
--- a/src/allmydata/mutable/layout.py
+++ b/src/allmydata/mutable/layout.py
@@ -20,7 +20,7 @@ from zope.interface import implements
 # Q: The sequence number; this is sort of like a revision history for
 #    mutable files; they start at 1 and increase as they are changed after
 #    being uploaded. Stored as an unsigned 64-bit integer.
-# 32s: The root hash of the share hash tree. We use sha-256d, so we use 32 
+# 32s: The root hash of the share hash tree. We use sha-256d, so we use 32
 #      bytes to store the value.
 # 16s: The salt for the readkey. This is a 16-byte random value.
 #
@@ -46,7 +46,7 @@ from zope.interface import implements
 #    to account for the possibility of a lot of share data.
 # Q: The offset of the EOF. An unsigned 64-bit integer, to account for
 #    the possibility of a lot of share data.
-# 
+#
 # After all of these, we have the following:
 # - The verification key: Occupies the space between the end of the header
 #   and the start of the signature (i.e.: data[HEADER_LENGTH:o['signature']]).
@@ -57,7 +57,7 @@ from zope.interface import implements
 # - The share data, which goes from the share data offset to the encrypted
 #   private key offset.
 # - The encrypted private key offset, which goes until the end of the file.
-# 
+#
 # The block hash tree in this encoding has only one share, so the offset of
 # the share data will be 32 bytes more than the offset of the block hash tree.
 # Given this, we may need to check to see how many bytes a reasonably sized
@@ -248,7 +248,7 @@ class SDMFSlotWriteProxy:
         self._segment_size = segment_size
         self._data_length = data_length
 
-        # This is an SDMF file, so it should have only one segment, so, 
+        # This is an SDMF file, so it should have only one segment, so,
         # modulo padding of the data length, the segment size and the
         # data length should be the same.
         expected_segment_size = mathutil.next_multiple(data_length,
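As a point of reference for the struct format characters documented in the
first hunk: the fixed-width header fields pack directly with Python's struct
module. A minimal sketch covering only the three fields quoted above (Q, 32s,
16s), with placeholder values; the real SDMF header also carries a version
byte and the offset table, which are omitted here:

    import struct

    # Q = sequence number, 32s = share hash tree root (sha-256d output is
    # 32 bytes), 16s = readkey salt. Placeholder values, for illustration.
    seqnum = 1
    root_hash = b"\x00" * 32
    salt = b"\x01" * 16

    prefix = struct.pack(">Q32s16s", seqnum, root_hash, salt)
    assert len(prefix) == 8 + 32 + 16  # fixed-width, as documented above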
@@ -610,12 +610,12 @@ class MDMFSlotWriteProxy:
     # in meaning to what we have with SDMF files, except now instead of
     # using the literal salt, we use a value derived from all of the
     # salts -- the share hash root.
-    # 
+    #
     # The salt is stored before the block for each segment. The block
     # hash tree is computed over the combination of block and salt for
     # each segment. In this way, we get integrity checking for both
     # block and salt with the current block hash tree arrangement.
-    # 
+    #
     # The ordering of the offsets is different to reflect the dependencies
     # that we'll run into with an MDMF file. The expected write flow is
     # something like this:
@@ -625,16 +625,16 @@ class MDMFSlotWriteProxy:
     #    and where they should go. We can also figure out where the
     #    encrypted private key should go, because we can figure out how
     #    big the share data will be.
-    # 
+    #
     # 1: Encrypt, encode, and upload the file in chunks. Do something
-    #    like 
+    #    like
     #
     #       put_block(data, segnum, salt)
     #
     #    to write a block and a salt to the disk. We can do both of
     #    these operations now because we have enough of the offsets to
     #    know where to put them.
-    # 
+    #
     # 2: Put the encrypted private key. Use:
     #
     #       put_encprivkey(encprivkey)
@@ -644,7 +644,7 @@ class MDMFSlotWriteProxy:
     #
     # 3: We're now in a position to upload the block hash tree for
     #    a share. Put that using something like:
-    # 
+    #
     #       put_blockhashes(block_hash_tree)
     #
     #    Note that block_hash_tree is a list of hashes -- we'll take
@@ -655,20 +655,20 @@ class MDMFSlotWriteProxy:
     #
     # 4: We're now in a position to upload the share hash chain for
     #    a share. Do that with something like:
-    # 
-    #       put_sharehashes(share_hash_chain) 
+    #
+    #       put_sharehashes(share_hash_chain)
     #
-    #    share_hash_chain should be a dictionary mapping shnums to 
+    #    share_hash_chain should be a dictionary mapping shnums to
     #    32-byte hashes -- the wrapper handles serialization.
     #    We'll know where to put the signature at this point, also.
     #    The root of this tree will be put explicitly in the next
     #    step.
-    # 
+    #
     # 5: Before putting the signature, we must first put the
     #    root_hash. Do this with:
-    # 
+    #
     #       put_root_hash(root_hash).
-    # 
+    #
     #    In terms of knowing where to put this value, it was always
     #    possible to place it, but it makes sense semantically to
     #    place it after the share hash tree, so that's why you do it
@@ -679,27 +679,27 @@ class MDMFSlotWriteProxy:
     #       get_signable()
     #
     #    to get the part of the header that you want to sign, and use:
-    # 
+    #
     #       put_signature(signature)
     #
     #    to write your signature to the remote server.
     #
     # 6: Add the verification key, and finish. Do:
     #
-    #       put_verification_key(key) 
+    #       put_verification_key(key)
     #
-    #    and 
+    #    and
     #
     #       finish_publish()
     #
     # Checkstring management:
-    # 
+    #
     # To write to a mutable slot, we have to provide test vectors to ensure
     # that we are writing to the same data that we think we are. These
     # vectors allow us to detect uncoordinated writes; that is, writes
     # where both we and some other shareholder are writing to the
     # mutable slot, and to report those back to the parts of the program
-    # doing the writing. 
+    # doing the writing.
     #
     # With SDMF, this was easy -- all of the share data was written in
     # one go, so it was easy to detect uncoordinated writes, and we only
@@ -724,7 +724,7 @@ class MDMFSlotWriteProxy:
     # - When we write out the salt hash
     # - When we write out the root of the share hash tree
     #
-    # since these values will change the header. It is possible that we 
+    # since these values will change the header. It is possible that we
     # can just make those be written in one operation to minimize
     # disruption.
     def __init__(self,
@@ -745,7 +745,7 @@ class MDMFSlotWriteProxy:
         assert self.shnum >= 0 and self.shnum < total_shares
         self._total_shares = total_shares
         # We build up the offset table as we write things. It is the
-        # last thing we write to the remote server. 
+        # last thing we write to the remote server.
         self._offsets = {}
         self._testvs = []
         # This is a list of write vectors that will be sent to our
@@ -1010,7 +1010,7 @@ class MDMFSlotWriteProxy:
         Put the root hash (the root of the share hash tree) in the
         remote slot.
         """
-        # It does not make sense to be able to put the root 
+        # It does not make sense to be able to put the root
         # hash without first putting the share hashes, since you need
         # the share hashes to generate the root hash.
         #
@@ -1247,7 +1247,7 @@ class MDMFSlotReadProxy:
         """
         if self._offsets:
             return defer.succeed(None)
-        # At this point, we may be either SDMF or MDMF. Fetching 107 
+        # At this point, we may be either SDMF or MDMF. Fetching 107
         # bytes will be enough to get header and offsets for both SDMF and
         # MDMF, though we'll be left with 4 more bytes than we
         # need if this ends up being MDMF. This is probably less
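Steps 1 through 6 in the layout.py comments above amount to the following
call sequence against an MDMFSlotWriteProxy. This is a condensed sketch, not
the project's actual publish code: each proxy call really returns a Deferred
that must be chained, and the parameters stand in for values produced by the
publish machinery (`sign` is a hypothetical caller-supplied signing callable):

    def publish_mdmf_share(proxy, blocks, salts, encprivkey,
                           block_hash_tree, share_hash_chain,
                           root_hash, verification_key, sign):
        # Sketch of the write flow described above; error handling and
        # Deferred-chaining omitted for brevity.
        for segnum, (block, salt) in enumerate(zip(blocks, salts)):
            proxy.put_block(block, segnum, salt)          # step 1
        proxy.put_encprivkey(encprivkey)                  # step 2
        proxy.put_blockhashes(block_hash_tree)            # step 3: list of 32-byte hashes
        proxy.put_sharehashes(share_hash_chain)           # step 4: dict shnum -> 32-byte hash
        proxy.put_root_hash(root_hash)                    # step 5: root of the share hash tree
        proxy.put_signature(sign(proxy.get_signable()))   # step 5: sign the header
        proxy.put_verification_key(verification_key)      # step 6
        return proxy.finish_publish()                     # step 6: send the queued writes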
diff --git a/src/allmydata/mutable/publish.py b/src/allmydata/mutable/publish.py
index c87e8978..4acc2d6c 100644
--- a/src/allmydata/mutable/publish.py
+++ b/src/allmydata/mutable/publish.py
@@ -332,7 +332,7 @@ class Publish:
         # These are filled in later, after we've modified the block hash
         # tree suitably.
         self.sharehash_leaves = None # eventually [sharehashes]
-        self.sharehashes = {} # shnum -> [sharehash leaves necessary to 
+        self.sharehashes = {} # shnum -> [sharehash leaves necessary to
                               # validate the share]
 
         self.log("Starting push")
@@ -516,7 +516,7 @@ class Publish:
         for j in xrange(self.num_segments):
             blocks.append(None)
         self.sharehash_leaves = None # eventually [sharehashes]
-        self.sharehashes = {} # shnum -> [sharehash leaves necessary to 
+        self.sharehashes = {} # shnum -> [sharehash leaves necessary to
                               # validate the share]
 
         self.log("Starting push")
@@ -1324,7 +1324,7 @@ class TransformingUploadable:
 
 
     def read(self, length):
-        # We can get data from 3 sources here. 
+        # We can get data from 3 sources here.
         # 1. The first of the segments provided to us.
         # 2. The data that we're replacing things with.
         # 3. The last of the segments provided to us.
diff --git a/src/allmydata/mutable/retrieve.py b/src/allmydata/mutable/retrieve.py
index 77f8de27..b92e931f 100644
--- a/src/allmydata/mutable/retrieve.py
+++ b/src/allmydata/mutable/retrieve.py
@@ -130,7 +130,7 @@ class Retrieve:
         # verify means that we are using the downloader logic to verify all
         # of our shares. This tells the downloader a few things.
-        # 
+        #
         # 1. We need to download all of the shares.
         # 2. We don't need to decode or decrypt the shares, since our
         #    caller doesn't care about the plaintext, only the
@@ -392,7 +392,7 @@ class Retrieve:
         # Our last task is to tell the downloader where to start and
         # where to stop. We use three parameters for that:
         # - self._start_segment: the segment that we need to start
-        #   downloading from. 
+        #   downloading from.
         # - self._current_segment: the next segment that we need to
         #   download.
         # - self._last_segment: The last segment that we were asked to
@@ -405,7 +405,7 @@ class Retrieve:
         if self._offset:
             self.log("got offset: %d" % self._offset)
             # our start segment is the first segment containing the
-            # offset we were given. 
+            # offset we were given.
             start = self._offset // self._segment_size
 
             assert start < self._num_segments
@@ -798,7 +798,7 @@ class Retrieve:
 
         # Reaching this point means that we know that this segment
         # is correct. Now we need to check to see whether the share
-        # hash chain is also correct. 
+        # hash chain is also correct.
         # SDMF wrote share hash chains that didn't contain the
         # leaves, which would be produced from the block hash tree.
         # So we need to validate the block hash tree first. If
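The start-segment line in the retrieve.py hunk above (start = self._offset //
self._segment_size) is plain integer arithmetic; a worked sketch with made-up
sizes, where the last-segment form is an assumption that simply mirrors it:

    segment_size = 131072   # hypothetical 128 KiB segments
    offset = 300000         # byte offset the caller asked to read from
    read_size = 65536       # number of bytes the caller wants

    # The first segment containing the offset, exactly as in the hunk above:
    start = offset // segment_size                   # -> 2
    # Assumed form of the matching last-segment computation:
    last = (offset + read_size - 1) // segment_size  # -> 2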
diff --git a/src/allmydata/test/common.py b/src/allmydata/test/common.py
index 64603130..82ccbf64 100644
--- a/src/allmydata/test/common.py
+++ b/src/allmydata/test/common.py
@@ -410,7 +410,7 @@ def make_verifier_uri():
                            fingerprint=os.urandom(32)).to_string()
 
 def create_mutable_filenode(contents, mdmf=False, all_contents=None):
-    # XXX: All of these arguments are kind of stupid. 
+    # XXX: All of these arguments are kind of stupid.
     if mdmf:
         cap = make_mdmf_mutable_file_cap()
     else:
diff --git a/src/allmydata/test/test_mutable.py b/src/allmydata/test/test_mutable.py
index ee5b768a..47f24c72 100644
--- a/src/allmydata/test/test_mutable.py
+++ b/src/allmydata/test/test_mutable.py
@@ -484,7 +484,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
             dumped = servermap.dump(StringIO())
             self.failUnlessIn("3-of-10", dumped.getvalue())
         d.addCallback(_then)
-        # Now overwrite the contents with some new contents. We want 
+        # Now overwrite the contents with some new contents. We want
         # to make them big enough to force the file to be uploaded
         # in more than one segment.
         big_contents = "contents1" * 100000 # about 900 KiB
@@ -499,7 +499,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
         # before, they need to be big enough to force multiple
         # segments, so that we make the downloader deal with
         # multiple segments.
-        bigger_contents = "contents2" * 1000000 # about 9MiB 
+        bigger_contents = "contents2" * 1000000 # about 9MiB
         bigger_contents_uploadable = MutableData(bigger_contents)
         d.addCallback(lambda ignored:
             n.overwrite(bigger_contents_uploadable))
@@ -1482,7 +1482,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
 
 
     def test_corrupt_all_encprivkey_late(self):
-        # this should work for the same reason as above, but we corrupt 
+        # this should work for the same reason as above, but we corrupt
         # after the servermap update to exercise the error handling
         # code.
         # We need to remove the privkey from the node, or the retrieve
diff --git a/src/allmydata/test/test_storage.py b/src/allmydata/test/test_storage.py
index 08c513cd..5c5e2c7c 100644
--- a/src/allmydata/test/test_storage.py
+++ b/src/allmydata/test/test_storage.py
@@ -1593,7 +1593,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
 
 
     def write_sdmf_share_to_server(self, storage_index, empty=False):
-        # Some tests need SDMF shares to verify that we can still 
+        # Some tests need SDMF shares to verify that we can still
        # read them. This method writes one, which resembles but is not
         assert self.rref
         write = self.ss.remote_slot_testv_and_readv_and_writev
@@ -1877,8 +1877,8 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
 
 
     def test_write_test_vectors(self):
-        # If we give the write proxy a bogus test vector at 
-        # any point during the process, it should fail to write when we 
+        # If we give the write proxy a bogus test vector at
+        # any point during the process, it should fail to write when we
         # tell it to write.
         def _check_failure(results):
             self.failUnlessEqual(len(results), 2)
@@ -2153,7 +2153,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
         # 5: Write the root hash and salt hash
         # 6: Write the signature and verification key
         # 7: Write the file.
-        # 
+        #
         # Some of these can be performed out-of-order, and some can't.
         # The dependencies that I want to test here are:
         # - Private key before block hashes
@@ -2678,7 +2678,7 @@ class MDMFProxies(unittest.TestCase, ShouldFailMixin):
 
     def test_sdmf_writer(self):
         # Go through the motions of writing an SDMF share to the storage
         # server. Then read the storage server to see that the share got
-        # written in the way that we think it should have. 
+        # written in the way that we think it should have.
         # We do this first so that the necessary instance variables get
         # set the way we want them for the tests below.
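The test vectors that test_write_test_vectors forges are the checkstring
mechanism described in the layout.py comments earlier. A sketch of the shapes
involved, assuming the (offset, length, operator, specimen) convention of the
slot test-and-write interface named above (remote_slot_testv_and_readv_and_writev);
treat the details as illustrative rather than authoritative:

    def make_tw_vectors(shnum, checkstring, new_share_data):
        # The server applies the writes only if every test vector matches
        # the currently stored bytes; a stale specimen signals an
        # uncoordinated write, which is what the test provokes.
        testv = [(0, len(checkstring), "eq", checkstring)]
        datav = [(0, new_share_data)]          # (offset, data) write vectors
        return {shnum: (testv, datav, None)}   # None: leave share length alone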
diff --git a/src/allmydata/test/test_util.py b/src/allmydata/test/test_util.py
index ecaaa441..3e2fa11a 100644
--- a/src/allmydata/test/test_util.py
+++ b/src/allmydata/test/test_util.py
@@ -790,7 +790,7 @@ class Abbreviate(unittest.TestCase):
         self.failUnlessIn("1 BB", str(e))
         e = self.failUnlessRaises(ValueError, p, "fhtagn")
         self.failUnlessIn("fhtagn", str(e))
-        
+
 
 class Limiter(unittest.TestCase):
     timeout = 480 # This takes longer than 240 seconds on Francois's arm box.
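A closing note on the Abbreviate hunk: the parser `p` under test is presumably
abbreviate.parse_abbreviated_size, which converts human-readable size strings
to byte counts and raises ValueError naming the offending input -- exactly
what the surrounding assertions check. A minimal sketch under that assumption:

    from allmydata.util import abbreviate

    abbreviate.parse_abbreviated_size("100MB")  # -> an integer byte count
    try:
        abbreviate.parse_abbreviated_size("fhtagn")
    except ValueError, e:                       # py2 syntax, matching the codebase
        assert "fhtagn" in str(e)               # what the test asserts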