From: Brian Warner
Date: Mon, 29 Jun 2009 20:03:58 +0000 (-0700)
Subject: remove trailing whitespace
X-Git-Tag: trac-4000~54
X-Git-Url: https://git.rkrishnan.org/pf/content/-?a=commitdiff_plain;h=c6ae2558472b08c3221152fea6337bab162f5eb0;p=tahoe-lafs%2Ftahoe-lafs.git

remove trailing whitespace
---

diff --git a/src/allmydata/mutable/repairer.py b/src/allmydata/mutable/repairer.py
index d04e3c69..b6693be8 100644
--- a/src/allmydata/mutable/repairer.py
+++ b/src/allmydata/mutable/repairer.py
@@ -13,7 +13,7 @@ class RepairResults:
 
 class RepairRequiresWritecapError(Exception):
     """Repair currently requires a writecap."""
-    
+
 class MustForceRepairError(Exception):
     pass
 
diff --git a/src/allmydata/test/test_mutable.py b/src/allmydata/test/test_mutable.py
index 36064b2f..9aef36bf 100644
--- a/src/allmydata/test/test_mutable.py
+++ b/src/allmydata/test/test_mutable.py
@@ -265,8 +265,8 @@ def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
     return res
 
 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
-    # this used to be in Publish, but we removed the limit. Some of 
-    # these tests test whether the new code correctly allows files 
+    # this used to be in Publish, but we removed the limit. Some of
+    # these tests test whether the new code correctly allows files
     # larger than the limit.
     OLD_MAX_SEGMENT_SIZE = 3500000
     def setUp(self):
@@ -1792,7 +1792,7 @@ class LessFakeClient(FakeClient):
 
     def __init__(self, basedir, num_peers=10):
         self._num_peers = num_peers
-        peerids = [tagged_hash("peerid", "%d" % i)[:20] 
+        peerids = [tagged_hash("peerid", "%d" % i)[:20]
                    for i in range(self._num_peers)]
         self.storage_broker = StorageFarmBroker(None, True)
         for peerid in peerids:
diff --git a/src/allmydata/util/statistics.py b/src/allmydata/util/statistics.py
index 4df02382..e2c689aa 100644
--- a/src/allmydata/util/statistics.py
+++ b/src/allmydata/util/statistics.py
@@ -72,7 +72,7 @@ def survival_pmf_via_bd(p_list):
     Note that this function does little to no error checking and is
     intended for internal use and testing only.
     """
-    pmf_list = [ binomial_distribution_pmf(p_list.count(p), p) 
+    pmf_list = [ binomial_distribution_pmf(p_list.count(p), p)
                  for p in set(p_list) ]
     return reduce(convolve, pmf_list)
 
@@ -90,7 +90,7 @@ def survival_pmf_via_conv(p_list):
 def print_pmf(pmf, n=4, out=sys.stdout):
     """
     Print a PMF in a readable form, with values rounded to n
-    significant digits. 
+    significant digits.
     """
     for k, p in enumerate(pmf):
         print >>out, "i=" + str(k) + ":", round_sigfigs(p, n)
@@ -124,7 +124,7 @@ def find_k(p_list, target_loss_prob):
 
 def find_k_from_pmf(pmf, target_loss_prob):
     """
-    Find the highest k value that achieves the targeted loss 
+    Find the highest k value that achieves the targeted loss
     probability, given the share survival PMF given in pmf.
     """
     assert valid_pmf(pmf)
@@ -151,11 +151,11 @@ def repair_count_pmf(survival_pmf, k):
     # Probability of 0 to repair is the probability of all shares
     # surviving plus the probability of less than k surviving.
     pmf = [ survival_pmf[n] + sum(survival_pmf[0:k]) ]
-    
+
     # Probability of more than 0, up to N-k to repair
     for i in range(1, n-k+1):
         pmf.append(survival_pmf[n-i])
-    
+
     # Probability of more than N-k to repair is 0, because that means
     # there are less than k available and the file is irreparable.
     for i in range(n-k+1, n+1):
@@ -220,14 +220,14 @@ def convolve(list_a, list_b):
     """
     n = len(list_a)
    m = len(list_b)
-    
+
    result = []
    for i in range(n + m - 1):
        sum = 0.0

        lower = max(0, i - n + 1)
        upper = min(m - 1, i)
-    
+
        for j in range(lower, upper+1):
            sum += list_a[i-j] * list_b[j]

@@ -250,8 +250,8 @@ def binomial_distribution_pmf(n, p):

    result = []
    for k in range(n+1):
-        result.append(math.pow(p , k ) * 
-                      math.pow(1 - p, n - k) * 
+        result.append(math.pow(p , k ) *
+                      math.pow(1 - p, n - k) *
                      binomial_coeff(n, k))

    assert valid_pmf(result)
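
Note for readers unfamiliar with allmydata.util.statistics: the hunks above only strip
whitespace, but the functions they touch compute a share-survival PMF by convolving
per-server binomial distributions. Below is a minimal standalone sketch of that math,
mirroring the shape of survival_pmf_via_bd, convolve, and binomial_distribution_pmf.
It is not the actual module: the survival_pmf helper name, the local binomial_coeff
reimplementation, and the example numbers (10 shares, p=0.9, k=3) are illustrative
assumptions, not part of this patch.

    from functools import reduce
    from math import factorial

    def binomial_coeff(n, k):
        # "n choose k"
        return factorial(n) // (factorial(k) * factorial(n - k))

    def binomial_distribution_pmf(n, p):
        # pmf[i] = probability that exactly i of n independent events,
        # each occurring with probability p, happen
        return [binomial_coeff(n, i) * (p ** i) * ((1 - p) ** (n - i))
                for i in range(n + 1)]

    def convolve(list_a, list_b):
        # discrete convolution: the PMF of the sum of two independent counts
        n, m = len(list_a), len(list_b)
        result = []
        for i in range(n + m - 1):
            total = 0.0
            for j in range(max(0, i - n + 1), min(m - 1, i) + 1):
                total += list_a[i - j] * list_b[j]
            result.append(total)
        return result

    def survival_pmf(p_list):
        # group servers by identical survival probability, build one
        # binomial PMF per group, then convolve the groups together
        pmf_list = [binomial_distribution_pmf(p_list.count(p), p)
                    for p in set(p_list)]
        return reduce(convolve, pmf_list)

    if __name__ == "__main__":
        # hypothetical example: 10 shares, each surviving with probability 0.9
        pmf = survival_pmf([0.9] * 10)
        # probability that at least k=3 shares survive (file recoverable)
        print(sum(pmf[3:]))
        # probability that fewer than k survive, i.e. the loss probability
        # that find_k_from_pmf compares against its target
        print(sum(pmf[:3]))

The resulting pmf has one entry per possible number of surviving shares, so
sum(pmf[0:k]) is the loss probability and sum(pmf[k:]) the recovery probability,
which is the quantity the find_k / repair_count_pmf code in statistics.py reasons about.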