From 4e29060847b32716d01d36da93e1dbf9cc7af6cf Mon Sep 17 00:00:00 2001
From: Kevan Carstensen
Date: Mon, 16 Nov 2009 15:24:59 -0700
Subject: [PATCH] Change stray "shares_of_happiness" to "servers_of_happiness"

---
 docs/architecture.txt             | 2 +-
 src/allmydata/immutable/upload.py | 2 +-
 src/allmydata/interfaces.py       | 4 ++--
 src/allmydata/test/test_encode.py | 4 ++--
 4 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/docs/architecture.txt b/docs/architecture.txt
index 3d4d90c0..f7f00b4a 100644
--- a/docs/architecture.txt
+++ b/docs/architecture.txt
@@ -187,7 +187,7 @@ place a quantity known as "servers of happiness" that each map to a unique
 server, we'll do the upload anyways. If we cannot place at least this many in
 this way, the upload is declared a failure.
 
-The current defaults use k=3, shares_of_happiness=7, and N=10, meaning that
+The current defaults use k=3, servers_of_happiness=7, and N=10, meaning that
 we'll try to place 10 shares, we'll be happy if we can place shares on enough
 servers that there are 7 different servers, the correct functioning of any 3 of
 which guarantee the availability of the file, and we need to get back any 3 to
diff --git a/src/allmydata/immutable/upload.py b/src/allmydata/immutable/upload.py
index 70b7d6d6..3477ed5f 100644
--- a/src/allmydata/immutable/upload.py
+++ b/src/allmydata/immutable/upload.py
@@ -408,7 +408,7 @@ class Tahoe2PeerSelector:
                 pass
             else:
                 # No more peers, so this upload might fail (it depends upon
-                # whether we've hit shares_of_happiness or not). Log the last
+                # whether we've hit servers_of_happiness or not). Log the last
                 # failure we got: if a coding error causes all peers to fail
                 # in the same way, this allows the common failure to be seen
                 # by the uploader and should help with debugging
diff --git a/src/allmydata/interfaces.py b/src/allmydata/interfaces.py
index 5b82f816..42a188cc 100644
--- a/src/allmydata/interfaces.py
+++ b/src/allmydata/interfaces.py
@@ -806,7 +806,7 @@ class IMutableFileNode(IFileNode):
 
 class NotEnoughSharesError(Exception):
     """Download was unable to get enough shares, or upload was unable to
-    place 'shares_of_happiness' shares."""
+    place 'servers_of_happiness' shares."""
 
 class NoSharesError(Exception):
     """Upload or Download was unable to get any shares at all."""
@@ -1305,7 +1305,7 @@ class IEncoder(Interface):
             pushed.
 
             'share_counts': return a tuple describing how many shares are used:
-                            (needed_shares, shares_of_happiness, total_shares)
+                            (needed_shares, servers_of_happiness, total_shares)
 
             'num_segments': return an int with the number of segments that
                             will be encoded.
diff --git a/src/allmydata/test/test_encode.py b/src/allmydata/test/test_encode.py
index fb290a00..36a854ea 100644
--- a/src/allmydata/test/test_encode.py
+++ b/src/allmydata/test/test_encode.py
@@ -770,7 +770,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
     def test_lost_one_shareholder(self):
         # we have enough shareholders when we start, but one segment in we
         # lose one of them. The upload should still succeed, as long as we
-        # still have 'shares_of_happiness' peers left.
+        # still have 'servers_of_happiness' peers left.
         modemap = dict([(i, "good") for i in range(9)] +
                        [(i, "lost") for i in range(9, 10)])
         return self.send_and_recover((4,8,10), bucket_modes=modemap)
@@ -778,7 +778,7 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
     def test_lost_one_shareholder_early(self):
         # we have enough shareholders when we choose peers, but just before
         # we send the 'start' message, we lose one of them. The upload should
-        # still succeed, as long as we still have 'shares_of_happiness' peers
+        # still succeed, as long as we still have 'servers_of_happiness' peers
         # left.
         modemap = dict([(i, "good") for i in range(9)] +
                        [(i, "lost-early") for i in range(9, 10)])
--
2.37.2
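
For context on the rename above: "servers of happiness" measures how many
distinct servers end up holding shares, rather than how many shares were
placed. The sketch below illustrates that distinction in plain Python; the
names (is_upload_happy, enough_shares_placed, placements) are hypothetical
and this is not Tahoe-LAFS's actual check, which is more involved, but it
shows why a share count alone can be satisfied by far fewer servers than the
documented default of 7.

# Illustrative sketch only -- hypothetical names, not Tahoe-LAFS's real code.
# placements maps a share number to the id of the server that accepted it.

def is_upload_happy(placements, servers_of_happiness):
    """Simplified 'servers of happiness': the upload is happy only if the
    placed shares span at least servers_of_happiness distinct servers."""
    return len(set(placements.values())) >= servers_of_happiness

def enough_shares_placed(placements, shares_of_happiness):
    """The older 'shares of happiness' test: just count placed shares."""
    return len(placements) >= shares_of_happiness

# Ten shares crammed onto five servers: the old shares-based test passes,
# but the servers-based test with a threshold of 7 does not.
placements = {0: "s1", 1: "s1", 2: "s2", 3: "s2", 4: "s3",
              5: "s3", 6: "s4", 7: "s4", 8: "s5", 9: "s5"}
assert enough_shares_placed(placements, shares_of_happiness=7)
assert not is_upload_happy(placements, servers_of_happiness=7)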