From 1b4b4cbd4a035d9feeef663ff654854e75995bde Mon Sep 17 00:00:00 2001
From: Brian Warner <warner@allmydata.com>
Date: Tue, 15 Apr 2008 16:08:32 -0700
Subject: [PATCH] mutable WIP: rename NotEnoughPeersError to
 NotEnoughSharesError

---
 docs/mutable-DSA.txt               |  4 ++--
 docs/mutable.txt                   |  6 +++---
 src/allmydata/download.py          | 14 +++++++-------
 src/allmydata/encode.py            | 14 +++++++-------
 src/allmydata/mutable/node.py      |  4 ++--
 src/allmydata/mutable/retrieve.py  |  6 +++---
 src/allmydata/mutable/servermap.py |  2 +-
 src/allmydata/test/common.py       |  6 +++---
 src/allmydata/test/test_encode.py  | 14 +++++++-------
 src/allmydata/test/test_mutable.py |  6 +++---
 src/allmydata/test/test_system.py  |  6 +++---
 src/allmydata/test/test_upload.py  |  2 +-
 src/allmydata/test/test_web.py     |  2 +-
 src/allmydata/upload.py            |  4 ++--
 14 files changed, 45 insertions(+), 45 deletions(-)

diff --git a/docs/mutable-DSA.txt b/docs/mutable-DSA.txt
index 4875d874..b02815c1 100644
--- a/docs/mutable-DSA.txt
+++ b/docs/mutable-DSA.txt
@@ -530,8 +530,8 @@ MutableFileNode class will be created. Instances of this class will contain a
 URI and a reference to the client (for peer selection and connection). The
 methods of MutableFileNode are:
 
- * replace(newdata) -> OK, ConsistencyError, NotEnoughPeersError
- * get() -> [deferred] newdata, NotEnoughPeersError
+ * replace(newdata) -> OK, ConsistencyError, NotEnoughSharesError
+ * get() -> [deferred] newdata, NotEnoughSharesError
    * if there are multiple retrievable versions in the grid, get() returns
      the first version it can reconstruct, and silently ignores the others.
      In the future, a more advanced API will signal and provide access to
diff --git a/docs/mutable.txt b/docs/mutable.txt
index e3eb1f2b..44910d94 100644
--- a/docs/mutable.txt
+++ b/docs/mutable.txt
@@ -359,13 +359,13 @@ a reference to the client (for peer selection and connection).
 
 The methods of MutableFileNode are:
 
- * download_to_data() -> [deferred] newdata, NotEnoughPeersError
+ * download_to_data() -> [deferred] newdata, NotEnoughSharesError
    * if there are multiple retrievable versions in the grid, get() returns
      the first version it can reconstruct, and silently ignores the others.
      In the future, a more advanced API will signal and provide access to
      the multiple heads.
- * update(newdata) -> OK, UncoordinatedWriteError, NotEnoughPeersError
- * overwrite(newdata) -> OK, UncoordinatedWriteError, NotEnoughPeersError
+ * update(newdata) -> OK, UncoordinatedWriteError, NotEnoughSharesError
+ * overwrite(newdata) -> OK, UncoordinatedWriteError, NotEnoughSharesError
 
 download_to_data() causes a new retrieval to occur, pulling the current
 contents from the grid and returning them to the caller. At the same time,
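For reference, a caller of the renamed API sees the failure through the usual
Deferred errback path. A minimal illustrative sketch (not code from this
patch; it assumes an IMutableFileNode instance named `node` is already in
hand):

    from allmydata.encode import NotEnoughSharesError

    def _got_data(newdata):
        print "retrieved %d bytes" % len(newdata)

    def _unrecoverable(f):
        # download_to_data() errbacks with NotEnoughSharesError when fewer
        # than k distinct shares can be located and validated
        f.trap(NotEnoughSharesError)
        print "file is unrecoverable: %s" % (f.value,)

    d = node.download_to_data()
    d.addCallbacks(_got_data, _unrecoverable)
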
diff --git a/src/allmydata/download.py b/src/allmydata/download.py
index 2d239522..12f94bc9 100644
--- a/src/allmydata/download.py
+++ b/src/allmydata/download.py
@@ -11,7 +11,7 @@ from allmydata.util.assertutil import _assert
 from allmydata import codec, hashtree, storage, uri
 from allmydata.interfaces import IDownloadTarget, IDownloader, IFileURI, \
      IDownloadStatus, IDownloadResults
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from pycryptopp.cipher.aes import AES
 
 class HaveAllPeersError(Exception):
@@ -332,7 +332,7 @@ class SegmentDownloader:
         return d
 
     def _try(self):
-        # fill our set of active buckets, maybe raising NotEnoughPeersError
+        # fill our set of active buckets, maybe raising NotEnoughSharesError
         active_buckets = self.parent._activate_enough_buckets()
         # Now we have enough buckets, in self.parent.active_buckets.
 
@@ -598,7 +598,7 @@ class FileDownloader:
             self._results.timings["peer_selection"] = now - self._started
 
         if len(self._share_buckets) < self._num_needed_shares:
-            raise NotEnoughPeersError
+            raise NotEnoughSharesError
 
         #for s in self._share_vbuckets.values():
         #    for vb in s:
@@ -637,7 +637,7 @@ class FileDownloader:
     def _obtain_validated_thing(self, ignored, sources, name, methname, args,
                                 validatorfunc):
         if not sources:
-            raise NotEnoughPeersError("started with zero peers while fetching "
+            raise NotEnoughSharesError("started with zero peers while fetching "
                                       "%s" % name)
         bucket = sources[0]
         sources = sources[1:]
@@ -649,7 +649,7 @@ class FileDownloader:
             self.log("%s from vbucket %s failed:" % (name, bucket),
                      failure=f, level=log.WEIRD)
             if not sources:
-                raise NotEnoughPeersError("ran out of peers, last error was %s"
+                raise NotEnoughSharesError("ran out of peers, last error was %s"
                                           % (f,))
             # try again with a different one
             return self._obtain_validated_thing(None, sources, name,
@@ -771,7 +771,7 @@ class FileDownloader:
 
     def _activate_enough_buckets(self):
         """either return a mapping from shnum to a ValidatedBucket that can
-        provide data for that share, or raise NotEnoughPeersError"""
+        provide data for that share, or raise NotEnoughSharesError"""
 
         while len(self.active_buckets) < self._num_needed_shares:
             # need some more
@@ -779,7 +779,7 @@ class FileDownloader:
             available_shnums = set(self._share_vbuckets.keys())
             potential_shnums = list(available_shnums - handled_shnums)
             if not potential_shnums:
-                raise NotEnoughPeersError
+                raise NotEnoughSharesError
             # choose a random share
             shnum = random.choice(potential_shnums)
             # and a random bucket that will provide it
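To make the new name concrete on the download side: the downloader gives up
as soon as the number of distinct reachable shares drops below k
(_num_needed_shares), no matter how many peers answered, which is why the
exception is now named after shares rather than peers. A standalone sketch of
that recoverability check (hypothetical names, not code from this patch):

    class NotEnoughSharesError(Exception):
        pass

    def check_recoverability(found_shnums, num_needed_shares):
        # found_shnums: the set of distinct share numbers located on the grid
        if len(found_shnums) < num_needed_shares:
            raise NotEnoughSharesError("found %d distinct shares, need %d"
                                       % (len(found_shnums),
                                          num_needed_shares))

    check_recoverability(set([0, 3, 7]), 3)  # 3-of-N satisfied, no exception
    # check_recoverability(set([0, 3]), 3)   # would raise NotEnoughSharesError
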
diff --git a/src/allmydata/encode.py b/src/allmydata/encode.py
index 3480ce5f..1441b9c6 100644
--- a/src/allmydata/encode.py
+++ b/src/allmydata/encode.py
@@ -60,7 +60,7 @@ hash tree is put into the URI.
 
 """
 
-class NotEnoughPeersError(Exception):
+class NotEnoughSharesError(Exception):
     worth_retrying = False
     servermap = None
     pass
@@ -489,7 +489,7 @@ class Encoder(object):
                      level=log.WEIRD)
         if len(self.landlords) < self.shares_of_happiness:
             msg = "lost too many shareholders during upload: %s" % why
-            raise NotEnoughPeersError(msg)
+            raise NotEnoughSharesError(msg)
         self.log("but we can still continue with %s shares, we'll be happy "
                  "with at least %s" % (len(self.landlords),
                                        self.shares_of_happiness),
@@ -497,17 +497,17 @@ class Encoder(object):
 
     def _gather_responses(self, dl):
         d = defer.DeferredList(dl, fireOnOneErrback=True)
-        def _eatNotEnoughPeersError(f):
+        def _eatNotEnoughSharesError(f):
             # all exceptions that occur while talking to a peer are handled
-            # in _remove_shareholder. That might raise NotEnoughPeersError,
+            # in _remove_shareholder. That might raise NotEnoughSharesError,
             # which will cause the DeferredList to errback but which should
-            # otherwise be consumed. Allow non-NotEnoughPeersError exceptions
+            # otherwise be consumed. Allow non-NotEnoughSharesError exceptions
             # to pass through as an unhandled errback. We use this in lieu of
             # consumeErrors=True to allow coding errors to be logged.
-            f.trap(NotEnoughPeersError)
+            f.trap(NotEnoughSharesError)
             return None
         for d0 in dl:
-            d0.addErrback(_eatNotEnoughPeersError)
+            d0.addErrback(_eatNotEnoughSharesError)
         return d
 
     def finish_hashing(self):
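The _gather_responses() pattern above deserves a note: each per-peer Deferred
traps the expected NotEnoughSharesError so it is never reported as an
unhandled error, while fireOnOneErrback=True still lets the aggregate
DeferredList fail fast, and any other exception stays visible as a coding
error. A standalone sketch of the same idea (illustrative only, with
hypothetical names):

    from twisted.internet import defer

    class NotEnoughSharesError(Exception):
        pass

    def gather_responses(dl):
        # the aggregate fails as soon as any input fails ...
        d = defer.DeferredList(dl, fireOnOneErrback=True)
        def _eat_expected(f):
            # ... but the expected per-peer failure is consumed here, so it
            # never surfaces as an unhandled error; anything else propagates
            f.trap(NotEnoughSharesError)
            return None
        for d0 in dl:
            d0.addErrback(_eat_expected)
        return d

    d1, d2 = defer.Deferred(), defer.Deferred()
    agg = gather_responses([d1, d2])
    agg.addErrback(lambda f: None)  # the caller decides how to give up
    d1.errback(NotEnoughSharesError("lost a shareholder"))
    d2.callback("share written")
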
diff --git a/src/allmydata/mutable/node.py b/src/allmydata/mutable/node.py
index 3ad40470..4c901ace 100644
--- a/src/allmydata/mutable/node.py
+++ b/src/allmydata/mutable/node.py
@@ -7,7 +7,7 @@ from twisted.internet import defer
 from allmydata.interfaces import IMutableFileNode, IMutableFileURI
 from allmydata.util import hashutil
 from allmydata.uri import WriteableSSKFileURI
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from pycryptopp.publickey import rsa
 from pycryptopp.cipher.aes import AES
 
@@ -294,7 +294,7 @@ class MutableFileNode:
         d = self.obtain_lock()
         d.addCallback(lambda res: self._update_and_retrieve_best())
         def _maybe_retry(f):
-            f.trap(NotEnoughPeersError)
+            f.trap(NotEnoughSharesError)
             e = f.value
             if not e.worth_retrying:
                 return f
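The retry hook above leans on the worth_retrying attribute that the renamed
exception class carries (shown in the encode.py hunk, along with servermap).
A sketch of how a raiser and this kind of catcher might cooperate
(illustrative only; fetch_once and maybe_retry are hypothetical helpers):

    from twisted.python import failure

    class NotEnoughSharesError(Exception):
        worth_retrying = False
        servermap = None

    def fetch_once():
        e = NotEnoughSharesError("a concurrent write moved the shares")
        e.worth_retrying = True      # the raiser marks transient failures
        raise e

    def maybe_retry(f):
        f.trap(NotEnoughSharesError)
        if not f.value.worth_retrying:
            return f                 # permanent failure: let it propagate
        return "retry with a fresh servermap"

    try:
        fetch_once()
    except NotEnoughSharesError:
        result = maybe_retry(failure.Failure())
    assert result == "retry with a fresh servermap"
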
diff --git a/src/allmydata/mutable/retrieve.py b/src/allmydata/mutable/retrieve.py
index 9abc5430..b28203cb 100644
--- a/src/allmydata/mutable/retrieve.py
+++ b/src/allmydata/mutable/retrieve.py
@@ -8,7 +8,7 @@ from foolscap.eventual import eventually
 from allmydata.interfaces import IRetrieveStatus
 from allmydata.util import hashutil, idlib, log
 from allmydata import hashtree, codec, storage
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from pycryptopp.cipher.aes import AES
 
 from common import DictOfSets, CorruptShareError, UncoordinatedWriteError
@@ -328,7 +328,7 @@ class Retrieve:
         # There are some number of queries outstanding, each for a single
         # share. If we can generate 'needed_shares' additional queries, we do
         # so. If we can't, then we know this file is a goner, and we raise
-        # NotEnoughPeersError.
+        # NotEnoughSharesError.
         self.log(format=("_maybe_send_more_queries, have=%(have)d, k=%(k)d, "
                          "outstanding=%(outstanding)d"),
                  have=len(self.shares), k=k,
@@ -387,7 +387,7 @@ class Retrieve:
                     }
             self.log(format=format,
                      level=log.WEIRD, **args)
-            err = NotEnoughPeersError("%s, last failure: %s" %
+            err = NotEnoughSharesError("%s, last failure: %s" %
                                       (format % args, self._last_failure))
             if self._bad_shares:
                 self.log("We found some bad shares this pass. You should "
diff --git a/src/allmydata/mutable/servermap.py b/src/allmydata/mutable/servermap.py
index 4f8fca2b..fb38a12f 100644
--- a/src/allmydata/mutable/servermap.py
+++ b/src/allmydata/mutable/servermap.py
@@ -485,7 +485,7 @@ class ServermapUpdater:
             # NOTE: if uncoordinated writes are taking place, someone might
             # change the share (and most probably move the encprivkey) before
             # we get a chance to do one of these reads and fetch it. This
-            # will cause us to see a NotEnoughPeersError(unable to fetch
+            # will cause us to see a NotEnoughSharesError(unable to fetch
             # privkey) instead of an UncoordinatedWriteError . This is a
             # nuisance, but it will go away when we move to DSA-based mutable
             # files (since the privkey will be small enough to fit in the
diff --git a/src/allmydata/test/common.py b/src/allmydata/test/common.py
index 3a52dc5d..27375c0b 100644
--- a/src/allmydata/test/common.py
+++ b/src/allmydata/test/common.py
@@ -6,7 +6,7 @@ from twisted.python import failure
 from twisted.application import service
 from allmydata import uri, dirnode
 from allmydata.interfaces import IURI, IMutableFileNode, IFileNode
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from allmydata.util import log
 
 class FakeCHKFileNode:
@@ -34,7 +34,7 @@ class FakeCHKFileNode:
 
     def download(self, target):
         if self.my_uri not in self.all_contents:
-            f = failure.Failure(NotEnoughPeersError())
+            f = failure.Failure(NotEnoughSharesError())
             target.fail(f)
             return defer.fail(f)
         data = self.all_contents[self.my_uri]
@@ -44,7 +44,7 @@ class FakeCHKFileNode:
         return defer.maybeDeferred(target.finish)
     def download_to_data(self):
         if self.my_uri not in self.all_contents:
-            return defer.fail(NotEnoughPeersError())
+            return defer.fail(NotEnoughSharesError())
         data = self.all_contents[self.my_uri]
         return defer.succeed(data)
     def get_size(self):
diff --git a/src/allmydata/test/test_encode.py b/src/allmydata/test/test_encode.py
index cf2ab20b..6a0711e3 100644
--- a/src/allmydata/test/test_encode.py
+++ b/src/allmydata/test/test_encode.py
@@ -408,7 +408,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), AVAILABLE_SHARES=2)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -457,7 +457,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -480,7 +480,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -597,7 +597,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -620,7 +620,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -649,7 +649,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(encode.NotEnoughPeersError), res)
+            self.failUnless(res.check(encode.NotEnoughSharesError), res)
         d.addBoth(_done)
         return d
 
@@ -660,7 +660,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(encode.NotEnoughPeersError))
+            self.failUnless(res.check(encode.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
diff --git a/src/allmydata/test/test_mutable.py b/src/allmydata/test/test_mutable.py
index 03dca84c..c84fb96d 100644
--- a/src/allmydata/test/test_mutable.py
+++ b/src/allmydata/test/test_mutable.py
@@ -8,7 +8,7 @@ from allmydata import uri, download
 from allmydata.util import base32
 from allmydata.util.idlib import shortnodeid_b2a
 from allmydata.util.hashutil import tagged_hash
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from allmydata.interfaces import IURI, IMutableFileURI, IUploadable
 from foolscap.eventual import eventually, fireEventually
 from foolscap.logging import log
@@ -363,7 +363,7 @@ class MakeShares(unittest.TestCase):
 
     # TODO: when we publish to 20 peers, we should get one share per peer on 10
     # when we publish to 3 peers, we should get either 3 or 4 shares per peer
-    # when we publish to zero peers, we should get a NotEnoughPeersError
+    # when we publish to zero peers, we should get a NotEnoughSharesError
 
 class Servermap(unittest.TestCase):
     def setUp(self):
@@ -644,7 +644,7 @@ class Roundtrip(unittest.TestCase):
                                self.failUnlessEqual(new_contents, self.CONTENTS))
                 return d1
             else:
-                return self.shouldFail(NotEnoughPeersError,
+                return self.shouldFail(NotEnoughSharesError,
                                        "_corrupt_all(offset=%s)" % (offset,),
                                        substring,
                                        r.download)
diff --git a/src/allmydata/test/test_system.py b/src/allmydata/test/test_system.py
index c09d11da..565dfe6b 100644
--- a/src/allmydata/test/test_system.py
+++ b/src/allmydata/test/test_system.py
@@ -374,10 +374,10 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
                 log.msg("finished downloading non-existend URI",
                         level=log.UNUSUAL, facility="tahoe.tests")
                 self.failUnless(isinstance(res, Failure))
-                self.failUnless(res.check(download.NotEnoughPeersError),
-                                "expected NotEnoughPeersError, got %s" % res)
+                self.failUnless(res.check(download.NotEnoughSharesError),
+                                "expected NotEnoughSharesError, got %s" % res)
                 # TODO: files that have zero peers should get a special kind
-                # of NotEnoughPeersError, which can be used to suggest that
+                # of NotEnoughSharesError, which can be used to suggest that
                 # the URI might be wrong or that they've never uploaded the
                 # file in the first place.
             d1.addBoth(_baduri_should_fail)
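A small point behind these assertions: some tests check
download.NotEnoughSharesError and others check encode.NotEnoughSharesError,
but both names are bound to the same class object, because download.py
imports it from encode.py, so Failure.check() matches either spelling. A
quick illustration (assuming a Tahoe source tree with this patch applied on
the import path):

    from twisted.python.failure import Failure
    from allmydata import download, encode

    assert download.NotEnoughSharesError is encode.NotEnoughSharesError
    f = Failure(encode.NotEnoughSharesError("only located 2 of 3 shares"))
    assert f.check(download.NotEnoughSharesError)
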
diff --git a/src/allmydata/test/test_upload.py b/src/allmydata/test/test_upload.py
index e15731a3..798badb2 100644
--- a/src/allmydata/test/test_upload.py
+++ b/src/allmydata/test/test_upload.py
@@ -303,7 +303,7 @@ class FullServer(unittest.TestCase):
         self.u.parent = self.node
 
     def _should_fail(self, f):
-        self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughPeersError), f)
+        self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughSharesError), f)
 
     def test_data_large(self):
         data = DATA
diff --git a/src/allmydata/test/test_web.py b/src/allmydata/test/test_web.py
index 87764acd..cbe20864 100644
--- a/src/allmydata/test/test_web.py
+++ b/src/allmydata/test/test_web.py
@@ -1659,7 +1659,7 @@ class Web(WebMixin, unittest.TestCase):
         base = "/uri/%s" % self._bad_file_uri
         d = self.GET(base)
         d.addBoth(self.shouldHTTPError, "test_GET_URI_URL_missing",
-                  http.GONE, response_substring="NotEnoughPeersError")
+                  http.GONE, response_substring="NotEnoughSharesError")
         # TODO: how can we exercise both sides of WebDownloadTarget.fail
         # here? we must arrange for a download to fail after target.open()
         # has been called, and then inspect the response to see that it is
diff --git a/src/allmydata/upload.py b/src/allmydata/upload.py
index f52978ab..c99270c9 100644
--- a/src/allmydata/upload.py
+++ b/src/allmydata/upload.py
@@ -157,7 +157,7 @@ class Tahoe2PeerSelector:
 
         peers = client.get_permuted_peers("storage", storage_index)
         if not peers:
-            raise encode.NotEnoughPeersError("client gave us zero peers")
+            raise encode.NotEnoughSharesError("client gave us zero peers")
 
         # figure out how much space to ask for
 
@@ -269,7 +269,7 @@ class Tahoe2PeerSelector:
                 if self.last_failure_msg:
                     msg += " (%s)" % (self.last_failure_msg,)
                 log.msg(msg, level=log.UNUSUAL, parent=self._log_parent)
-                raise encode.NotEnoughPeersError(msg)
+                raise encode.NotEnoughSharesError(msg)
             else:
                 # we placed enough to be happy, so we're done
                 if self._status:
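On the upload side the renamed exception now also covers peer selection: the
Tahoe2PeerSelector is happy only once at least shares_of_happiness of the
total shares have homes, and raises NotEnoughSharesError otherwise. A
standalone sketch of that acceptance check (hypothetical names, not code from
this patch):

    class NotEnoughSharesError(Exception):
        pass

    def check_placement(placed_shares, shares_of_happiness):
        # placed_shares: dict mapping shnum -> peerid for each share that
        # found a home during peer selection
        if len(placed_shares) < shares_of_happiness:
            raise NotEnoughSharesError("placed %d shares, need %d to be happy"
                                       % (len(placed_shares),
                                          shares_of_happiness))

    check_placement({0: "peerA", 1: "peerA", 2: "peerB"}, 3)  # happy: 3 >= 3
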
-- 
2.45.2