git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/commitdiff
mutable WIP: rename NotEnoughPeersError to NotEnoughSharesError
author: Brian Warner <warner@allmydata.com>
Tue, 15 Apr 2008 23:08:32 +0000 (16:08 -0700)
committer: Brian Warner <warner@allmydata.com>
Tue, 15 Apr 2008 23:08:32 +0000 (16:08 -0700)
14 files changed:
docs/mutable-DSA.txt
docs/mutable.txt
src/allmydata/download.py
src/allmydata/encode.py
src/allmydata/mutable/node.py
src/allmydata/mutable/retrieve.py
src/allmydata/mutable/servermap.py
src/allmydata/test/common.py
src/allmydata/test/test_encode.py
src/allmydata/test/test_mutable.py
src/allmydata/test/test_system.py
src/allmydata/test/test_upload.py
src/allmydata/test/test_web.py
src/allmydata/upload.py

index 4875d8742e06bbb72f82785197030327b859b0f0..b02815c152fd94935421e26678c573d5d8ad356d 100644 (file)
@@ -530,8 +530,8 @@ MutableFileNode class will be created. Instances of this class will contain a
 URI and a reference to the client (for peer selection and connection). The
 methods of MutableFileNode are:
 
- * replace(newdata) -> OK, ConsistencyError, NotEnoughPeersError
- * get() -> [deferred] newdata, NotEnoughPeersError
+ * replace(newdata) -> OK, ConsistencyError, NotEnoughSharesError
+ * get() -> [deferred] newdata, NotEnoughSharesError
    * if there are multiple retrieveable versions in the grid, get() returns
      the first version it can reconstruct, and silently ignores the others.
      In the future, a more advanced API will signal and provide access to
index e3eb1f2bf4605f455a9c81163fdce8ebd4f805a1..44910d94fa7e94941993d1542e4cf90f8ad2c209 100644 (file)
@@ -359,13 +359,13 @@ a reference to the client (for peer selection and connection).
 
 The methods of MutableFileNode are:
 
- * download_to_data() -> [deferred] newdata, NotEnoughPeersError
+ * download_to_data() -> [deferred] newdata, NotEnoughSharesError
    * if there are multiple retrieveable versions in the grid, get() returns
      the first version it can reconstruct, and silently ignores the others.
      In the future, a more advanced API will signal and provide access to
      the multiple heads.
- * update(newdata) -> OK, UncoordinatedWriteError, NotEnoughPeersError
- * overwrite(newdata) -> OK, UncoordinatedWriteError, NotEnoughPeersError
+ * update(newdata) -> OK, UncoordinatedWriteError, NotEnoughSharesError
+ * overwrite(newdata) -> OK, UncoordinatedWriteError, NotEnoughSharesError
 
 download_to_data() causes a new retrieval to occur, pulling the current
 contents from the grid and returning them to the caller. At the same time,
index 2d239522b797ea139a33e81888cd931def4527a2..12f94bc9f419244f53b1f8cf588ac7f986bdbeb7 100644 (file)
@@ -11,7 +11,7 @@ from allmydata.util.assertutil import _assert
 from allmydata import codec, hashtree, storage, uri
 from allmydata.interfaces import IDownloadTarget, IDownloader, IFileURI, \
      IDownloadStatus, IDownloadResults
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from pycryptopp.cipher.aes import AES
 
 class HaveAllPeersError(Exception):
@@ -332,7 +332,7 @@ class SegmentDownloader:
         return d
 
     def _try(self):
-        # fill our set of active buckets, maybe raising NotEnoughPeersError
+        # fill our set of active buckets, maybe raising NotEnoughSharesError
         active_buckets = self.parent._activate_enough_buckets()
         # Now we have enough buckets, in self.parent.active_buckets.
 
@@ -598,7 +598,7 @@ class FileDownloader:
             self._results.timings["peer_selection"] = now - self._started
 
         if len(self._share_buckets) < self._num_needed_shares:
-            raise NotEnoughPeersError
+            raise NotEnoughSharesError
 
         #for s in self._share_vbuckets.values():
         #    for vb in s:
@@ -637,7 +637,7 @@ class FileDownloader:
     def _obtain_validated_thing(self, ignored, sources, name, methname, args,
                                 validatorfunc):
         if not sources:
-            raise NotEnoughPeersError("started with zero peers while fetching "
+            raise NotEnoughSharesError("started with zero peers while fetching "
                                       "%s" % name)
         bucket = sources[0]
         sources = sources[1:]
@@ -649,7 +649,7 @@ class FileDownloader:
             self.log("%s from vbucket %s failed:" % (name, bucket),
                      failure=f, level=log.WEIRD)
             if not sources:
-                raise NotEnoughPeersError("ran out of peers, last error was %s"
+                raise NotEnoughSharesError("ran out of peers, last error was %s"
                                           % (f,))
             # try again with a different one
             return self._obtain_validated_thing(None, sources, name,
@@ -771,7 +771,7 @@ class FileDownloader:
 
     def _activate_enough_buckets(self):
         """either return a mapping from shnum to a ValidatedBucket that can
-        provide data for that share, or raise NotEnoughPeersError"""
+        provide data for that share, or raise NotEnoughSharesError"""
 
         while len(self.active_buckets) < self._num_needed_shares:
             # need some more
@@ -779,7 +779,7 @@ class FileDownloader:
             available_shnums = set(self._share_vbuckets.keys())
             potential_shnums = list(available_shnums - handled_shnums)
             if not potential_shnums:
-                raise NotEnoughPeersError
+                raise NotEnoughSharesError
             # choose a random share
             shnum = random.choice(potential_shnums)
             # and a random bucket that will provide it
index 3480ce5fb8a458987ad54abfc3adc411ef6db0ec..1441b9c6b889c59a2a58e8d68159d23941ddd990 100644 (file)
@@ -60,7 +60,7 @@ hash tree is put into the URI.
 
 """
 
-class NotEnoughPeersError(Exception):
+class NotEnoughSharesError(Exception):
     worth_retrying = False
     servermap = None
     pass
@@ -489,7 +489,7 @@ class Encoder(object):
                      level=log.WEIRD)
         if len(self.landlords) < self.shares_of_happiness:
             msg = "lost too many shareholders during upload: %s" % why
-            raise NotEnoughPeersError(msg)
+            raise NotEnoughSharesError(msg)
         self.log("but we can still continue with %s shares, we'll be happy "
                  "with at least %s" % (len(self.landlords),
                                        self.shares_of_happiness),
@@ -497,17 +497,17 @@ class Encoder(object):
 
     def _gather_responses(self, dl):
         d = defer.DeferredList(dl, fireOnOneErrback=True)
-        def _eatNotEnoughPeersError(f):
+        def _eatNotEnoughSharesError(f):
             # all exceptions that occur while talking to a peer are handled
-            # in _remove_shareholder. That might raise NotEnoughPeersError,
+            # in _remove_shareholder. That might raise NotEnoughSharesError,
             # which will cause the DeferredList to errback but which should
-            # otherwise be consumed. Allow non-NotEnoughPeersError exceptions
+            # otherwise be consumed. Allow non-NotEnoughSharesError exceptions
             # to pass through as an unhandled errback. We use this in lieu of
             # consumeErrors=True to allow coding errors to be logged.
-            f.trap(NotEnoughPeersError)
+            f.trap(NotEnoughSharesError)
             return None
         for d0 in dl:
-            d0.addErrback(_eatNotEnoughPeersError)
+            d0.addErrback(_eatNotEnoughSharesError)
         return d
 
     def finish_hashing(self):
index 3ad40470d117a69e5eefd650853905da1c38ac94..4c901ace02ffbfa4e531fe672ebe841f4413870d 100644 (file)
@@ -7,7 +7,7 @@ from twisted.internet import defer
 from allmydata.interfaces import IMutableFileNode, IMutableFileURI
 from allmydata.util import hashutil
 from allmydata.uri import WriteableSSKFileURI
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from pycryptopp.publickey import rsa
 from pycryptopp.cipher.aes import AES
 
@@ -294,7 +294,7 @@ class MutableFileNode:
         d = self.obtain_lock()
         d.addCallback(lambda res: self._update_and_retrieve_best())
         def _maybe_retry(f):
-            f.trap(NotEnoughPeersError)
+            f.trap(NotEnoughSharesError)
             e = f.value
             if not e.worth_retrying:
                 return f
index 9abc5430b48245b4aefa0d29ca318f0a2e35b14d..b28203cbb79afeaa33a6a764f8f17e2019e0f238 100644 (file)
@@ -8,7 +8,7 @@ from foolscap.eventual import eventually
 from allmydata.interfaces import IRetrieveStatus
 from allmydata.util import hashutil, idlib, log
 from allmydata import hashtree, codec, storage
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from pycryptopp.cipher.aes import AES
 
 from common import DictOfSets, CorruptShareError, UncoordinatedWriteError
@@ -328,7 +328,7 @@ class Retrieve:
         # There are some number of queries outstanding, each for a single
         # share. If we can generate 'needed_shares' additional queries, we do
         # so. If we can't, then we know this file is a goner, and we raise
-        # NotEnoughPeersError.
+        # NotEnoughSharesError.
         self.log(format=("_maybe_send_more_queries, have=%(have)d, k=%(k)d, "
                          "outstanding=%(outstanding)d"),
                  have=len(self.shares), k=k,
@@ -387,7 +387,7 @@ class Retrieve:
                     }
             self.log(format=format,
                      level=log.WEIRD, **args)
-            err = NotEnoughPeersError("%s, last failure: %s" %
+            err = NotEnoughSharesError("%s, last failure: %s" %
                                       (format % args, self._last_failure))
             if self._bad_shares:
                 self.log("We found some bad shares this pass. You should "
index 4f8fca2b97af657f14fb41f85eb3b702f9ea9260..fb38a12ff8dc04058faf4e6cf82947d75e904170 100644 (file)
@@ -485,7 +485,7 @@ class ServermapUpdater:
             # NOTE: if uncoordinated writes are taking place, someone might
             # change the share (and most probably move the encprivkey) before
             # we get a chance to do one of these reads and fetch it. This
-            # will cause us to see a NotEnoughPeersError(unable to fetch
+            # will cause us to see a NotEnoughSharesError(unable to fetch
             # privkey) instead of an UncoordinatedWriteError . This is a
             # nuisance, but it will go away when we move to DSA-based mutable
             # files (since the privkey will be small enough to fit in the
index 3a52dc5d4ef7bb0f13ad5ca19ed70f2fa63ef753..27375c0b75bb74ab12547349eb28782a92cf6aa9 100644 (file)
@@ -6,7 +6,7 @@ from twisted.python import failure
 from twisted.application import service
 from allmydata import uri, dirnode
 from allmydata.interfaces import IURI, IMutableFileNode, IFileNode
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from allmydata.util import log
 
 class FakeCHKFileNode:
@@ -34,7 +34,7 @@ class FakeCHKFileNode:
 
     def download(self, target):
         if self.my_uri not in self.all_contents:
-            f = failure.Failure(NotEnoughPeersError())
+            f = failure.Failure(NotEnoughSharesError())
             target.fail(f)
             return defer.fail(f)
         data = self.all_contents[self.my_uri]
@@ -44,7 +44,7 @@ class FakeCHKFileNode:
         return defer.maybeDeferred(target.finish)
     def download_to_data(self):
         if self.my_uri not in self.all_contents:
-            return defer.fail(NotEnoughPeersError())
+            return defer.fail(NotEnoughSharesError())
         data = self.all_contents[self.my_uri]
         return defer.succeed(data)
     def get_size(self):
index cf2ab20bc50cf779e353917aa4ab4bb9f0a3d78f..6a0711e3fee84d74eaf4371bec4359533d4b93e1 100644 (file)
@@ -408,7 +408,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), AVAILABLE_SHARES=2)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -457,7 +457,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -480,7 +480,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -597,7 +597,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -620,7 +620,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(download.NotEnoughPeersError))
+            self.failUnless(res.check(download.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
@@ -649,7 +649,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(encode.NotEnoughPeersError), res)
+            self.failUnless(res.check(encode.NotEnoughSharesError), res)
         d.addBoth(_done)
         return d
 
@@ -660,7 +660,7 @@ class Roundtrip(unittest.TestCase):
         d = self.send_and_recover((4,8,10), bucket_modes=modemap)
         def _done(res):
             self.failUnless(isinstance(res, Failure))
-            self.failUnless(res.check(encode.NotEnoughPeersError))
+            self.failUnless(res.check(encode.NotEnoughSharesError))
         d.addBoth(_done)
         return d
 
index 03dca84cd0daf4a4d790530633a24444b03a76b3..c84fb96dbed2300ccc248ffae528e6dce8df3d9d 100644 (file)
@@ -8,7 +8,7 @@ from allmydata import uri, download
 from allmydata.util import base32
 from allmydata.util.idlib import shortnodeid_b2a
 from allmydata.util.hashutil import tagged_hash
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
 from allmydata.interfaces import IURI, IMutableFileURI, IUploadable
 from foolscap.eventual import eventually, fireEventually
 from foolscap.logging import log
@@ -363,7 +363,7 @@ class MakeShares(unittest.TestCase):
 
     # TODO: when we publish to 20 peers, we should get one share per peer on 10
     # when we publish to 3 peers, we should get either 3 or 4 shares per peer
-    # when we publish to zero peers, we should get a NotEnoughPeersError
+    # when we publish to zero peers, we should get a NotEnoughSharesError
 
 class Servermap(unittest.TestCase):
     def setUp(self):
@@ -644,7 +644,7 @@ class Roundtrip(unittest.TestCase):
                                self.failUnlessEqual(new_contents, self.CONTENTS))
                 return d1
             else:
-                return self.shouldFail(NotEnoughPeersError,
+                return self.shouldFail(NotEnoughSharesError,
                                        "_corrupt_all(offset=%s)" % (offset,),
                                        substring,
                                        r.download)
index c09d11da03c0e81a49b3227b7f90eda2336165ac..565dfe6b9f22a46d4b81eefb963e6e678e61a2f6 100644 (file)
@@ -374,10 +374,10 @@ class SystemTest(testutil.SignalMixin, testutil.PollMixin, unittest.TestCase):
                 log.msg("finished downloading non-existend URI",
                         level=log.UNUSUAL, facility="tahoe.tests")
                 self.failUnless(isinstance(res, Failure))
-                self.failUnless(res.check(download.NotEnoughPeersError),
-                                "expected NotEnoughPeersError, got %s" % res)
+                self.failUnless(res.check(download.NotEnoughSharesError),
+                                "expected NotEnoughSharesError, got %s" % res)
                 # TODO: files that have zero peers should get a special kind
-                # of NotEnoughPeersError, which can be used to suggest that
+                # of NotEnoughSharesError, which can be used to suggest that
                 # the URI might be wrong or that they've never uploaded the
                 # file in the first place.
             d1.addBoth(_baduri_should_fail)
index e15731a32e1df5f0ecdcddf358c56e33d991242d..798badb2cce90dbaee23ea4054ab66d0349e6772 100644 (file)
@@ -303,7 +303,7 @@ class FullServer(unittest.TestCase):
         self.u.parent = self.node
 
     def _should_fail(self, f):
-        self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughPeersError), f)
+        self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughSharesError), f)
 
     def test_data_large(self):
         data = DATA
index 87764acd3888abdaaad6dce9b8336e75e8c7600d..cbe20864961cc9e6674a79ba70a66c82bfadc4bd 100644 (file)
@@ -1659,7 +1659,7 @@ class Web(WebMixin, unittest.TestCase):
         base = "/uri/%s" % self._bad_file_uri
         d = self.GET(base)
         d.addBoth(self.shouldHTTPError, "test_GET_URI_URL_missing",
-                  http.GONE, response_substring="NotEnoughPeersError")
+                  http.GONE, response_substring="NotEnoughSharesError")
         # TODO: how can we exercise both sides of WebDownloadTarget.fail
         # here? we must arrange for a download to fail after target.open()
         # has been called, and then inspect the response to see that it is
index f52978ab72a495199568573205a37dae1c4c2c43..c99270c97141e72c9cdccca5135ab60f3bec9929 100644 (file)
@@ -157,7 +157,7 @@ class Tahoe2PeerSelector:
 
         peers = client.get_permuted_peers("storage", storage_index)
         if not peers:
-            raise encode.NotEnoughPeersError("client gave us zero peers")
+            raise encode.NotEnoughSharesError("client gave us zero peers")
 
         # figure out how much space to ask for
 
@@ -269,7 +269,7 @@ class Tahoe2PeerSelector:
                 if self.last_failure_msg:
                     msg += " (%s)" % (self.last_failure_msg,)
                 log.msg(msg, level=log.UNUSUAL, parent=self._log_parent)
-                raise encode.NotEnoughPeersError(msg)
+                raise encode.NotEnoughSharesError(msg)
             else:
                 # we placed enough to be happy, so we're done
                 if self._status: