URI and a reference to the client (for peer selection and connection). The
methods of MutableFileNode are:
- * replace(newdata) -> OK, ConsistencyError, NotEnoughPeersError
- * get() -> [deferred] newdata, NotEnoughPeersError
+ * replace(newdata) -> OK, ConsistencyError, NotEnoughSharesError
+ * get() -> [deferred] newdata, NotEnoughSharesError
 * if there are multiple retrievable versions in the grid, get() returns
the first version it can reconstruct, and silently ignores the others.
In the future, a more advanced API will signal and provide access to
The methods of MutableFileNode are:
- * download_to_data() -> [deferred] newdata, NotEnoughPeersError
+ * download_to_data() -> [deferred] newdata, NotEnoughSharesError
 * if there are multiple retrievable versions in the grid, get() returns
the first version it can reconstruct, and silently ignores the others.
In the future, a more advanced API will signal and provide access to
the multiple heads.
- * update(newdata) -> OK, UncoordinatedWriteError, NotEnoughPeersError
- * overwrite(newdata) -> OK, UncoordinatedWriteError, NotEnoughPeersError
+ * update(newdata) -> OK, UncoordinatedWriteError, NotEnoughSharesError
+ * overwrite(newdata) -> OK, UncoordinatedWriteError, NotEnoughSharesError
download_to_data() causes a new retrieval to occur, pulling the current
contents from the grid and returning them to the caller. At the same time,
from allmydata import codec, hashtree, storage, uri
from allmydata.interfaces import IDownloadTarget, IDownloader, IFileURI, \
IDownloadStatus, IDownloadResults
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
from pycryptopp.cipher.aes import AES
class HaveAllPeersError(Exception):
return d
def _try(self):
- # fill our set of active buckets, maybe raising NotEnoughPeersError
+ # fill our set of active buckets, maybe raising NotEnoughSharesError
active_buckets = self.parent._activate_enough_buckets()
# Now we have enough buckets, in self.parent.active_buckets.
self._results.timings["peer_selection"] = now - self._started
if len(self._share_buckets) < self._num_needed_shares:
- raise NotEnoughPeersError
+ raise NotEnoughSharesError
#for s in self._share_vbuckets.values():
# for vb in s:
def _obtain_validated_thing(self, ignored, sources, name, methname, args,
validatorfunc):
if not sources:
- raise NotEnoughPeersError("started with zero peers while fetching "
+ raise NotEnoughSharesError("started with zero peers while fetching "
"%s" % name)
bucket = sources[0]
sources = sources[1:]
self.log("%s from vbucket %s failed:" % (name, bucket),
failure=f, level=log.WEIRD)
if not sources:
- raise NotEnoughPeersError("ran out of peers, last error was %s"
+ raise NotEnoughSharesError("ran out of peers, last error was %s"
% (f,))
# try again with a different one
return self._obtain_validated_thing(None, sources, name,
def _activate_enough_buckets(self):
"""either return a mapping from shnum to a ValidatedBucket that can
- provide data for that share, or raise NotEnoughPeersError"""
+ provide data for that share, or raise NotEnoughSharesError"""
while len(self.active_buckets) < self._num_needed_shares:
# need some more
available_shnums = set(self._share_vbuckets.keys())
potential_shnums = list(available_shnums - handled_shnums)
if not potential_shnums:
- raise NotEnoughPeersError
+ raise NotEnoughSharesError
# choose a random share
shnum = random.choice(potential_shnums)
# and a random bucket that will provide it
"""
-class NotEnoughPeersError(Exception):
+class NotEnoughSharesError(Exception):
worth_retrying = False
servermap = None
pass
level=log.WEIRD)
if len(self.landlords) < self.shares_of_happiness:
msg = "lost too many shareholders during upload: %s" % why
- raise NotEnoughPeersError(msg)
+ raise NotEnoughSharesError(msg)
self.log("but we can still continue with %s shares, we'll be happy "
"with at least %s" % (len(self.landlords),
self.shares_of_happiness),
def _gather_responses(self, dl):
d = defer.DeferredList(dl, fireOnOneErrback=True)
- def _eatNotEnoughPeersError(f):
+ def _eatNotEnoughSharesError(f):
# all exceptions that occur while talking to a peer are handled
- # in _remove_shareholder. That might raise NotEnoughPeersError,
+ # in _remove_shareholder. That might raise NotEnoughSharesError,
# which will cause the DeferredList to errback but which should
- # otherwise be consumed. Allow non-NotEnoughPeersError exceptions
+ # otherwise be consumed. Allow non-NotEnoughSharesError exceptions
# to pass through as an unhandled errback. We use this in lieu of
# consumeErrors=True to allow coding errors to be logged.
- f.trap(NotEnoughPeersError)
+ f.trap(NotEnoughSharesError)
return None
for d0 in dl:
- d0.addErrback(_eatNotEnoughPeersError)
+ d0.addErrback(_eatNotEnoughSharesError)
return d
def finish_hashing(self):
from allmydata.interfaces import IMutableFileNode, IMutableFileURI
from allmydata.util import hashutil
from allmydata.uri import WriteableSSKFileURI
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
from pycryptopp.publickey import rsa
from pycryptopp.cipher.aes import AES
d = self.obtain_lock()
d.addCallback(lambda res: self._update_and_retrieve_best())
def _maybe_retry(f):
- f.trap(NotEnoughPeersError)
+ f.trap(NotEnoughSharesError)
e = f.value
if not e.worth_retrying:
return f
from allmydata.interfaces import IRetrieveStatus
from allmydata.util import hashutil, idlib, log
from allmydata import hashtree, codec, storage
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
from pycryptopp.cipher.aes import AES
from common import DictOfSets, CorruptShareError, UncoordinatedWriteError
# There are some number of queries outstanding, each for a single
# share. If we can generate 'needed_shares' additional queries, we do
# so. If we can't, then we know this file is a goner, and we raise
- # NotEnoughPeersError.
+ # NotEnoughSharesError.
self.log(format=("_maybe_send_more_queries, have=%(have)d, k=%(k)d, "
"outstanding=%(outstanding)d"),
have=len(self.shares), k=k,
}
self.log(format=format,
level=log.WEIRD, **args)
- err = NotEnoughPeersError("%s, last failure: %s" %
+ err = NotEnoughSharesError("%s, last failure: %s" %
(format % args, self._last_failure))
if self._bad_shares:
self.log("We found some bad shares this pass. You should "
# NOTE: if uncoordinated writes are taking place, someone might
# change the share (and most probably move the encprivkey) before
# we get a chance to do one of these reads and fetch it. This
- # will cause us to see a NotEnoughPeersError(unable to fetch
+ # will cause us to see a NotEnoughSharesError(unable to fetch
# privkey) instead of an UncoordinatedWriteError . This is a
# nuisance, but it will go away when we move to DSA-based mutable
# files (since the privkey will be small enough to fit in the
from twisted.application import service
from allmydata import uri, dirnode
from allmydata.interfaces import IURI, IMutableFileNode, IFileNode
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
from allmydata.util import log
class FakeCHKFileNode:
def download(self, target):
if self.my_uri not in self.all_contents:
- f = failure.Failure(NotEnoughPeersError())
+ f = failure.Failure(NotEnoughSharesError())
target.fail(f)
return defer.fail(f)
data = self.all_contents[self.my_uri]
return defer.maybeDeferred(target.finish)
def download_to_data(self):
if self.my_uri not in self.all_contents:
- return defer.fail(NotEnoughPeersError())
+ return defer.fail(NotEnoughSharesError())
data = self.all_contents[self.my_uri]
return defer.succeed(data)
def get_size(self):
d = self.send_and_recover((4,8,10), AVAILABLE_SHARES=2)
def _done(res):
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(download.NotEnoughPeersError))
+ self.failUnless(res.check(download.NotEnoughSharesError))
d.addBoth(_done)
return d
d = self.send_and_recover((4,8,10), bucket_modes=modemap)
def _done(res):
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(download.NotEnoughPeersError))
+ self.failUnless(res.check(download.NotEnoughSharesError))
d.addBoth(_done)
return d
d = self.send_and_recover((4,8,10), bucket_modes=modemap)
def _done(res):
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(download.NotEnoughPeersError))
+ self.failUnless(res.check(download.NotEnoughSharesError))
d.addBoth(_done)
return d
d = self.send_and_recover((4,8,10), bucket_modes=modemap)
def _done(res):
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(download.NotEnoughPeersError))
+ self.failUnless(res.check(download.NotEnoughSharesError))
d.addBoth(_done)
return d
d = self.send_and_recover((4,8,10), bucket_modes=modemap)
def _done(res):
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(download.NotEnoughPeersError))
+ self.failUnless(res.check(download.NotEnoughSharesError))
d.addBoth(_done)
return d
d = self.send_and_recover((4,8,10), bucket_modes=modemap)
def _done(res):
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(encode.NotEnoughPeersError), res)
+ self.failUnless(res.check(encode.NotEnoughSharesError), res)
d.addBoth(_done)
return d
d = self.send_and_recover((4,8,10), bucket_modes=modemap)
def _done(res):
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(encode.NotEnoughPeersError))
+ self.failUnless(res.check(encode.NotEnoughSharesError))
d.addBoth(_done)
return d
from allmydata.util import base32
from allmydata.util.idlib import shortnodeid_b2a
from allmydata.util.hashutil import tagged_hash
-from allmydata.encode import NotEnoughPeersError
+from allmydata.encode import NotEnoughSharesError
from allmydata.interfaces import IURI, IMutableFileURI, IUploadable
from foolscap.eventual import eventually, fireEventually
from foolscap.logging import log
# TODO: when we publish to 20 peers, we should get one share per peer on 10
# when we publish to 3 peers, we should get either 3 or 4 shares per peer
- # when we publish to zero peers, we should get a NotEnoughPeersError
+ # when we publish to zero peers, we should get a NotEnoughSharesError
class Servermap(unittest.TestCase):
def setUp(self):
self.failUnlessEqual(new_contents, self.CONTENTS))
return d1
else:
- return self.shouldFail(NotEnoughPeersError,
+ return self.shouldFail(NotEnoughSharesError,
"_corrupt_all(offset=%s)" % (offset,),
substring,
r.download)
log.msg("finished downloading non-existend URI",
level=log.UNUSUAL, facility="tahoe.tests")
self.failUnless(isinstance(res, Failure))
- self.failUnless(res.check(download.NotEnoughPeersError),
- "expected NotEnoughPeersError, got %s" % res)
+ self.failUnless(res.check(download.NotEnoughSharesError),
+ "expected NotEnoughSharesError, got %s" % res)
# TODO: files that have zero peers should get a special kind
- # of NotEnoughPeersError, which can be used to suggest that
+ # of NotEnoughSharesError, which can be used to suggest that
# the URI might be wrong or that they've never uploaded the
# file in the first place.
d1.addBoth(_baduri_should_fail)
self.u.parent = self.node
def _should_fail(self, f):
- self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughPeersError), f)
+ self.failUnless(isinstance(f, Failure) and f.check(encode.NotEnoughSharesError), f)
def test_data_large(self):
data = DATA
base = "/uri/%s" % self._bad_file_uri
d = self.GET(base)
d.addBoth(self.shouldHTTPError, "test_GET_URI_URL_missing",
- http.GONE, response_substring="NotEnoughPeersError")
+ http.GONE, response_substring="NotEnoughSharesError")
# TODO: how can we exercise both sides of WebDownloadTarget.fail
# here? we must arrange for a download to fail after target.open()
# has been called, and then inspect the response to see that it is
peers = client.get_permuted_peers("storage", storage_index)
if not peers:
- raise encode.NotEnoughPeersError("client gave us zero peers")
+ raise encode.NotEnoughSharesError("client gave us zero peers")
# figure out how much space to ask for
if self.last_failure_msg:
msg += " (%s)" % (self.last_failure_msg,)
log.msg(msg, level=log.UNUSUAL, parent=self._log_parent)
- raise encode.NotEnoughPeersError(msg)
+ raise encode.NotEnoughSharesError(msg)
else:
# we placed enough to be happy, so we're done
if self._status: