from allmydata.immutable import encode
from allmydata.util import base32, dictutil, idlib, log, mathutil
from allmydata.util.happinessutil import servers_of_happiness, \
- shares_by_server, merge_peers, \
+ shares_by_server, merge_servers, \
failure_message
from allmydata.util.assertutil import precondition
from allmydata.util.rrefutil import add_version_to_remote_reference
from allmydata.interfaces import IUploadable, IUploader, IUploadResults, \
IEncryptedUploadable, RIEncryptedUploadable, IUploadStatus, \
- NoServersError, InsufficientVersionError, UploadUnhappinessError
+ NoServersError, InsufficientVersionError, UploadUnhappinessError, \
+ DEFAULT_MAX_SEGMENT_SIZE
from allmydata.immutable import layout
from pycryptopp.cipher.aes import AES
from cStringIO import StringIO
-KiB=1024
-MiB=1024*KiB
-GiB=1024*MiB
-TiB=1024*GiB
-PiB=1024*TiB
-
-class HaveAllPeersError(Exception):
- # we use this to jump out of the loop
- pass
-
# this wants to live in storage, not here
class TooFullError(Exception):
pass
def pretty_print_shnum_to_servers(s):
return ', '.join([ "sh%s: %s" % (k, '+'.join([idlib.shortnodeid_b2a(x) for x in v])) for k, v in s.iteritems() ])
-class PeerTracker:
- def __init__(self, peerid, storage_server,
+class ServerTracker:
+ def __init__(self, server,
sharesize, blocksize, num_segments, num_share_hashes,
storage_index,
bucket_renewal_secret, bucket_cancel_secret):
- precondition(isinstance(peerid, str), peerid)
- precondition(len(peerid) == 20, peerid)
- self.peerid = peerid
- self._storageserver = storage_server # to an RIStorageServer
+ self._server = server
self.buckets = {} # k: shareid, v: IRemoteBucketWriter
self.sharesize = sharesize
- wbp = layout.make_write_bucket_proxy(None, sharesize,
+ wbp = layout.make_write_bucket_proxy(None, None, sharesize,
blocksize, num_segments,
num_share_hashes,
- EXTENSION_SIZE, peerid)
+ EXTENSION_SIZE)
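+ # a throwaway proxy (no rref, no server), used only for size
+ # calculations and as a template for the real proxies below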
self.wbp_class = wbp.__class__ # to create more of them
self.allocated_size = wbp.get_allocated_size()
self.blocksize = blocksize
self.cancel_secret = bucket_cancel_secret
def __repr__(self):
- return ("<PeerTracker for peer %s and SI %s>"
- % (idlib.shortnodeid_b2a(self.peerid),
- si_b2a(self.storage_index)[:5]))
+ return ("<ServerTracker for server %s and SI %s>"
+ % (self._server.get_name(), si_b2a(self.storage_index)[:5]))
+
+ def get_serverid(self):
+ return self._server.get_serverid()
+ def get_name(self):
+ return self._server.get_name()
def query(self, sharenums):
- d = self._storageserver.callRemote("allocate_buckets",
- self.storage_index,
- self.renew_secret,
- self.cancel_secret,
- sharenums,
- self.allocated_size,
- canary=Referenceable())
+ rref = self._server.get_rref()
+ d = rref.callRemote("allocate_buckets",
+ self.storage_index,
+ self.renew_secret,
+ self.cancel_secret,
+ sharenums,
+ self.allocated_size,
+ canary=Referenceable())
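+ # the reply is (alreadygot, buckets): shnums this server already
+ # holds, plus writers for the shares it has just allocated for us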
d.addCallback(self._got_reply)
return d
def ask_about_existing_shares(self):
- return self._storageserver.callRemote("get_buckets",
- self.storage_index)
+ rref = self._server.get_rref()
+ return rref.callRemote("get_buckets", self.storage_index)
def _got_reply(self, (alreadygot, buckets)):
#log.msg("%s._got_reply(%s)" % (self, (alreadygot, buckets)))
b = {}
for sharenum, rref in buckets.iteritems():
- bp = self.wbp_class(rref, self.sharesize,
+ bp = self.wbp_class(rref, self._server, self.sharesize,
self.blocksize,
self.num_segments,
self.num_share_hashes,
- EXTENSION_SIZE,
- self.peerid)
+ EXTENSION_SIZE)
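+ # wrap each newly allocated remote bucket in a write proxy, keyed by shnum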
b[sharenum] = bp
self.buckets.update(b)
return (alreadygot, set(b.keys()))
def str_shareloc(shnum, bucketwriter):
- return "%s: %s" % (shnum, idlib.shortnodeid_b2a(bucketwriter._nodeid),)
+ return "%s: %s" % (shnum, bucketwriter.get_servername(),)
-class Tahoe2PeerSelector(log.PrefixingLogMixin):
+class Tahoe2ServerSelector(log.PrefixingLogMixin):
def __init__(self, upload_id, logparent=None, upload_status=None):
self.upload_id = upload_id
self.query_count, self.good_query_count, self.bad_query_count = 0,0,0
- # Peers that are working normally, but full.
+ # Servers that are working normally, but full.
self.full_count = 0
self.error_count = 0
- self.num_peers_contacted = 0
+ self.num_servers_contacted = 0
self.last_failure_msg = None
self._status = IUploadStatus(upload_status)
log.PrefixingLogMixin.__init__(self, 'tahoe.immutable.upload', logparent, prefix=upload_id)
self.log("starting", level=log.OPERATIONAL)
def __repr__(self):
- return "<Tahoe2PeerSelector for upload %s>" % self.upload_id
+ return "<Tahoe2ServerSelector for upload %s>" % self.upload_id
def get_shareholders(self, storage_broker, secret_holder,
storage_index, share_size, block_size,
num_segments, total_shares, needed_shares,
servers_of_happiness):
"""
- @return: (upload_servers, already_peers), where upload_servers is a set of
- PeerTracker instances that have agreed to hold some shares
- for us (the shareids are stashed inside the PeerTracker),
- and already_peers is a dict mapping shnum to a set of peers
- which claim to already have the share.
+ @return: (upload_trackers, already_serverids), where upload_trackers
+ is a set of ServerTracker instances that have agreed to hold
+ some shares for us (the shareids are stashed inside the
+ ServerTracker), and already_serverids is a dict mapping
+ shnum to a set of serverids for servers which claim to
+ already have the share.
"""
if self._status:
- self._status.set_status("Contacting Peers..")
+ self._status.set_status("Contacting Servers..")
self.total_shares = total_shares
self.servers_of_happiness = servers_of_happiness
self.needed_shares = needed_shares
self.homeless_shares = set(range(total_shares))
- self.contacted_peers = [] # peers worth asking again
- self.contacted_peers2 = [] # peers that we have asked again
- self._started_second_pass = False
- self.use_peers = set() # PeerTrackers that have shares assigned to them
- self.preexisting_shares = {} # shareid => set(peerids) holding shareid
- # We don't try to allocate shares to these servers, since they've said
- # that they're incapable of storing shares of the size that we'd want
- # to store. We keep them around because they may have existing shares
- # for this storage index, which we want to know about for accurate
- # servers_of_happiness accounting
- # (this is eventually a list, but it is initialized later)
- self.readonly_peers = None
- # These peers have shares -- any shares -- for our SI. We keep
+ self.use_trackers = set() # ServerTrackers that have shares assigned
+ # to them
+ self.preexisting_shares = {} # shareid => set(serverids) holding shareid
+
+ # These servers have shares -- any shares -- for our SI. We keep
# track of these to write an error message with them later.
- self.peers_with_shares = set()
+ self.serverids_with_shares = set()
# this needed_hashes computation should mirror
# Encoder.send_all_share_hash_trees. We use an IncompleteHashTree
num_share_hashes = len(ht.needed_hashes(0, include_leaf=True))
# figure out how much space to ask for
- wbp = layout.make_write_bucket_proxy(None, share_size, 0, num_segments,
- num_share_hashes, EXTENSION_SIZE,
- None)
+ wbp = layout.make_write_bucket_proxy(None, None,
+ share_size, 0, num_segments,
+ num_share_hashes, EXTENSION_SIZE)
allocated_size = wbp.get_allocated_size()
- all_peers = storage_broker.get_servers_for_index(storage_index)
- if not all_peers:
- raise NoServersError("client gave us zero peers")
+ all_servers = storage_broker.get_servers_for_psi(storage_index)
+ if not all_servers:
+ raise NoServersError("client gave us zero servers")
- # filter the list of peers according to which ones can accomodate
- # this request. This excludes older peers (which used a 4-byte size
+ # filter the list of servers according to which ones can accommodate
+ # this request. This excludes older servers (which used a 4-byte size
# field) from getting large shares (for files larger than about
# 12GiB). See #439 for details.
- def _get_maxsize(peer):
- (peerid, conn) = peer
- v1 = conn.version["http://allmydata.org/tahoe/protocols/storage/v1"]
+ def _get_maxsize(server):
+ v0 = server.get_rref().version
+ v1 = v0["http://allmydata.org/tahoe/protocols/storage/v1"]
return v1["maximum-immutable-share-size"]
- writable_peers = [peer for peer in all_peers
- if _get_maxsize(peer) >= allocated_size]
- readonly_peers = set(all_peers[:2*total_shares]) - set(writable_peers)
+ writeable_servers = [server for server in all_servers
+ if _get_maxsize(server) >= allocated_size]
+ readonly_servers = set(all_servers[:2*total_shares]) - set(writeable_servers)
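+ # (readonly_servers: of the first 2*total_shares candidate servers,
+ # those that cannot accept a share of this size)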
# decide upon the renewal/cancel secrets, to include them in the
# allocate_buckets query.
storage_index)
file_cancel_secret = file_cancel_secret_hash(client_cancel_secret,
storage_index)
- def _make_trackers(peers):
- return [PeerTracker(peerid, conn,
- share_size, block_size,
- num_segments, num_share_hashes,
- storage_index,
- bucket_renewal_secret_hash(file_renewal_secret,
- peerid),
- bucket_cancel_secret_hash(file_cancel_secret,
- peerid))
- for (peerid, conn) in peers]
- self.uncontacted_peers = _make_trackers(writable_peers)
- self.readonly_peers = _make_trackers(readonly_peers)
- # We now ask peers that can't hold any new shares about existing
+ def _make_trackers(servers):
+ trackers = []
+ for s in servers:
+ seed = s.get_lease_seed()
+ renew = bucket_renewal_secret_hash(file_renewal_secret, seed)
+ cancel = bucket_cancel_secret_hash(file_cancel_secret, seed)
+ st = ServerTracker(s,
+ share_size, block_size,
+ num_segments, num_share_hashes,
+ storage_index,
+ renew, cancel)
+ trackers.append(st)
+ return trackers
+
+ # We assign each server/tracker to one of three lists. They all
+ # start in the "first pass" list. During the first pass, as we ask
+ # each one to hold a share, we move their tracker to the "second
+ # pass" list, until the first-pass list is empty. Then during the
+ # second pass, as we ask each to hold more shares, we move their
+ # tracker to the "next pass" list, until the second-pass list is
+ # empty. Then we move everybody from the next-pass list back to the
+ # second-pass list and repeat the "second" pass (really the third,
+ # fourth, etc pass), until all shares are assigned, or we've run out
+ # of potential servers.
+ self.first_pass_trackers = _make_trackers(writeable_servers)
+ self.second_pass_trackers = [] # servers worth asking again
+ self.next_pass_trackers = [] # servers that we have asked again
+ self._started_second_pass = False
+
+ # We don't try to allocate shares to these servers, since they've
+ # said that they're incapable of storing shares of the size that we'd
+ # want to store. We ask them about existing shares for this storage
+ # index, which we want to know about for accurate
+ # servers_of_happiness accounting, then we forget about them.
+ readonly_trackers = _make_trackers(readonly_servers)
+
+ # We now ask servers that can't hold any new shares about existing
# shares that they might have for our SI. Once this is done, we
# start placing the shares that we haven't already accounted
# for.
ds = []
- if self._status and self.readonly_peers:
- self._status.set_status("Contacting readonly peers to find "
+ if self._status and readonly_trackers:
+ self._status.set_status("Contacting readonly servers to find "
"any existing shares")
- for peer in self.readonly_peers:
- assert isinstance(peer, PeerTracker)
- d = peer.ask_about_existing_shares()
- d.addBoth(self._handle_existing_response, peer.peerid)
+ for tracker in readonly_trackers:
+ assert isinstance(tracker, ServerTracker)
+ d = tracker.ask_about_existing_shares()
+ d.addBoth(self._handle_existing_response, tracker)
ds.append(d)
- self.num_peers_contacted += 1
+ self.num_servers_contacted += 1
self.query_count += 1
- self.log("asking peer %s for any existing shares" %
- (idlib.shortnodeid_b2a(peer.peerid),),
- level=log.NOISY)
+ self.log("asking server %s for any existing shares" %
+ (tracker.get_name(),), level=log.NOISY)
dl = defer.DeferredList(ds)
dl.addCallback(lambda ign: self._loop())
return dl
- def _handle_existing_response(self, res, peer):
+ def _handle_existing_response(self, res, tracker):
"""
I handle responses to the queries sent by
- Tahoe2PeerSelector._existing_shares.
+ Tahoe2ServerSelector._existing_shares.
"""
+ serverid = tracker.get_serverid()
if isinstance(res, failure.Failure):
self.log("%s got error during existing shares check: %s"
- % (idlib.shortnodeid_b2a(peer), res),
- level=log.UNUSUAL)
+ % (tracker.get_name(), res), level=log.UNUSUAL)
self.error_count += 1
self.bad_query_count += 1
else:
buckets = res
if buckets:
- self.peers_with_shares.add(peer)
- self.log("response to get_buckets() from peer %s: alreadygot=%s"
- % (idlib.shortnodeid_b2a(peer), tuple(sorted(buckets))),
+ self.serverids_with_shares.add(serverid)
+ self.log("response to get_buckets() from server %s: alreadygot=%s"
+ % (tracker.get_name(), tuple(sorted(buckets))),
level=log.NOISY)
for bucket in buckets:
- self.preexisting_shares.setdefault(bucket, set()).add(peer)
+ self.preexisting_shares.setdefault(bucket, set()).add(serverid)
self.homeless_shares.discard(bucket)
self.full_count += 1
self.bad_query_count += 1
len(self.homeless_shares)))
return (msg + "want to place shares on at least %d servers such that "
"any %d of them have enough shares to recover the file, "
- "sent %d queries to %d peers, "
+ "sent %d queries to %d servers, "
"%d queries placed some shares, %d placed none "
"(of which %d placed none due to the server being"
" full and %d placed none due to an error)" %
(self.servers_of_happiness, self.needed_shares,
- self.query_count, self.num_peers_contacted,
+ self.query_count, self.num_servers_contacted,
self.good_query_count, self.bad_query_count,
self.full_count, self.error_count))
def _loop(self):
if not self.homeless_shares:
- merged = merge_peers(self.preexisting_shares, self.use_peers)
+ merged = merge_servers(self.preexisting_shares, self.use_trackers)
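+ # merged maps each shnum to the set of serverids that hold it or
+ # have agreed to hold it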
effective_happiness = servers_of_happiness(merged)
if self.servers_of_happiness <= effective_happiness:
msg = ("server selection successful for %s: %s: pretty_print_merged: %s, "
- "self.use_peers: %s, self.preexisting_shares: %s") \
- % (self, self._get_progress_message(),
- pretty_print_shnum_to_servers(merged),
- [', '.join([str_shareloc(k,v) for k,v in p.buckets.iteritems()])
- for p in self.use_peers],
- pretty_print_shnum_to_servers(self.preexisting_shares))
+ "self.use_trackers: %s, self.preexisting_shares: %s") \
+ % (self, self._get_progress_message(),
+ pretty_print_shnum_to_servers(merged),
+ [', '.join([str_shareloc(k,v)
+ for k,v in st.buckets.iteritems()])
+ for st in self.use_trackers],
+ pretty_print_shnum_to_servers(self.preexisting_shares))
self.log(msg, level=log.OPERATIONAL)
- return (self.use_peers, self.preexisting_shares)
+ return (self.use_trackers, self.preexisting_shares)
else:
# We're not okay right now, but maybe we can fix it by
# redistributing some shares. In cases where one or two
# servers has, before the upload, all or most of the
# shares for a given SI, this can work by allowing _loop
- # a chance to spread those out over the other peers,
+ # a chance to spread those out over the other servers,
delta = self.servers_of_happiness - effective_happiness
shares = shares_by_server(self.preexisting_shares)
# Each server in shares maps to a set of shares stored on it.
shares_to_spread = sum([len(list(sharelist)) - 1
for (server, sharelist)
in shares.items()])
- if delta <= len(self.uncontacted_peers) and \
+ if delta <= len(self.first_pass_trackers) and \
shares_to_spread >= delta:
items = shares.items()
while len(self.homeless_shares) < delta:
if not self.preexisting_shares[share]:
del self.preexisting_shares[share]
items.append((server, sharelist))
- for writer in self.use_peers:
+ for writer in self.use_trackers:
writer.abort_some_buckets(self.homeless_shares)
return self._loop()
else:
# Redistribution won't help us; fail.
- peer_count = len(self.peers_with_shares)
- failmsg = failure_message(peer_count,
- self.needed_shares,
- self.servers_of_happiness,
- effective_happiness)
+ server_count = len(self.serverids_with_shares)
+ failmsg = failure_message(server_count,
+ self.needed_shares,
+ self.servers_of_happiness,
+ effective_happiness)
servmsgtempl = "server selection unsuccessful for %r: %s (%s), merged=%s"
servmsg = servmsgtempl % (
self,
self.log(servmsg, level=log.INFREQUENT)
return self._failed("%s (%s)" % (failmsg, self._get_progress_message()))
- if self.uncontacted_peers:
- peer = self.uncontacted_peers.pop(0)
- # TODO: don't pre-convert all peerids to PeerTrackers
- assert isinstance(peer, PeerTracker)
+ if self.first_pass_trackers:
+ tracker = self.first_pass_trackers.pop(0)
+ # TODO: don't pre-convert all serverids to ServerTrackers
+ assert isinstance(tracker, ServerTracker)
shares_to_ask = set(sorted(self.homeless_shares)[:1])
self.homeless_shares -= shares_to_ask
self.query_count += 1
- self.num_peers_contacted += 1
+ self.num_servers_contacted += 1
if self._status:
- self._status.set_status("Contacting Peers [%s] (first query),"
+ self._status.set_status("Contacting Servers [%s] (first query),"
" %d shares left.."
- % (idlib.shortnodeid_b2a(peer.peerid),
+ % (tracker.get_name(),
len(self.homeless_shares)))
- d = peer.query(shares_to_ask)
- d.addBoth(self._got_response, peer, shares_to_ask,
- self.contacted_peers)
+ d = tracker.query(shares_to_ask)
+ d.addBoth(self._got_response, tracker, shares_to_ask,
+ self.second_pass_trackers)
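+ # if the server accepts everything we asked for, _got_response
+ # will move its tracker into second_pass_trackers so that we can
+ # ask it again later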
return d
- elif self.contacted_peers:
- # ask a peer that we've already asked.
+ elif self.second_pass_trackers:
+ # ask a server that we've already asked.
if not self._started_second_pass:
self.log("starting second pass",
level=log.NOISY)
self._started_second_pass = True
num_shares = mathutil.div_ceil(len(self.homeless_shares),
- len(self.contacted_peers))
- peer = self.contacted_peers.pop(0)
+ len(self.second_pass_trackers))
+ tracker = self.second_pass_trackers.pop(0)
shares_to_ask = set(sorted(self.homeless_shares)[:num_shares])
self.homeless_shares -= shares_to_ask
self.query_count += 1
if self._status:
- self._status.set_status("Contacting Peers [%s] (second query),"
+ self._status.set_status("Contacting Servers [%s] (second query),"
" %d shares left.."
- % (idlib.shortnodeid_b2a(peer.peerid),
+ % (tracker.get_name(),
len(self.homeless_shares)))
- d = peer.query(shares_to_ask)
- d.addBoth(self._got_response, peer, shares_to_ask,
- self.contacted_peers2)
+ d = tracker.query(shares_to_ask)
+ d.addBoth(self._got_response, tracker, shares_to_ask,
+ self.next_pass_trackers)
return d
- elif self.contacted_peers2:
+ elif self.next_pass_trackers:
# we've finished the second-or-later pass. Move all the remaining
- # peers back into self.contacted_peers for the next pass.
- self.contacted_peers.extend(self.contacted_peers2)
- self.contacted_peers2[:] = []
+ # servers back into self.second_pass_trackers for the next pass.
+ self.second_pass_trackers.extend(self.next_pass_trackers)
+ self.next_pass_trackers[:] = []
return self._loop()
else:
- # no more peers. If we haven't placed enough shares, we fail.
- merged = merge_peers(self.preexisting_shares, self.use_peers)
+ # no more servers. If we haven't placed enough shares, we fail.
+ merged = merge_servers(self.preexisting_shares, self.use_trackers)
effective_happiness = servers_of_happiness(merged)
if effective_happiness < self.servers_of_happiness:
- msg = failure_message(len(self.peers_with_shares),
+ msg = failure_message(len(self.serverids_with_shares),
self.needed_shares,
self.servers_of_happiness,
effective_happiness)
- msg = ("peer selection failed for %s: %s (%s)" % (self,
- msg,
- self._get_progress_message()))
+ msg = ("server selection failed for %s: %s (%s)" %
+ (self, msg, self._get_progress_message()))
if self.last_failure_msg:
msg += " (%s)" % (self.last_failure_msg,)
self.log(msg, level=log.UNUSUAL)
msg = ("server selection successful (no more servers) for %s: %s: %s" % (self,
self._get_progress_message(), pretty_print_shnum_to_servers(merged)))
self.log(msg, level=log.OPERATIONAL)
- return (self.use_peers, self.preexisting_shares)
+ return (self.use_trackers, self.preexisting_shares)
- def _got_response(self, res, peer, shares_to_ask, put_peer_here):
+ def _got_response(self, res, tracker, shares_to_ask, put_tracker_here):
if isinstance(res, failure.Failure):
# This is unusual, and probably indicates a bug or a network
# problem.
- self.log("%s got error during peer selection: %s" % (peer, res),
+ self.log("%s got error during server selection: %s" % (tracker, res),
level=log.UNUSUAL)
self.error_count += 1
self.bad_query_count += 1
self.homeless_shares |= shares_to_ask
- if (self.uncontacted_peers
- or self.contacted_peers
- or self.contacted_peers2):
+ if (self.first_pass_trackers
+ or self.second_pass_trackers
+ or self.next_pass_trackers):
# there is still hope, so just loop
pass
else:
- # No more peers, so this upload might fail (it depends upon
+ # No more servers, so this upload might fail (it depends upon
# whether we've hit servers_of_happiness or not). Log the last
- # failure we got: if a coding error causes all peers to fail
+ # failure we got: if a coding error causes all servers to fail
# in the same way, this allows the common failure to be seen
# by the uploader and should help with debugging
- msg = ("last failure (from %s) was: %s" % (peer, res))
+ msg = ("last failure (from %s) was: %s" % (tracker, res))
self.last_failure_msg = msg
else:
(alreadygot, allocated) = res
- self.log("response to allocate_buckets() from peer %s: alreadygot=%s, allocated=%s"
- % (idlib.shortnodeid_b2a(peer.peerid),
+ self.log("response to allocate_buckets() from server %s: alreadygot=%s, allocated=%s"
+ % (tracker.get_name(),
tuple(sorted(alreadygot)), tuple(sorted(allocated))),
level=log.NOISY)
progress = False
for s in alreadygot:
- self.preexisting_shares.setdefault(s, set()).add(peer.peerid)
+ self.preexisting_shares.setdefault(s, set()).add(tracker.get_serverid())
if s in self.homeless_shares:
self.homeless_shares.remove(s)
progress = True
elif s in shares_to_ask:
progress = True
- # the PeerTracker will remember which shares were allocated on
+ # the ServerTracker will remember which shares were allocated on
- # that peer. We just have to remember to use them.
+ # that server. We just have to remember to use them.
if allocated:
- self.use_peers.add(peer)
+ self.use_trackers.add(tracker)
progress = True
if allocated or alreadygot:
- self.peers_with_shares.add(peer.peerid)
+ self.serverids_with_shares.add(tracker.get_serverid())
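+ # any shares we asked for that were neither already present nor
+ # newly allocated are still homeless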
not_yet_present = set(shares_to_ask) - set(alreadygot)
still_homeless = not_yet_present - set(allocated)
if still_homeless:
# In networks with lots of space, this is very unusual and
- # probably indicates an error. In networks with peers that
+ # probably indicates an error. In networks with servers that
# are full, it is merely unusual. In networks that are very
# full, it is common, and many uploads will fail. In most
# cases, this is obviously not fatal, and we'll just use some
- # other peers.
+ # other servers.
# some shares are still homeless, keep trying to find them a
# home. The ones that were rejected get first priority.
else:
# if they *were* able to accept everything, they might be
# willing to accept even more.
- put_peer_here.append(peer)
+ put_tracker_here.append(tracker)
# now loop
return self._loop()
def _failed(self, msg):
"""
- I am called when peer selection fails. I first abort all of the
+ I am called when server selection fails. I first abort all of the
remote buckets that I allocated during my unsuccessful attempt to
place shares for this file. I then raise an
UploadUnhappinessError with my msg argument.
"""
- for peer in self.use_peers:
- assert isinstance(peer, PeerTracker)
-
- peer.abort()
-
+ for tracker in self.use_trackers:
+ assert isinstance(tracker, ServerTracker)
+ tracker.abort()
raise UploadUnhappinessError(msg)
self.results = value
class CHKUploader:
- peer_selector_class = Tahoe2PeerSelector
+ server_selector_class = Tahoe2ServerSelector
def __init__(self, storage_broker, secret_holder):
- # peer_selector needs storage_broker and secret_holder
+ # server_selector needs storage_broker and secret_holder
self._storage_broker = storage_broker
self._secret_holder = secret_holder
self._log_number = self.log("CHKUploader starting", parent=None)
self._upload_status.set_results(self._results)
# locate_all_shareholders() will create the following attribute:
- # self._peer_trackers = {} # k: shnum, v: instance of PeerTracker
+ # self._server_trackers = {} # k: shnum, v: instance of ServerTracker
def log(self, *args, **kwargs):
if "parent" not in kwargs:
return d
def locate_all_shareholders(self, encoder, started):
- peer_selection_started = now = time.time()
+ server_selection_started = now = time.time()
self._storage_index_elapsed = now - started
storage_broker = self._storage_broker
secret_holder = self._secret_holder
self._storage_index = storage_index
upload_id = si_b2a(storage_index)[:5]
self.log("using storage index %s" % upload_id)
- peer_selector = self.peer_selector_class(upload_id, self._log_number,
- self._upload_status)
+ server_selector = self.server_selector_class(upload_id,
+ self._log_number,
+ self._upload_status)
share_size = encoder.get_param("share_size")
block_size = encoder.get_param("block_size")
num_segments = encoder.get_param("num_segments")
k,desired,n = encoder.get_param("share_counts")
- self._peer_selection_started = time.time()
- d = peer_selector.get_shareholders(storage_broker, secret_holder,
- storage_index,
- share_size, block_size,
- num_segments, n, k, desired)
+ self._server_selection_started = time.time()
+ d = server_selector.get_shareholders(storage_broker, secret_holder,
+ storage_index,
+ share_size, block_size,
+ num_segments, n, k, desired)
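+ # argument order: n is total_shares, k is needed_shares, and
+ # desired is servers_of_happiness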
def _done(res):
- self._peer_selection_elapsed = time.time() - peer_selection_started
+ self._server_selection_elapsed = time.time() - server_selection_started
return res
d.addCallback(_done)
return d
- def set_shareholders(self, (upload_servers, already_peers), encoder):
+ def set_shareholders(self, (upload_trackers, already_serverids), encoder):
"""
- @param upload_servers: a sequence of PeerTracker objects that have agreed to hold some
- shares for us (the shareids are stashed inside the PeerTracker)
- @paran already_peers: a dict mapping sharenum to a set of peerids
- that claim to already have this share
+ @param upload_trackers: a sequence of ServerTracker objects that
+ have agreed to hold some shares for us (the
+ shareids are stashed inside the ServerTracker)
+
+ @param already_serverids: a dict mapping sharenum to a set of
+ serverids for servers that claim to already
+ have this share
"""
- msgtempl = "set_shareholders; upload_servers is %s, already_peers is %s"
- values = ([', '.join([str_shareloc(k,v) for k,v in p.buckets.iteritems()])
- for p in upload_servers], already_peers)
+ msgtempl = "set_shareholders; upload_trackers is %s, already_serverids is %s"
+ values = ([', '.join([str_shareloc(k,v)
+ for k,v in st.buckets.iteritems()])
+ for st in upload_trackers], already_serverids)
self.log(msgtempl % values, level=log.OPERATIONAL)
# record already-present shares in self._results
- self._results.preexisting_shares = len(already_peers)
+ self._results.preexisting_shares = len(already_serverids)
- self._peer_trackers = {} # k: shnum, v: instance of PeerTracker
- for peer in upload_servers:
- assert isinstance(peer, PeerTracker)
+ self._server_trackers = {} # k: shnum, v: instance of ServerTracker
+ for tracker in upload_trackers:
+ assert isinstance(tracker, ServerTracker)
buckets = {}
- servermap = already_peers.copy()
- for peer in upload_servers:
- buckets.update(peer.buckets)
- for shnum in peer.buckets:
- self._peer_trackers[shnum] = peer
- servermap.setdefault(shnum, set()).add(peer.peerid)
- assert len(buckets) == sum([len(peer.buckets) for peer in upload_servers]), \
+ servermap = already_serverids.copy()
+ for tracker in upload_trackers:
+ buckets.update(tracker.buckets)
+ for shnum in tracker.buckets:
+ self._server_trackers[shnum] = tracker
+ servermap.setdefault(shnum, set()).add(tracker.get_serverid())
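+ # servermap: shnum -> set(serverids), covering both pre-existing
+ # shares and the shares we are about to write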
+ assert len(buckets) == sum([len(tracker.buckets)
+ for tracker in upload_trackers]), \
"%s (%s) != %s (%s)" % (
len(buckets),
buckets,
- sum([len(peer.buckets) for peer in upload_servers]),
- [(p.buckets, p.peerid) for p in upload_servers]
+ sum([len(tracker.buckets) for tracker in upload_trackers]),
+ [(t.buckets, t.get_serverid()) for t in upload_trackers]
)
encoder.set_shareholders(buckets, servermap)
""" Returns a Deferred that will fire with the UploadResults instance. """
r = self._results
for shnum in self._encoder.get_shares_placed():
- peer_tracker = self._peer_trackers[shnum]
- peerid = peer_tracker.peerid
- r.sharemap.add(shnum, peerid)
- r.servermap.add(peerid, shnum)
+ server_tracker = self._server_trackers[shnum]
+ serverid = server_tracker.get_serverid()
+ r.sharemap.add(shnum, serverid)
+ r.servermap.add(serverid, shnum)
r.pushed_shares = len(self._encoder.get_shares_placed())
now = time.time()
r.file_size = self._encoder.file_size
r.timings["total"] = now - self._started
r.timings["storage_index"] = self._storage_index_elapsed
- r.timings["peer_selection"] = self._peer_selection_elapsed
+ r.timings["peer_selection"] = self._server_selection_elapsed
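+ # (still recorded under the legacy "peer_selection" key)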
r.timings.update(self._encoder.get_times())
r.uri_extension_data = self._encoder.get_uri_extension_data()
r.verifycapstr = verifycap.to_string()
return self._upload_status
class BaseUploadable:
- default_max_segment_size = 128*KiB # overridden by max_segment_size
+ # this is overridden by max_segment_size
+ default_max_segment_size = DEFAULT_MAX_SEGMENT_SIZE
default_encoding_param_k = 3 # overridden by encoding_parameters
default_encoding_param_happy = 7
default_encoding_param_n = 10
name = "uploader"
URI_LIT_SIZE_THRESHOLD = 55
- def __init__(self, helper_furl=None, stats_provider=None):
+ def __init__(self, helper_furl=None, stats_provider=None, history=None):
self._helper_furl = helper_furl
self.stats_provider = stats_provider
+ self._history = history
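+ # history is now supplied at construction time; upload() no longer
+ # takes a history= argument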
self._helper = None
self._all_uploads = weakref.WeakKeyDictionary() # for debugging
log.PrefixingLogMixin.__init__(self, facility="tahoe.immutable.upload")
return (self._helper_furl, bool(self._helper))
- def upload(self, uploadable, history=None):
+ def upload(self, uploadable):
"""
Returns a Deferred that will fire with the UploadResults instance.
"""
d2.addCallback(lambda x: uploader.start(eu))
self._all_uploads[uploader] = None
- if history:
- history.add_upload(uploader.get_upload_status())
+ if self._history:
+ self._history.add_upload(uploader.get_upload_status())
def turn_verifycap_into_read_cap(uploadresults):
# Generate the uri from the verifycap plus the key.
d3 = uploadable.get_encryption_key()