+# -*- coding: utf-8 -*-
import os, shutil
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.python.failure import Failure
-from twisted.python import log
from twisted.internet import defer
from foolscap.api import fireEventually
from allmydata import uri, monitor, client
from allmydata.immutable import upload, encode
from allmydata.interfaces import FileTooLargeError, UploadUnhappinessError
+from allmydata.util import log, base32
from allmydata.util.assertutil import precondition
from allmydata.util.deferredutil import DeferredListShouldSucceed
+from allmydata.test.no_network import GridTestMixin
+from allmydata.test.common_util import ShouldFailMixin
from allmydata.util.happinessutil import servers_of_happiness, \
- shares_by_server, merge_peers
-from no_network import GridTestMixin
-from common_util import ShouldFailMixin
+ shares_by_server, merge_servers
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.server import storage_index_to_dir
+from allmydata.client import Client
MiB = 1024*1024
def extract_uri(results):
- return results.uri
+ return results.get_uri()
# Some of these took longer than 480 seconds on Zandr's arm box, but this may
# have been due to an earlier test ERROR'ing out due to timeout, which seems
"n": n,
"max_segment_size": max_segsize,
}
- self.node.DEFAULT_ENCODING_PARAMETERS = p
+ self.node.encoding_params = p
class FakeStorageServer:
def __init__(self, mode):
self.allocated = []
self.queries = 0
self.version = { "http://allmydata.org/tahoe/protocols/storage/v1" :
- { "maximum-immutable-share-size": 2**32 },
+ { "maximum-immutable-share-size": 2**32 - 1 },
"application-version": str(allmydata.__full_version__),
}
if mode == "small":
d.addCallback(lambda res: _call())
return d
+
+ def callRemoteOnly(self, methname, *args, **kwargs):
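+        # Fire-and-forget, like foolscap's RemoteReference.callRemoteOnly:
+        # start the call but deliberately discard the resulting Deferred.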
+ d = self.callRemote(methname, *args, **kwargs)
+ del d # callRemoteOnly ignores this
+ return None
+
+
def remote_write(self, offset, data):
precondition(not self.closed)
precondition(offset >= 0)
self.closed = True
def remote_abort(self):
- log.err(RuntimeError("uh oh, I was asked to abort"))
+ pass
class FakeClient:
DEFAULT_ENCODING_PARAMETERS = {"k":25,
"n": 100,
"max_segment_size": 1*MiB,
}
+
def __init__(self, mode="good", num_servers=50):
self.num_servers = num_servers
+ self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
if type(mode) is str:
mode = dict([i,mode] for i in range(num_servers))
- peers = [ ("%20d"%fakeid, FakeStorageServer(mode[fakeid]))
- for fakeid in range(self.num_servers) ]
- self.storage_broker = StorageFarmBroker(None, permute_peers=True)
- for (serverid, server) in peers:
- self.storage_broker.test_add_server(serverid, server)
- self.last_peers = [p[1] for p in peers]
+ servers = [ ("%20d"%fakeid, FakeStorageServer(mode[fakeid]))
+ for fakeid in range(self.num_servers) ]
+ self.storage_broker = StorageFarmBroker(None, True, 0, None)
+ for (serverid, rref) in servers:
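+            # a minimal announcement for each fake server: a fake FURL plus
+            # a permutation seed derived from the serverid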
+ ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(serverid),
+ "permutation-seed-base32": base32.b2a(serverid) }
+ self.storage_broker.test_add_rref(serverid, rref, ann)
+ self.last_servers = [s[1] for s in servers]
def log(self, *args, **kwargs):
pass
def get_encoding_parameters(self):
- return self.DEFAULT_ENCODING_PARAMETERS
+ return self.encoding_params
def get_storage_broker(self):
return self.storage_broker
_secret_holder = client.SecretHolder("lease secret", "convergence secret")
def test_first_error_all(self):
self.make_node("first-fail")
d = self.shouldFail(UploadUnhappinessError, "first_error_all",
- "peer selection failed",
+ "server selection failed",
upload_data, self.u, DATA)
def _check((f,)):
self.failUnlessIn("placed 0 shares out of 100 total", str(f.value))
def test_second_error_all(self):
self.make_node("second-fail")
d = self.shouldFail(UploadUnhappinessError, "second_error_all",
- "peer selection failed",
+ "server selection failed",
upload_data, self.u, DATA)
def _check((f,)):
self.failUnlessIn("placed 10 shares out of 100 total", str(f.value))
d.addBoth(self._should_fail)
return d
-class PeerSelection(unittest.TestCase):
+class ServerSelection(unittest.TestCase):
def make_client(self, num_servers=50):
self.node = FakeClient(mode="good", num_servers=num_servers)
"n": n,
"max_segment_size": max_segsize,
}
- self.node.DEFAULT_ENCODING_PARAMETERS = p
+ self.node.encoding_params = p
def test_one_each(self):
- # if we have 50 shares, and there are 50 peers, and they all accept a
- # share, we should get exactly one share per peer
+ # if we have 50 shares, and there are 50 servers, and they all accept
+ # a share, we should get exactly one share per server
self.make_client()
data = self.get_data(SIZE_LARGE)
d.addCallback(extract_uri)
d.addCallback(self._check_large, SIZE_LARGE)
def _check(res):
- for p in self.node.last_peers:
- allocated = p.allocated
+ for s in self.node.last_servers:
+ allocated = s.allocated
self.failUnlessEqual(len(allocated), 1)
- self.failUnlessEqual(p.queries, 1)
+ self.failUnlessEqual(s.queries, 1)
d.addCallback(_check)
return d
def test_two_each(self):
- # if we have 100 shares, and there are 50 peers, and they all accept
- # all shares, we should get exactly two shares per peer
+ # if we have 100 shares, and there are 50 servers, and they all
+ # accept all shares, we should get exactly two shares per server
self.make_client()
data = self.get_data(SIZE_LARGE)
- # if there are 50 peers, then happy needs to be <= 50
+ # if there are 50 servers, then happy needs to be <= 50
self.set_encoding_parameters(50, 50, 100)
d = upload_data(self.u, data)
d.addCallback(extract_uri)
d.addCallback(self._check_large, SIZE_LARGE)
def _check(res):
- for p in self.node.last_peers:
- allocated = p.allocated
+ for s in self.node.last_servers:
+ allocated = s.allocated
self.failUnlessEqual(len(allocated), 2)
- self.failUnlessEqual(p.queries, 2)
+ self.failUnlessEqual(s.queries, 2)
d.addCallback(_check)
return d
def test_one_each_plus_one_extra(self):
- # if we have 51 shares, and there are 50 peers, then one peer gets
- # two shares and the rest get just one
+ # if we have 51 shares, and there are 50 servers, then one server
+ # gets two shares and the rest get just one
self.make_client()
data = self.get_data(SIZE_LARGE)
def _check(res):
got_one = []
got_two = []
- for p in self.node.last_peers:
- allocated = p.allocated
+ for s in self.node.last_servers:
+ allocated = s.allocated
self.failUnless(len(allocated) in (1,2), len(allocated))
if len(allocated) == 1:
- self.failUnlessEqual(p.queries, 1)
- got_one.append(p)
+ self.failUnlessEqual(s.queries, 1)
+ got_one.append(s)
else:
- self.failUnlessEqual(p.queries, 2)
- got_two.append(p)
+ self.failUnlessEqual(s.queries, 2)
+ got_two.append(s)
self.failUnlessEqual(len(got_one), 49)
self.failUnlessEqual(len(got_two), 1)
d.addCallback(_check)
return d
def test_four_each(self):
- # if we have 200 shares, and there are 50 peers, then each peer gets
- # 4 shares. The design goal is to accomplish this with only two
- # queries per peer.
+ # if we have 200 shares, and there are 50 servers, then each server
+ # gets 4 shares. The design goal is to accomplish this with only two
+ # queries per server.
self.make_client()
data = self.get_data(SIZE_LARGE)
- # if there are 50 peers, then happy should be no more than 50 if
- # we want this to work.
+ # if there are 50 servers, then happy should be no more than 50 if we
+ # want this to work.
self.set_encoding_parameters(100, 50, 200)
d = upload_data(self.u, data)
d.addCallback(extract_uri)
d.addCallback(self._check_large, SIZE_LARGE)
def _check(res):
- for p in self.node.last_peers:
- allocated = p.allocated
+ for s in self.node.last_servers:
+ allocated = s.allocated
self.failUnlessEqual(len(allocated), 4)
- self.failUnlessEqual(p.queries, 2)
+ self.failUnlessEqual(s.queries, 2)
d.addCallback(_check)
return d
d.addCallback(self._check_large, SIZE_LARGE)
def _check(res):
counts = {}
- for p in self.node.last_peers:
- allocated = p.allocated
+ for s in self.node.last_servers:
+ allocated = s.allocated
counts[len(allocated)] = counts.get(len(allocated), 0) + 1
histogram = [counts.get(i, 0) for i in range(5)]
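+            # i.e. two servers were allocated 3 shares each, and one server
+            # was allocated 4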
self.failUnlessEqual(histogram, [0,0,0,2,1])
d.addCallback(extract_uri)
d.addCallback(self._check_large, SIZE_LARGE)
def _check(res):
- # we should have put one share each on the big peers, and zero
- # shares on the small peers
+ # we should have put one share each on the big servers, and zero
+ # shares on the small servers
total_allocated = 0
- for p in self.node.last_peers:
+ for p in self.node.last_servers:
if p.mode == "good":
self.failUnlessEqual(len(p.allocated), 1)
elif p.mode == "small":
class StorageIndex(unittest.TestCase):
def test_params_must_matter(self):
DATA = "I am some data"
+ PARAMS = Client.DEFAULT_ENCODING_PARAMETERS
+
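+        # each uploadable below needs default encoding parameters before its
+        # storage index can be computed; the parameters feed into the index,
+        # which is what this test verifies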
u = upload.Data(DATA, convergence="")
+ u.set_default_encoding_parameters(PARAMS)
eu = upload.EncryptAnUploadable(u)
d1 = eu.get_storage_index()
# CHK means the same data should encrypt the same way
u = upload.Data(DATA, convergence="")
+ u.set_default_encoding_parameters(PARAMS)
eu = upload.EncryptAnUploadable(u)
d1a = eu.get_storage_index()
# but if we use a different convergence string it should be different
u = upload.Data(DATA, convergence="wheee!")
+ u.set_default_encoding_parameters(PARAMS)
eu = upload.EncryptAnUploadable(u)
d1salt1 = eu.get_storage_index()
# and if we add yet a different convergence it should be different again
u = upload.Data(DATA, convergence="NOT wheee!")
+ u.set_default_encoding_parameters(PARAMS)
eu = upload.EncryptAnUploadable(u)
d1salt2 = eu.get_storage_index()
# and if we use the first string again it should be the same as last time
u = upload.Data(DATA, convergence="wheee!")
+ u.set_default_encoding_parameters(PARAMS)
eu = upload.EncryptAnUploadable(u)
d1salt1a = eu.get_storage_index()
# and if we change the encoding parameters, it should be different (from the same convergence string with different encoding parameters)
u = upload.Data(DATA, convergence="")
+ u.set_default_encoding_parameters(PARAMS)
u.encoding_param_k = u.default_encoding_param_k + 1
eu = upload.EncryptAnUploadable(u)
d2 = eu.get_storage_index()
# and if we use a random key, it should be different than the CHK
u = upload.Data(DATA, convergence=None)
+ u.set_default_encoding_parameters(PARAMS)
eu = upload.EncryptAnUploadable(u)
d3 = eu.get_storage_index()
# and different from another instance
u = upload.Data(DATA, convergence=None)
+ u.set_default_encoding_parameters(PARAMS)
eu = upload.EncryptAnUploadable(u)
d4 = eu.get_storage_index()
d.addCallback(_done)
return d
+# copied from python docs because itertools.combinations was added in
+# python 2.6 and we support >= 2.4.
+def combinations(iterable, r):
+ # combinations('ABCD', 2) --> AB AC AD BC BD CD
+ # combinations(range(4), 3) --> 012 013 023 123
+ pool = tuple(iterable)
+ n = len(pool)
+ if r > n:
+ return
+ indices = range(r)
+ yield tuple(pool[i] for i in indices)
+ while True:
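+        # find the rightmost index that can still be incremented; if the
+        # for loop finishes without breaking (its else clause), every
+        # combination has been generated and we are done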
+ for i in reversed(range(r)):
+ if indices[i] != i + n - r:
+ break
+ else:
+ return
+ indices[i] += 1
+ for j in range(i+1, r):
+ indices[j] = indices[j-1] + 1
+ yield tuple(pool[i] for i in indices)
+
+def is_happy_enough(servertoshnums, h, k):
+    """I calculate whether servertoshnums achieves happiness level h.
+    I do this with a naïve "brute force search" approach. (See
+    src/allmydata/util/happinessutil.py for a better algorithm.)
+    """
+ if len(servertoshnums) < h:
+ return False
+ # print "servertoshnums: ", servertoshnums, h, k
+ for happysetcombo in combinations(servertoshnums.iterkeys(), h):
+ # print "happysetcombo: ", happysetcombo
+ for subsetcombo in combinations(happysetcombo, k):
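+            # shnums: all distinct share numbers held by this k-server subset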
+ shnums = reduce(set.union, [ servertoshnums[s] for s in subsetcombo ])
+ # print "subsetcombo: ", subsetcombo, ", shnums: ", shnums
+ if len(shnums) < k:
+                # print "NOT HAPPY", shnums, k
+ return False
+    # print "HAPPY"
+ return True
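+# For example (a sanity-check sketch, not used by the tests): with k=2 and
+# h=3, the layout {0: set([0]), 1: set([1]), 2: set([0, 1])} is happy enough,
+# since any 2 of those 3 servers together hold 2 distinct shares, whereas
+# {0: set([0]), 1: set([0]), 2: set([0])} is not, since no 2 servers hold 2
+# distinct shares between them.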
+
+class FakeServerTracker:
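+    # A minimal stand-in for upload.ServerTracker: the tests below only need
+    # a serverid and a list of allocated buckets.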
+ def __init__(self, serverid, buckets):
+ self._serverid = serverid
+ self.buckets = buckets
+ def get_serverid(self):
+ return self._serverid
+
class EncodingParameters(GridTestMixin, unittest.TestCase, SetDEPMixin,
ShouldFailMixin):
+ def find_all_shares(self, unused=None):
+ """Locate shares on disk. Returns a dict that maps
+ server to set of sharenums.
+ """
+ assert self.g, "I tried to find a grid at self.g, but failed"
+ servertoshnums = {} # k: server, v: set(shnum)
+
+ for i, c in self.g.servers_by_number.iteritems():
+ for (dirp, dirns, fns) in os.walk(c.sharedir):
+ for fn in fns:
+ try:
+ sharenum = int(fn)
+                    except ValueError:
+ # Whoops, I guess that's not a share file then.
+ pass
+ else:
+ servertoshnums.setdefault(i, set()).add(sharenum)
+
+ return servertoshnums
+
def _do_upload_with_broken_servers(self, servers_to_break):
"""
I act like a normal upload, but before I send the results of
- Tahoe2PeerSelector to the Encoder, I break the first servers_to_break
- PeerTrackers in the used_peers part of the return result.
+ Tahoe2ServerSelector to the Encoder, I break the first
+ servers_to_break ServerTrackers in the upload_servers part of the
+ return result.
"""
assert self.g, "I tried to find a grid at self.g, but failed"
broker = self.g.clients[0].storage_broker
sh = self.g.clients[0]._secret_holder
data = upload.Data("data" * 10000, convergence="")
- data.encoding_param_k = 3
- data.encoding_param_happy = 4
- data.encoding_param_n = 10
+ data.set_default_encoding_parameters({'k': 3, 'happy': 4, 'n': 10})
uploadable = upload.EncryptAnUploadable(data)
encoder = encode.Encoder()
encoder.set_encrypted_uploadable(uploadable)
status = upload.UploadStatus()
- selector = upload.Tahoe2PeerSelector("dglev", "test", status)
+ selector = upload.Tahoe2ServerSelector("dglev", "test", status)
storage_index = encoder.get_param("storage_index")
share_size = encoder.get_param("share_size")
block_size = encoder.get_param("block_size")
d = selector.get_shareholders(broker, sh, storage_index,
share_size, block_size, num_segments,
10, 3, 4)
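+        # the trailing 10, 3, 4 are n (total shares), k (needed shares), and
+        # happy, matching the encoding parameters set on the uploadable above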
- def _have_shareholders((used_peers, already_peers)):
- assert servers_to_break <= len(used_peers)
+ def _have_shareholders((upload_trackers, already_servers)):
+ assert servers_to_break <= len(upload_trackers)
for index in xrange(servers_to_break):
- server = list(used_peers)[index]
- for share in server.buckets.keys():
- server.buckets[share].abort()
+ tracker = list(upload_trackers)[index]
+ for share in tracker.buckets.keys():
+ tracker.buckets[share].abort()
buckets = {}
- servermap = already_peers.copy()
- for peer in used_peers:
- buckets.update(peer.buckets)
- for bucket in peer.buckets:
- servermap.setdefault(bucket, set()).add(peer.peerid)
+ servermap = already_servers.copy()
+ for tracker in upload_trackers:
+ buckets.update(tracker.buckets)
+ for bucket in tracker.buckets:
+ servermap.setdefault(bucket, set()).add(tracker.get_serverid())
encoder.set_shareholders(buckets, servermap)
d = encoder.start()
return d
d.addCallback(_have_shareholders)
return d
+ def _has_happy_share_distribution(self):
+ servertoshnums = self.find_all_shares()
+ k = self.g.clients[0].encoding_params['k']
+ h = self.g.clients[0].encoding_params['happy']
+ return is_happy_enough(servertoshnums, h, k)
def _add_server(self, server_number, readonly=False):
assert self.g, "I tried to find a grid at self.g, but failed"
ss = self.g.make_server(server_number, readonly)
+ log.msg("just created a server, number: %s => %s" % (server_number, ss,))
self.g.add_server(server_number, ss)
-
def _add_server_with_share(self, server_number, share_number=None,
readonly=False):
self._add_server(server_number, readonly)
def _copy_share_to_server(self, share_number, server_number):
ss = self.g.servers_by_number[server_number]
- # Copy share i from the directory associated with the first
+ # Copy share i from the directory associated with the first
# storage server to the directory associated with this one.
assert self.g, "I tried to find a grid at self.g, but failed"
assert self.shares, "I tried to find shares at self.shares, but failed"
str(share_number))
if old_share_location != new_share_location:
shutil.copy(old_share_location, new_share_location)
- shares = self.find_shares(self.uri)
+ shares = self.find_uri_shares(self.uri)
# Make sure that the storage server has the share.
self.failUnless((share_number, ss.my_nodeid, new_share_location)
in shares)
"""
self._setup_grid()
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = 1
+ client.encoding_params['happy'] = 1
if "n" in kwargs and "k" in kwargs:
- client.DEFAULT_ENCODING_PARAMETERS['k'] = kwargs['k']
- client.DEFAULT_ENCODING_PARAMETERS['n'] = kwargs['n']
+ client.encoding_params['k'] = kwargs['k']
+ client.encoding_params['n'] = kwargs['n']
data = upload.Data("data" * 10000, convergence="")
self.data = data
d = client.upload(data)
def _store_uri(ur):
- self.uri = ur.uri
+ self.uri = ur.get_uri()
d.addCallback(_store_uri)
d.addCallback(lambda ign:
- self.find_shares(self.uri))
+ self.find_uri_shares(self.uri))
def _store_shares(shares):
self.shares = shares
d.addCallback(_store_shares)
return d
-
def test_configure_parameters(self):
self.basedir = self.mktemp()
hooks = {0: self._set_up_nodes_extra_config}
DATA = "data" * 100
u = upload.Data(DATA, convergence="")
d = c0.upload(u)
- d.addCallback(lambda ur: c0.create_node_from_uri(ur.uri))
+ d.addCallback(lambda ur: c0.create_node_from_uri(ur.get_uri()))
m = monitor.Monitor()
d.addCallback(lambda fn: fn.check(m))
def _check(cr):
- data = cr.get_data()
- self.failUnlessEqual(data["count-shares-needed"], 7)
- self.failUnlessEqual(data["count-shares-expected"], 12)
+ self.failUnlessEqual(cr.get_encoding_needed(), 7)
+ self.failUnlessEqual(cr.get_encoding_expected(), 12)
d.addCallback(_check)
return d
self.u.upload(DATA))
return d
+ def test_aborted_shares(self):
+ self.basedir = "upload/EncodingParameters/aborted_shares"
+ self.set_up_grid(num_servers=4)
+ c = self.g.clients[0]
+        DATA = upload.Data(100 * "kittens", convergence="")
+ # These parameters are unsatisfiable with only 4 servers, but should
+ # work with 5, as long as the original 4 are not stuck in the open
+        # BucketWriter state (open() but not closed).
+ parms = {"k":2, "happy":5, "n":5, "max_segment_size": 1*MiB}
+ c.encoding_params = parms
+ d = self.shouldFail(UploadUnhappinessError, "test_aborted_shares",
+ "shares could be placed on only 4 "
+ "server(s) such that any 2 of them have enough "
+ "shares to recover the file, but we were asked "
+ "to place shares on at least 5 such servers",
+ c.upload, DATA)
+ # now add the 5th server
+ d.addCallback(lambda ign: self._add_server(4, False))
+ # and this time the upload ought to succeed
+ d.addCallback(lambda ign: c.upload(DATA))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
+ return d
+
def test_problem_layout_comment_52(self):
def _basedir():
self.basedir = self.mktemp()
_basedir()
- # This scenario is at
- # http://allmydata.org/trac/tahoe/ticket/778#comment:52
+ # This scenario is at
+ # http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:52
#
# The scenario in comment:52 proposes that we have a layout
# like:
# server 1: share 0, read-only
# server 2: share 0, read-only
# server 3: share 0, read-only
- # To get access to the shares, we will first upload to one
- # server, which will then have shares 0 - 9. We'll then
+ # To get access to the shares, we will first upload to one
+ # server, which will then have shares 0 - 9. We'll then
# add three new servers, configure them to not accept any new
# shares, then write share 0 directly into the serverdir of each,
# and then remove share 0 from server 0 in the same way.
- # Then each of servers 1 - 3 will report that they have share 0,
+ # Then each of servers 1 - 3 will report that they have share 0,
# and will not accept any new share, while server 0 will report that
# it has shares 1 - 9 and will accept new shares.
# We'll then set 'happy' = 4, and see that an upload fails
# Set happy = 4 in the client.
def _prepare():
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+ client.encoding_params['happy'] = 4
return client
d.addCallback(lambda ign:
_prepare())
readonly=True))
def _prepare2():
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+ client.encoding_params['happy'] = 4
return client
d.addCallback(lambda ign:
_prepare2())
def test_problem_layout_comment_53(self):
# This scenario is at
- # http://allmydata.org/trac/tahoe/ticket/778#comment:53
+ # http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:53
#
# Set up the grid to have one server
def _change_basedir(ign):
# We start by uploading all of the shares to one server.
# Next, we'll add three new servers to our NoNetworkGrid. We'll add
# one share from our initial upload to each of these.
- # The counterintuitive ordering of the share numbers is to deal with
- # the permuting of these servers -- distributing the shares this
- # way ensures that the Tahoe2PeerSelector sees them in the order
+ # The counterintuitive ordering of the share numbers is to deal with
+ # the permuting of these servers -- distributing the shares this
+ # way ensures that the Tahoe2ServerSelector sees them in the order
# described below.
d = self._setup_and_upload()
d.addCallback(lambda ign:
# server 1: share 2
# server 2: share 0
# server 3: share 1
- # We change the 'happy' parameter in the client to 4.
- # The Tahoe2PeerSelector will see the peers permuted as:
+ # We change the 'happy' parameter in the client to 4.
+ # The Tahoe2ServerSelector will see the servers permuted as:
# 2, 3, 1, 0
# Ideally, a reupload of our original data should work.
def _reset_encoding_parameters(ign, happy=4):
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
+ client.encoding_params['happy'] = happy
return client
d.addCallback(_reset_encoding_parameters)
d.addCallback(lambda client:
client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
- # This scenario is basically comment:53, but changed so that the
- # Tahoe2PeerSelector sees the server with all of the shares before
+ # This scenario is basically comment:53, but changed so that the
+ # Tahoe2ServerSelector sees the server with all of the shares before
# any of the other servers.
# The layout is:
# server 2: shares 0 - 9
- # server 3: share 0
- # server 1: share 1
+ # server 3: share 0
+ # server 1: share 1
# server 4: share 2
- # The Tahoe2PeerSelector sees the peers permuted as:
+ # The Tahoe2ServerSelector sees the servers permuted as:
# 2, 3, 1, 4
- # Note that server 0 has been replaced by server 4; this makes it
- # easier to ensure that the last server seen by Tahoe2PeerSelector
- # has only one share.
+ # Note that server 0 has been replaced by server 4; this makes it
+ # easier to ensure that the last server seen by Tahoe2ServerSelector
+ # has only one share.
d.addCallback(_change_basedir)
d.addCallback(lambda ign:
self._setup_and_upload())
self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
d.addCallback(lambda ign:
self._add_server_with_share(server_number=4, share_number=0))
- # Now try uploading.
+ # Now try uploading.
d.addCallback(_reset_encoding_parameters)
d.addCallback(lambda client:
client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
# Try the same thing, but with empty servers after the first one
- # We want to make sure that Tahoe2PeerSelector will redistribute
+ # We want to make sure that Tahoe2ServerSelector will redistribute
# shares as necessary, not simply discover an existing layout.
# The layout is:
# server 2: shares 0 - 9
# Make sure that only as many shares as necessary to satisfy
# servers of happiness were pushed.
d.addCallback(lambda results:
- self.failUnlessEqual(results.pushed_shares, 3))
+ self.failUnlessEqual(results.get_pushed_shares(), 3))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
return d
+ def test_problem_layout_ticket_1124(self):
+ self.basedir = self.mktemp()
+ d = self._setup_and_upload(k=2, n=4)
+
+ # server 0: shares 0, 1, 2, 3
+ # server 1: shares 0, 3
+ # server 2: share 1
+ # server 3: share 2
+        # With this layout, the upload should recognize that the existing
+        # share distribution already satisfies happy=4, and succeed without
+        # placing any new shares.
+ def _setup(ign):
+ self._add_server_with_share(server_number=0, share_number=None)
+ self._add_server_with_share(server_number=1, share_number=0)
+ self._add_server_with_share(server_number=2, share_number=1)
+ self._add_server_with_share(server_number=3, share_number=2)
+ # Copy shares
+ self._copy_share_to_server(3, 1)
+ client = self.g.clients[0]
+ client.encoding_params['happy'] = 4
+ return client
+
+ d.addCallback(_setup)
+ d.addCallback(lambda client:
+ client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
+ return d
+ test_problem_layout_ticket_1124.todo = "Fix this after 1.7.1 release."
- def test_happiness_with_some_readonly_peers(self):
+ def test_happiness_with_some_readonly_servers(self):
# Try the following layout
# server 2: shares 0-9
# server 4: share 0, read-only
readonly=True))
def _reset_encoding_parameters(ign, happy=4):
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
+ client.encoding_params['happy'] = happy
return client
d.addCallback(_reset_encoding_parameters)
d.addCallback(lambda client:
client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
return d
- def test_happiness_with_all_readonly_peers(self):
+ def test_happiness_with_all_readonly_servers(self):
# server 3: share 1, read-only
# server 1: share 2, read-only
# server 2: shares 0-9, read-only
# server 4: share 0, read-only
# The idea with this test is to make sure that the survey of
- # read-only peers doesn't undercount servers of happiness
+ # read-only servers doesn't undercount servers of happiness
self.basedir = self.mktemp()
d = self._setup_and_upload()
d.addCallback(lambda ign:
self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
def _reset_encoding_parameters(ign, happy=4):
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
+ client.encoding_params['happy'] = happy
return client
d.addCallback(_reset_encoding_parameters)
d.addCallback(lambda client:
client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
return d
def test_dropped_servers_in_encoder(self):
- # The Encoder does its own "servers_of_happiness" check if it
- # happens to lose a bucket during an upload (it assumes that
+ # The Encoder does its own "servers_of_happiness" check if it
+ # happens to lose a bucket during an upload (it assumes that
# the layout presented to it satisfies "servers_of_happiness"
# until a failure occurs)
- #
- # This test simulates an upload where servers break after peer
+ #
+ # This test simulates an upload where servers break after server
# selection, but before they are written to.
def _set_basedir(ign=None):
self.basedir = self.mktemp()
self._add_server(server_number=5)
d.addCallback(_do_server_setup)
# remove the original server
- # (necessary to ensure that the Tahoe2PeerSelector will distribute
+ # (necessary to ensure that the Tahoe2ServerSelector will distribute
# all the shares)
def _remove_server(ign):
server = self.g.servers_by_number[0]
self.g.remove_server(server.my_nodeid)
d.addCallback(_remove_server)
- # This should succeed; we still have 4 servers, and the
+ # This should succeed; we still have 4 servers, and the
# happiness of the upload is 4.
d.addCallback(lambda ign:
self._do_upload_with_broken_servers(1))
# Now, do the same thing over again, but drop 2 servers instead
- # of 1. This should fail, because servers_of_happiness is 4 and
+ # of 1. This should fail, because servers_of_happiness is 4 and
# we can't satisfy that.
d.addCallback(_set_basedir)
d.addCallback(lambda ign:
return d
- def test_merge_peers(self):
- # merge_peers merges a list of used_peers and a dict of
- # shareid -> peerid mappings.
+ def test_merge_servers(self):
+ # merge_servers merges a list of upload_servers and a dict of
+ # shareid -> serverid mappings.
shares = {
1 : set(["server1"]),
2 : set(["server2"]),
4 : set(["server4", "server5"]),
5 : set(["server1", "server2"]),
}
- # if not provided with a used_peers argument, it should just
+ # if not provided with a upload_servers argument, it should just
# return the first argument unchanged.
- self.failUnlessEqual(shares, merge_peers(shares, set([])))
- class FakePeerTracker:
- pass
+ self.failUnlessEqual(shares, merge_servers(shares, set([])))
trackers = []
for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
- t = FakePeerTracker()
- t.peerid = server
- t.buckets = [i]
+ t = FakeServerTracker(server, [i])
trackers.append(t)
expected = {
1 : set(["server1"]),
7 : set(["server7"]),
8 : set(["server8"]),
}
- self.failUnlessEqual(expected, merge_peers(shares, set(trackers)))
+ self.failUnlessEqual(expected, merge_servers(shares, set(trackers)))
shares2 = {}
expected = {
5 : set(["server5"]),
7 : set(["server7"]),
8 : set(["server8"]),
}
- self.failUnlessEqual(expected, merge_peers(shares2, set(trackers)))
+ self.failUnlessEqual(expected, merge_servers(shares2, set(trackers)))
shares3 = {}
trackers = []
expected = {}
for (i, server) in [(i, "server%d" % i) for i in xrange(10)]:
shares3[i] = set([server])
- t = FakePeerTracker()
- t.peerid = server
- t.buckets = [i]
+ t = FakeServerTracker(server, [i])
trackers.append(t)
expected[i] = set([server])
- self.failUnlessEqual(expected, merge_peers(shares3, set(trackers)))
+ self.failUnlessEqual(expected, merge_servers(shares3, set(trackers)))
def test_servers_of_happiness_utility_function(self):
# servers_of_happiness doesn't under or overcount the happiness
# value for given inputs.
- # servers_of_happiness expects a dict of
- # shnum => set(peerids) as a preexisting shares argument.
+ # servers_of_happiness expects a dict of
+ # shnum => set(serverids) as a preexisting shares argument.
test1 = {
1 : set(["server1"]),
2 : set(["server2"]),
# should be 3 instead of 4.
happy = servers_of_happiness(test1)
self.failUnlessEqual(3, happy)
- # The second argument of merge_peers should be a set of
- # objects with peerid and buckets as attributes. In actual use,
- # these will be PeerTracker instances, but for testing it is fine
- # to make a FakePeerTracker whose job is to hold those instance
- # variables to test that part.
- class FakePeerTracker:
- pass
+ # The second argument of merge_servers should be a set of objects with
+ # serverid and buckets as attributes. In actual use, these will be
+ # ServerTracker instances, but for testing it is fine to make a
+ # FakeServerTracker whose job is to hold those instance variables to
+ # test that part.
trackers = []
for (i, server) in [(i, "server%d" % i) for i in xrange(5, 9)]:
- t = FakePeerTracker()
- t.peerid = server
- t.buckets = [i]
+ t = FakeServerTracker(server, [i])
trackers.append(t)
# Recall that test1 is a server layout with servers_of_happiness
# = 3. Since there isn't any overlap between the shnum ->
- # set([peerid]) correspondences in test1 and those in trackers,
+ # set([serverid]) correspondences in test1 and those in trackers,
# the result here should be 7.
- test2 = merge_peers(test1, set(trackers))
+ test2 = merge_servers(test1, set(trackers))
happy = servers_of_happiness(test2)
self.failUnlessEqual(7, happy)
# Now add an overlapping server to trackers. This is redundant,
# so it should not cause the previously reported happiness value
# to change.
- t = FakePeerTracker()
- t.peerid = "server1"
- t.buckets = [1]
+ t = FakeServerTracker("server1", [1])
trackers.append(t)
- test2 = merge_peers(test1, set(trackers))
+ test2 = merge_servers(test1, set(trackers))
happy = servers_of_happiness(test2)
self.failUnlessEqual(7, happy)
test = {}
happy = servers_of_happiness(test)
self.failUnlessEqual(0, happy)
- # Test a more substantial overlap between the trackers and the
+ # Test a more substantial overlap between the trackers and the
# existing assignments.
test = {
1 : set(['server1']),
4 : set(['server4']),
}
trackers = []
- t = FakePeerTracker()
- t.peerid = 'server5'
- t.buckets = [4]
+ t = FakeServerTracker('server5', [4])
trackers.append(t)
- t = FakePeerTracker()
- t.peerid = 'server6'
- t.buckets = [3, 5]
+ t = FakeServerTracker('server6', [3, 5])
trackers.append(t)
- # The value returned by servers_of_happiness is the size
+ # The value returned by servers_of_happiness is the size
# of a maximum matching in the bipartite graph that
- # servers_of_happiness() makes between peerids and share
+ # servers_of_happiness() makes between serverids and share
# numbers. It should find something like this:
# (server 1, share 1)
# (server 2, share 2)
# (server 3, share 3)
# (server 5, share 4)
# (server 6, share 5)
- #
+ #
# and, since there are 5 edges in this matching, it should
# return 5.
- test2 = merge_peers(test, set(trackers))
+ test2 = merge_servers(test, set(trackers))
happy = servers_of_happiness(test2)
self.failUnlessEqual(5, happy)
- # Zooko's first puzzle:
+ # Zooko's first puzzle:
# (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:156)
#
# server 1: shares 0, 1
2 : set(['server2', 'server3']),
}
self.failUnlessEqual(3, servers_of_happiness(test))
- # Zooko's second puzzle:
+ # Zooko's second puzzle:
# (from http://allmydata.org/trac/tahoe-lafs/ticket/778#comment:158)
- #
+ #
# server 1: shares 0, 1
# server 2: share 1
- #
+ #
# This should yield happiness of 2.
test = {
0 : set(['server1']),
sbs = shares_by_server(test1)
self.failUnlessEqual(set([1, 2, 3]), sbs["server1"])
self.failUnlessEqual(set([4, 5]), sbs["server2"])
- # This should fail unless the peerid part of the mapping is a set
+ # This should fail unless the serverid part of the mapping is a set
test2 = {1: "server1"}
self.shouldFail(AssertionError,
"test_shares_by_server",
# server 2: empty
# server 3: empty
# server 4: empty
- # The purpose of this test is to make sure that the peer selector
+ # The purpose of this test is to make sure that the server selector
# knows about the shares on server 1, even though it is read-only.
# It used to simply filter these out, which would cause the test
# to fail when servers_of_happiness = 4.
self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
def _prepare_client(ign):
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+ client.encoding_params['happy'] = 4
return client
d.addCallback(_prepare_client)
d.addCallback(lambda client:
client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
return d
def test_query_counting(self):
- # If peer selection fails, Tahoe2PeerSelector prints out a lot
+ # If server selection fails, Tahoe2ServerSelector prints out a lot
# of helpful diagnostic information, including query stats.
# This test helps make sure that that information is accurate.
self.basedir = self.mktemp()
self._add_server(server_number=i)
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
c = self.g.clients[0]
- # We set happy to an unsatisfiable value so that we can check the
+ # We set happy to an unsatisfiable value so that we can check the
# counting in the exception message. The same progress message
# is also used when the upload is successful, but in that case it
# only gets written to a log, so we can't see what it says.
- c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
+ c.encoding_params['happy'] = 45
return c
d.addCallback(_setup)
d.addCallback(lambda c:
c.upload, upload.Data("data" * 10000,
convergence="")))
# Now try with some readonly servers. We want to make sure that
- # the readonly peer share discovery phase is counted correctly.
+ # the readonly server share discovery phase is counted correctly.
def _reset(ign):
self.basedir = self.mktemp()
self.g = None
self._add_server(server_number=12, readonly=True)
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
c = self.g.clients[0]
- c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
+ c.encoding_params['happy'] = 45
return c
d.addCallback(_then)
d.addCallback(lambda c:
# shares that it wants to place on the first server, including
# the one that it wanted to allocate there. Though no shares will
# be allocated in this request, it should still be called
- # productive, since it caused some homeless shares to be
+ # productive, since it caused some homeless shares to be
# removed.
d.addCallback(_reset)
d.addCallback(lambda ign:
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
# Make happiness unsatisfiable
c = self.g.clients[0]
- c.DEFAULT_ENCODING_PARAMETERS['happy'] = 45
+ c.encoding_params['happy'] = 45
return c
d.addCallback(_next)
d.addCallback(lambda c:
self._add_server(server_number=i, readonly=True)
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
c = self.g.clients[0]
- c.DEFAULT_ENCODING_PARAMETERS['k'] = 2
- c.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
- c.DEFAULT_ENCODING_PARAMETERS['n'] = 4
+ c.encoding_params['k'] = 2
+ c.encoding_params['happy'] = 4
+ c.encoding_params['n'] = 4
return c
d.addCallback(_then)
d.addCallback(lambda client:
self.shouldFail(UploadUnhappinessError,
"test_upper_limit_on_readonly_queries",
- "sent 8 queries to 8 peers",
+ "sent 8 queries to 8 servers",
client.upload,
upload.Data('data' * 10000, convergence="")))
return d
- def test_exception_messages_during_peer_selection(self):
+ def test_exception_messages_during_server_selection(self):
# server 1: read-only, no shares
# server 2: read-only, no shares
# server 3: read-only, no shares
self._add_server(server_number=5, readonly=True))
d.addCallback(lambda ign:
self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
- def _reset_encoding_parameters(ign):
+ def _reset_encoding_parameters(ign, happy=4):
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+ client.encoding_params['happy'] = happy
return client
d.addCallback(_reset_encoding_parameters)
d.addCallback(lambda client:
"total (10 homeless), want to place shares on at "
"least 4 servers such that any 3 of them have "
"enough shares to recover the file, "
- "sent 5 queries to 5 peers, 0 queries placed "
+ "sent 5 queries to 5 servers, 0 queries placed "
"some shares, 5 placed none "
"(of which 5 placed none due to the server being "
"full and 0 placed none due to an error)",
d.addCallback(lambda ign:
self._add_server(server_number=2))
def _break_server_2(ign):
- server = self.g.servers_by_number[2].my_nodeid
- # We have to break the server in servers_by_id,
- # because the one in servers_by_number isn't wrapped,
- # and doesn't look at its broken attribute when answering
- # queries.
- self.g.servers_by_id[server].broken = True
+ serverid = self.g.servers_by_number[2].my_nodeid
+ self.g.break_server(serverid)
d.addCallback(_break_server_2)
d.addCallback(lambda ign:
self._add_server(server_number=3, readonly=True))
self._add_server(server_number=5, readonly=True))
d.addCallback(lambda ign:
self.g.remove_server(self.g.servers_by_number[0].my_nodeid))
- def _reset_encoding_parameters(ign, happy=4):
- client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = happy
- return client
d.addCallback(_reset_encoding_parameters)
d.addCallback(lambda client:
self.shouldFail(UploadUnhappinessError, "test_selection_exceptions",
"total (10 homeless), want to place shares on at "
"least 4 servers such that any 3 of them have "
"enough shares to recover the file, "
- "sent 5 queries to 5 peers, 0 queries placed "
+ "sent 5 queries to 5 servers, 0 queries placed "
"some shares, 5 placed none "
"(of which 4 placed none due to the server being "
"full and 1 placed none due to an error)",
# This should place all of the shares, but fail with happy=4.
# Since the number of servers with shares is more than the number
# necessary to reconstitute the file, this will trigger a different
- # error message than either of those above.
+ # error message than either of those above.
d.addCallback(_reset)
d.addCallback(lambda ign:
self._setup_and_upload())
# Remove server 0
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
client = self.g.clients[0]
- client.DEFAULT_ENCODING_PARAMETERS['happy'] = 3
+ client.encoding_params['happy'] = 3
return client
d.addCallback(_setup)
d.addCallback(lambda client:
client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
return d
test_problem_layout_comment_187.todo = "this isn't fixed yet"
+ def test_problem_layout_ticket_1118(self):
+ # #1118 includes a report from a user who hit an assertion in
+ # the upload code with this layout.
+ self.basedir = self.mktemp()
+ d = self._setup_and_upload(k=2, n=4)
+
+ # server 0: no shares
+ # server 1: shares 0, 3
+ # server 3: share 1
+ # server 2: share 2
+        # The order in which they get queried is 0, 1, 3, 2.
+ def _setup(ign):
+ self._add_server(server_number=0)
+ self._add_server_with_share(server_number=1, share_number=0)
+ self._add_server_with_share(server_number=2, share_number=2)
+ self._add_server_with_share(server_number=3, share_number=1)
+ # Copy shares
+ self._copy_share_to_server(3, 1)
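+            # remove the shares that the initial upload placed on server 0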
+ self.delete_all_shares(self.get_serverdir(0))
+ client = self.g.clients[0]
+ client.encoding_params['happy'] = 4
+ return client
+
+ d.addCallback(_setup)
+ # Note: actually it should succeed! See
+ # test_problem_layout_ticket_1128. But ticket 1118 is just to
+ # make it realize that it has failed, so if it raises
+ # UploadUnhappinessError then we'll give it the green light
+ # for now.
+ d.addCallback(lambda ignored:
+ self.shouldFail(UploadUnhappinessError,
+ "test_problem_layout_ticket_1118",
+ "",
+ self.g.clients[0].upload, upload.Data("data" * 10000,
+ convergence="")))
+ return d
+
+ def test_problem_layout_ticket_1128(self):
+        # This is the same layout as in #1118 above, but here the upload
+        # ought to actually succeed, as noted in
+        # test_problem_layout_ticket_1118.
+ self.basedir = self.mktemp()
+ d = self._setup_and_upload(k=2, n=4)
+
+ # server 0: no shares
+ # server 1: shares 0, 3
+ # server 3: share 1
+ # server 2: share 2
+        # The order in which they get queried is 0, 1, 3, 2.
+ def _setup(ign):
+ self._add_server(server_number=0)
+ self._add_server_with_share(server_number=1, share_number=0)
+ self._add_server_with_share(server_number=2, share_number=2)
+ self._add_server_with_share(server_number=3, share_number=1)
+ # Copy shares
+ self._copy_share_to_server(3, 1)
+            # Remove shares from server 0
+ self.delete_all_shares(self.get_serverdir(0))
+ client = self.g.clients[0]
+ client.encoding_params['happy'] = 4
+ return client
+
+ d.addCallback(_setup)
+ d.addCallback(lambda client:
+ client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
+ return d
+ test_problem_layout_ticket_1128.todo = "Invent a smarter uploader that uploads successfully in this case."
def test_upload_succeeds_with_some_homeless_shares(self):
# If the upload is forced to stop trying to place shares before
self.g.remove_server(self.g.servers_by_number[0].my_nodeid)
# Set the client appropriately
c = self.g.clients[0]
- c.DEFAULT_ENCODING_PARAMETERS['happy'] = 4
+ c.encoding_params['happy'] = 4
return c
d.addCallback(_server_setup)
d.addCallback(lambda client:
client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
return d
# Add some servers so that the upload will need to
# redistribute, but will first pass over a couple of servers
# that don't have enough shares to redistribute before
- # finding one that does have shares to redistribute.
+ # finding one that does have shares to redistribute.
self._add_server_with_share(server_number=1, share_number=0)
self._add_server_with_share(server_number=2, share_number=2)
self._add_server_with_share(server_number=3, share_number=1)
d.addCallback(_server_setup)
d.addCallback(lambda client:
client.upload(upload.Data("data" * 10000, convergence="")))
+ d.addCallback(lambda ign:
+ self.failUnless(self._has_happy_share_distribution()))
+ return d
+
+
+ def test_server_selector_bucket_abort(self):
+ # If server selection for an upload fails due to an unhappy
+ # layout, the server selection process should abort the buckets it
+ # allocates before failing, so that the space can be re-used.
+ self.basedir = self.mktemp()
+ self.set_up_grid(num_servers=5)
+
+ # Try to upload a file with happy=7, which is unsatisfiable with
+ # the current grid. This will fail, but should not take up any
+ # space on the storage servers after it fails.
+ client = self.g.clients[0]
+ client.encoding_params['happy'] = 7
+ d = defer.succeed(None)
+ d.addCallback(lambda ignored:
+ self.shouldFail(UploadUnhappinessError,
+                                    "test_server_selector_bucket_abort",
+ "",
+ client.upload, upload.Data("data" * 10000,
+ convergence="")))
+ # wait for the abort messages to get there.
+ def _turn_barrier(res):
+ return fireEventually(res)
+ d.addCallback(_turn_barrier)
+ def _then(ignored):
+ for server in self.g.servers_by_number.values():
+ self.failUnlessEqual(server.allocated_size(), 0)
+ d.addCallback(_then)
+ return d
+
+
+ def test_encoder_bucket_abort(self):
+ # If enough servers die in the process of encoding and uploading
+ # a file to make the layout unhappy, we should cancel the
+ # newly-allocated buckets before dying.
+ self.basedir = self.mktemp()
+ self.set_up_grid(num_servers=4)
+
+ client = self.g.clients[0]
+ client.encoding_params['happy'] = 7
+
+ d = defer.succeed(None)
+ d.addCallback(lambda ignored:
+ self.shouldFail(UploadUnhappinessError,
+ "test_encoder_bucket_abort",
+ "",
+ self._do_upload_with_broken_servers, 1))
+ def _turn_barrier(res):
+ return fireEventually(res)
+ d.addCallback(_turn_barrier)
+ def _then(ignored):
+ for server in self.g.servers_by_number.values():
+ self.failUnlessEqual(server.allocated_size(), 0)
+ d.addCallback(_then)
return d
return None
# TODO:
-# upload with exactly 75 peers (shares_of_happiness)
+# upload with exactly 75 servers (shares_of_happiness)
# have a download fail
# cancel a download (need to implement more cancel stuff)
+
+# from test_encode:
+# NoNetworkGrid, upload part of ciphertext, kill server, continue upload
+# check with Kevan: these want to live in test_upload; existing tests might
+# already cover them
+# def test_lost_one_shareholder(self): # these are upload-side tests
+# def test_lost_one_shareholder_early(self):
+# def test_lost_many_shareholders(self):
+# def test_lost_all_shareholders(self):