use more servers for test_mutable.Update, to test #2034 properly
diff --git a/src/allmydata/test/test_mutable.py b/src/allmydata/test/test_mutable.py
index d4bc5a334b02e527b8374012ef3595c40d3a2d50..890d294e84d314d1e822a9a48c72362a601ddf0e 100644
--- a/src/allmydata/test/test_mutable.py
+++ b/src/allmydata/test/test_mutable.py
@@ -1,38 +1,53 @@
-
-import os, struct
+import os, re, base64
 from cStringIO import StringIO
+
 from twisted.trial import unittest
 from twisted.internet import defer, reactor
-from twisted.python import failure
-from allmydata import uri, storage
-from allmydata.immutable import download
-from allmydata.immutable.encode import NotEnoughSharesError
-from allmydata.util import base32, testutil, idlib
-from allmydata.util.idlib import shortnodeid_b2a
-from allmydata.util.hashutil import tagged_hash
-from allmydata.util.fileutil import make_dirs
-from allmydata.interfaces import IURI, IMutableFileURI, IUploadable, \
-     FileTooLargeError, IRepairResults
-from foolscap.eventual import eventually, fireEventually
+
+from allmydata import uri, client
+from allmydata.nodemaker import NodeMaker
+from allmydata.util import base32, consumer, fileutil, mathutil
+from allmydata.util.fileutil import abspath_expanduser_unicode
+from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
+     ssk_pubkey_fingerprint_hash
+from allmydata.util.consumer import MemoryConsumer
+from allmydata.util.deferredutil import gatherResults
+from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
+     NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
+from allmydata.monitor import Monitor
+from allmydata.test.common import ShouldFailMixin
+from allmydata.test.no_network import GridTestMixin
+from foolscap.api import eventually, fireEventually
 from foolscap.logging import log
-import sha
+from allmydata.storage_client import StorageFarmBroker
+from allmydata.storage.common import storage_index_to_dir
+from allmydata.scripts import debug
 
-from allmydata.mutable.node import MutableFileNode, BackoffAgent
-from allmydata.mutable.common import DictOfSets, ResponseCache, \
+from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
+from allmydata.mutable.common import \
      MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
      NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
      NotEnoughServersError, CorruptShareError
 from allmydata.mutable.retrieve import Retrieve
-from allmydata.mutable.publish import Publish
+from allmydata.mutable.publish import Publish, MutableFileHandle, \
+                                      MutableData, \
+                                      DEFAULT_MAX_SEGMENT_SIZE
 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
-from allmydata.mutable.layout import unpack_header, unpack_share
+from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
+from allmydata.mutable.repairer import MustForceRepairError
+
+import allmydata.test.common_util as testutil
+from allmydata.test.common import TEST_RSA_KEY_SIZE
+from allmydata.test.test_download import PausingConsumer, \
+     PausingAndStoppingConsumer, StoppingConsumer, \
+     ImmediatelyStoppingConsumer
 
-# this "FastMutableFileNode" exists solely to speed up tests by using smaller
-# public/private keys. Once we switch to fast DSA-based keys, we can get rid
-# of this.
+def eventuaaaaaly(res=None):
+    d = fireEventually(res)
+    d.addCallback(fireEventually)
+    d.addCallback(fireEventually)
+    return d
 
-class FastMutableFileNode(MutableFileNode):
-    SIGNATURE_KEY_SIZE = 522
 
 # this "FakeStorage" exists to put the share data in RAM and avoid using real
 # network connections, both to speed up the tests and to reduce the amount of
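
A note on the eventuaaaaaly() helper defined above: chaining three
fireEventually() calls yields a Deferred that fires only after three trips
through the reactor, giving previously scheduled eventual-sends time to be
delivered. FakeStorage.read() below returns it in place of the old
defer.succeed(). A minimal usage sketch (process_shares is a hypothetical
callback, not part of the patch):

    d = eventuaaaaaly(shares)
    # fires three reactor turns later, after pending eventual-sends
    d.addCallback(lambda shares: process_shares(shares))
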
@@ -59,37 +74,30 @@ class FakeStorage:
         self._sequence = None
         self._pending = {}
         self._pending_timer = None
-        self._special_answers = {}
 
     def read(self, peerid, storage_index):
         shares = self._peers.get(peerid, {})
-        if self._special_answers.get(peerid, []):
-            mode = self._special_answers[peerid].pop(0)
-            if mode == "fail":
-                shares = failure.Failure(IntentionalError())
-            elif mode == "none":
-                shares = {}
-            elif mode == "normal":
-                pass
         if self._sequence is None:
-            return defer.succeed(shares)
+            return eventuaaaaaly(shares)
         d = defer.Deferred()
         if not self._pending:
             self._pending_timer = reactor.callLater(1.0, self._fire_readers)
-        self._pending[peerid] = (d, shares)
+        if peerid not in self._pending:
+            self._pending[peerid] = []
+        self._pending[peerid].append( (d, shares) )
         return d
 
     def _fire_readers(self):
         self._pending_timer = None
         pending = self._pending
         self._pending = {}
-        extra = []
         for peerid in self._sequence:
             if peerid in pending:
-                d, shares = pending.pop(peerid)
+                for (d, shares) in pending.pop(peerid):
+                    eventually(d.callback, shares)
+        for peerid in pending:
+            for (d, shares) in pending[peerid]:
                 eventually(d.callback, shares)
-        for (d, shares) in pending.values():
-            eventually(d.callback, shares)
 
     def write(self, peerid, storage_index, shnum, offset, data):
         if peerid not in self._peers:
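
The read() change above fixes a bookkeeping bug in the pending-read path:
the old code kept a single (d, shares) tuple per peerid, so a second read
issued to the same peer before _fire_readers() ran would overwrite the
first, leaving its Deferred to never fire. Each peer now accumulates a list
of pending reads, and _fire_readers() answers them all. The same bookkeeping
could be spelled with setdefault (an equivalent sketch, not the patch's
wording):

    self._pending.setdefault(peerid, []).append( (d, shares) )
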
@@ -108,6 +116,7 @@ class FakeStorageServer:
         self.storage = storage
         self.queries = 0
     def callRemote(self, methname, *args, **kwargs):
+        self.queries += 1
         def _call():
             meth = getattr(self, methname)
             return meth(*args, **kwargs)
@@ -115,6 +124,15 @@ class FakeStorageServer:
         d.addCallback(lambda res: _call())
         return d
 
+    def callRemoteOnly(self, methname, *args, **kwargs):
+        self.queries += 1
+        d = self.callRemote(methname, *args, **kwargs)
+        d.addBoth(lambda ignore: None)
+        pass
+
+    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
+        pass
+
     def slot_readv(self, storage_index, shnums, readv):
         d = self.storage.read(self.peerid, storage_index)
         def _read(shares):
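
Both callRemote() and the new callRemoteOnly() increment self.queries,
giving each fake server a per-connection call counter; test_mdmf_write_count
below uses it to assert that publishing an MDMF file costs exactly one write
per server:

    for server in sb.servers.itervalues():
        self.failUnlessEqual(server.get_rref().queries, 1)

callRemoteOnly() mirrors foolscap's fire-and-forget variant: it
intentionally returns None, and the addBoth(lambda ignore: None) swallows
results and failures alike (the trailing pass is a no-op).
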
@@ -150,83 +168,22 @@ class FakeStorageServer:
         return fireEventually(answer)
 
 
-# our "FakeClient" has just enough functionality of the real Client to let
-# the tests run.
-
-class FakeClient:
-    mutable_file_node_class = FastMutableFileNode
-
-    def __init__(self, num_peers=10):
-        self._storage = FakeStorage()
-        self._num_peers = num_peers
-        self._peerids = [tagged_hash("peerid", "%d" % i)[:20]
-                         for i in range(self._num_peers)]
-        self._connections = dict([(peerid, FakeStorageServer(peerid,
-                                                             self._storage))
-                                  for peerid in self._peerids])
-        self.nodeid = "fakenodeid"
-
-    def log(self, msg, **kw):
-        return log.msg(msg, **kw)
-
-    def get_renewal_secret(self):
-        return "I hereby permit you to renew my files"
-    def get_cancel_secret(self):
-        return "I hereby permit you to cancel my leases"
-
-    def create_mutable_file(self, contents=""):
-        n = self.mutable_file_node_class(self)
-        d = n.create(contents)
-        d.addCallback(lambda res: n)
-        return d
-
-    def notify_retrieve(self, r):
-        pass
-    def notify_publish(self, p, size):
-        pass
-    def notify_mapupdate(self, u):
-        pass
-
-    def create_node_from_uri(self, u):
-        u = IURI(u)
-        assert IMutableFileURI.providedBy(u), u
-        res = self.mutable_file_node_class(self).init_from_uri(u)
-        return res
-
-    def get_permuted_peers(self, service_name, key):
-        """
-        @return: list of (peerid, connection,)
-        """
-        results = []
-        for (peerid, connection) in self._connections.items():
-            assert isinstance(peerid, str)
-            permuted = sha.new(key + peerid).digest()
-            results.append((permuted, peerid, connection))
-        results.sort()
-        results = [ (r[1],r[2]) for r in results]
-        return results
-
-    def upload(self, uploadable):
-        assert IUploadable.providedBy(uploadable)
-        d = uploadable.get_size()
-        d.addCallback(lambda length: uploadable.read(length))
-        #d.addCallback(self.create_mutable_file)
-        def _got_data(datav):
-            data = "".join(datav)
-            #newnode = FastMutableFileNode(self)
-            return uri.LiteralFileURI(data)
-        d.addCallback(_got_data)
-        return d
-
-
 def flip_bit(original, byte_offset):
     return (original[:byte_offset] +
             chr(ord(original[byte_offset]) ^ 0x01) +
             original[byte_offset+1:])
 
+def add_two(original, byte_offset):
+    # It isn't enough to simply flip the bit for the version number,
+    # because 1 is a valid version number. So we add two instead.
+    return (original[:byte_offset] +
+            chr(ord(original[byte_offset]) ^ 0x02) +
+            original[byte_offset+1:])
+
 def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
     # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
     # list of shnums to corrupt.
+    ds = []
     for peerid in s._peers:
         shares = s._peers[peerid]
         for shnum in shares:
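
For the version byte, add_two() XORs with 0x02 rather than flipping the low
bit, so both valid version numbers map to invalid ones (0 becomes 2, 1
becomes 3); flip_bit() would merely turn one valid version into the other.
A quick illustrative check (Python 2):

    >>> [ord(add_two(chr(v), 0)) for v in (0, 1)]
    [2, 3]
    >>> [ord(flip_bit(chr(v), 0)) for v in (0, 1)]
    [1, 0]

The corrupt() rewrite continues in the next hunk: share offsets are now
fetched through MDMFSlotReadProxy.get_verinfo(), which is asynchronous, so
corrupt() returns a Deferred instead of mutating the shares synchronously.
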
@@ -234,44 +191,233 @@ def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
                 and shnum not in shnums_to_corrupt):
                 continue
             data = shares[shnum]
-            (version,
-             seqnum,
-             root_hash,
-             IV,
-             k, N, segsize, datalen,
-             o) = unpack_header(data)
-            if isinstance(offset, tuple):
-                offset1, offset2 = offset
-            else:
-                offset1 = offset
-                offset2 = 0
-            if offset1 == "pubkey":
-                real_offset = 107
-            elif offset1 in o:
-                real_offset = o[offset1]
-            else:
-                real_offset = offset1
-            real_offset = int(real_offset) + offset2 + offset_offset
-            assert isinstance(real_offset, int), offset
-            shares[shnum] = flip_bit(data, real_offset)
-    return res
+            # We're feeding the reader all of the share data, so it
+            # won't need to use the rref that we didn't provide, nor the
+            # storage index that we didn't provide. We do this because
+            # the reader will work for both MDMF and SDMF.
+            reader = MDMFSlotReadProxy(None, None, shnum, data)
+            # We need to get the offsets for the next part.
+            d = reader.get_verinfo()
+            def _do_corruption(verinfo, data, shnum, shares):
+                (seqnum,
+                 root_hash,
+                 IV,
+                 segsize,
+                 datalen,
+                 k, n, prefix, o) = verinfo
+                if isinstance(offset, tuple):
+                    offset1, offset2 = offset
+                else:
+                    offset1 = offset
+                    offset2 = 0
+                if offset1 == "pubkey" and IV:
+                    real_offset = 107
+                elif offset1 in o:
+                    real_offset = o[offset1]
+                else:
+                    real_offset = offset1
+                real_offset = int(real_offset) + offset2 + offset_offset
+                assert isinstance(real_offset, int), offset
+                if offset1 == 0: # verbyte
+                    f = add_two
+                else:
+                    f = flip_bit
+                shares[shnum] = f(data, real_offset)
+            d.addCallback(_do_corruption, data, shnum, shares)
+            ds.append(d)
+    dl = defer.DeferredList(ds)
+    dl.addCallback(lambda ignored: res)
+    return dl
+
+def make_storagebroker(s=None, num_peers=10):
+    if not s:
+        s = FakeStorage()
+    peerids = [tagged_hash("peerid", "%d" % i)[:20]
+               for i in range(num_peers)]
+    storage_broker = StorageFarmBroker(None, True)
+    for peerid in peerids:
+        fss = FakeStorageServer(peerid, s)
+        ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
+               "permutation-seed-base32": base32.b2a(peerid) }
+        storage_broker.test_add_rref(peerid, fss, ann)
+    return storage_broker
+
+def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
+    storage_broker = make_storagebroker(s, num_peers)
+    sh = client.SecretHolder("lease secret", "convergence secret")
+    keygen = client.KeyGenerator()
+    if keysize:
+        keygen.set_default_keysize(keysize)
+    nodemaker = NodeMaker(storage_broker, sh, None,
+                          None, None,
+                          {"k": 3, "n": 10}, SDMF_VERSION, keygen)
+    return nodemaker
 
 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
+    # this used to be in Publish, but we removed the limit. Some of
+    # these tests test whether the new code correctly allows files
+    # larger than the limit.
+    OLD_MAX_SEGMENT_SIZE = 3500000
     def setUp(self):
-        self.client = FakeClient()
+        self._storage = s = FakeStorage()
+        self.nodemaker = make_nodemaker(s)
 
     def test_create(self):
-        d = self.client.create_mutable_file()
+        d = self.nodemaker.create_mutable_file()
         def _created(n):
-            self.failUnless(isinstance(n, FastMutableFileNode))
-            peer0 = self.client._peerids[0]
-            shnums = self.client._storage._peers[peer0].keys()
+            self.failUnless(isinstance(n, MutableFileNode))
+            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
+            sb = self.nodemaker.storage_broker
+            peer0 = sorted(sb.get_all_serverids())[0]
+            shnums = self._storage._peers[peer0].keys()
             self.failUnlessEqual(len(shnums), 1)
         d.addCallback(_created)
         return d
 
+
+    def test_create_mdmf(self):
+        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
+        def _created(n):
+            self.failUnless(isinstance(n, MutableFileNode))
+            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
+            sb = self.nodemaker.storage_broker
+            peer0 = sorted(sb.get_all_serverids())[0]
+            shnums = self._storage._peers[peer0].keys()
+            self.failUnlessEqual(len(shnums), 1)
+        d.addCallback(_created)
+        return d
+
+    def test_single_share(self):
+        # Make sure that we tolerate publishing a single share.
+        self.nodemaker.default_encoding_parameters['k'] = 1
+        self.nodemaker.default_encoding_parameters['happy'] = 1
+        self.nodemaker.default_encoding_parameters['n'] = 1
+        d = defer.succeed(None)
+        for v in (SDMF_VERSION, MDMF_VERSION):
+            d.addCallback(lambda ignored, v=v:
+                self.nodemaker.create_mutable_file(version=v))
+            def _created(n):
+                self.failUnless(isinstance(n, MutableFileNode))
+                self._node = n
+                return n
+            d.addCallback(_created)
+            d.addCallback(lambda n:
+                n.overwrite(MutableData("Contents" * 50000)))
+            d.addCallback(lambda ignored:
+                self._node.download_best_version())
+            d.addCallback(lambda contents:
+                self.failUnlessEqual(contents, "Contents" * 50000))
+        return d
+
+    def test_max_shares(self):
+        self.nodemaker.default_encoding_parameters['n'] = 255
+        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
+        def _created(n):
+            self.failUnless(isinstance(n, MutableFileNode))
+            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
+            sb = self.nodemaker.storage_broker
+            num_shares = sum([len(self._storage._peers[x].keys()) for x \
+                              in sb.get_all_serverids()])
+            self.failUnlessEqual(num_shares, 255)
+            self._node = n
+            return n
+        d.addCallback(_created)
+        # Now we upload some contents
+        d.addCallback(lambda n:
+            n.overwrite(MutableData("contents" * 50000)))
+        # ...then download contents
+        d.addCallback(lambda ignored:
+            self._node.download_best_version())
+        # ...and check to make sure everything went okay.
+        d.addCallback(lambda contents:
+            self.failUnlessEqual("contents" * 50000, contents))
+        return d
+
+    def test_max_shares_mdmf(self):
+        # Test how files behave when there are 255 shares.
+        self.nodemaker.default_encoding_parameters['n'] = 255
+        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
+        def _created(n):
+            self.failUnless(isinstance(n, MutableFileNode))
+            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
+            sb = self.nodemaker.storage_broker
+            num_shares = sum([len(self._storage._peers[x].keys()) for x \
+                              in sb.get_all_serverids()])
+            self.failUnlessEqual(num_shares, 255)
+            self._node = n
+            return n
+        d.addCallback(_created)
+        d.addCallback(lambda n:
+            n.overwrite(MutableData("contents" * 50000)))
+        d.addCallback(lambda ignored:
+            self._node.download_best_version())
+        d.addCallback(lambda contents:
+            self.failUnlessEqual(contents, "contents" * 50000))
+        return d
+
+    def test_mdmf_filenode_cap(self):
+        # Test that an MDMF filenode, once created, returns an MDMF URI.
+        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
+        def _created(n):
+            self.failUnless(isinstance(n, MutableFileNode))
+            cap = n.get_cap()
+            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
+            rcap = n.get_readcap()
+            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
+            vcap = n.get_verify_cap()
+            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
+        d.addCallback(_created)
+        return d
+
+
+    def test_create_from_mdmf_writecap(self):
+        # Test that the nodemaker is capable of creating an MDMF
+        # filenode given an MDMF cap.
+        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
+        def _created(n):
+            self.failUnless(isinstance(n, MutableFileNode))
+            s = n.get_uri()
+            self.failUnless(s.startswith("URI:MDMF"))
+            n2 = self.nodemaker.create_from_cap(s)
+            self.failUnless(isinstance(n2, MutableFileNode))
+            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
+            self.failUnlessEqual(n.get_uri(), n2.get_uri())
+        d.addCallback(_created)
+        return d
+
+
+    def test_create_from_mdmf_readcap(self):
+        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
+        def _created(n):
+            self.failUnless(isinstance(n, MutableFileNode))
+            s = n.get_readonly_uri()
+            n2 = self.nodemaker.create_from_cap(s)
+            self.failUnless(isinstance(n2, MutableFileNode))
+
+            # Check that it's a readonly node
+            self.failUnless(n2.is_readonly())
+        d.addCallback(_created)
+        return d
+
+
+    def test_internal_version_from_cap(self):
+        # MutableFileNodes and MutableFileVersions have an internal
+        # switch that tells them whether they're dealing with an SDMF or
+        # MDMF mutable file when they start doing stuff. We want to make
+        # sure that this is set appropriately given an MDMF cap.
+        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
+        def _created(n):
+            self.uri = n.get_uri()
+            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
+
+            n2 = self.nodemaker.create_from_cap(self.uri)
+            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
+        d.addCallback(_created)
+        return d
+
+
     def test_serialize(self):
-        n = MutableFileNode(self.client)
+        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
         calls = []
         def _callback(*args, **kwargs):
             self.failUnlessEqual(args, (4,) )
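
With FakeClient gone, the tests build a real NodeMaker over a fake grid
through the make_storagebroker()/make_nodemaker() helpers above, exercising
the same creation path as production code. The typical pattern, as used in
setUp() and test_create():

    s = FakeStorage()                  # in-RAM share store
    nodemaker = make_nodemaker(s)      # fake grid of 10 servers, 3-of-10
    d = nodemaker.create_mutable_file(MutableData("initial contents"))
    d.addCallback(lambda n: n.download_best_version())
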
@@ -292,24 +438,25 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
         return d
 
     def test_upload_and_download(self):
-        d = self.client.create_mutable_file()
+        d = self.nodemaker.create_mutable_file()
         def _created(n):
             d = defer.succeed(None)
             d.addCallback(lambda res: n.get_servermap(MODE_READ))
             d.addCallback(lambda smap: smap.dump(StringIO()))
             d.addCallback(lambda sio:
                           self.failUnless("3-of-10" in sio.getvalue()))
-            d.addCallback(lambda res: n.overwrite("contents 1"))
+            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
             d.addCallback(lambda res: self.failUnlessIdentical(res, None))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
-            d.addCallback(lambda res: n.overwrite("contents 2"))
+            d.addCallback(lambda res: n.get_size_of_best_version())
+            d.addCallback(lambda size:
+                          self.failUnlessEqual(size, len("contents 1")))
+            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
-            d.addCallback(lambda res: n.download(download.Data()))
-            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
-            d.addCallback(lambda smap: n.upload("contents 3", smap))
+            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
             d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
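
Bare strings are no longer accepted by the mutable write APIs:
create_mutable_file(), overwrite(), and upload() now take uploadable
objects, normally MutableData wrapping a string. Judging from the import
list, MutableFileHandle plays the same role for file-like objects (the last
line below is an assumption based on that import, not something this diff
shows):

    n.overwrite(MutableData("contents 2"))
    n.upload(MutableData("contents 3"), servermap)
    n.upload(MutableFileHandle(open("localdata", "rb")), servermap)
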
@@ -321,7 +468,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
             # mapupdate-to-retrieve data caching (i.e. make the shares larger
             # than the default readsize, which is 2000 bytes). A 15kB file
             # will have 5kB shares.
-            d.addCallback(lambda res: n.overwrite("large size file" * 1000))
+            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res:
                           self.failUnlessEqual(res, "large size file" * 1000))
@@ -329,113 +476,323 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
         d.addCallback(_created)
         return d
 
+
+    def test_upload_and_download_mdmf(self):
+        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
+        def _created(n):
+            d = defer.succeed(None)
+            d.addCallback(lambda ignored:
+                n.get_servermap(MODE_READ))
+            def _then(servermap):
+                dumped = servermap.dump(StringIO())
+                self.failUnlessIn("3-of-10", dumped.getvalue())
+            d.addCallback(_then)
+            # Now overwrite the contents with some new contents. We want
+            # to make them big enough to force the file to be uploaded
+            # in more than one segment.
+            big_contents = "contents1" * 100000 # about 900 KiB
+            big_contents_uploadable = MutableData(big_contents)
+            d.addCallback(lambda ignored:
+                n.overwrite(big_contents_uploadable))
+            d.addCallback(lambda ignored:
+                n.download_best_version())
+            d.addCallback(lambda data:
+                self.failUnlessEqual(data, big_contents))
+            # Overwrite the contents again with some new contents. As
+            # before, they need to be big enough to force multiple
+            # segments, so that we make the downloader deal with
+            # multiple segments.
+            bigger_contents = "contents2" * 1000000 # about 9MiB
+            bigger_contents_uploadable = MutableData(bigger_contents)
+            d.addCallback(lambda ignored:
+                n.overwrite(bigger_contents_uploadable))
+            d.addCallback(lambda ignored:
+                n.download_best_version())
+            d.addCallback(lambda data:
+                self.failUnlessEqual(data, bigger_contents))
+            return d
+        d.addCallback(_created)
+        return d
+
+
+    def test_retrieve_producer_mdmf(self):
+        # We should make sure that the retriever is able to pause and stop
+        # correctly.
+        data = "contents1" * 100000
+        d = self.nodemaker.create_mutable_file(MutableData(data),
+                                               version=MDMF_VERSION)
+        d.addCallback(lambda node: node.get_best_mutable_version())
+        d.addCallback(self._test_retrieve_producer, "MDMF", data)
+        return d
+
+    # note: SDMF has only one big segment, so we can't use the usual
+    # after-the-first-write() trick to pause or stop the download.
+    # Disabled until we find a better approach.
+    def OFF_test_retrieve_producer_sdmf(self):
+        data = "contents1" * 100000
+        d = self.nodemaker.create_mutable_file(MutableData(data),
+                                               version=SDMF_VERSION)
+        d.addCallback(lambda node: node.get_best_mutable_version())
+        d.addCallback(self._test_retrieve_producer, "SDMF", data)
+        return d
+
+    def _test_retrieve_producer(self, version, kind, data):
+        # Now we'll retrieve it into a pausing consumer.
+        c = PausingConsumer()
+        d = version.read(c)
+        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
+
+        c2 = PausingAndStoppingConsumer()
+        d.addCallback(lambda ign:
+                      self.shouldFail(DownloadStopped, kind+"_pause_stop",
+                                      "our Consumer called stopProducing()",
+                                      version.read, c2))
+
+        c3 = StoppingConsumer()
+        d.addCallback(lambda ign:
+                      self.shouldFail(DownloadStopped, kind+"_stop",
+                                      "our Consumer called stopProducing()",
+                                      version.read, c3))
+
+        c4 = ImmediatelyStoppingConsumer()
+        d.addCallback(lambda ign:
+                      self.shouldFail(DownloadStopped, kind+"_stop_imm",
+                                      "our Consumer called stopProducing()",
+                                      version.read, c4))
+
+        def _then(ign):
+            c5 = MemoryConsumer()
+            d1 = version.read(c5)
+            c5.producer.stopProducing()
+            return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
+                                   "our Consumer called stopProducing()",
+                                   lambda: d1)
+        d.addCallback(_then)
+        return d
+
+    def test_download_from_mdmf_cap(self):
+        # We should be able to download an MDMF file given its cap
+        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
+        def _created(node):
+            self.uri = node.get_uri()
+            # also confirm that the cap has no extension fields
+            pieces = self.uri.split(":")
+            self.failUnlessEqual(len(pieces), 4)
+
+            return node.overwrite(MutableData("contents1" * 100000))
+        def _then(ignored):
+            node = self.nodemaker.create_from_cap(self.uri)
+            return node.download_best_version()
+        def _downloaded(data):
+            self.failUnlessEqual(data, "contents1" * 100000)
+        d.addCallback(_created)
+        d.addCallback(_then)
+        d.addCallback(_downloaded)
+        return d
+
+
+    def test_mdmf_write_count(self):
+        # Publishing an MDMF file should only cause one write for each
+        # share that is to be published. Otherwise, we introduce
+        # undesirable semantics that are a regression from SDMF
+        upload = MutableData("MDMF" * 100000) # about 400 KiB
+        d = self.nodemaker.create_mutable_file(upload,
+                                               version=MDMF_VERSION)
+        def _check_server_write_counts(ignored):
+            sb = self.nodemaker.storage_broker
+            for server in sb.servers.itervalues():
+                self.failUnlessEqual(server.get_rref().queries, 1)
+        d.addCallback(_check_server_write_counts)
+        return d
+
+
     def test_create_with_initial_contents(self):
-        d = self.client.create_mutable_file("contents 1")
+        upload1 = MutableData("contents 1")
+        d = self.nodemaker.create_mutable_file(upload1)
         def _created(n):
             d = n.download_best_version()
             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
-            d.addCallback(lambda res: n.overwrite("contents 2"))
+            upload2 = MutableData("contents 2")
+            d.addCallback(lambda res: n.overwrite(upload2))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
             return d
         d.addCallback(_created)
         return d
 
+
+    def test_create_mdmf_with_initial_contents(self):
+        initial_contents = "foobarbaz" * 131072 # 900KiB
+        initial_contents_uploadable = MutableData(initial_contents)
+        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
+                                               version=MDMF_VERSION)
+        def _created(n):
+            d = n.download_best_version()
+            d.addCallback(lambda data:
+                self.failUnlessEqual(data, initial_contents))
+            uploadable2 = MutableData(initial_contents + "foobarbaz")
+            d.addCallback(lambda ignored:
+                n.overwrite(uploadable2))
+            d.addCallback(lambda ignored:
+                n.download_best_version())
+            d.addCallback(lambda data:
+                self.failUnlessEqual(data, initial_contents +
+                                           "foobarbaz"))
+            return d
+        d.addCallback(_created)
+        return d
+
+    def test_create_with_initial_contents_function(self):
+        data = "initial contents"
+        def _make_contents(n):
+            self.failUnless(isinstance(n, MutableFileNode))
+            key = n.get_writekey()
+            self.failUnless(isinstance(key, str), key)
+            self.failUnlessEqual(len(key), 16) # AES key size
+            return MutableData(data)
+        d = self.nodemaker.create_mutable_file(_make_contents)
+        def _created(n):
+            return n.download_best_version()
+        d.addCallback(_created)
+        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
+        return d
+
+
+    def test_create_mdmf_with_initial_contents_function(self):
+        data = "initial contents" * 100000
+        def _make_contents(n):
+            self.failUnless(isinstance(n, MutableFileNode))
+            key = n.get_writekey()
+            self.failUnless(isinstance(key, str), key)
+            self.failUnlessEqual(len(key), 16)
+            return MutableData(data)
+        d = self.nodemaker.create_mutable_file(_make_contents,
+                                               version=MDMF_VERSION)
+        d.addCallback(lambda n:
+            n.download_best_version())
+        d.addCallback(lambda data2:
+            self.failUnlessEqual(data2, data))
+        return d
+
+
     def test_create_with_too_large_contents(self):
-        BIG = "a" * (Publish.MAX_SEGMENT_SIZE+1)
-        d = self.shouldFail(FileTooLargeError, "too_large",
-                            "SDMF is limited to one segment, and %d > %d" %
-                            (len(BIG), Publish.MAX_SEGMENT_SIZE),
-                            self.client.create_mutable_file, BIG)
-        d.addCallback(lambda res: self.client.create_mutable_file("small"))
+        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
+        BIG_uploadable = MutableData(BIG)
+        d = self.nodemaker.create_mutable_file(BIG_uploadable)
         def _created(n):
-            return self.shouldFail(FileTooLargeError, "too_large_2",
-                                   "SDMF is limited to one segment, and %d > %d" %
-                                   (len(BIG), Publish.MAX_SEGMENT_SIZE),
-                                   n.overwrite, BIG)
+            other_BIG_uploadable = MutableData(BIG)
+            d = n.overwrite(other_BIG_uploadable)
+            return d
         d.addCallback(_created)
         return d
 
-    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum):
+    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
         d = n.get_servermap(MODE_READ)
         d.addCallback(lambda servermap: servermap.best_recoverable_version())
         d.addCallback(lambda verinfo:
-                      self.failUnlessEqual(verinfo[0], expected_seqnum))
+                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
         return d
 
     def test_modify(self):
-        def _modifier(old_contents):
-            return old_contents + "line2"
-        def _non_modifier(old_contents):
+        def _modifier(old_contents, servermap, first_time):
+            new_contents = old_contents + "line2"
+            return new_contents
+        def _non_modifier(old_contents, servermap, first_time):
             return old_contents
-        def _none_modifier(old_contents):
+        def _none_modifier(old_contents, servermap, first_time):
             return None
-        def _error_modifier(old_contents):
+        def _error_modifier(old_contents, servermap, first_time):
             raise ValueError("oops")
-        def _toobig_modifier(old_contents):
-            return "b" * (Publish.MAX_SEGMENT_SIZE+1)
+        def _toobig_modifier(old_contents, servermap, first_time):
+            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
+            return new_content
         calls = []
-        def _ucw_error_modifier(old_contents):
+        def _ucw_error_modifier(old_contents, servermap, first_time):
             # simulate an UncoordinatedWriteError once
             calls.append(1)
             if len(calls) <= 1:
                 raise UncoordinatedWriteError("simulated")
-            return old_contents + "line3"
+            new_contents = old_contents + "line3"
+            return new_contents
+        def _ucw_error_non_modifier(old_contents, servermap, first_time):
+            # simulate an UncoordinatedWriteError once, and don't actually
+            # modify the contents on subsequent invocations
+            calls.append(1)
+            if len(calls) <= 1:
+                raise UncoordinatedWriteError("simulated")
+            return old_contents
 
-        d = self.client.create_mutable_file("line1")
+        initial_contents = "line1"
+        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
         def _created(n):
             d = n.modify(_modifier)
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
 
             d.addCallback(lambda res: n.modify(_non_modifier))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
 
             d.addCallback(lambda res: n.modify(_none_modifier))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
 
             d.addCallback(lambda res:
                           self.shouldFail(ValueError, "error_modifier", None,
                                           n.modify, _error_modifier))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
+
 
-            d.addCallback(lambda res:
-                          self.shouldFail(FileTooLargeError, "toobig_modifier",
-                                          "SDMF is limited to one segment",
-                                          n.modify, _toobig_modifier))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
 
             d.addCallback(lambda res: n.modify(_ucw_error_modifier))
             d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res,
                                                            "line1line2line3"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
 
+            def _reset_ucw_error_modifier(res):
+                calls[:] = []
+                return res
+            d.addCallback(_reset_ucw_error_modifier)
+
+            # in practice, this n.modify call should publish twice: the first
+            # one gets a UCWE, the second does not. But our test jig (in
+            # which the modifier raises the UCWE) skips over the first one,
+            # so in this test there will be only one publish, and the seqnum
+            # will only be one larger than the previous test, not two (i.e. 4
+            # instead of 5).
+            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
+            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
+            d.addCallback(lambda res: n.download_best_version())
+            d.addCallback(lambda res: self.failUnlessEqual(res,
+                                                           "line1line2line3"))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
+            d.addCallback(lambda res: n.modify(_toobig_modifier))
             return d
         d.addCallback(_created)
         return d
 
+
     def test_modify_backoffer(self):
-        def _modifier(old_contents):
+        def _modifier(old_contents, servermap, first_time):
             return old_contents + "line2"
         calls = []
-        def _ucw_error_modifier(old_contents):
+        def _ucw_error_modifier(old_contents, servermap, first_time):
             # simulate an UncoordinatedWriteError once
             calls.append(1)
             if len(calls) <= 1:
                 raise UncoordinatedWriteError("simulated")
             return old_contents + "line3"
-        def _always_ucw_error_modifier(old_contents):
+        def _always_ucw_error_modifier(old_contents, servermap, first_time):
             raise UncoordinatedWriteError("simulated")
         def _backoff_stopper(node, f):
             return f
@@ -449,12 +806,12 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
         giveuper._delay = 0.1
         giveuper.factor = 1
 
-        d = self.client.create_mutable_file("line1")
+        d = self.nodemaker.create_mutable_file(MutableData("line1"))
         def _created(n):
             d = n.modify(_modifier)
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
 
             d.addCallback(lambda res:
                           self.shouldFail(UncoordinatedWriteError,
@@ -463,7 +820,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
                                           _backoff_stopper))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
 
             def _reset_ucw_error_modifier(res):
                 calls[:] = []
@@ -474,7 +831,7 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res,
                                                            "line1line2line3"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
 
             d.addCallback(lambda res:
                           self.shouldFail(UncoordinatedWriteError,
@@ -484,32 +841,30 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res,
                                                            "line1line2line3"))
-            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3))
+            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
 
             return d
         d.addCallback(_created)
         return d
 
     def test_upload_and_download_full_size_keys(self):
-        self.client.mutable_file_node_class = MutableFileNode
-        d = self.client.create_mutable_file()
+        self.nodemaker.key_generator = client.KeyGenerator()
+        d = self.nodemaker.create_mutable_file()
         def _created(n):
             d = defer.succeed(None)
             d.addCallback(lambda res: n.get_servermap(MODE_READ))
             d.addCallback(lambda smap: smap.dump(StringIO()))
             d.addCallback(lambda sio:
                           self.failUnless("3-of-10" in sio.getvalue()))
-            d.addCallback(lambda res: n.overwrite("contents 1"))
+            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
             d.addCallback(lambda res: self.failUnlessIdentical(res, None))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
-            d.addCallback(lambda res: n.overwrite("contents 2"))
+            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
-            d.addCallback(lambda res: n.download(download.Data()))
-            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
-            d.addCallback(lambda smap: n.upload("contents 3", smap))
+            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
             d.addCallback(lambda res: n.download_best_version())
             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
             d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
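
test_upload_and_download_full_size_keys swaps in a stock
client.KeyGenerator, discarding the small TEST_RSA_KEY_SIZE default that
make_nodemaker() installs for speed, so this one test exercises full-size
RSA keys end to end.
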
@@ -522,121 +877,167 @@ class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
         return d
 
 
-class MakeShares(unittest.TestCase):
-    def test_encrypt(self):
-        c = FakeClient()
-        fn = FastMutableFileNode(c)
-        CONTENTS = "some initial contents"
-        d = fn.create(CONTENTS)
-        def _created(res):
-            p = Publish(fn, None)
-            p.salt = "SALT" * 4
-            p.readkey = "\x00" * 16
-            p.newdata = CONTENTS
-            p.required_shares = 3
-            p.total_shares = 10
-            p.setup_encoding_parameters()
-            return p._encrypt_and_encode()
+    def test_size_after_servermap_update(self):
+        # a mutable file node should have something to say about how big
+        # it is after a servermap update is performed, since this tells
+        # us how large the best version of that mutable file is.
+        d = self.nodemaker.create_mutable_file()
+        def _created(n):
+            self.n = n
+            return n.get_servermap(MODE_READ)
         d.addCallback(_created)
-        def _done(shares_and_shareids):
-            (shares, share_ids) = shares_and_shareids
-            self.failUnlessEqual(len(shares), 10)
-            for sh in shares:
-                self.failUnless(isinstance(sh, str))
-                self.failUnlessEqual(len(sh), 7)
-            self.failUnlessEqual(len(share_ids), 10)
-        d.addCallback(_done)
-        return d
-
-    def test_generate(self):
-        c = FakeClient()
-        fn = FastMutableFileNode(c)
-        CONTENTS = "some initial contents"
-        d = fn.create(CONTENTS)
-        def _created(res):
-            p = Publish(fn, None)
-            self._p = p
-            p.newdata = CONTENTS
-            p.required_shares = 3
-            p.total_shares = 10
-            p.setup_encoding_parameters()
-            p._new_seqnum = 3
-            p.salt = "SALT" * 4
-            # make some fake shares
-            shares_and_ids = ( ["%07d" % i for i in range(10)], range(10) )
-            p._privkey = fn.get_privkey()
-            p._encprivkey = fn.get_encprivkey()
-            p._pubkey = fn.get_pubkey()
-            return p._generate_shares(shares_and_ids)
+        d.addCallback(lambda ignored:
+            self.failUnlessEqual(self.n.get_size(), 0))
+        d.addCallback(lambda ignored:
+            self.n.overwrite(MutableData("foobarbaz")))
+        d.addCallback(lambda ignored:
+            self.failUnlessEqual(self.n.get_size(), 9))
+        d.addCallback(lambda ignored:
+            self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
         d.addCallback(_created)
-        def _generated(res):
-            p = self._p
-            final_shares = p.shares
-            root_hash = p.root_hash
-            self.failUnlessEqual(len(root_hash), 32)
-            self.failUnless(isinstance(final_shares, dict))
-            self.failUnlessEqual(len(final_shares), 10)
-            self.failUnlessEqual(sorted(final_shares.keys()), range(10))
-            for i,sh in final_shares.items():
-                self.failUnless(isinstance(sh, str))
-                # feed the share through the unpacker as a sanity-check
-                pieces = unpack_share(sh)
-                (u_seqnum, u_root_hash, IV, k, N, segsize, datalen,
-                 pubkey, signature, share_hash_chain, block_hash_tree,
-                 share_data, enc_privkey) = pieces
-                self.failUnlessEqual(u_seqnum, 3)
-                self.failUnlessEqual(u_root_hash, root_hash)
-                self.failUnlessEqual(k, 3)
-                self.failUnlessEqual(N, 10)
-                self.failUnlessEqual(segsize, 21)
-                self.failUnlessEqual(datalen, len(CONTENTS))
-                self.failUnlessEqual(pubkey, p._pubkey.serialize())
-                sig_material = struct.pack(">BQ32s16s BBQQ",
-                                           0, p._new_seqnum, root_hash, IV,
-                                           k, N, segsize, datalen)
-                self.failUnless(p._pubkey.verify(sig_material, signature))
-                #self.failUnlessEqual(signature, p._privkey.sign(sig_material))
-                self.failUnless(isinstance(share_hash_chain, dict))
-                self.failUnlessEqual(len(share_hash_chain), 4) # ln2(10)++
-                for shnum,share_hash in share_hash_chain.items():
-                    self.failUnless(isinstance(shnum, int))
-                    self.failUnless(isinstance(share_hash, str))
-                    self.failUnlessEqual(len(share_hash), 32)
-                self.failUnless(isinstance(block_hash_tree, list))
-                self.failUnlessEqual(len(block_hash_tree), 1) # very small tree
-                self.failUnlessEqual(IV, "SALT"*4)
-                self.failUnlessEqual(len(share_data), len("%07d" % 1))
-                self.failUnlessEqual(enc_privkey, fn.get_encprivkey())
-        d.addCallback(_generated)
-        return d
-
-    # TODO: when we publish to 20 peers, we should get one share per peer on 10
-    # when we publish to 3 peers, we should get either 3 or 4 shares per peer
-    # when we publish to zero peers, we should get a NotEnoughSharesError
-
-class Servermap(unittest.TestCase):
-    def setUp(self):
+        d.addCallback(lambda ignored:
+            self.failUnlessEqual(self.n.get_size(), 9))
+        return d
+
+
+class PublishMixin:
+    def publish_one(self):
         # publish a file and create shares, which can then be manipulated
         # later.
-        num_peers = 20
-        self._client = FakeClient(num_peers)
-        self._storage = self._client._storage
-        d = self._client.create_mutable_file("New contents go here")
+        self.CONTENTS = "New contents go here" * 1000
+        self.uploadable = MutableData(self.CONTENTS)
+        self._storage = FakeStorage()
+        self._nodemaker = make_nodemaker(self._storage)
+        self._storage_broker = self._nodemaker.storage_broker
+        d = self._nodemaker.create_mutable_file(self.uploadable)
+        def _created(node):
+            self._fn = node
+            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
+        d.addCallback(_created)
+        return d
+
+    def publish_mdmf(self):
+        # like publish_one, except that the result is guaranteed to be
+        # an MDMF file.
+        # self.CONTENTS should have more than one segment.
+        self.CONTENTS = "This is an MDMF file" * 100000
+        self.uploadable = MutableData(self.CONTENTS)
+        self._storage = FakeStorage()
+        self._nodemaker = make_nodemaker(self._storage)
+        self._storage_broker = self._nodemaker.storage_broker
+        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
+        def _created(node):
+            self._fn = node
+            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
+        d.addCallback(_created)
+        return d
+
+
+    def publish_sdmf(self):
+        # like publish_one, except that the result is guaranteed to be
+        # an SDMF file
+        self.CONTENTS = "This is an SDMF file" * 1000
+        self.uploadable = MutableData(self.CONTENTS)
+        self._storage = FakeStorage()
+        self._nodemaker = make_nodemaker(self._storage)
+        self._storage_broker = self._nodemaker.storage_broker
+        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
+        def _created(node):
+            self._fn = node
+            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
+        d.addCallback(_created)
+        return d
+
+    def publish_empty_sdmf(self):
+        self.CONTENTS = ""
+        self.uploadable = MutableData(self.CONTENTS)
+        self._storage = FakeStorage()
+        self._nodemaker = make_nodemaker(self._storage, keysize=None)
+        self._storage_broker = self._nodemaker.storage_broker
+        d = self._nodemaker.create_mutable_file(self.uploadable,
+                                                version=SDMF_VERSION)
+        def _created(node):
+            self._fn = node
+            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
+        d.addCallback(_created)
+        return d
+
+
+    def publish_multiple(self, version=0):
+        self.CONTENTS = ["Contents 0",
+                         "Contents 1",
+                         "Contents 2",
+                         "Contents 3a",
+                         "Contents 3b"]
+        self.uploadables = [MutableData(d) for d in self.CONTENTS]
+        self._copied_shares = {}
+        self._storage = FakeStorage()
+        self._nodemaker = make_nodemaker(self._storage)
+        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
         def _created(node):
             self._fn = node
-            self._fn2 = self._client.create_node_from_uri(node.get_uri())
+            # now create multiple versions of the same file, and accumulate
+            # their shares, so we can mix and match them later.
+            d = defer.succeed(None)
+            d.addCallback(self._copy_shares, 0)
+            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
+            d.addCallback(self._copy_shares, 1)
+            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
+            d.addCallback(self._copy_shares, 2)
+            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
+            d.addCallback(self._copy_shares, 3)
+            # now we replace all the shares with version s3, and upload a new
+            # version to get s4b.
+            rollback = dict([(i,2) for i in range(10)])
+            d.addCallback(lambda res: self._set_versions(rollback))
+            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
+            d.addCallback(self._copy_shares, 4)
+            # we leave the storage in state 4
+            return d
         d.addCallback(_created)
         return d
 
-    def make_servermap(self, mode=MODE_CHECK, fn=None):
+
+    def _copy_shares(self, ignored, index):
+        shares = self._storage._peers
+        # we need a deep copy
+        new_shares = {}
+        for peerid in shares:
+            new_shares[peerid] = {}
+            for shnum in shares[peerid]:
+                new_shares[peerid][shnum] = shares[peerid][shnum]
+        self._copied_shares[index] = new_shares
+
+    def _set_versions(self, versionmap):
+        # versionmap maps shnums to which version (0,1,2,3,4) we want the
+        # share to be at. Any shnum which is left out of the map will stay at
+        # its current version.
+        shares = self._storage._peers
+        oldshares = self._copied_shares
+        for peerid in shares:
+            for shnum in shares[peerid]:
+                if shnum in versionmap:
+                    index = versionmap[shnum]
+                    shares[peerid][shnum] = oldshares[index][peerid][shnum]
+
+class Servermap(unittest.TestCase, PublishMixin):
+    def setUp(self):
+        return self.publish_one()
+
+    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
+                       update_range=None):
         if fn is None:
             fn = self._fn
-        smu = ServermapUpdater(fn, ServerMap(), mode)
+        if sb is None:
+            sb = self._storage_broker
+        smu = ServermapUpdater(fn, sb, Monitor(),
+                               ServerMap(), mode, update_range=update_range)
         d = smu.update()
         return d
 
     def update_servermap(self, oldmap, mode=MODE_CHECK):
-        smu = ServermapUpdater(self._fn, oldmap, mode)
+        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
+                               oldmap, mode)
         d = smu.update()
         return d
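
ServermapUpdater now takes the storage broker and a Monitor explicitly
(plus an optional update_range) instead of digging them out of a client
object. Constructing one by hand, mirroring make_servermap() above:

    smu = ServermapUpdater(filenode, storage_broker, Monitor(),
                           ServerMap(), MODE_CHECK)
    d = smu.update()
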
 
@@ -648,10 +1049,10 @@ class Servermap(unittest.TestCase):
         self.failUnlessEqual(sm.recoverable_versions(), set([best]))
         self.failUnlessEqual(len(sm.shares_available()), 1)
         self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
-        shnum, peerids = sm.make_sharemap().items()[0]
-        peerid = list(peerids)[0]
-        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
-        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
+        shnum, servers = sm.make_sharemap().items()[0]
+        server = list(servers)[0]
+        self.failUnlessEqual(sm.version_on_server(server, shnum), best)
+        self.failUnlessEqual(sm.version_on_server(server, 666), None)
         return sm
 
     def test_basic(self):
@@ -664,7 +1065,7 @@ class Servermap(unittest.TestCase):
         d.addCallback(lambda res: ms(mode=MODE_WRITE))
         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
         d.addCallback(lambda res: ms(mode=MODE_READ))
-        # this more stops at k+epsilon, and epsilon=k, so 6 shares
+        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
         # this mode stops at 'k' shares
@@ -696,18 +1097,19 @@ class Servermap(unittest.TestCase):
         # create a new file, which is large enough to knock the privkey out
         # of the early part of the file
         LARGE = "These are Larger contents" * 200 # about 5KB
-        d.addCallback(lambda res: self._client.create_mutable_file(LARGE))
+        LARGE_uploadable = MutableData(LARGE)
+        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
         def _created(large_fn):
-            large_fn2 = self._client.create_node_from_uri(large_fn.get_uri())
+            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
             return self.make_servermap(MODE_WRITE, large_fn2)
         d.addCallback(_created)
         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
         return d
 
+
     def test_mark_bad(self):
         d = defer.succeed(None)
         ms = self.make_servermap
-        us = self.update_servermap
 
         d.addCallback(lambda res: ms(mode=MODE_READ))
         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
@@ -720,10 +1122,10 @@ class Servermap(unittest.TestCase):
             # mark the first 5 shares as corrupt, then update the servermap.
             # The map should not have the marked shares in it any more, and
             # new shares should be found to replace the missing ones.
-            for (shnum, peerid, timestamp) in shares:
+            for (shnum, server, timestamp) in shares:
                 if shnum < 5:
-                    self._corrupted.add( (peerid, shnum) )
-                    sm.mark_bad_share(peerid, shnum, "")
+                    self._corrupted.add( (server, shnum) )
+                    sm.mark_bad_share(server, shnum, "")
             return self.update_servermap(sm, MODE_WRITE)
         d.addCallback(_made_map)
         def _check_map(sm):
@@ -731,10 +1133,10 @@ class Servermap(unittest.TestCase):
             v = sm.best_recoverable_version()
             vm = sm.make_versionmap()
             shares = list(vm[v])
-            for (peerid, shnum) in self._corrupted:
-                peer_shares = sm.shares_on_peer(peerid)
-                self.failIf(shnum in peer_shares,
-                            "%d was in %s" % (shnum, peer_shares))
+            for (server, shnum) in self._corrupted:
+                server_shares = sm.debug_shares_on_server(server)
+                self.failIf(shnum in server_shares,
+                            "%d was in %s" % (shnum, server_shares))
             self.failUnlessEqual(len(shares), 5)
         d.addCallback(_check_map)
         return d
@@ -747,10 +1149,10 @@ class Servermap(unittest.TestCase):
         self.failUnlessEqual(len(sm.shares_available()), 0)
 
     def test_no_shares(self):
-        self._client._storage._peers = {} # delete all shares
+        self._storage._peers = {} # delete all shares
         ms = self.make_servermap
         d = defer.succeed(None)
 
         d.addCallback(lambda res: ms(mode=MODE_CHECK))
         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
 
@@ -775,7 +1177,7 @@ class Servermap(unittest.TestCase):
         return sm
 
     def test_not_quite_enough_shares(self):
-        s = self._client._storage
+        s = self._storage
         ms = self.make_servermap
         num_shares = len(s._peers)
         for peerid in s._peers:
@@ -802,25 +1204,60 @@ class Servermap(unittest.TestCase):
         return d
 
 
+    def test_servermapupdater_finds_mdmf_files(self):
+        # publish an MDMF file, then make sure that when we run the
+        # ServermapUpdater, the file is reported to have one recoverable
+        # version.
+        d = defer.succeed(None)
+        d.addCallback(lambda ignored:
+            self.publish_mdmf())
+        d.addCallback(lambda ignored:
+            self.make_servermap(mode=MODE_CHECK))
+        # Calling make_servermap also updates the servermap in the mode
+        # that we specify, so we just need to see what it says.
+        def _check_servermap(sm):
+            self.failUnlessEqual(len(sm.recoverable_versions()), 1)
+        d.addCallback(_check_servermap)
+        return d
 
-class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
-    def setUp(self):
-        # publish a file and create shares, which can then be manipulated
-        # later.
-        self.CONTENTS = "New contents go here" * 1000
-        num_peers = 20
-        self._client = FakeClient(num_peers)
-        self._storage = self._client._storage
-        d = self._client.create_mutable_file(self.CONTENTS)
-        def _created(node):
-            self._fn = node
-        d.addCallback(_created)
+
+    def test_fetch_update(self):
+        d = defer.succeed(None)
+        d.addCallback(lambda ignored:
+            self.publish_mdmf())
+        d.addCallback(lambda ignored:
+            self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
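+        # a MODE_WRITE mapupdate with an update_range should also stash the
+        # data needed for an in-place update in servermap.update_data, with
+        # one entry per share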
+        def _check_servermap(sm):
+            # 10 shares
+            self.failUnlessEqual(len(sm.update_data), 10)
+            # one version
+            for data in sm.update_data.itervalues():
+                self.failUnlessEqual(len(data), 1)
+        d.addCallback(_check_servermap)
         return d
 
-    def make_servermap(self, mode=MODE_READ, oldmap=None):
+
+    def test_servermapupdater_finds_sdmf_files(self):
+        d = defer.succeed(None)
+        d.addCallback(lambda ignored:
+            self.publish_sdmf())
+        d.addCallback(lambda ignored:
+            self.make_servermap(mode=MODE_CHECK))
+        d.addCallback(lambda servermap:
+            self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
+        return d
+
+
+class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
+    def setUp(self):
+        return self.publish_one()
+
+    def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
         if oldmap is None:
             oldmap = ServerMap()
-        smu = ServermapUpdater(self._fn, oldmap, mode)
+        if sb is None:
+            sb = self._storage_broker
+        smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
         d = smu.update()
         return d
 
@@ -849,8 +1286,12 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
     def do_download(self, servermap, version=None):
         if version is None:
             version = servermap.best_recoverable_version()
-        r = Retrieve(self._fn, servermap, version)
-        return r.download()
+        r = Retrieve(self._fn, self._storage_broker, servermap, version)
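+        # drain the retrieved plaintext into a MemoryConsumer, then join its
+        # accumulated chunks back into a single string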
+        c = consumer.MemoryConsumer()
+        d = r.download(consumer=c)
+        d.addCallback(lambda mc: "".join(mc.chunks))
+        return d
+
 
     def test_basic(self):
         d = self.make_servermap()
@@ -886,30 +1327,46 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
                 shares.clear()
             d1 = self.shouldFail(NotEnoughSharesError,
                                  "test_all_shares_vanished",
-                                 "ran out of peers",
+                                 "ran out of servers",
                                  self.do_download, servermap)
             return d1
         d.addCallback(_remove_shares)
         return d
 
+    def test_all_but_two_shares_vanished_updated_servermap(self):
+        # tests error reporting for ticket #1742
+        d = self.make_servermap()
+        def _remove_shares(servermap):
+            self._version = servermap.best_recoverable_version()
+            for shares in self._storage._peers.values()[2:]:
+                shares.clear()
+            return self.make_servermap(servermap)
+        d.addCallback(_remove_shares)
+        def _check(updated_servermap):
+            d1 = self.shouldFail(NotEnoughSharesError,
+                                 "test_all_but_two_shares_vanished_updated_servermap",
+                                 "ran out of servers",
+                                 self.do_download, updated_servermap, version=self._version)
+            return d1
+        d.addCallback(_check)
+        return d
+
     def test_no_servers(self):
-        c2 = FakeClient(0)
-        self._fn._client = c2
+        sb2 = make_storagebroker(num_peers=0)
         # if there are no servers, then a MODE_READ servermap should come
         # back empty
-        d = self.make_servermap()
+        d = self.make_servermap(sb=sb2)
         def _check_servermap(servermap):
             self.failUnlessEqual(servermap.best_recoverable_version(), None)
             self.failIf(servermap.recoverable_versions())
             self.failIf(servermap.unrecoverable_versions())
-            self.failIf(servermap.all_peers())
+            self.failIf(servermap.all_servers())
         d.addCallback(_check_servermap)
         return d
-    test_no_servers.timeout = 15
 
     def test_no_servers_download(self):
-        c2 = FakeClient(0)
-        self._fn._client = c2
+        sb2 = make_storagebroker(num_peers=0)
+        self._fn._storage_broker = sb2
         d = self.shouldFail(UnrecoverableFileError,
                             "test_no_servers_download",
                             "no recoverable versions",
@@ -919,18 +1376,20 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
             # anybody should not prevent a subsequent download from working.
             # This isn't quite the webapi-driven test that #463 wants, but it
             # should be close enough.
-            self._fn._client = self._client
+            self._fn._storage_broker = self._storage_broker
             return self._fn.download_best_version()
         def _retrieved(new_contents):
             self.failUnlessEqual(new_contents, self.CONTENTS)
         d.addCallback(_restore)
         d.addCallback(_retrieved)
         return d
-    test_no_servers_download.timeout = 15
+
 
     def _test_corrupt_all(self, offset, substring,
-                          should_succeed=False, corrupt_early=True,
-                          failure_checker=None):
+                          should_succeed=False,
+                          corrupt_early=True,
+                          failure_checker=None,
+                          fetch_privkey=False):
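+        # fetch_privkey is passed through to download_version(), telling the
+        # retrieve to re-fetch the encrypted private key when the node does
+        # not already hold it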
         d = defer.succeed(None)
         if corrupt_early:
             d.addCallback(corrupt, self._storage, offset)
@@ -943,18 +1402,21 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
                 # no recoverable versions == not succeeding. The problem
                 # should be noted in the servermap's list of problems.
                 if substring:
-                    allproblems = [str(f) for f in servermap.problems]
-                    self.failUnless(substring in "".join(allproblems))
+                    allproblems = [str(f) for f in servermap.get_problems()]
+                    self.failUnlessIn(substring, "".join(allproblems))
                 return servermap
             if should_succeed:
-                d1 = self._fn.download_version(servermap, ver)
+                d1 = self._fn.download_version(servermap, ver,
+                                               fetch_privkey)
                 d1.addCallback(lambda new_contents:
                                self.failUnlessEqual(new_contents, self.CONTENTS))
             else:
                 d1 = self.shouldFail(NotEnoughSharesError,
                                      "_corrupt_all(offset=%s)" % (offset,),
                                      substring,
-                                     self._fn.download_version, servermap, ver)
+                                     self._fn.download_version, servermap,
+                                                                ver,
+                                                                fetch_privkey)
             if failure_checker:
                 d1.addCallback(failure_checker)
             d1.addCallback(lambda res: servermap)
@@ -963,14 +1425,14 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         return d
 
     def test_corrupt_all_verbyte(self):
-        # when the version byte is not 0, we hit an assertion error in
-        # unpack_share().
-        d = self._test_corrupt_all(0, "AssertionError")
+        # when the version byte is not 0 or 1, we hit an UnknownVersionError
+        # in unpack_share().
+        d = self._test_corrupt_all(0, "UnknownVersionError")
         def _check_servermap(servermap):
             # and the dump should mention the problems
             s = StringIO()
             dump = servermap.dump(s).getvalue()
-            self.failUnless("10 PROBLEMS" in dump, dump)
+            self.failUnless("30 PROBLEMS" in dump, dump)
         d.addCallback(_check_servermap)
         return d
 
@@ -1040,23 +1502,31 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
 
 
-    def test_corrupt_all_seqnum_late(self):
-        # corrupting the seqnum between mapupdate and retrieve should result
-        # in NotEnoughSharesError, since each share will look invalid
-        def _check(res):
-            f = res[0]
-            self.failUnless(f.check(NotEnoughSharesError))
-            self.failUnless("someone wrote to the data since we read the servermap" in str(f))
-        return self._test_corrupt_all(1, "ran out of peers",
+    def test_corrupt_all_encprivkey_late(self):
+        # this should work for the same reason as above, but we corrupt
+        # after the servermap update to exercise the error handling
+        # code.
+        # We need to remove the privkey from the node, or the retrieve
+        # process won't know to update it.
+        self._fn._privkey = None
+        return self._test_corrupt_all("enc_privkey",
+                                      None, # this shouldn't fail
+                                      should_succeed=True,
                                       corrupt_early=False,
-                                      failure_checker=_check)
+                                      fetch_privkey=True)
+
 
-    def test_corrupt_all_block_hash_tree_late(self):
+    # disabled until Retrieve checks the checkstring on each block fetch. I
+    # didn't just use a .todo because the failing-but-ignored test emits
+    # about 30kB of noise.
+    def OFF_test_corrupt_all_seqnum_late(self):
+        # corrupting the seqnum between mapupdate and retrieve should result
+        # in NotEnoughSharesError, since each share will look invalid
         def _check(res):
             f = res[0]
             self.failUnless(f.check(NotEnoughSharesError))
-        return self._test_corrupt_all("block_hash_tree",
-                                      "block hash tree failure",
+            self.failUnless("uncoordinated write" in str(f))
+        return self._test_corrupt_all(1, "ran out of servers",
                                       corrupt_early=False,
                                       failure_checker=_check)
 
@@ -1085,132 +1555,260 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin):
                       shnums_to_corrupt=range(0, N-k))
         d.addCallback(lambda res: self.make_servermap())
         def _do_retrieve(servermap):
-            self.failUnless(servermap.problems)
+            self.failUnless(servermap.get_problems())
             self.failUnless("pubkey doesn't match fingerprint"
-                            in str(servermap.problems[0]))
+                            in str(servermap.get_problems()[0]))
             ver = servermap.best_recoverable_version()
-            r = Retrieve(self._fn, servermap, ver)
-            return r.download()
+            r = Retrieve(self._fn, self._storage_broker, servermap, ver)
+            c = consumer.MemoryConsumer()
+            return r.download(c)
         d.addCallback(_do_retrieve)
+        d.addCallback(lambda mc: "".join(mc.chunks))
         d.addCallback(lambda new_contents:
                       self.failUnlessEqual(new_contents, self.CONTENTS))
         return d
 
-    def test_corrupt_some(self):
-        # corrupt the data of first five shares (so the servermap thinks
-        # they're good but retrieve marks them as bad), so that the
-        # MODE_READ set of 6 will be insufficient, forcing node.download to
-        # retry with more servers.
-        corrupt(None, self._storage, "share_data", range(5))
-        d = self.make_servermap()
+
+    def _test_corrupt_some(self, offset, mdmf=False):
+        if mdmf:
+            d = self.publish_mdmf()
+        else:
+            d = defer.succeed(None)
+        d.addCallback(lambda ignored:
+            corrupt(None, self._storage, offset, range(5)))
+        d.addCallback(lambda ignored:
+            self.make_servermap())
         def _do_retrieve(servermap):
             ver = servermap.best_recoverable_version()
             self.failUnless(ver)
             return self._fn.download_best_version()
         d.addCallback(_do_retrieve)
         d.addCallback(lambda new_contents:
-                      self.failUnlessEqual(new_contents, self.CONTENTS))
+            self.failUnlessEqual(new_contents, self.CONTENTS))
         return d
 
+
+    def test_corrupt_some(self):
+        # corrupt the data of first five shares (so the servermap thinks
+        # they're good but retrieve marks them as bad), so that the
+        # MODE_READ set of 6 will be insufficient, forcing node.download to
+        # retry with more servers.
+        return self._test_corrupt_some("share_data")
+
+
     def test_download_fails(self):
-        corrupt(None, self._storage, "signature")
-        d = self.shouldFail(UnrecoverableFileError, "test_download_anyway",
+        d = corrupt(None, self._storage, "signature")
+        d.addCallback(lambda ignored:
+            self.shouldFail(UnrecoverableFileError, "test_download_anyway",
                             "no recoverable versions",
-                            self._fn.download_best_version)
+                            self._fn.download_best_version))
+        return d
+
+
+    def test_corrupt_mdmf_block_hash_tree(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            self._test_corrupt_all(("block_hash_tree", 12 * 32),
+                                   "block hash tree failure",
+                                   corrupt_early=True,
+                                   should_succeed=False))
+        return d
+
+
+    def test_corrupt_mdmf_block_hash_tree_late(self):
+        # Note - there is no SDMF counterpart to this test, as the SDMF
+        # files are guaranteed to have exactly one block, and therefore
+        # the block hash tree fits within the initial read (#1240).
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            self._test_corrupt_all(("block_hash_tree", 12 * 32),
+                                   "block hash tree failure",
+                                   corrupt_early=False,
+                                   should_succeed=False))
+        return d
+
+
+    def test_corrupt_mdmf_share_data(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            # TODO: Find out what the block size is and corrupt a
+            # specific block, rather than just guessing.
+            self._test_corrupt_all(("share_data", 12 * 40),
+                                    "block hash tree failure",
+                                    corrupt_early=True,
+                                    should_succeed=False))
         return d
 
 
+    def test_corrupt_some_mdmf(self):
+        return self._test_corrupt_some(("share_data", 12 * 40),
+                                       mdmf=True)
+
+
 class CheckerMixin:
     def check_good(self, r, where):
-        self.failUnless(r.healthy, where)
-        self.failIf(r.problems, where)
+        self.failUnless(r.is_healthy(), where)
         return r
 
     def check_bad(self, r, where):
-        self.failIf(r.healthy, where)
+        self.failIf(r.is_healthy(), where)
         return r
 
     def check_expected_failure(self, r, expected_exception, substring, where):
-        for (peerid, storage_index, shnum, f) in r.problems:
+        for (peerid, storage_index, shnum, f) in r.get_share_problems():
             if f.check(expected_exception):
                 self.failUnless(substring in str(f),
                                 "%s: substring '%s' not in '%s'" %
                                 (where, substring, str(f)))
                 return
         self.fail("%s: didn't see expected exception %s in problems %s" %
-                  (where, expected_exception, r.problems))
+                  (where, expected_exception, r.get_share_problems()))
 
 
-class Checker(unittest.TestCase, CheckerMixin):
+class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
     def setUp(self):
-        # publish a file and create shares, which can then be manipulated
-        # later.
-        self.CONTENTS = "New contents go here" * 1000
-        num_peers = 20
-        self._client = FakeClient(num_peers)
-        self._storage = self._client._storage
-        d = self._client.create_mutable_file(self.CONTENTS)
-        def _created(node):
-            self._fn = node
-        d.addCallback(_created)
-        return d
+        return self.publish_one()
 
 
     def test_check_good(self):
-        d = self._fn.check()
+        d = self._fn.check(Monitor())
         d.addCallback(self.check_good, "test_check_good")
         return d
 
+    def test_check_mdmf_good(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor()))
+        d.addCallback(self.check_good, "test_check_mdmf_good")
+        return d
+
     def test_check_no_shares(self):
         for shares in self._storage._peers.values():
             shares.clear()
-        d = self._fn.check()
+        d = self._fn.check(Monitor())
         d.addCallback(self.check_bad, "test_check_no_shares")
         return d
 
+    def test_check_mdmf_no_shares(self):
+        d = self.publish_mdmf()
+        def _then(ignored):
+            for shares in self._storage._peers.values():
+                shares.clear()
+        d.addCallback(_then)
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor()))
+        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
+        return d
+
     def test_check_not_enough_shares(self):
         for shares in self._storage._peers.values():
             for shnum in shares.keys():
                 if shnum > 0:
                     del shares[shnum]
-        d = self._fn.check()
+        d = self._fn.check(Monitor())
         d.addCallback(self.check_bad, "test_check_not_enough_shares")
         return d
 
+    def test_check_mdmf_not_enough_shares(self):
+        d = self.publish_mdmf()
+        def _then(ignored):
+            for shares in self._storage._peers.values():
+                for shnum in shares.keys():
+                    if shnum > 0:
+                        del shares[shnum]
+        d.addCallback(_then)
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor()))
+        d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
+        return d
+
+
     def test_check_all_bad_sig(self):
-        corrupt(None, self._storage, 1) # bad sig
-        d = self._fn.check()
+        d = corrupt(None, self._storage, 1) # bad sig
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor()))
         d.addCallback(self.check_bad, "test_check_all_bad_sig")
         return d
 
+    def test_check_mdmf_all_bad_sig(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            corrupt(None, self._storage, 1))
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor()))
+        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
+        return d
+
+    def test_verify_mdmf_all_bad_sharedata(self):
+        d = self.publish_mdmf()
+        # On 8 of the shares, corrupt the beginning of the share data.
+        # The signature check during the servermap update won't catch this.
+        d.addCallback(lambda ignored:
+            corrupt(None, self._storage, "share_data", range(8)))
+        # On 2 of the shares, corrupt the end of the share data.
+        # The signature check during the servermap update won't catch
+        # this either, and the retrieval process will have to process
+        # all of the segments before it notices.
+        d.addCallback(lambda ignored:
+            # the block hash tree comes right after the share data, so if we
+            # corrupt a little before the block hash tree, we'll corrupt in the
+            # last block of each share.
+            corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
+        # The verifier should flag the file as unhealthy, and should
+        # list all 10 shares as bad.
+        d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
+        def _check_num_bad(r):
+            self.failIf(r.is_recoverable())
+            smap = r.get_servermap()
+            self.failUnlessEqual(len(smap.get_bad_shares()), 10)
+        d.addCallback(_check_num_bad)
+        return d
+
     def test_check_all_bad_blocks(self):
-        corrupt(None, self._storage, "share_data", [9]) # bad blocks
+        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
         # the Checker won't notice this; it doesn't look at actual data
-        d = self._fn.check()
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor()))
         d.addCallback(self.check_good, "test_check_all_bad_blocks")
         return d
 
+
+    def test_check_mdmf_all_bad_blocks(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            corrupt(None, self._storage, "share_data"))
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor()))
+        d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
+        return d
+
     def test_verify_good(self):
-        d = self._fn.check(verify=True)
+        d = self._fn.check(Monitor(), verify=True)
         d.addCallback(self.check_good, "test_verify_good")
         return d
 
     def test_verify_all_bad_sig(self):
-        corrupt(None, self._storage, 1) # bad sig
-        d = self._fn.check(verify=True)
+        d = corrupt(None, self._storage, 1) # bad sig
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
         d.addCallback(self.check_bad, "test_verify_all_bad_sig")
         return d
 
     def test_verify_one_bad_sig(self):
-        corrupt(None, self._storage, 1, [9]) # bad sig
-        d = self._fn.check(verify=True)
+        d = corrupt(None, self._storage, 1, [9]) # bad sig
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
         d.addCallback(self.check_bad, "test_verify_one_bad_sig")
         return d
 
     def test_verify_one_bad_block(self):
-        corrupt(None, self._storage, "share_data", [9]) # bad blocks
+        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
         # the Verifier *will* notice this, since it examines every byte
-        d = self._fn.check(verify=True)
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
         d.addCallback(self.check_bad, "test_verify_one_bad_block")
         d.addCallback(self.check_expected_failure,
                       CorruptShareError, "block hash tree failure",
@@ -1218,8 +1816,9 @@ class Checker(unittest.TestCase, CheckerMixin):
         return d
 
     def test_verify_one_bad_sharehash(self):
-        corrupt(None, self._storage, "share_hash_chain", [9], 5)
-        d = self._fn.check(verify=True)
+        d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
         d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
         d.addCallback(self.check_expected_failure,
                       CorruptShareError, "corrupt hashes",
@@ -1227,8 +1826,9 @@ class Checker(unittest.TestCase, CheckerMixin):
         return d
 
     def test_verify_one_bad_encprivkey(self):
-        corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
-        d = self._fn.check(verify=True)
+        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
         d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
         d.addCallback(self.check_expected_failure,
                       CorruptShareError, "invalid privkey",
@@ -1236,28 +1836,76 @@ class Checker(unittest.TestCase, CheckerMixin):
         return d
 
     def test_verify_one_bad_encprivkey_uncheckable(self):
-        corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
+        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
         readonly_fn = self._fn.get_readonly()
         # a read-only node has no way to validate the privkey
-        d = readonly_fn.check(verify=True)
+        d.addCallback(lambda ignored:
+            readonly_fn.check(Monitor(), verify=True))
         d.addCallback(self.check_good,
                       "test_verify_one_bad_encprivkey_uncheckable")
         return d
 
-class Repair(unittest.TestCase, CheckerMixin):
-    def setUp(self):
-        # publish a file and create shares, which can then be manipulated
-        # later.
-        self.CONTENTS = "New contents go here" * 1000
-        num_peers = 20
-        self._client = FakeClient(num_peers)
-        self._storage = self._client._storage
-        d = self._client.create_mutable_file(self.CONTENTS)
-        def _created(node):
-            self._fn = node
-        d.addCallback(_created)
+
+    def test_verify_mdmf_good(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
+        d.addCallback(self.check_good, "test_verify_mdmf_good")
+        return d
+
+
+    def test_verify_mdmf_one_bad_block(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            corrupt(None, self._storage, "share_data", [1]))
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
+        # We should find one bad block here
+        d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
+        d.addCallback(self.check_expected_failure,
+                      CorruptShareError, "block hash tree failure",
+                      "test_verify_mdmf_one_bad_block")
         return d
 
+
+    def test_verify_mdmf_bad_encprivkey(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            corrupt(None, self._storage, "enc_privkey", [0]))
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
+        d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
+        d.addCallback(self.check_expected_failure,
+                      CorruptShareError, "privkey",
+                      "test_verify_mdmf_bad_encprivkey")
+        return d
+
+
+    def test_verify_mdmf_bad_sig(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            corrupt(None, self._storage, 1, [1]))
+        d.addCallback(lambda ignored:
+            self._fn.check(Monitor(), verify=True))
+        d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
+        return d
+
+
+    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
+        d = self.publish_mdmf()
+        d.addCallback(lambda ignored:
+            corrupt(None, self._storage, "enc_privkey", [1]))
+        d.addCallback(lambda ignored:
+            self._fn.get_readonly())
+        d.addCallback(lambda fn:
+            fn.check(Monitor(), verify=True))
+        d.addCallback(self.check_good,
+                      "test_verify_mdmf_bad_encprivkey_uncheckable")
+        return d
+
+
+class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
+
     def get_shares(self, s):
         all_shares = {} # maps (peerid, shnum) to share data
         for peerid in s._peers:
@@ -1267,16 +1915,28 @@ class Repair(unittest.TestCase, CheckerMixin):
                 all_shares[ (peerid, shnum) ] = data
         return all_shares
 
-    def test_repair_nop(self):
-        initial_shares = self.get_shares(self._storage)
+    def copy_shares(self, ignored=None):
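+        # snapshot the current share set so later steps can compare against it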
+        self.old_shares.append(self.get_shares(self._storage))
 
-        d = self._fn.check()
-        d.addCallback(self._fn.repair)
+    def test_repair_nop(self):
+        self.old_shares = []
+        d = self.publish_one()
+        d.addCallback(self.copy_shares)
+        d.addCallback(lambda res: self._fn.check(Monitor()))
+        d.addCallback(lambda check_results: self._fn.repair(check_results))
         def _check_results(rres):
             self.failUnless(IRepairResults.providedBy(rres))
+            self.failUnless(rres.get_successful())
             # TODO: examine results
 
-            new_shares = self.get_shares(self._storage)
+            self.copy_shares()
+
+            initial_shares = self.old_shares[0]
+            new_shares = self.old_shares[1]
+            # TODO: this really shouldn't change anything. When we implement
+            # a "minimal-bandwidth" repairer", change this test to assert:
+            #self.failUnlessEqual(new_shares, initial_shares)
+
             # all shares should be in the same place as before
             self.failUnlessEqual(set(initial_shares.keys()),
                                  set(new_shares.keys()))
@@ -1304,27 +1964,237 @@ class Repair(unittest.TestCase, CheckerMixin):
         d.addCallback(_check_results)
         return d
 
+    def failIfSharesChanged(self, ignored=None):
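+        # compare the two most recent snapshots taken by copy_shares()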
+        old_shares = self.old_shares[-2]
+        current_shares = self.old_shares[-1]
+        self.failUnlessEqual(old_shares, current_shares)
+
+
+    def _test_whether_repairable(self, publisher, nshares, expected_result):
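+        """
+        Publish a file, delete every share numbered >= nshares from all
+        servers, then check() and repair(), asserting that is_recoverable()
+        and the repair outcome both match expected_result.
+        """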
+        d = publisher()
+        def _delete_some_shares(ign):
+            shares = self._storage._peers
+            for peerid in shares:
+                for shnum in list(shares[peerid]):
+                    if shnum >= nshares:
+                        del shares[peerid][shnum]
+        d.addCallback(_delete_some_shares)
+        d.addCallback(lambda ign: self._fn.check(Monitor()))
+        def _check(cr):
+            self.failIf(cr.is_healthy())
+            self.failUnlessEqual(cr.is_recoverable(), expected_result)
+            return cr
+        d.addCallback(_check)
+        d.addCallback(lambda check_results: self._fn.repair(check_results))
+        d.addCallback(lambda crr: self.failUnlessEqual(crr.get_successful(), expected_result))
+        return d
+
+    def test_unrepairable_0shares(self):
+        return self._test_whether_repairable(self.publish_one, 0, False)
+
+    def test_mdmf_unrepairable_0shares(self):
+        return self._test_whether_repairable(self.publish_mdmf, 0, False)
+
+    def test_unrepairable_1share(self):
+        return self._test_whether_repairable(self.publish_one, 1, False)
+
+    def test_mdmf_unrepairable_1share(self):
+        return self._test_whether_repairable(self.publish_mdmf, 1, False)
+
+    def test_repairable_5shares(self):
+        return self._test_whether_repairable(self.publish_one, 5, True)
+
+    def test_mdmf_repairable_5shares(self):
+        return self._test_whether_repairable(self.publish_mdmf, 5, True)
+
+    def _test_whether_checkandrepairable(self, publisher, nshares, expected_result):
+        """
+        Like the _test_whether_repairable tests, but invokes check_and_repair
+        instead of check followed by repair.
+        """
+        d = publisher()
+        def _delete_some_shares(ign):
+            shares = self._storage._peers
+            for peerid in shares:
+                for shnum in list(shares[peerid]):
+                    if shnum >= nshares:
+                        del shares[peerid][shnum]
+        d.addCallback(_delete_some_shares)
+        d.addCallback(lambda ign: self._fn.check_and_repair(Monitor()))
+        d.addCallback(lambda crr: self.failUnlessEqual(crr.get_repair_successful(), expected_result))
+        return d
+
+    def test_unrepairable_0shares_checkandrepair(self):
+        return self._test_whether_checkandrepairable(self.publish_one, 0, False)
+
+    def test_mdmf_unrepairable_0shares_checkandrepair(self):
+        return self._test_whether_checkandrepairable(self.publish_mdmf, 0, False)
+
+    def test_unrepairable_1share_checkandrepair(self):
+        return self._test_whether_checkandrepairable(self.publish_one, 1, False)
+
+    def test_mdmf_unrepairable_1share_checkandrepair(self):
+        return self._test_whether_checkandrepairable(self.publish_mdmf, 1, False)
+
+    def test_repairable_5shares_checkandrepair(self):
+        return self._test_whether_checkandrepairable(self.publish_one, 5, True)
+
+    def test_mdmf_repairable_5shares_checkandrepair(self):
+        return self._test_whether_checkandrepairable(self.publish_mdmf, 5, True)
+
+
+    def test_merge(self):
+        self.old_shares = []
+        d = self.publish_multiple()
+        # repair will refuse to merge multiple highest seqnums unless you
+        # pass force=True
+        d.addCallback(lambda res:
+                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
+                                          1:4,3:4,5:4,7:4,9:4}))
+        d.addCallback(self.copy_shares)
+        d.addCallback(lambda res: self._fn.check(Monitor()))
+        def _try_repair(check_results):
+            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
+            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
+                                 self._fn.repair, check_results)
+            d2.addCallback(self.copy_shares)
+            d2.addCallback(self.failIfSharesChanged)
+            d2.addCallback(lambda res: check_results)
+            return d2
+        d.addCallback(_try_repair)
+        d.addCallback(lambda check_results:
+                      self._fn.repair(check_results, force=True))
+        # this should give us 10 shares of the highest roothash
+        def _check_repair_results(rres):
+            self.failUnless(rres.get_successful())
+            pass # TODO
+        d.addCallback(_check_repair_results)
+        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
+        def _check_smap(smap):
+            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
+            self.failIf(smap.unrecoverable_versions())
+            # now, which should have won?
+            roothash_s4a = self.get_roothash_for(3)
+            roothash_s4b = self.get_roothash_for(4)
+            if roothash_s4b > roothash_s4a:
+                expected_contents = self.CONTENTS[4]
+            else:
+                expected_contents = self.CONTENTS[3]
+            new_versionid = smap.best_recoverable_version()
+            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
+            d2 = self._fn.download_version(smap, new_versionid)
+            d2.addCallback(self.failUnlessEqual, expected_contents)
+            return d2
+        d.addCallback(_check_smap)
+        return d
+
+    def test_non_merge(self):
+        self.old_shares = []
+        d = self.publish_multiple()
+        # repair should not refuse a repair that doesn't need to merge. In
+        # this case, we combine v2 with v3. The repair should ignore v2 and
+        # copy v3 into a new v5.
+        d.addCallback(lambda res:
+                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
+                                          1:3,3:3,5:3,7:3,9:3}))
+        d.addCallback(lambda res: self._fn.check(Monitor()))
+        d.addCallback(lambda check_results: self._fn.repair(check_results))
+        # this should give us 10 shares of v3
+        def _check_repair_results(rres):
+            self.failUnless(rres.get_successful())
+            pass # TODO
+        d.addCallback(_check_repair_results)
+        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
+        def _check_smap(smap):
+            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
+            self.failIf(smap.unrecoverable_versions())
+            # now, which should have won?
+            expected_contents = self.CONTENTS[3]
+            new_versionid = smap.best_recoverable_version()
+            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
+            d2 = self._fn.download_version(smap, new_versionid)
+            d2.addCallback(self.failUnlessEqual, expected_contents)
+            return d2
+        d.addCallback(_check_smap)
+        return d
+
+    def get_roothash_for(self, index):
+        # return the roothash for the first share we see in the saved set
+        shares = self._copied_shares[index]
+        for peerid in shares:
+            for shnum in shares[peerid]:
+                share = shares[peerid][shnum]
+                (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
+                          unpack_header(share)
+                return root_hash
+
+    def test_check_and_repair_readcap(self):
+        # we can't currently repair from a mutable readcap: #625
+        self.old_shares = []
+        d = self.publish_one()
+        d.addCallback(self.copy_shares)
+        def _get_readcap(res):
+            self._fn3 = self._fn.get_readonly()
+            # also delete some shares
+            for peerid,shares in self._storage._peers.items():
+                shares.pop(0, None)
+        d.addCallback(_get_readcap)
+        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
+        def _check_results(crr):
+            self.failUnless(ICheckAndRepairResults.providedBy(crr))
+            # we should detect that the file is unhealthy, but skip over
+            # mutable-readcap repairs until #625 is fixed
+            self.failIf(crr.get_pre_repair_results().is_healthy())
+            self.failIf(crr.get_repair_attempted())
+            self.failIf(crr.get_post_repair_results().is_healthy())
+        d.addCallback(_check_results)
+        return d
+
+    def test_repair_empty(self):
+        # bug 1689: delete one share of an empty mutable file, then repair.
+        # In the buggy version, the check that precedes the retrieve+publish
+        # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
+        # privkey that repair needs.
+        d = self.publish_empty_sdmf()
+        def _delete_one_share(ign):
+            shares = self._storage._peers
+            for peerid in shares:
+                for shnum in list(shares[peerid]):
+                    if shnum == 0:
+                        del shares[peerid][shnum]
+        d.addCallback(_delete_one_share)
+        d.addCallback(lambda ign: self._fn2.check(Monitor()))
+        d.addCallback(lambda check_results: self._fn2.repair(check_results))
+        def _check(crr):
+            self.failUnlessEqual(crr.get_successful(), True)
+        d.addCallback(_check)
+        return d
+
+class DevNullDictionary(dict):
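+    # a dict that silently discards every write, so lookups always miss;
+    # used below to disable the nodemaker's node cache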
+    def __setitem__(self, key, value):
+        return
 
 class MultipleEncodings(unittest.TestCase):
     def setUp(self):
         self.CONTENTS = "New contents go here"
-        num_peers = 20
-        self._client = FakeClient(num_peers)
-        self._storage = self._client._storage
-        d = self._client.create_mutable_file(self.CONTENTS)
+        self.uploadable = MutableData(self.CONTENTS)
+        self._storage = FakeStorage()
+        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
+        self._storage_broker = self._nodemaker.storage_broker
+        d = self._nodemaker.create_mutable_file(self.uploadable)
         def _created(node):
             self._fn = node
         d.addCallback(_created)
         return d
 
-    def _encode(self, k, n, data):
+    def _encode(self, k, n, data, version=SDMF_VERSION):
         # encode 'data' into a peerid->shares dict.
 
-        fn2 = FastMutableFileNode(self._client)
-        # init_from_uri populates _uri, _writekey, _readkey, _storage_index,
-        # and _fingerprint
         fn = self._fn
-        fn2.init_from_uri(fn.get_uri())
+        # disable the nodecache, since for these tests we explicitly need
+        # multiple nodes pointing at the same file
+        self._nodemaker._node_cache = DevNullDictionary()
+        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
         # then we copy over other fields that are normally fetched from the
         # existing shares
         fn2._pubkey = fn._pubkey
@@ -1334,10 +2204,11 @@ class MultipleEncodings(unittest.TestCase):
         fn2._required_shares = k
         fn2._total_shares = n
 
-        s = self._client._storage
+        s = self._storage
         s._peers = {} # clear existing storage
-        p2 = Publish(fn2, None)
-        d = p2.publish(data)
+        p2 = Publish(fn2, self._storage_broker, None)
+        uploadable = MutableData(data)
+        d = p2.publish(uploadable)
         def _published(res):
             shares = s._peers
             s._peers = {}
@@ -1348,7 +2219,8 @@ class MultipleEncodings(unittest.TestCase):
     def make_servermap(self, mode=MODE_READ, oldmap=None):
         if oldmap is None:
             oldmap = ServerMap()
-        smu = ServermapUpdater(self._fn, oldmap, mode)
+        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
+                               oldmap, mode)
         d = smu.update()
         return d
 
@@ -1357,14 +2229,13 @@ class MultipleEncodings(unittest.TestCase):
         # then mix up the shares, to make sure that download survives seeing
         # a variety of encodings. This is actually kind of tricky to set up.
 
-        contents1 = "Contents for encoding 1 (3-of-10) go here"
-        contents2 = "Contents for encoding 2 (4-of-9) go here"
-        contents3 = "Contents for encoding 3 (4-of-7) go here"
+        contents1 = "Contents for encoding 1 (3-of-10) go here"*1000
+        contents2 = "Contents for encoding 2 (4-of-9) go here"*1000
+        contents3 = "Contents for encoding 3 (4-of-7) go here"*1000
 
         # we make a retrieval object that doesn't know what encoding
         # parameters to use
-        fn3 = FastMutableFileNode(self._client)
-        fn3.init_from_uri(self._fn.get_uri())
+        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
 
         # now we upload a file through fn1, and grab its shares
         d = self._encode(3, 10, contents1)
@@ -1407,19 +2278,18 @@ class MultipleEncodings(unittest.TestCase):
             places = [2, 2, 3, 2, 1, 1, 1, 2]
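+            # places[shnum] picks which encoding (1, 2, or 3) supplies that
+            # share number; shnums beyond the end of the list get no share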
 
             sharemap = {}
+            sb = self._storage_broker
 
-            for i,peerid in enumerate(self._client._peerids):
-                peerid_s = shortnodeid_b2a(peerid)
+            for peerid in sorted(sb.get_all_serverids()):
                 for shnum in self._shares1.get(peerid, {}):
                     if shnum < len(places):
                         which = places[shnum]
                     else:
                         which = "x"
-                    self._client._storage._peers[peerid] = peers = {}
+                    self._storage._peers[peerid] = peers = {}
                     in_1 = shnum in self._shares1[peerid]
                     in_2 = shnum in self._shares2.get(peerid, {})
                     in_3 = shnum in self._shares3.get(peerid, {})
-                    #print peerid_s, shnum, which, in_1, in_2, in_3
                     if which == 1:
                         if in_1:
                             peers[shnum] = self._shares1[peerid][shnum]
@@ -1437,7 +2307,7 @@ class MultipleEncodings(unittest.TestCase):
             # now sort the sequence so that share 0 is returned first
             new_sequence = [sharemap[shnum]
                             for shnum in sorted(sharemap.keys())]
-            self._client._storage._sequence = new_sequence
+            self._storage._sequence = new_sequence
             log.msg("merge done")
         d.addCallback(_merge)
         d.addCallback(lambda res: fn3.download_best_version())
@@ -1447,62 +2317,11 @@ class MultipleEncodings(unittest.TestCase):
         d.addCallback(_retrieved)
         return d
 
-class MultipleVersions(unittest.TestCase, CheckerMixin):
-    def setUp(self):
-        self.CONTENTS = ["Contents 0",
-                         "Contents 1",
-                         "Contents 2",
-                         "Contents 3a",
-                         "Contents 3b"]
-        self._copied_shares = {}
-        num_peers = 20
-        self._client = FakeClient(num_peers)
-        self._storage = self._client._storage
-        d = self._client.create_mutable_file(self.CONTENTS[0]) # seqnum=1
-        def _created(node):
-            self._fn = node
-            # now create multiple versions of the same file, and accumulate
-            # their shares, so we can mix and match them later.
-            d = defer.succeed(None)
-            d.addCallback(self._copy_shares, 0)
-            d.addCallback(lambda res: node.overwrite(self.CONTENTS[1])) #s2
-            d.addCallback(self._copy_shares, 1)
-            d.addCallback(lambda res: node.overwrite(self.CONTENTS[2])) #s3
-            d.addCallback(self._copy_shares, 2)
-            d.addCallback(lambda res: node.overwrite(self.CONTENTS[3])) #s4a
-            d.addCallback(self._copy_shares, 3)
-            # now we replace all the shares with version s3, and upload a new
-            # version to get s4b.
-            rollback = dict([(i,2) for i in range(10)])
-            d.addCallback(lambda res: self._set_versions(rollback))
-            d.addCallback(lambda res: node.overwrite(self.CONTENTS[4])) #s4b
-            d.addCallback(self._copy_shares, 4)
-            # we leave the storage in state 4
-            return d
-        d.addCallback(_created)
-        return d
 
-    def _copy_shares(self, ignored, index):
-        shares = self._client._storage._peers
-        # we need a deep copy
-        new_shares = {}
-        for peerid in shares:
-            new_shares[peerid] = {}
-            for shnum in shares[peerid]:
-                new_shares[peerid][shnum] = shares[peerid][shnum]
-        self._copied_shares[index] = new_shares
+class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
 
-    def _set_versions(self, versionmap):
-        # versionmap maps shnums to which version (0,1,2,3,4) we want the
-        # share to be at. Any shnum which is left out of the map will stay at
-        # its current version.
-        shares = self._client._storage._peers
-        oldshares = self._copied_shares
-        for peerid in shares:
-            for shnum in shares[peerid]:
-                if shnum in versionmap:
-                    index = versionmap[shnum]
-                    shares[peerid][shnum] = oldshares[index][peerid][shnum]
+    def setUp(self):
+        return self.publish_multiple()
 
     def test_multiple_versions(self):
         # if we see a mix of versions in the grid, download_best_version
@@ -1511,7 +2330,7 @@ class MultipleVersions(unittest.TestCase, CheckerMixin):
         d = self._fn.download_best_version()
         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
         # and the checker should report problems
-        d.addCallback(lambda res: self._fn.check())
+        d.addCallback(lambda res: self._fn.check(Monitor()))
         d.addCallback(self.check_bad, "test_multiple_versions")
 
         # but if everything is at version 2, that's what we should download
@@ -1564,7 +2383,7 @@ class MultipleVersions(unittest.TestCase, CheckerMixin):
         target[0] = 3 # seqnum4
         self._set_versions(target)
 
-        def _modify(oldversion):
+        def _modify(oldversion, servermap, first_time):
             return oldversion + " modified"
         d = self._fn.modify(_modify)
         d.addCallback(lambda res: self._fn.download_best_version())
@@ -1580,88 +2399,6 @@ class MultipleVersions(unittest.TestCase, CheckerMixin):
         return d
 
 
-class Utils(unittest.TestCase):
-    def test_dict_of_sets(self):
-        ds = DictOfSets()
-        ds.add(1, "a")
-        ds.add(2, "b")
-        ds.add(2, "b")
-        ds.add(2, "c")
-        self.failUnlessEqual(ds[1], set(["a"]))
-        self.failUnlessEqual(ds[2], set(["b", "c"]))
-        ds.discard(3, "d") # should not raise an exception
-        ds.discard(2, "b")
-        self.failUnlessEqual(ds[2], set(["c"]))
-        ds.discard(2, "c")
-        self.failIf(2 in ds)
-
-    def _do_inside(self, c, x_start, x_length, y_start, y_length):
-        # we compare this against sets of integers
-        x = set(range(x_start, x_start+x_length))
-        y = set(range(y_start, y_start+y_length))
-        should_be_inside = x.issubset(y)
-        self.failUnlessEqual(should_be_inside, c._inside(x_start, x_length,
-                                                         y_start, y_length),
-                             str((x_start, x_length, y_start, y_length)))
-
-    def test_cache_inside(self):
-        c = ResponseCache()
-        x_start = 10
-        x_length = 5
-        for y_start in range(8, 17):
-            for y_length in range(8):
-                self._do_inside(c, x_start, x_length, y_start, y_length)
-
-    def _do_overlap(self, c, x_start, x_length, y_start, y_length):
-        # we compare this against sets of integers
-        x = set(range(x_start, x_start+x_length))
-        y = set(range(y_start, y_start+y_length))
-        overlap = bool(x.intersection(y))
-        self.failUnlessEqual(overlap, c._does_overlap(x_start, x_length,
-                                                      y_start, y_length),
-                             str((x_start, x_length, y_start, y_length)))
-
-    def test_cache_overlap(self):
-        c = ResponseCache()
-        x_start = 10
-        x_length = 5
-        for y_start in range(8, 17):
-            for y_length in range(8):
-                self._do_overlap(c, x_start, x_length, y_start, y_length)
-
-    def test_cache(self):
-        c = ResponseCache()
-        # xdata = base62.b2a(os.urandom(100))[:100]
-        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
-        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
-        nope = (None, None)
-        c.add("v1", 1, 0, xdata, "time0")
-        c.add("v1", 1, 2000, ydata, "time1")
-        self.failUnlessEqual(c.read("v2", 1, 10, 11), nope)
-        self.failUnlessEqual(c.read("v1", 2, 10, 11), nope)
-        self.failUnlessEqual(c.read("v1", 1, 0, 10), (xdata[:10], "time0"))
-        self.failUnlessEqual(c.read("v1", 1, 90, 10), (xdata[90:], "time0"))
-        self.failUnlessEqual(c.read("v1", 1, 300, 10), nope)
-        self.failUnlessEqual(c.read("v1", 1, 2050, 5), (ydata[50:55], "time1"))
-        self.failUnlessEqual(c.read("v1", 1, 0, 101), nope)
-        self.failUnlessEqual(c.read("v1", 1, 99, 1), (xdata[99:100], "time0"))
-        self.failUnlessEqual(c.read("v1", 1, 100, 1), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1990, 9), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1990, 10), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1990, 11), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1990, 15), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1990, 19), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1990, 20), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1990, 21), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1990, 25), nope)
-        self.failUnlessEqual(c.read("v1", 1, 1999, 25), nope)
-
-        # optional: join fragments
-        c = ResponseCache()
-        c.add("v1", 1, 0, xdata[:10], "time0")
-        c.add("v1", 1, 10, xdata[10:20], "time1")
-        #self.failUnlessEqual(c.read("v1", 1, 0, 20), (xdata[:20], "time0"))
-
 class Exceptions(unittest.TestCase):
     def test_repr(self):
         nmde = NeedMoreDataError(100, 50, 100)
@@ -1669,51 +2406,45 @@ class Exceptions(unittest.TestCase):
         ucwe = UncoordinatedWriteError()
         self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
 
-# we can't do this test with a FakeClient, since it uses FakeStorageServer
-# instances which always succeed. So we need a less-fake one.
 
-class IntentionalError(Exception):
-    pass
+class SameKeyGenerator:
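+    # a key generator that always returns the same RSA keypair, so a test
+    # can compute the file's storage index (and hence its server
+    # permutation) before the file is actually created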
+    def __init__(self, pubkey, privkey):
+        self.pubkey = pubkey
+        self.privkey = privkey
+    def generate(self, keysize=None):
+        return defer.succeed( (self.pubkey, self.privkey) )
 
-class LocalWrapper:
-    def __init__(self, original):
-        self.original = original
-        self.broken = False
-        self.post_call_notifier = None
-    def callRemote(self, methname, *args, **kwargs):
-        def _call():
-            if self.broken:
-                raise IntentionalError("I was asked to break")
-            meth = getattr(self.original, "remote_" + methname)
-            return meth(*args, **kwargs)
-        d = fireEventually()
-        d.addCallback(lambda res: _call())
-        if self.post_call_notifier:
-            d.addCallback(self.post_call_notifier, methname)
-        return d
-
-class LessFakeClient(FakeClient):
-
-    def __init__(self, basedir, num_peers=10):
-        self._num_peers = num_peers
-        self._peerids = [tagged_hash("peerid", "%d" % i)[:20]
-                         for i in range(self._num_peers)]
-        self._connections = {}
-        for peerid in self._peerids:
-            peerdir = os.path.join(basedir, idlib.shortnodeid_b2a(peerid))
-            make_dirs(peerdir)
-            ss = storage.StorageServer(peerdir)
-            ss.setNodeID(peerid)
-            lw = LocalWrapper(ss)
-            self._connections[peerid] = lw
-        self.nodeid = "fakenodeid"
-
-
-class Problems(unittest.TestCase, testutil.ShouldFailMixin):
-    def test_publish_surprise(self):
-        basedir = os.path.join("mutable/CollidingWrites/test_surprise")
-        self.client = LessFakeClient(basedir)
-        d = self.client.create_mutable_file("contents 1")
+class FirstServerGetsKilled:
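+    # a post-call notifier: the first server to complete a remote call is
+    # marked broken, so all of its subsequent calls will fail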
+    done = False
+    def notify(self, retval, wrapper, methname):
+        if not self.done:
+            wrapper.broken = True
+            self.done = True
+        return retval
+
+class FirstServerGetsDeleted:
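+    # a post-call notifier: the first server to complete a remote call is
+    # remembered, and its later slot queries are answered with (True, {}),
+    # as if its shares had been deleted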
+    def __init__(self):
+        self.done = False
+        self.silenced = None
+    def notify(self, retval, wrapper, methname):
+        if not self.done:
+            # this query will work, but later queries should think the share
+            # has been deleted
+            self.done = True
+            self.silenced = wrapper
+            return retval
+        if wrapper == self.silenced:
+            assert methname == "slot_testv_and_readv_and_writev"
+            return (True, {})
+        return retval
+
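+# Both notifiers are installed on every server's wrapper, e.g.:
+#
+#   killer = FirstServerGetsKilled()
+#   for s in nm.storage_broker.get_connected_servers():
+#       s.get_rref().post_call_notifier = killer.notify
+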
+class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
+    def do_publish_surprise(self, version):
+        self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
+        self.set_up_grid()
+        nm = self.g.clients[0].nodemaker
+        d = nm.create_mutable_file(MutableData("contents 1"),
+                                    version=version)
         def _created(n):
             d = defer.succeed(None)
             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
@@ -1723,7 +2454,7 @@ class Problems(unittest.TestCase, testutil.ShouldFailMixin):
             d.addCallback(_got_smap1)
             # then modify the file, leaving the old map untouched
             d.addCallback(lambda res: log.msg("starting winning write"))
-            d.addCallback(lambda res: n.overwrite("contents 2"))
+            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
             # now attempt to modify the file with the old servermap. This
             # will look just like an uncoordinated write, in which every
             # single share got updated between our mapupdate and our publish
@@ -1732,15 +2463,22 @@ class Problems(unittest.TestCase, testutil.ShouldFailMixin):
                           self.shouldFail(UncoordinatedWriteError,
                                           "test_publish_surprise", None,
                                           n.upload,
-                                          "contents 2a", self.old_map))
+                                          MutableData("contents 2a"), self.old_map))
             return d
         d.addCallback(_created)
         return d
 
+    def test_publish_surprise_sdmf(self):
+        return self.do_publish_surprise(SDMF_VERSION)
+
+    def test_publish_surprise_mdmf(self):
+        return self.do_publish_surprise(MDMF_VERSION)
+
     def test_retrieve_surprise(self):
-        basedir = os.path.join("mutable/CollidingWrites/test_retrieve")
-        self.client = LessFakeClient(basedir)
-        d = self.client.create_mutable_file("contents 1")
+        self.basedir = "mutable/Problems/test_retrieve_surprise"
+        self.set_up_grid()
+        nm = self.g.clients[0].nodemaker
+        d = nm.create_mutable_file(MutableData("contents 1"*4000))
         def _created(n):
             d = defer.succeed(None)
             d.addCallback(lambda res: n.get_servermap(MODE_READ))
@@ -1750,16 +2488,15 @@ class Problems(unittest.TestCase, testutil.ShouldFailMixin):
             d.addCallback(_got_smap1)
             # then modify the file, leaving the old map untouched
             d.addCallback(lambda res: log.msg("starting winning write"))
-            d.addCallback(lambda res: n.overwrite("contents 2"))
+            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
             # now attempt to retrieve the old version with the old servermap.
             # This will look like someone has changed the file since we
             # updated the servermap.
-            d.addCallback(lambda res: n._cache._clear())
             d.addCallback(lambda res: log.msg("starting doomed read"))
             d.addCallback(lambda res:
                           self.shouldFail(NotEnoughSharesError,
                                           "test_retrieve_surprise",
-                                          "ran out of peers: have 0 shares (k=3)",
+                                          "ran out of servers: have 0 of 1",
                                           n.download_version,
                                           self.old_map,
                                           self.old_map.best_recoverable_version(),
@@ -1768,15 +2505,17 @@ class Problems(unittest.TestCase, testutil.ShouldFailMixin):
         d.addCallback(_created)
         return d
 
+
     def test_unexpected_shares(self):
         # upload the file, take a servermap, shut down one of the servers,
         # upload it again (causing shares to appear on a new server), then
         # upload using the old servermap. The last upload should fail with an
         # UncoordinatedWriteError, because of the shares that didn't appear
         # in the servermap.
-        basedir = os.path.join("mutable/CollidingWrites/test_unexpexted_shares")
-        self.client = LessFakeClient(basedir)
-        d = self.client.create_mutable_file("contents 1")
+        self.basedir = "mutable/Problems/test_unexpected_shares"
+        self.set_up_grid()
+        nm = self.g.clients[0].nodemaker
+        d = nm.create_mutable_file(MutableData("contents 1"))
         def _created(n):
             d = defer.succeed(None)
             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
@@ -1784,11 +2523,11 @@ class Problems(unittest.TestCase, testutil.ShouldFailMixin):
                 # stash the old state of the file
                 self.old_map = smap
                 # now shut down one of the servers
-                peer0 = list(smap.make_sharemap()[0])[0]
-                self.client._connections.pop(peer0)
+                peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
+                self.g.remove_server(peer0)
                 # then modify the file, leaving the old map untouched
                 log.msg("starting winning write")
-                return n.overwrite("contents 2")
+                return n.overwrite(MutableData("contents 2"))
             d.addCallback(_got_smap1)
             # now attempt to modify the file with the old servermap. This
             # will look just like an uncoordinated write, in which every
@@ -1798,137 +2537,1252 @@ class Problems(unittest.TestCase, testutil.ShouldFailMixin):
                           self.shouldFail(UncoordinatedWriteError,
                                           "test_surprise", None,
                                           n.upload,
-                                          "contents 2a", self.old_map))
+                                          MutableData("contents 2a"), self.old_map))
             return d
         d.addCallback(_created)
         return d
 
+    def test_multiply_placed_shares(self):
+        self.basedir = "mutable/Problems/test_multiply_placed_shares"
+        self.set_up_grid()
+        nm = self.g.clients[0].nodemaker
+        d = nm.create_mutable_file(MutableData("contents 1"))
+        # remove one of the servers and reupload the file.
+        def _created(n):
+            self._node = n
+
+            servers = self.g.get_all_serverids()
+            self.ss = self.g.remove_server(servers[len(servers)-1])
+
+            new_server = self.g.make_server(len(servers)-1)
+            self.g.add_server(len(servers)-1, new_server)
+
+            return self._node.download_best_version()
+        d.addCallback(_created)
+        d.addCallback(lambda data: MutableData(data))
+        d.addCallback(lambda data: self._node.overwrite(data))
+
+        # restore the server we removed earlier, then download+upload
+        # the file again
+        def _overwritten(ign):
+            self.g.add_server(len(self.g.servers_by_number), self.ss)
+            return self._node.download_best_version()
+        d.addCallback(_overwritten)
+        d.addCallback(lambda data: MutableData(data))
+        d.addCallback(lambda data: self._node.overwrite(data))
+        d.addCallback(lambda ignored:
+            self._node.get_servermap(MODE_CHECK))
+        def _overwritten_again(smap):
+            # Make sure that all shares were updated by making sure that
+            # there aren't any other versions in the sharemap.
+            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
+            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
+        d.addCallback(_overwritten_again)
+        return d
+
     def test_bad_server(self):
         # Break one server, then create the file: the initial publish should
         # complete with an alternate server. Breaking a second server should
         # not prevent an update from succeeding either.
-        basedir = os.path.join("mutable/CollidingWrites/test_bad_server")
-        self.client = LessFakeClient(basedir, 20)
+        self.basedir = "mutable/Problems/test_bad_server"
+        self.set_up_grid()
+        nm = self.g.clients[0].nodemaker
+
         # to make sure that one of the initial peers is broken, we have to
-        # get creative. We create the keys, so we can figure out the storage
-        # index, but we hold off on doing the initial publish until we've
-        # broken the server on which the first share wants to be stored.
-        n = FastMutableFileNode(self.client)
-        d = defer.succeed(None)
-        d.addCallback(n._generate_pubprivkeys)
-        d.addCallback(n._generated)
+        # get creative. We create an RSA key and compute its storage-index.
+        # Then we make a KeyGenerator that always returns that one key, and
+        # use it to create the mutable file. This will get easier when we can
+        # use #467 static-server-selection to disable permutation and force
+        # the choice of server for share[0].
+
+        d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
+        def _got_key( (pubkey, privkey) ):
+            nm.key_generator = SameKeyGenerator(pubkey, privkey)
+            pubkey_s = pubkey.serialize()
+            privkey_s = privkey.serialize()
+            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
+                                        ssk_pubkey_fingerprint_hash(pubkey_s))
+            self._storage_index = u.get_storage_index()
+        d.addCallback(_got_key)
         def _break_peer0(res):
-            si = n.get_storage_index()
-            peerlist = self.client.get_permuted_peers("storage", si)
-            peerid0, connection0 = peerlist[0]
-            peerid1, connection1 = peerlist[1]
-            connection0.broken = True
-            self.connection1 = connection1
+            si = self._storage_index
+            servers = nm.storage_broker.get_servers_for_psi(si)
+            self.g.break_server(servers[0].get_serverid())
+            self.server1 = servers[1]
         d.addCallback(_break_peer0)
-        # now let the initial publish finally happen
-        d.addCallback(lambda res: n._upload("contents 1", None))
+        # now "create" the file, using the pre-established key, and let the
+        # initial publish finally happen
+        d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
         # that ought to work
-        d.addCallback(lambda res: n.download_best_version())
-        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
-        # now break the second peer
-        def _break_peer1(res):
-            self.connection1.broken = True
-        d.addCallback(_break_peer1)
-        d.addCallback(lambda res: n.overwrite("contents 2"))
-        # that ought to work too
-        d.addCallback(lambda res: n.download_best_version())
-        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
+        def _got_node(n):
+            d = n.download_best_version()
+            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
+            # now break the second peer
+            def _break_peer1(res):
+                self.g.break_server(self.server1.get_serverid())
+            d.addCallback(_break_peer1)
+            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
+            # that ought to work too
+            d.addCallback(lambda res: n.download_best_version())
+            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
+            def _explain_error(f):
+                print f
+                if f.check(NotEnoughServersError):
+                    print "first_error:", f.value.first_error
+                return f
+            d.addErrback(_explain_error)
+            return d
+        d.addCallback(_got_node)
+        return d
+
+    def test_bad_server_overlap(self):
+        # like test_bad_server, but with no extra unused servers to fall back
+        # upon. This means that we must re-use a server which we've already
+        # used. If we don't remember the fact that we sent them one share
+        # already, we'll mistakenly think we're experiencing an
+        # UncoordinatedWriteError.
+
+        # Break one server, then create the file: the initial publish should
+        # complete with an alternate server. Breaking a second server should
+        # not prevent an update from succeeding either.
+        self.basedir = "mutable/Problems/test_bad_server_overlap"
+        self.set_up_grid()
+        nm = self.g.clients[0].nodemaker
+        sb = nm.storage_broker
+
+        peerids = [s.get_serverid() for s in sb.get_connected_servers()]
+        self.g.break_server(peerids[0])
+
+        d = nm.create_mutable_file(MutableData("contents 1"))
+        def _created(n):
+            d = n.download_best_version()
+            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
+            # now break one of the remaining servers
+            def _break_second_server(res):
+                self.g.break_server(peerids[1])
+            d.addCallback(_break_second_server)
+            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
+            # that ought to work too
+            d.addCallback(lambda res: n.download_best_version())
+            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
+            return d
+        d.addCallback(_created)
         return d
 
     def test_publish_all_servers_bad(self):
         # Break all servers: the publish should fail
-        basedir = os.path.join("mutable/CollidingWrites/publish_all_servers_bad")
-        self.client = LessFakeClient(basedir, 20)
-        for connection in self.client._connections.values():
-            connection.broken = True
+        self.basedir = "mutable/Problems/test_publish_all_servers_bad"
+        self.set_up_grid()
+        nm = self.g.clients[0].nodemaker
+        for s in nm.storage_broker.get_connected_servers():
+            s.get_rref().broken = True
+
         d = self.shouldFail(NotEnoughServersError,
                             "test_publish_all_servers_bad",
-                            "Ran out of non-bad servers",
-                            self.client.create_mutable_file, "contents")
+                            "ran out of good servers",
+                            nm.create_mutable_file, MutableData("contents"))
         return d
 
     def test_publish_no_servers(self):
         # no servers at all: the publish should fail
-        basedir = os.path.join("mutable/CollidingWrites/publish_no_servers")
-        self.client = LessFakeClient(basedir, 0)
+        self.basedir = "mutable/Problems/test_publish_no_servers"
+        self.set_up_grid(num_servers=0)
+        nm = self.g.clients[0].nodemaker
+
         d = self.shouldFail(NotEnoughServersError,
                             "test_publish_no_servers",
                             "Ran out of non-bad servers",
-                            self.client.create_mutable_file, "contents")
+                            nm.create_mutable_file, MutableData("contents"))
         return d
-    test_publish_no_servers.timeout = 30
 
 
     def test_privkey_query_error(self):
         # when a servermap is updated with MODE_WRITE, it tries to get the
         # privkey. Something might go wrong during this query attempt.
-        self.client = FakeClient(20)
+        # Exercise the code in _privkey_query_failed which tries to handle
+        # such an error.
+        self.basedir = "mutable/Problems/test_privkey_query_error"
+        self.set_up_grid(num_servers=20)
+        nm = self.g.clients[0].nodemaker
+        nm._node_cache = DevNullDictionary() # disable the nodecache
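+        # (with the cache enabled, create_from_cap below would return the
+        # cached original node, which already knows the privkey)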
+
         # we need some contents that are large enough to push the privkey out
         # of the early part of the file
-        LARGE = "These are Larger contents" * 200 # about 5KB
-        d = self.client.create_mutable_file(LARGE)
+        LARGE = "These are Larger contents" * 2000 # about 50KB
+        LARGE_uploadable = MutableData(LARGE)
+        d = nm.create_mutable_file(LARGE_uploadable)
         def _created(n):
             self.uri = n.get_uri()
-            self.n2 = self.client.create_node_from_uri(self.uri)
-            # we start by doing a map update to figure out which is the first
-            # server.
-            return n.get_servermap(MODE_WRITE)
+            self.n2 = nm.create_from_cap(self.uri)
+
+            # When a mapupdate is performed on a node that doesn't yet know
+            # the privkey, a short read is sent to a batch of servers, to get
+            # the verinfo and (hopefully, if the file is short enough) the
+            # encprivkey. Our file is too large to let this first read
+            # contain the encprivkey. Each non-encprivkey-bearing response
+            # that arrives (until the node gets the encprivkey) will trigger
+            # a second read to specifically read the encprivkey.
+            #
+            # So, to exercise this case:
+            #  1. notice which server gets a read() call first
+            #  2. tell that server to start throwing errors
+            killer = FirstServerGetsKilled()
+            for s in nm.storage_broker.get_connected_servers():
+                s.get_rref().post_call_notifier = killer.notify
         d.addCallback(_created)
-        d.addCallback(lambda res: fireEventually(res))
-        def _got_smap1(smap):
-            peer0 = list(smap.make_sharemap()[0])[0]
-            # we tell the server to respond to this peer first, so that it
-            # will be asked for the privkey first
-            self.client._storage._sequence = [peer0]
-            # now we make the peer fail their second query
-            self.client._storage._special_answers[peer0] = ["normal", "fail"]
-        d.addCallback(_got_smap1)
+
         # now we update a servermap from a new node (which doesn't have the
-        # privkey yet, forcing it to use a separate privkey query). Each
-        # query response will trigger a privkey query, and since we're using
-        # _sequence to make the peer0 response come back first, we'll send it
-        # a privkey query first, and _sequence will again ensure that the
-        # peer0 query will also come back before the others, and then
-        # _special_answers will make sure that the query raises an exception.
-        # The whole point of these hijinks is to exercise the code in
-        # _privkey_query_failed. Note that the map-update will succeed, since
-        # we'll just get a copy from one of the other shares.
+        # privkey yet, forcing it to use a separate privkey query). Note that
+        # the map-update will succeed, since we'll just get a copy from one
+        # of the other shares.
         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
-        # Using FakeStorage._sequence means there will be read requests still
-        # floating around.. wait for them to retire
-        def _cancel_timer(res):
-            if self.client._storage._pending_timer:
-                self.client._storage._pending_timer.cancel()
-            return res
-        d.addBoth(_cancel_timer)
+
         return d
 
     def test_privkey_query_missing(self):
         # like test_privkey_query_error, but the shares are deleted by the
         # second query, instead of raising an exception.
-        self.client = FakeClient(20)
-        LARGE = "These are Larger contents" * 200 # about 5KB
-        d = self.client.create_mutable_file(LARGE)
+        self.basedir = "mutable/Problems/test_privkey_query_missing"
+        self.set_up_grid(num_servers=20)
+        nm = self.g.clients[0].nodemaker
+        LARGE = "These are Larger contents" * 2000 # about 50KiB
+        LARGE_uploadable = MutableData(LARGE)
+        nm._node_cache = DevNullDictionary() # disable the nodecache
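+        # (disabled for the same reason as in test_privkey_query_error:
+        # self.n2 must not already know the privkey)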
+
+        d = nm.create_mutable_file(LARGE_uploadable)
         def _created(n):
             self.uri = n.get_uri()
-            self.n2 = self.client.create_node_from_uri(self.uri)
-            return n.get_servermap(MODE_WRITE)
+            self.n2 = nm.create_from_cap(self.uri)
+            deleter = FirstServerGetsDeleted()
+            for s in nm.storage_broker.get_connected_servers():
+                s.get_rref().post_call_notifier = deleter.notify
         d.addCallback(_created)
-        d.addCallback(lambda res: fireEventually(res))
-        def _got_smap1(smap):
-            peer0 = list(smap.make_sharemap()[0])[0]
-            self.client._storage._sequence = [peer0]
-            self.client._storage._special_answers[peer0] = ["normal", "none"]
-        d.addCallback(_got_smap1)
         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
-        def _cancel_timer(res):
-            if self.client._storage._pending_timer:
-                self.client._storage._pending_timer.cancel()
-            return res
-        d.addBoth(_cancel_timer)
+        return d
+
+
+    def test_block_and_hash_query_error(self):
+        # This tests for what happens when a query to a remote server
+        # fails in either the hash validation step or the block getting
+        # step (because of batching, this is the same actual query).
+        # We need the storage server to keep answering up until the point
+        # that its prefix is validated, then suddenly die. This
+        # exercises some exception handling code in Retrieve.
+        self.basedir = "mutable/Problems/test_block_and_hash_query_error"
+        self.set_up_grid(num_servers=20)
+        nm = self.g.clients[0].nodemaker
+        CONTENTS = "contents" * 2000
+        CONTENTS_uploadable = MutableData(CONTENTS)
+        d = nm.create_mutable_file(CONTENTS_uploadable)
+        def _created(node):
+            self._node = node
+        d.addCallback(_created)
+        d.addCallback(lambda ignored:
+            self._node.get_servermap(MODE_READ))
+        def _then(servermap):
+            # we have our servermap. Now we set up the servers like the
+            # tests above -- the first one that gets a read call should
+            # start throwing errors, but only after returning its prefix
+            # for validation. Since we'll download without fetching the
+            # private key, the next query to the remote server will be
+            # for either a block and salt or for hashes, either of which
+            # will exercise the error handling code.
+            killer = FirstServerGetsKilled()
+            for s in nm.storage_broker.get_connected_servers():
+                s.get_rref().post_call_notifier = killer.notify
+            ver = servermap.best_recoverable_version()
+            assert ver
+            return self._node.download_version(servermap, ver)
+        d.addCallback(_then)
+        d.addCallback(lambda data:
+            self.failUnlessEqual(data, CONTENTS))
+        return d
+
+    def test_1654(self):
+        # test that the Retrieve object unconditionally verifies the block
+        # hash tree root for mutable shares. The failure mode is that
+        # carefully crafted shares can cause undetected corruption (the
+        # retrieve appears to finish successfully, but the result is
+        # corrupted). When fixed, these shares always cause a
+        # CorruptShareError, which results in NotEnoughSharesError in this
+        # 2-of-2 file.
+        self.basedir = "mutable/Problems/test_1654"
+        self.set_up_grid(num_servers=2)
+        cap = uri.from_string(TEST_1654_CAP)
+        si = cap.get_storage_index()
+
+        for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
+            sharedata = base64.b64decode(share)
+            storedir = self.get_serverdir(shnum)
+            storage_path = os.path.join(storedir, "shares",
+                                        storage_index_to_dir(si))
+            fileutil.make_dirs(storage_path)
+            fileutil.write(os.path.join(storage_path, "%d" % shnum),
+                           sharedata)
+
+        nm = self.g.clients[0].nodemaker
+        n = nm.create_from_cap(TEST_1654_CAP)
+        # to exercise the problem correctly, we must ensure that sh0 is
+        # processed first, and sh1 second. NoNetworkGrid has facilities to
+        # stall the first request from a single server, but it's not
+        # currently easy to extend that to stall the second request (mutable
+        # retrievals will see two: first the mapupdate, then the fetch).
+    # However, repeated executions of this test without the #1654 fix
+    # suggest that it fails reliably even without explicit stalls,
+        # probably because the servers are queried in a fixed order. So I'm
+        # ok with relying upon that.
+        d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
+                            "ran out of servers",
+                            n.download_best_version)
+        return d
+
+
+TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"
+
+TEST_1654_SH0 = """\
+VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
+AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
+AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
+UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
+uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
+AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
+ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
+vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
+CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
+Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
+FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
+DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
+AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
+Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
+/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
+73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
+GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
+ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
++QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
+bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
+z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
+eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
+d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
+dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
+2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
+wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
+sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
+eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
+PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
+CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
+Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
+Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
+tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
+Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
+LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
+ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
+jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
+fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
+DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
+tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
+7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
+jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
+TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
+4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
+bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
+72mXGlqyLyWYuAAAAAA="""
+
+TEST_1654_SH1 = """\
+VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
+9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
+AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
+MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
+AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
+uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
+AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
+ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
+vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
+CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
+Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
+FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
+DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
+AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
+Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
+/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
+73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
+GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
+ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
++QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
+98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
+z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
+eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
+d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
+dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
+2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
+wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
+sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
+eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
+PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
+CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
+Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
+Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
+tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
+Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
+LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
+ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
+jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
+fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
+DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
+tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
+7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
+jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
+TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
+4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
+bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
+72mXGlqyLyWYuAAAAAA="""
+
+
+class FileHandle(unittest.TestCase):
+    def setUp(self):
+        self.test_data = "Test Data" * 50000
+        self.sio = StringIO(self.test_data)
+        self.uploadable = MutableFileHandle(self.sio)
+
+
+    def test_filehandle_read(self):
+        self.basedir = "mutable/FileHandle/test_filehandle_read"
+        chunk_size = 10
+        for i in xrange(0, len(self.test_data), chunk_size):
+            data = self.uploadable.read(chunk_size)
+            data = "".join(data)
+            start = i
+            end = i + chunk_size
+            self.failUnlessEqual(data, self.test_data[start:end])
+
+
+    def test_filehandle_get_size(self):
+        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
+        actual_size = len(self.test_data)
+        size = self.uploadable.get_size()
+        self.failUnlessEqual(size, actual_size)
+
+
+    def test_filehandle_get_size_out_of_order(self):
+        # We should be able to call get_size whenever we want without
+        # disturbing the location of the seek pointer.
+        chunk_size = 100
+        data = self.uploadable.read(chunk_size)
+        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
+
+        # Now get the size.
+        size = self.uploadable.get_size()
+        self.failUnlessEqual(size, len(self.test_data))
+
+        # Now get more data. We should be right where we left off.
+        more_data = self.uploadable.read(chunk_size)
+        start = chunk_size
+        end = chunk_size * 2
+        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
+
+
+    def test_filehandle_file(self):
+        # Make sure that the MutableFileHandle works on a file as well
+        # as a StringIO object, since in some cases it will be asked to
+        # deal with files.
+        self.basedir = self.mktemp()
+        # mktemp() only returns a pathname; it does not create the directory
+        os.mkdir(self.basedir)
+        f_path = os.path.join(self.basedir, "test_file")
+        f = open(f_path, "w")
+        f.write(self.test_data)
+        f.close()
+        f = open(f_path, "r")
+
+        uploadable = MutableFileHandle(f)
+
+        data = uploadable.read(len(self.test_data))
+        self.failUnlessEqual("".join(data), self.test_data)
+        size = uploadable.get_size()
+        self.failUnlessEqual(size, len(self.test_data))
+
+
+    def test_close(self):
+        # Make sure that the MutableFileHandle closes its handle when
+        # told to do so.
+        self.uploadable.close()
+        self.failUnless(self.sio.closed)
+
+
+class DataHandle(unittest.TestCase):
+    def setUp(self):
+        self.test_data = "Test Data" * 50000
+        self.uploadable = MutableData(self.test_data)
+
+
+    def test_datahandle_read(self):
+        chunk_size = 10
+        for i in xrange(0, len(self.test_data), chunk_size):
+            data = self.uploadable.read(chunk_size)
+            data = "".join(data)
+            start = i
+            end = i + chunk_size
+            self.failUnlessEqual(data, self.test_data[start:end])
+
+
+    def test_datahandle_get_size(self):
+        actual_size = len(self.test_data)
+        size = self.uploadable.get_size()
+        self.failUnlessEqual(size, actual_size)
+
+
+    def test_datahandle_get_size_out_of_order(self):
+        # We should be able to call get_size whenever we want without
+        # disturbing the location of the seek pointer.
+        chunk_size = 100
+        data = self.uploadable.read(chunk_size)
+        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
+
+        # Now get the size.
+        size = self.uploadable.get_size()
+        self.failUnlessEqual(size, len(self.test_data))
+
+        # Now get more data. We should be right where we left off.
+        more_data = self.uploadable.read(chunk_size)
+        start = chunk_size
+        end = chunk_size * 2
+        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
+
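+# MutableData is effectively a MutableFileHandle wrapped around an in-memory
+# buffer, which is why these tests mirror the FileHandle tests above.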
+
+class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin,
+              PublishMixin):
+    def setUp(self):
+        GridTestMixin.setUp(self)
+        self.basedir = self.mktemp()
+        self.set_up_grid()
+        self.c = self.g.clients[0]
+        self.nm = self.c.nodemaker
+        self.data = "test data" * 100000 # about 900 KiB; MDMF
+        self.small_data = "test data" * 10 # about 90 B; SDMF
+
+
+    def do_upload_mdmf(self):
+        d = self.nm.create_mutable_file(MutableData(self.data),
+                                        version=MDMF_VERSION)
+        def _then(n):
+            assert isinstance(n, MutableFileNode)
+            assert n._protocol_version == MDMF_VERSION
+            self.mdmf_node = n
+            return n
+        d.addCallback(_then)
+        return d
+
+    def do_upload_sdmf(self):
+        d = self.nm.create_mutable_file(MutableData(self.small_data))
+        def _then(n):
+            assert isinstance(n, MutableFileNode)
+            assert n._protocol_version == SDMF_VERSION
+            self.sdmf_node = n
+            return n
+        d.addCallback(_then)
+        return d
+
+    def do_upload_empty_sdmf(self):
+        d = self.nm.create_mutable_file(MutableData(""))
+        def _then(n):
+            assert isinstance(n, MutableFileNode)
+            self.sdmf_zero_length_node = n
+            assert n._protocol_version == SDMF_VERSION
+            return n
+        d.addCallback(_then)
+        return d
+
+    def do_upload(self):
+        d = self.do_upload_mdmf()
+        d.addCallback(lambda ign: self.do_upload_sdmf())
+        return d
+
+    def test_debug(self):
+        d = self.do_upload_mdmf()
+        def _debug(n):
+            fso = debug.FindSharesOptions()
+            storage_index = base32.b2a(n.get_storage_index())
+            fso.si_s = storage_index
+            fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(unicode(storedir)))
+                            for (i,ss,storedir)
+                            in self.iterate_servers()]
+            fso.stdout = StringIO()
+            fso.stderr = StringIO()
+            debug.find_shares(fso)
+            sharefiles = fso.stdout.getvalue().splitlines()
+            expected = self.nm.default_encoding_parameters["n"]
+            self.failUnlessEqual(len(sharefiles), expected)
+
+            do = debug.DumpOptions()
+            do["filename"] = sharefiles[0]
+            do.stdout = StringIO()
+            debug.dump_share(do)
+            output = do.stdout.getvalue()
+            lines = set(output.splitlines())
+            self.failUnless("Mutable slot found:" in lines, output)
+            self.failUnless(" share_type: MDMF" in lines, output)
+            self.failUnless(" num_extra_leases: 0" in lines, output)
+            self.failUnless(" MDMF contents:" in lines, output)
+            self.failUnless("  seqnum: 1" in lines, output)
+            self.failUnless("  required_shares: 3" in lines, output)
+            self.failUnless("  total_shares: 10" in lines, output)
+            self.failUnless("  segsize: 131073" in lines, output)
+            self.failUnless("  datalen: %d" % len(self.data) in lines, output)
+            vcap = n.get_verify_cap().to_string()
+            self.failUnless("  verify-cap: %s" % vcap in lines, output)
+
+            cso = debug.CatalogSharesOptions()
+            cso.nodedirs = fso.nodedirs
+            cso.stdout = StringIO()
+            cso.stderr = StringIO()
+            debug.catalog_shares(cso)
+            shares = cso.stdout.getvalue().splitlines()
+            oneshare = shares[0] # all shares should be MDMF
+            self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
+            self.failUnless(oneshare.startswith("MDMF"), oneshare)
+            fields = oneshare.split()
+            self.failUnlessEqual(fields[0], "MDMF")
+            self.failUnlessEqual(fields[1], storage_index)
+            self.failUnlessEqual(fields[2], "3/10")
+            self.failUnlessEqual(fields[3], "%d" % len(self.data))
+            self.failUnless(fields[4].startswith("#1:"), fields[3])
+            # the rest of fields[4] is the roothash, which depends upon
+            # encryption salts and is not constant. fields[5] is the
+            # remaining time on the longest lease, which is timing dependent.
+            # The rest of the line is the quoted pathname to the share.
+        d.addCallback(_debug)
+        return d
+
+    def test_get_sequence_number(self):
+        d = self.do_upload()
+        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
+        d.addCallback(lambda bv:
+            self.failUnlessEqual(bv.get_sequence_number(), 1))
+        d.addCallback(lambda ignored:
+            self.sdmf_node.get_best_readable_version())
+        d.addCallback(lambda bv:
+            self.failUnlessEqual(bv.get_sequence_number(), 1))
+        # Now update. Afterwards, the sequence number should be 2 in
+        # both cases.
+        def _do_update(ignored):
+            new_data = MutableData("foo bar baz" * 100000)
+            new_small_data = MutableData("foo bar baz" * 10)
+            d1 = self.mdmf_node.overwrite(new_data)
+            d2 = self.sdmf_node.overwrite(new_small_data)
+            dl = gatherResults([d1, d2])
+            return dl
+        d.addCallback(_do_update)
+        d.addCallback(lambda ignored:
+            self.mdmf_node.get_best_readable_version())
+        d.addCallback(lambda bv:
+            self.failUnlessEqual(bv.get_sequence_number(), 2))
+        d.addCallback(lambda ignored:
+            self.sdmf_node.get_best_readable_version())
+        d.addCallback(lambda bv:
+            self.failUnlessEqual(bv.get_sequence_number(), 2))
+        return d
+
+
+    def test_cap_after_upload(self):
+        # If we create a new mutable file and upload things to it, and
+        # it's an MDMF file, we should get an MDMF cap back from that
+        # file and be able to use it. That is essentially what an MDMF
+        # node is, so just check the cap types.
+        d = self.do_upload_mdmf()
+        def _then(ign):
+            mdmf_uri = self.mdmf_node.get_uri()
+            cap = uri.from_string(mdmf_uri)
+            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
+            readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
+            cap = uri.from_string(readonly_mdmf_uri)
+            self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
+        d.addCallback(_then)
+        return d
+
+    def test_mutable_version(self):
+        # assert that getting parameters from the IMutableVersion object
+        # gives us the same data as getting them from the filenode itself
+        d = self.do_upload()
+        d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
+        def _check_mdmf(bv):
+            n = self.mdmf_node
+            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
+            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
+            self.failIf(bv.is_readonly())
+        d.addCallback(_check_mdmf)
+        d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
+        def _check_sdmf(bv):
+            n = self.sdmf_node
+            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
+            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
+            self.failIf(bv.is_readonly())
+        d.addCallback(_check_sdmf)
+        return d
+
+
+    def test_get_readonly_version(self):
+        d = self.do_upload()
+        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
+        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
+
+        # Attempting to get a mutable version of a mutable file from a
+        # filenode initialized with a readcap should return a readonly
+        # version of that same node.
+        d.addCallback(lambda ign: self.mdmf_node.get_readonly())
+        d.addCallback(lambda ro: ro.get_best_mutable_version())
+        d.addCallback(lambda v: self.failUnless(v.is_readonly()))
+
+        d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
+        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
+
+        d.addCallback(lambda ign: self.sdmf_node.get_readonly())
+        d.addCallback(lambda ro: ro.get_best_mutable_version())
+        d.addCallback(lambda v: self.failUnless(v.is_readonly()))
+        return d
+
+
+    def test_toplevel_overwrite(self):
+        new_data = MutableData("foo bar baz" * 100000)
+        new_small_data = MutableData("foo bar baz" * 10)
+        d = self.do_upload()
+        d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
+        d.addCallback(lambda ignored:
+            self.mdmf_node.download_best_version())
+        d.addCallback(lambda data:
+            self.failUnlessEqual(data, "foo bar baz" * 100000))
+        d.addCallback(lambda ignored:
+            self.sdmf_node.overwrite(new_small_data))
+        d.addCallback(lambda ignored:
+            self.sdmf_node.download_best_version())
+        d.addCallback(lambda data:
+            self.failUnlessEqual(data, "foo bar baz" * 10))
+        return d
+
+
+    def test_toplevel_modify(self):
+        d = self.do_upload()
+        def modifier(old_contents, servermap, first_time):
+            return old_contents + "modified"
+        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
+        d.addCallback(lambda ignored:
+            self.mdmf_node.download_best_version())
+        d.addCallback(lambda data:
+            self.failUnlessIn("modified", data))
+        d.addCallback(lambda ignored:
+            self.sdmf_node.modify(modifier))
+        d.addCallback(lambda ignored:
+            self.sdmf_node.download_best_version())
+        d.addCallback(lambda data:
+            self.failUnlessIn("modified", data))
+        return d
+
+
+    def test_version_modify(self):
+        # TODO: When we can publish multiple versions, alter this test
+        # to modify a version other than the best usable version, then
+        # test that the modified version becomes the best recoverable one.
+        d = self.do_upload()
+        def modifier(old_contents, servermap, first_time):
+            return old_contents + "modified"
+        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
+        d.addCallback(lambda ignored:
+            self.mdmf_node.download_best_version())
+        d.addCallback(lambda data:
+            self.failUnlessIn("modified", data))
+        d.addCallback(lambda ignored:
+            self.sdmf_node.modify(modifier))
+        d.addCallback(lambda ignored:
+            self.sdmf_node.download_best_version())
+        d.addCallback(lambda data:
+            self.failUnlessIn("modified", data))
+        return d
+
+
+    def test_download_version(self):
+        d = self.publish_multiple()
+        # We want to have two recoverable versions on the grid.
+        d.addCallback(lambda res:
+                      self._set_versions({0:0,2:0,4:0,6:0,8:0,
+                                          1:1,3:1,5:1,7:1,9:1}))
+        # Now try to download each version. We should get the plaintext
+        # associated with that version.
+        d.addCallback(lambda ignored:
+            self._fn.get_servermap(mode=MODE_READ))
+        def _got_servermap(smap):
+            versions = smap.recoverable_versions()
+            assert len(versions) == 2
+
+            self.servermap = smap
+            self.version1, self.version2 = versions
+            assert self.version1 != self.version2
+
+            self.version1_seqnum = self.version1[0]
+            self.version2_seqnum = self.version2[0]
+            self.version1_index = self.version1_seqnum - 1
+            self.version2_index = self.version2_seqnum - 1
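+            # (seqnums are 1-based; self.CONTENTS, from PublishMixin, is
+            # indexed from 0)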
+
+        d.addCallback(_got_servermap)
+        d.addCallback(lambda ignored:
+            self._fn.download_version(self.servermap, self.version1))
+        d.addCallback(lambda results:
+            self.failUnlessEqual(self.CONTENTS[self.version1_index],
+                                 results))
+        d.addCallback(lambda ignored:
+            self._fn.download_version(self.servermap, self.version2))
+        d.addCallback(lambda results:
+            self.failUnlessEqual(self.CONTENTS[self.version2_index],
+                                 results))
+        return d
+
+
+    def test_download_nonexistent_version(self):
+        d = self.do_upload_mdmf()
+        d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
+        def _set_servermap(servermap):
+            self.servermap = servermap
+        d.addCallback(_set_servermap)
+        d.addCallback(lambda ignored:
+           self.shouldFail(UnrecoverableFileError, "nonexistent version",
+                           None,
+                           self.mdmf_node.download_version, self.servermap,
+                           "not a version"))
+        return d
+
+
+    def test_partial_read(self):
+        d = self.do_upload_mdmf()
+        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
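+        # With k=3, the MDMF segment size is next_multiple(128 KiB, 3) ==
+        # 131073 bytes (cf. the "segsize: 131073" line in test_debug), so
+        # these offsets land exactly on or around a segment boundary.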
+        modes = [("start_on_segment_boundary",
+                  mathutil.next_multiple(128 * 1024, 3), 50),
+                 ("ending_one_byte_after_segment_boundary",
+                  mathutil.next_multiple(128 * 1024, 3)-50, 51),
+                 ("zero_length_at_start", 0, 0),
+                 ("zero_length_in_middle", 50, 0),
+                 ("zero_length_at_segment_boundary",
+                  mathutil.next_multiple(128 * 1024, 3), 0),
+                 ]
+        for (name, offset, length) in modes:
+            d.addCallback(self._do_partial_read, name, offset, length)
+        # then read only a few bytes at a time, and see that the results are
+        # what we expect.
+        def _read_data(version):
+            c = consumer.MemoryConsumer()
+            d2 = defer.succeed(None)
+            for i in xrange(0, len(self.data), 10000):
+                d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
+            d2.addCallback(lambda ignored:
+                self.failUnlessEqual(self.data, "".join(c.chunks)))
+            return d2
+        d.addCallback(_read_data)
+        return d
+
+
+    def _do_partial_read(self, version, name, offset, length):
+        c = consumer.MemoryConsumer()
+        d = version.read(c, offset, length)
+        expected = self.data[offset:offset+length]
+        d.addCallback(lambda ignored: "".join(c.chunks))
+        def _check(results):
+            if results != expected:
+                print
+                print "got: %s ... %s" % (results[:20], results[-20:])
+                print "exp: %s ... %s" % (expected[:20], expected[-20:])
+                self.fail("results[%s] != expected" % name)
+            return version # daisy-chained to next call
+        d.addCallback(_check)
+        return d
+
+
+    def _test_read_and_download(self, node, expected):
+        d = node.get_best_readable_version()
+        def _read_data(version):
+            c = consumer.MemoryConsumer()
+            d2 = defer.succeed(None)
+            d2.addCallback(lambda ignored: version.read(c))
+            d2.addCallback(lambda ignored:
+                self.failUnlessEqual(expected, "".join(c.chunks)))
+            return d2
+        d.addCallback(_read_data)
+        d.addCallback(lambda ignored: node.download_best_version())
+        d.addCallback(lambda data: self.failUnlessEqual(expected, data))
+        return d
+
+    def test_read_and_download_mdmf(self):
+        d = self.do_upload_mdmf()
+        d.addCallback(self._test_read_and_download, self.data)
+        return d
+
+    def test_read_and_download_sdmf(self):
+        d = self.do_upload_sdmf()
+        d.addCallback(self._test_read_and_download, self.small_data)
+        return d
+
+    def test_read_and_download_sdmf_zero_length(self):
+        d = self.do_upload_empty_sdmf()
+        d.addCallback(self._test_read_and_download, "")
+        return d
+
+
+class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
+    timeout = 400 # these tests are too big, 120s is not enough on slow
+                  # platforms
+    def setUp(self):
+        GridTestMixin.setUp(self)
+        self.basedir = self.mktemp()
+        self.set_up_grid(num_servers=13)
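+        # use more servers than the default grid provides, since some of
+        # these tests create files with 255 shares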
+        self.c = self.g.clients[0]
+        self.nm = self.c.nodemaker
+        self.data = "testdata " * 100000 # about 900 KiB; MDMF
+        self.small_data = "test data" * 10 # about 90 B; SDMF
+
+
+    def do_upload_sdmf(self):
+        d = self.nm.create_mutable_file(MutableData(self.small_data))
+        def _then(n):
+            assert isinstance(n, MutableFileNode)
+            self.sdmf_node = n
+            # Make an SDMF node that has 255 shares.
+            self.nm.default_encoding_parameters['n'] = 255
+            self.nm.default_encoding_parameters['k'] = 127
+            return self.nm.create_mutable_file(MutableData(self.small_data))
+        d.addCallback(_then)
+        def _then2(n):
+            assert isinstance(n, MutableFileNode)
+            self.sdmf_max_shares_node = n
+        d.addCallback(_then2)
+        return d
+
+    def do_upload_mdmf(self):
+        d = self.nm.create_mutable_file(MutableData(self.data),
+                                        version=MDMF_VERSION)
+        def _then(n):
+            assert isinstance(n, MutableFileNode)
+            self.mdmf_node = n
+            # Make an MDMF node that has 255 shares.
+            self.nm.default_encoding_parameters['n'] = 255
+            self.nm.default_encoding_parameters['k'] = 127
+            return self.nm.create_mutable_file(MutableData(self.data),
+                                               version=MDMF_VERSION)
+        d.addCallback(_then)
+        def _then2(n):
+            assert isinstance(n, MutableFileNode)
+            self.mdmf_max_shares_node = n
+        d.addCallback(_then2)
+        return d
+
+    def _test_replace(self, offset, new_data):
+        expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
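+        # note: when offset+len(new_data) runs past the end of self.data,
+        # the trailing slice above is empty, so this same expression also
+        # models appends and extensions.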
+        d0 = self.do_upload_mdmf()
+        def _run(ign):
+            d = defer.succeed(None)
+            for node in (self.mdmf_node, self.mdmf_max_shares_node):
+                # close over 'node'.
+                d.addCallback(lambda ign, node=node:
+                              node.get_best_mutable_version())
+                d.addCallback(lambda mv:
+                              mv.update(MutableData(new_data), offset))
+                d.addCallback(lambda ign, node=node:
+                              node.download_best_version())
+                def _check(results):
+                    if results != expected:
+                        print
+                        print "got: %s ... %s" % (results[:20], results[-20:])
+                        print "exp: %s ... %s" % (expected[:20], expected[-20:])
+                        self.fail("results != expected")
+                d.addCallback(_check)
+            return d
+        d0.addCallback(_run)
+        return d0
+
+    def test_append(self):
+        # We should be able to append data to a mutable file and get
+        # what we expect.
+        return self._test_replace(len(self.data), "appended")
+
+    def test_replace_middle(self):
+        # We should be able to replace data in the middle of a mutable
+        # file and get what we expect back.
+        return self._test_replace(100, "replaced")
+
+    def test_replace_beginning(self):
+        # We should be able to replace data at the beginning of the file
+        # without truncating the file
+        return self._test_replace(0, "beginning")
+
+    def test_replace_segstart1(self):
+        return self._test_replace(128*1024+1, "NNNN")
+
+    def test_replace_zero_length_beginning(self):
+        return self._test_replace(0, "")
+
+    def test_replace_zero_length_middle(self):
+        return self._test_replace(50, "")
+
+    def test_replace_zero_length_segstart1(self):
+        return self._test_replace(128*1024+1, "")
+
+    def test_replace_and_extend(self):
+        # We should be able to replace data in the middle of a mutable
+        # file and extend that mutable file and get what we expect.
+        return self._test_replace(100, "modified " * 100000)
+
+
+    def _check_differences(self, got, expected):
+        # displaying arbitrary file corruption is tricky for a ~1MB file
+        # of repeating data, so look for the likely problem spots and
+        # display them separately
+        gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
+        expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
+        gotspans = ["%d:%d=%s" % (start,end,got[start:end])
+                    for (start,end) in gotmods]
+        expspans = ["%d:%d=%s" % (start,end,expected[start:end])
+                    for (start,end) in expmods]
+        #print "expecting: %s" % expspans
+
+        SEGSIZE = 128*1024
+        if got != expected:
+            print "differences:"
+            for segnum in range(len(expected)//SEGSIZE):
+                start = segnum * SEGSIZE
+                end = (segnum+1) * SEGSIZE
+                got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
+                exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
+                if got_ends != exp_ends:
+                    print "expected[%d]: %s" % (start, exp_ends)
+                    print "got     [%d]: %s" % (start, got_ends)
+            if expspans != gotspans:
+                print "expected: %s" % expspans
+                print "got     : %s" % gotspans
+            open("EXPECTED","wb").write(expected)
+            open("GOT","wb").write(got)
+            print "wrote data to EXPECTED and GOT"
+            self.fail("didn't get expected data")
+
+
+    def test_replace_locations(self):
+        # exercise fencepost conditions
+        SEGSIZE = 128*1024
+        suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
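+        # i.e. offsets 131069..131072 and 262141..262144, straddling the
+        # first and second 128 KiB segment boundaries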
+        letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+        d0 = self.do_upload_mdmf()
+        def _run(ign):
+            expected = self.data
+            d = defer.succeed(None)
+            for offset in suspects:
+                new_data = letters.next()*2 # "AA", then "BB", etc
+                expected = expected[:offset]+new_data+expected[offset+2:]
+                d.addCallback(lambda ign:
+                              self.mdmf_node.get_best_mutable_version())
+                def _modify(mv, offset=offset, new_data=new_data):
+                    # close over 'offset' and 'new_data'
+                    md = MutableData(new_data)
+                    return mv.update(md, offset)
+                d.addCallback(_modify)
+                d.addCallback(lambda ignored:
+                              self.mdmf_node.download_best_version())
+                d.addCallback(self._check_differences, expected)
+            return d
+        d0.addCallback(_run)
+        return d0
+
+    def test_replace_locations_max_shares(self):
+        # exercise fencepost conditions
+        SEGSIZE = 128*1024
+        suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
+        letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+        d0 = self.do_upload_mdmf()
+        def _run(ign):
+            expected = self.data
+            d = defer.succeed(None)
+            for offset in suspects:
+                new_data = letters.next()*2 # "AA", then "BB", etc
+                expected = expected[:offset]+new_data+expected[offset+2:]
+                d.addCallback(lambda ign:
+                              self.mdmf_max_shares_node.get_best_mutable_version())
+                def _modify(mv, offset=offset, new_data=new_data):
+                    # close over 'offset' and 'new_data'
+                    md = MutableData(new_data)
+                    return mv.update(md, offset)
+                d.addCallback(_modify)
+                d.addCallback(lambda ignored:
+                              self.mdmf_max_shares_node.download_best_version())
+                d.addCallback(self._check_differences, expected)
+            return d
+        d0.addCallback(_run)
+        return d0
+
+
+    def test_append_power_of_two(self):
+        # If we attempt to extend a mutable file so that its segment
+        # count crosses a power-of-two boundary, the update operation
+        # should know how to reencode the file.
+
+        # Note that the data populating self.mdmf_node is about 900 KiB
+        # long -- this is 7 segments in the default segment size. So we
+        # need to add 2 segments worth of data to push it over a
+        # power-of-two boundary.
+        segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
+        new_data = self.data + (segment * 2)
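+        # sanity-check the segment arithmetic described above: 7 segments
+        # before the update, 9 afterwards, crossing the 8 (2**3) boundary
+        # (this assumes the 128 KiB DEFAULT_MAX_SEGMENT_SIZE imported up top)
+        assert mathutil.div_ceil(len(self.data), DEFAULT_MAX_SEGMENT_SIZE) == 7
+        assert mathutil.div_ceil(len(new_data), DEFAULT_MAX_SEGMENT_SIZE) == 9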
+        d0 = self.do_upload_mdmf()
+        def _run(ign):
+            d = defer.succeed(None)
+            for node in (self.mdmf_node, self.mdmf_max_shares_node):
+                # close over 'node'.
+                d.addCallback(lambda ign, node=node:
+                              node.get_best_mutable_version())
+                d.addCallback(lambda mv:
+                              mv.update(MutableData(segment * 2), len(self.data)))
+                d.addCallback(lambda ign, node=node:
+                              node.download_best_version())
+                d.addCallback(lambda results:
+                              self.failUnlessEqual(results, new_data))
+            return d
+        d0.addCallback(_run)
+        return d0
+
+    def test_update_sdmf(self):
+        # Running update on a single-segment file should still work.
+        new_data = self.small_data + "appended"
+        d0 = self.do_upload_sdmf()
+        def _run(ign):
+            d = defer.succeed(None)
+            for node in (self.sdmf_node, self.sdmf_max_shares_node):
+                # close over 'node'.
+                d.addCallback(lambda ign, node=node:
+                              node.get_best_mutable_version())
+                d.addCallback(lambda mv:
+                              mv.update(MutableData("appended"), len(self.small_data)))
+                d.addCallback(lambda ign, node=node:
+                              node.download_best_version())
+                d.addCallback(lambda results:
+                              self.failUnlessEqual(results, new_data))
+            return d
+        d0.addCallback(_run)
+        return d0
+
+    def test_replace_in_last_segment(self):
+        # The wrapper should know how to handle the tail segment
+        # appropriately.
+        replace_offset = len(self.data) - 100
+        new_data = self.data[:replace_offset] + "replaced"
+        rest_offset = replace_offset + len("replaced")
+        new_data += self.data[rest_offset:]
+        d0 = self.do_upload_mdmf()
+        def _run(ign):
+            d = defer.succeed(None)
+            for node in (self.mdmf_node, self.mdmf_max_shares_node):
+                # close over 'node'.
+                d.addCallback(lambda ign, node=node:
+                              node.get_best_mutable_version())
+                d.addCallback(lambda mv:
+                              mv.update(MutableData("replaced"), replace_offset))
+                d.addCallback(lambda ign, node=node:
+                              node.download_best_version())
+                d.addCallback(lambda results:
+                              self.failUnlessEqual(results, new_data))
+            return d
+        d0.addCallback(_run)
+        return d0
+
+    def test_multiple_segment_replace(self):
+        replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
+        new_data = self.data[:replace_offset]
+        new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
+        new_data += 2 * new_segment
+        new_data += "replaced"
+        rest_offset = len(new_data)
+        new_data += self.data[rest_offset:]
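+        # the replacement therefore covers offsets [2*SEGSIZE, 4*SEGSIZE+8):
+        # two whole segments plus the 8-byte "replaced" marker, with the
+        # original tail (roughly 3 more segments) preserved past rest_offset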
+        d0 = self.do_upload_mdmf()
+        def _run(ign):
+            d = defer.succeed(None)
+            for node in (self.mdmf_node, self.mdmf_max_shares_node):
+                # close over 'node'.
+                d.addCallback(lambda ign, node=node:
+                              node.get_best_mutable_version())
+                d.addCallback(lambda mv:
+                              mv.update(MutableData((2 * new_segment) + "replaced"),
+                                        replace_offset))
+                d.addCallback(lambda ignored, node=node:
+                              node.download_best_version())
+                d.addCallback(lambda results:
+                              self.failUnlessEqual(results, new_data))
+            return d
+        d0.addCallback(_run)
+        return d0
+
+class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
+    sdmf_old_shares = {}
+    sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
+    sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
+    sdmf_old_contents = "This is a test file.\n"
+    def copy_sdmf_shares(self):
+        # We'll basically be short-circuiting the upload process.
+        servernums = self.g.servers_by_number.keys()
+        assert len(servernums) == 10
+
+        assignments = zip(self.sdmf_old_shares.keys(), servernums)
+        # Get the storage index.
+        cap = uri.from_string(self.sdmf_old_cap)
+        si = cap.get_storage_index()
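+        # each share will land at
+        #   <storedir>/shares/<si prefix>/<si>/<shnum>
+        # which is where the storage server expects to find it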
+
+        # Now execute each assignment by writing the storage.
+        for (share, servernum) in assignments:
+            sharedata = base64.b64decode(self.sdmf_old_shares[share])
+            storedir = self.get_serverdir(servernum)
+            storage_path = os.path.join(storedir, "shares",
+                                        storage_index_to_dir(si))
+            fileutil.make_dirs(storage_path)
+            fileutil.write(os.path.join(storage_path, "%d" % share),
+                           sharedata)
+        # ...and verify that the shares are there.
+        shares = self.find_uri_shares(self.sdmf_old_cap)
+        assert len(shares) == 10
+
+    def test_new_downloader_can_read_old_shares(self):
+        self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
+        self.set_up_grid()
+        self.copy_sdmf_shares()
+        nm = self.g.clients[0].nodemaker
+        n = nm.create_from_cap(self.sdmf_old_cap)
+        d = n.download_best_version()
+        d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
+        return d
+
+class DifferentEncoding(unittest.TestCase):
+    def setUp(self):
+        self._storage = s = FakeStorage()
+        self.nodemaker = make_nodemaker(s)
+
+    def test_filenode(self):
+        # create a file with 3-of-20, then modify it with a client configured
+        # to do 3-of-10. #1510 tracks a failure here
+        self.nodemaker.default_encoding_parameters["n"] = 20
+        d = self.nodemaker.create_mutable_file("old contents")
+        def _created(n):
+            filecap = n.get_cap().to_string()
+            del n # we want a new object, not the cached one
+            self.nodemaker.default_encoding_parameters["n"] = 10
+            n2 = self.nodemaker.create_from_cap(filecap)
+            return n2
+        d.addCallback(_created)
+        def modifier(old_contents, servermap, first_time):
+            return "new contents"
+        d.addCallback(lambda n: n.modify(modifier))
         return d