1 import os, re, base64
2 from cStringIO import StringIO
3 from twisted.trial import unittest
4 from twisted.internet import defer, reactor
5 from allmydata import uri, client
6 from allmydata.nodemaker import NodeMaker
7 from allmydata.util import base32, consumer, fileutil, mathutil
8 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
9      ssk_pubkey_fingerprint_hash
10 from allmydata.util.consumer import MemoryConsumer
11 from allmydata.util.deferredutil import gatherResults
12 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
13      NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
14 from allmydata.monitor import Monitor
15 from allmydata.test.common import ShouldFailMixin
16 from allmydata.test.no_network import GridTestMixin
17 from foolscap.api import eventually, fireEventually
18 from foolscap.logging import log
19 from allmydata.storage_client import StorageFarmBroker
20 from allmydata.storage.common import storage_index_to_dir
21 from allmydata.scripts import debug
22
23 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
24 from allmydata.mutable.common import ResponseCache, \
25      MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
26      NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
27      NotEnoughServersError, CorruptShareError
28 from allmydata.mutable.retrieve import Retrieve
29 from allmydata.mutable.publish import Publish, MutableFileHandle, \
30                                       MutableData, \
31                                       DEFAULT_MAX_SEGMENT_SIZE
32 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
33 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
34 from allmydata.mutable.repairer import MustForceRepairError
35
36 import allmydata.test.common_util as testutil
37 from allmydata.test.common import TEST_RSA_KEY_SIZE
38 from allmydata.test.test_download import PausingConsumer, \
39      PausingAndStoppingConsumer, StoppingConsumer, \
40      ImmediatelyStoppingConsumer
41
42
43 # this "FakeStorage" exists to put the share data in RAM and avoid using real
44 # network connections, both to speed up the tests and to reduce the amount of
45 # non-mutable.py code being exercised.
46
47 class FakeStorage:
48     # this class replaces the collection of storage servers, allowing the
49     # tests to examine and manipulate the published shares. It also lets us
50     # control the order in which read queries are answered, to exercise more
51     # of the error-handling code in Retrieve.
52     #
53     # Note that we ignore the storage index: this FakeStorage instance can
54     # only be used for a single storage index.
55
56
57     def __init__(self):
58         self._peers = {}
59         # _sequence is used to cause the responses to occur in a specific
60         # order. If it is in use, then we will defer queries instead of
61         # answering them right away, accumulating the Deferreds in a dict. We
62         # don't know exactly how many queries we'll get, so exactly one
63         # second after the first query arrives, we will release them all (in
64         # order).
65         self._sequence = None
66         self._pending = {}
67         self._pending_timer = None
68
69     def read(self, peerid, storage_index):
70         shares = self._peers.get(peerid, {})
71         if self._sequence is None:
72             return defer.succeed(shares)
73         d = defer.Deferred()
74         if not self._pending:
75             self._pending_timer = reactor.callLater(1.0, self._fire_readers)
76         if peerid not in self._pending:
77             self._pending[peerid] = []
78         self._pending[peerid].append( (d, shares) )
79         return d
80
81     def _fire_readers(self):
82         self._pending_timer = None
83         pending = self._pending
84         self._pending = {}
85         for peerid in self._sequence:
86             if peerid in pending:
87                 for (d, shares) in pending.pop(peerid):
88                     eventually(d.callback, shares)
89         for peerid in pending:
90             for (d, shares) in pending[peerid]:
91                 eventually(d.callback, shares)
92
93     def write(self, peerid, storage_index, shnum, offset, data):
94         if peerid not in self._peers:
95             self._peers[peerid] = {}
96         shares = self._peers[peerid]
97         f = StringIO()
98         f.write(shares.get(shnum, ""))
99         f.seek(offset)
100         f.write(data)
101         shares[shnum] = f.getvalue()
102
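# The sketch below is not part of the original tests; it illustrates how a
# test can use FakeStorage._sequence to force read responses to arrive in a
# chosen order (the peerids here are hypothetical placeholders):
def _example_sequenced_reads():
    s = FakeStorage()
    s.write("peer-A", "si", shnum=0, offset=0, data="share-data")
    # with _sequence set, read() defers its answers, releasing them in this
    # order one second after the first query arrives
    s._sequence = ["peer-B", "peer-A"]
    d = s.read("peer-A", "si")
    d.addCallback(lambda shares: shares[0])  # fires later, via eventually()
    return d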
103
104 class FakeStorageServer:
105     def __init__(self, peerid, storage):
106         self.peerid = peerid
107         self.storage = storage
108         self.queries = 0
109     def callRemote(self, methname, *args, **kwargs):
110         self.queries += 1
111         def _call():
112             meth = getattr(self, methname)
113             return meth(*args, **kwargs)
114         d = fireEventually()
115         d.addCallback(lambda res: _call())
116         return d
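
    # A sketch of how tests exercise this wrapper (hypothetical values):
    # every call becomes an eventual-send, so the answer arrives in a later
    # reactor turn, just like a real foolscap remote call:
    #   d = server.callRemote("slot_readv", si, [0], [(0, 100)])
    #   d.addCallback(lambda answer: ...)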
117
118     def callRemoteOnly(self, methname, *args, **kwargs):
119         self.queries += 1
120         d = self.callRemote(methname, *args, **kwargs)
121         d.addBoth(lambda ignore: None)
122         # fire-and-forget: intentionally returns None, like foolscap's callRemoteOnly
123
124     def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
125         pass
126
127     def slot_readv(self, storage_index, shnums, readv):
128         d = self.storage.read(self.peerid, storage_index)
129         def _read(shares):
130             response = {}
131             for shnum in shares:
132                 if shnums and shnum not in shnums:
133                     continue
134                 vector = response[shnum] = []
135                 for (offset, length) in readv:
136                     assert isinstance(offset, (int, long)), offset
137                     assert isinstance(length, (int, long)), length
138                     vector.append(shares[shnum][offset:offset+length])
139             return response
140         d.addCallback(_read)
141         return d
142
143     def slot_testv_and_readv_and_writev(self, storage_index, secrets,
144                                         tw_vectors, read_vector):
145         # always-pass: parrot the test vectors back to them.
146         readv = {}
147         for shnum, (testv, writev, new_length) in tw_vectors.items():
148             for (offset, length, op, specimen) in testv:
149                 assert op in ("le", "eq", "ge")
150             # TODO: this isn't right, the read is controlled by read_vector,
151             # not by testv
152             readv[shnum] = [ specimen
153                              for (offset, length, op, specimen)
154                              in testv ]
155             for (offset, data) in writev:
156                 self.storage.write(self.peerid, storage_index, shnum,
157                                    offset, data)
158         answer = (True, readv)
159         return fireEventually(answer)
160
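# A sketch (not in the original file) of the argument shapes accepted by the
# fake slot_testv_and_readv_and_writev above; all values are hypothetical:
def _example_testv_and_writev():
    server = FakeStorageServer("peer-A", FakeStorage())
    # for share 0: test that bytes 0..3 equal "seqn", then write "new data"
    # at offset 0; a new_length of None leaves the share size alone
    tw_vectors = {0: ([(0, 4, "eq", "seqn")], [(0, "new data")], None)}
    secrets = ("write-enabler", "renew-secret", "cancel-secret")
    return server.slot_testv_and_readv_and_writev("si", secrets,
                                                  tw_vectors, [(0, 10)])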
161
162 def flip_bit(original, byte_offset):
163     return (original[:byte_offset] +
164             chr(ord(original[byte_offset]) ^ 0x01) +
165             original[byte_offset+1:])
166
167 def add_two(original, byte_offset):
168     # Flipping the low bit isn't enough to corrupt the version number,
169     # because 1 is a valid version. XORing with 0x02 maps 0->2 and 1->3.
170     return (original[:byte_offset] +
171             chr(ord(original[byte_offset]) ^ 0x02) +
172             original[byte_offset+1:])
173
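# A quick check of the two helpers above (a sketch, not original code):
def _example_bit_helpers():
    # flip_bit toggles the 0x01 bit of one byte; add_two toggles the 0x02 bit
    assert flip_bit("\x00ABC", 0) == "\x01ABC"
    assert add_two("\x01ABC", 0) == "\x03ABC"
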
174 def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
175     # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
176     # list of shnums to corrupt.
177     ds = []
178     for peerid in s._peers:
179         shares = s._peers[peerid]
180         for shnum in shares:
181             if (shnums_to_corrupt is not None
182                 and shnum not in shnums_to_corrupt):
183                 continue
184             data = shares[shnum]
185             # We feed the reader all of the share data up front, so it
186             # never needs the rref or storage index (hence the Nones
187             # below). We use MDMFSlotReadProxy because it can parse both
188             # MDMF and SDMF shares.
189             reader = MDMFSlotReadProxy(None, None, shnum, data)
190             # We need to get the offsets for the next part.
191             d = reader.get_verinfo()
192             def _do_corruption(verinfo, data, shnum, shares):
193                 (seqnum,
194                  root_hash,
195                  IV,
196                  segsize,
197                  datalen,
198                  k, n, prefix, o) = verinfo
199                 if isinstance(offset, tuple):
200                     offset1, offset2 = offset
201                 else:
202                     offset1 = offset
203                     offset2 = 0
204                 if offset1 == "pubkey" and IV:
205                     real_offset = 107
206                 elif offset1 in o:
207                     real_offset = o[offset1]
208                 else:
209                     real_offset = offset1
210                 real_offset = int(real_offset) + offset2 + offset_offset
211                 assert isinstance(real_offset, int), offset
212                 if offset1 == 0: # verbyte
213                     f = add_two
214                 else:
215                     f = flip_bit
216                 shares[shnum] = f(data, real_offset)
217             d.addCallback(_do_corruption, data, shnum, shares)
218             ds.append(d)
219     dl = defer.DeferredList(ds)
220     dl.addCallback(lambda ignored: res)
221     return dl
222
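# A sketch of how the tests drive corrupt() (hypothetical call site): it is
# built to sit in a Deferred chain, passing its first argument through:
def _example_corrupt_usage(storage):
    d = defer.succeed(None)
    # flip one bit in the share-data section of every share
    d.addCallback(corrupt, storage, "share_data")
    # bump the verbyte (offset 0) of share 0 only, via add_two
    d.addCallback(corrupt, storage, 0, shnums_to_corrupt=[0])
    return d
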
223 def make_storagebroker(s=None, num_peers=10):
224     if not s:
225         s = FakeStorage()
226     peerids = [tagged_hash("peerid", "%d" % i)[:20]
227                for i in range(num_peers)]
228     storage_broker = StorageFarmBroker(None, True)
229     for peerid in peerids:
230         fss = FakeStorageServer(peerid, s)
231         ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
232                "permutation-seed-base32": base32.b2a(peerid) }
233         storage_broker.test_add_rref(peerid, fss, ann)
234     return storage_broker
235
236 def make_nodemaker(s=None, num_peers=10):
237     storage_broker = make_storagebroker(s, num_peers)
238     sh = client.SecretHolder("lease secret", "convergence secret")
239     keygen = client.KeyGenerator()
240     keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
241     nodemaker = NodeMaker(storage_broker, sh, None,
242                           None, None,
243                           {"k": 3, "n": 10}, SDMF_VERSION, keygen)
244     return nodemaker
245
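# A minimal sketch of how the two factories above combine (it mirrors the
# setUp methods below; the contents are illustrative):
def _example_make_and_publish():
    s = FakeStorage()
    nodemaker = make_nodemaker(s, num_peers=10)
    d = nodemaker.create_mutable_file(MutableData("initial contents"))
    # the shares now live in s._peers, where a test can inspect or
    # corrupt them directly
    d.addCallback(lambda n: n.download_best_version())
    return d
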
246 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
247     # this limit used to be enforced in Publish, but we removed it. Some
248     # of these tests verify that the new code correctly handles files
249     # larger than the limit.
250     OLD_MAX_SEGMENT_SIZE = 3500000
251     def setUp(self):
252         self._storage = s = FakeStorage()
253         self.nodemaker = make_nodemaker(s)
254
255     def test_create(self):
256         d = self.nodemaker.create_mutable_file()
257         def _created(n):
258             self.failUnless(isinstance(n, MutableFileNode))
259             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
260             sb = self.nodemaker.storage_broker
261             peer0 = sorted(sb.get_all_serverids())[0]
262             shnums = self._storage._peers[peer0].keys()
263             self.failUnlessEqual(len(shnums), 1)
264         d.addCallback(_created)
265         return d
266
267
268     def test_create_mdmf(self):
269         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
270         def _created(n):
271             self.failUnless(isinstance(n, MutableFileNode))
272             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
273             sb = self.nodemaker.storage_broker
274             peer0 = sorted(sb.get_all_serverids())[0]
275             shnums = self._storage._peers[peer0].keys()
276             self.failUnlessEqual(len(shnums), 1)
277         d.addCallback(_created)
278         return d
279
280     def test_single_share(self):
281         # Make sure that we tolerate publishing a single share.
282         self.nodemaker.default_encoding_parameters['k'] = 1
283         self.nodemaker.default_encoding_parameters['happy'] = 1
284         self.nodemaker.default_encoding_parameters['n'] = 1
285         d = defer.succeed(None)
286         for v in (SDMF_VERSION, MDMF_VERSION):
287             d.addCallback(lambda ignored, v=v:
288                 self.nodemaker.create_mutable_file(version=v))
289             def _created(n):
290                 self.failUnless(isinstance(n, MutableFileNode))
291                 self._node = n
292                 return n
293             d.addCallback(_created)
294             d.addCallback(lambda n:
295                 n.overwrite(MutableData("Contents" * 50000)))
296             d.addCallback(lambda ignored:
297                 self._node.download_best_version())
298             d.addCallback(lambda contents:
299                 self.failUnlessEqual(contents, "Contents" * 50000))
300         return d
301
302     def test_max_shares(self):
303         self.nodemaker.default_encoding_parameters['n'] = 255
304         d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
305         def _created(n):
306             self.failUnless(isinstance(n, MutableFileNode))
307             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
308             sb = self.nodemaker.storage_broker
309             num_shares = sum([len(self._storage._peers[x].keys()) for x \
310                               in sb.get_all_serverids()])
311             self.failUnlessEqual(num_shares, 255)
312             self._node = n
313             return n
314         d.addCallback(_created)
315         # Now we upload some contents
316         d.addCallback(lambda n:
317             n.overwrite(MutableData("contents" * 50000)))
318         # ...then download contents
319         d.addCallback(lambda ignored:
320             self._node.download_best_version())
321         # ...and check to make sure everything went okay.
322         d.addCallback(lambda contents:
323             self.failUnlessEqual("contents" * 50000, contents))
324         return d
325
326     def test_max_shares_mdmf(self):
327         # Test how files behave when there are 255 shares.
328         self.nodemaker.default_encoding_parameters['n'] = 255
329         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
330         def _created(n):
331             self.failUnless(isinstance(n, MutableFileNode))
332             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
333             sb = self.nodemaker.storage_broker
334             num_shares = sum([len(self._storage._peers[x].keys()) for x \
335                               in sb.get_all_serverids()])
336             self.failUnlessEqual(num_shares, 255)
337             self._node = n
338             return n
339         d.addCallback(_created)
340         d.addCallback(lambda n:
341             n.overwrite(MutableData("contents" * 50000)))
342         d.addCallback(lambda ignored:
343             self._node.download_best_version())
344         d.addCallback(lambda contents:
345             self.failUnlessEqual(contents, "contents" * 50000))
346         return d
347
348     def test_mdmf_filenode_cap(self):
349         # Test that an MDMF filenode, once created, returns an MDMF URI.
350         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
351         def _created(n):
352             self.failUnless(isinstance(n, MutableFileNode))
353             cap = n.get_cap()
354             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
355             rcap = n.get_readcap()
356             self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
357             vcap = n.get_verify_cap()
358             self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
359         d.addCallback(_created)
360         return d
361
362
363     def test_create_from_mdmf_writecap(self):
364         # Test that the nodemaker is capable of creating an MDMF
365         # filenode given an MDMF cap.
366         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
367         def _created(n):
368             self.failUnless(isinstance(n, MutableFileNode))
369             s = n.get_uri()
370             self.failUnless(s.startswith("URI:MDMF"))
371             n2 = self.nodemaker.create_from_cap(s)
372             self.failUnless(isinstance(n2, MutableFileNode))
373             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
374             self.failUnlessEqual(n.get_uri(), n2.get_uri())
375         d.addCallback(_created)
376         return d
377
378
379     def test_create_from_mdmf_readcap(self):
380         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
381         def _created(n):
382             self.failUnless(isinstance(n, MutableFileNode))
383             s = n.get_readonly_uri()
384             n2 = self.nodemaker.create_from_cap(s)
385             self.failUnless(isinstance(n2, MutableFileNode))
386
387             # Check that it's a readonly node
388             self.failUnless(n2.is_readonly())
389         d.addCallback(_created)
390         return d
391
392
393     def test_internal_version_from_cap(self):
394         # MutableFileNodes and MutableFileVersions have an internal
395         # switch that tells them whether they're dealing with an SDMF or
396         # MDMF mutable file when they start doing stuff. We want to make
397         # sure that this is set appropriately given an MDMF cap.
398         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
399         def _created(n):
400             self.uri = n.get_uri()
401             self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
402
403             n2 = self.nodemaker.create_from_cap(self.uri)
404             self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
405         d.addCallback(_created)
406         return d
407
408
409     def test_serialize(self):
410         n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
411         calls = []
412         def _callback(*args, **kwargs):
413             self.failUnlessEqual(args, (4,) )
414             self.failUnlessEqual(kwargs, {"foo": 5})
415             calls.append(1)
416             return 6
417         d = n._do_serialized(_callback, 4, foo=5)
418         def _check_callback(res):
419             self.failUnlessEqual(res, 6)
420             self.failUnlessEqual(calls, [1])
421         d.addCallback(_check_callback)
422
423         def _errback():
424             raise ValueError("heya")
425         d.addCallback(lambda res:
426                       self.shouldFail(ValueError, "_check_errback", "heya",
427                                       n._do_serialized, _errback))
428         return d
429
430     def test_upload_and_download(self):
431         d = self.nodemaker.create_mutable_file()
432         def _created(n):
433             d = defer.succeed(None)
434             d.addCallback(lambda res: n.get_servermap(MODE_READ))
435             d.addCallback(lambda smap: smap.dump(StringIO()))
436             d.addCallback(lambda sio:
437                           self.failUnless("3-of-10" in sio.getvalue()))
438             d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
439             d.addCallback(lambda res: self.failUnlessIdentical(res, None))
440             d.addCallback(lambda res: n.download_best_version())
441             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
442             d.addCallback(lambda res: n.get_size_of_best_version())
443             d.addCallback(lambda size:
444                           self.failUnlessEqual(size, len("contents 1")))
445             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
446             d.addCallback(lambda res: n.download_best_version())
447             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
448             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
449             d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
450             d.addCallback(lambda res: n.download_best_version())
451             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
452             d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
453             d.addCallback(lambda smap:
454                           n.download_version(smap,
455                                              smap.best_recoverable_version()))
456             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
457             # test a file that is large enough to overcome the
458             # mapupdate-to-retrieve data caching (i.e. make the shares larger
459             # than the default readsize, which is 2000 bytes). A 15kB file
460             # will have 5kB shares.
461             d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
462             d.addCallback(lambda res: n.download_best_version())
463             d.addCallback(lambda res:
464                           self.failUnlessEqual(res, "large size file" * 1000))
465             return d
466         d.addCallback(_created)
467         return d
468
469
470     def test_upload_and_download_mdmf(self):
471         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
472         def _created(n):
473             d = defer.succeed(None)
474             d.addCallback(lambda ignored:
475                 n.get_servermap(MODE_READ))
476             def _then(servermap):
477                 dumped = servermap.dump(StringIO())
478                 self.failUnlessIn("3-of-10", dumped.getvalue())
479             d.addCallback(_then)
480             # Now overwrite the contents with some new contents. We want 
481             # to make them big enough to force the file to be uploaded
482             # in more than one segment.
483             big_contents = "contents1" * 100000 # about 900 KiB
484             big_contents_uploadable = MutableData(big_contents)
485             d.addCallback(lambda ignored:
486                 n.overwrite(big_contents_uploadable))
487             d.addCallback(lambda ignored:
488                 n.download_best_version())
489             d.addCallback(lambda data:
490                 self.failUnlessEqual(data, big_contents))
491             # Overwrite the contents again with some new contents. As
492             # before, they need to be big enough to force multiple
493             # segments, so that we make the downloader deal with
494             # multiple segments.
495             bigger_contents = "contents2" * 1000000 # about 9MiB 
496             bigger_contents_uploadable = MutableData(bigger_contents)
497             d.addCallback(lambda ignored:
498                 n.overwrite(bigger_contents_uploadable))
499             d.addCallback(lambda ignored:
500                 n.download_best_version())
501             d.addCallback(lambda data:
502                 self.failUnlessEqual(data, bigger_contents))
503             return d
504         d.addCallback(_created)
505         return d
506
507
508     def test_retrieve_producer_mdmf(self):
509         # We should make sure that the retriever is able to pause and stop
510         # correctly.
511         data = "contents1" * 100000
512         d = self.nodemaker.create_mutable_file(MutableData(data),
513                                                version=MDMF_VERSION)
514         d.addCallback(lambda node: node.get_best_mutable_version())
515         d.addCallback(self._test_retrieve_producer, "MDMF", data)
516         return d
517
518     # note: SDMF has only one big segment, so we can't use the usual
519     # after-the-first-write() trick to pause or stop the download.
520     # Disabled until we find a better approach.
521     def OFF_test_retrieve_producer_sdmf(self):
522         data = "contents1" * 100000
523         d = self.nodemaker.create_mutable_file(MutableData(data),
524                                                version=SDMF_VERSION)
525         d.addCallback(lambda node: node.get_best_mutable_version())
526         d.addCallback(self._test_retrieve_producer, "SDMF", data)
527         return d
528
529     def _test_retrieve_producer(self, version, kind, data):
530         # Now we'll retrieve it into a pausing consumer.
531         c = PausingConsumer()
532         d = version.read(c)
533         d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
534
535         c2 = PausingAndStoppingConsumer()
536         d.addCallback(lambda ign:
537                       self.shouldFail(DownloadStopped, kind+"_pause_stop",
538                                       "our Consumer called stopProducing()",
539                                       version.read, c2))
540
541         c3 = StoppingConsumer()
542         d.addCallback(lambda ign:
543                       self.shouldFail(DownloadStopped, kind+"_stop",
544                                       "our Consumer called stopProducing()",
545                                       version.read, c3))
546
547         c4 = ImmediatelyStoppingConsumer()
548         d.addCallback(lambda ign:
549                       self.shouldFail(DownloadStopped, kind+"_stop_imm",
550                                       "our Consumer called stopProducing()",
551                                       version.read, c4))
552
553         def _then(ign):
554             c5 = MemoryConsumer()
555             d1 = version.read(c5)
556             c5.producer.stopProducing()
557             return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
558                                    "our Consumer called stopProducing()",
559                                    lambda: d1)
560         d.addCallback(_then)
561         return d
562
563     def test_download_from_mdmf_cap(self):
564         # We should be able to download an MDMF file given its cap
565         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
566         def _created(node):
567             self.uri = node.get_uri()
568             # also confirm that the cap has no extension fields
569             pieces = self.uri.split(":")
570             self.failUnlessEqual(len(pieces), 4)
571
572             return node.overwrite(MutableData("contents1" * 100000))
573         def _then(ignored):
574             node = self.nodemaker.create_from_cap(self.uri)
575             return node.download_best_version()
576         def _downloaded(data):
577             self.failUnlessEqual(data, "contents1" * 100000)
578         d.addCallback(_created)
579         d.addCallback(_then)
580         d.addCallback(_downloaded)
581         return d
582
583
584     def test_mdmf_write_count(self):
585         # Publishing an MDMF file should only cause one write for each
586         # share that is to be published. Otherwise, we introduce
587         # undesirable semantics that are a regression from SDMF
588         upload = MutableData("MDMF" * 100000) # about 400 KiB
589         d = self.nodemaker.create_mutable_file(upload,
590                                                version=MDMF_VERSION)
591         def _check_server_write_counts(ignored):
592             sb = self.nodemaker.storage_broker
593             for server in sb.servers.itervalues():
594                 self.failUnlessEqual(server.get_rref().queries, 1)
595         d.addCallback(_check_server_write_counts)
596         return d
597
598
599     def test_create_with_initial_contents(self):
600         upload1 = MutableData("contents 1")
601         d = self.nodemaker.create_mutable_file(upload1)
602         def _created(n):
603             d = n.download_best_version()
604             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
605             upload2 = MutableData("contents 2")
606             d.addCallback(lambda res: n.overwrite(upload2))
607             d.addCallback(lambda res: n.download_best_version())
608             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
609             return d
610         d.addCallback(_created)
611         return d
612
613
614     def test_create_mdmf_with_initial_contents(self):
615         initial_contents = "foobarbaz" * 131072 # about 1.1 MiB
616         initial_contents_uploadable = MutableData(initial_contents)
617         d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
618                                                version=MDMF_VERSION)
619         def _created(n):
620             d = n.download_best_version()
621             d.addCallback(lambda data:
622                 self.failUnlessEqual(data, initial_contents))
623             uploadable2 = MutableData(initial_contents + "foobarbaz")
624             d.addCallback(lambda ignored:
625                 n.overwrite(uploadable2))
626             d.addCallback(lambda ignored:
627                 n.download_best_version())
628             d.addCallback(lambda data:
629                 self.failUnlessEqual(data, initial_contents +
630                                            "foobarbaz"))
631             return d
632         d.addCallback(_created)
633         return d
634
635
636     def test_response_cache_memory_leak(self):
637         d = self.nodemaker.create_mutable_file("contents")
638         def _created(n):
639             d = n.download_best_version()
640             d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
641             d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
642
643             def _check_cache(expected):
644                 # The total size of cache entries should not increase on the second download;
645                 # in fact the cache contents should be identical.
646                 d2 = n.download_best_version()
647                 d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
648                 return d2
649             d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
650             return d
651         d.addCallback(_created)
652         return d
653
654     def test_create_with_initial_contents_function(self):
655         data = "initial contents"
656         def _make_contents(n):
657             self.failUnless(isinstance(n, MutableFileNode))
658             key = n.get_writekey()
659             self.failUnless(isinstance(key, str), key)
660             self.failUnlessEqual(len(key), 16) # AES key size
661             return MutableData(data)
662         d = self.nodemaker.create_mutable_file(_make_contents)
663         def _created(n):
664             return n.download_best_version()
665         d.addCallback(_created)
666         d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
667         return d
668
669
670     def test_create_mdmf_with_initial_contents_function(self):
671         data = "initial contents" * 100000
672         def _make_contents(n):
673             self.failUnless(isinstance(n, MutableFileNode))
674             key = n.get_writekey()
675             self.failUnless(isinstance(key, str), key)
676             self.failUnlessEqual(len(key), 16)
677             return MutableData(data)
678         d = self.nodemaker.create_mutable_file(_make_contents,
679                                                version=MDMF_VERSION)
680         d.addCallback(lambda n:
681             n.download_best_version())
682         d.addCallback(lambda data2:
683             self.failUnlessEqual(data2, data))
684         return d
685
686
687     def test_create_with_too_large_contents(self):
688         BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
689         BIG_uploadable = MutableData(BIG)
690         d = self.nodemaker.create_mutable_file(BIG_uploadable)
691         def _created(n):
692             other_BIG_uploadable = MutableData(BIG)
693             d = n.overwrite(other_BIG_uploadable)
694             return d
695         d.addCallback(_created)
696         return d
697
698     def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
699         d = n.get_servermap(MODE_READ)
700         d.addCallback(lambda servermap: servermap.best_recoverable_version())
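        # best_recoverable_version() yields a verinfo tuple shaped like the
        # one unpacked in corrupt() above: (seqnum, root_hash, IV, segsize,
        # datalen, k, n, prefix, offsets), so verinfo[0] is the seqnum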
701         d.addCallback(lambda verinfo:
702                       self.failUnlessEqual(verinfo[0], expected_seqnum, which))
703         return d
704
705     def test_modify(self):
706         def _modifier(old_contents, servermap, first_time):
707             new_contents = old_contents + "line2"
708             return new_contents
709         def _non_modifier(old_contents, servermap, first_time):
710             return old_contents
711         def _none_modifier(old_contents, servermap, first_time):
712             return None
713         def _error_modifier(old_contents, servermap, first_time):
714             raise ValueError("oops")
715         def _toobig_modifier(old_contents, servermap, first_time):
716             new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
717             return new_content
718         calls = []
719         def _ucw_error_modifier(old_contents, servermap, first_time):
720             # simulate an UncoordinatedWriteError once
721             calls.append(1)
722             if len(calls) <= 1:
723                 raise UncoordinatedWriteError("simulated")
724             new_contents = old_contents + "line3"
725             return new_contents
726         def _ucw_error_non_modifier(old_contents, servermap, first_time):
727             # simulate an UncoordinatedWriteError once, and don't actually
728             # modify the contents on subsequent invocations
729             calls.append(1)
730             if len(calls) <= 1:
731                 raise UncoordinatedWriteError("simulated")
732             return old_contents
733
734         initial_contents = "line1"
735         d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
736         def _created(n):
737             d = n.modify(_modifier)
738             d.addCallback(lambda res: n.download_best_version())
739             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
740             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
741
742             d.addCallback(lambda res: n.modify(_non_modifier))
743             d.addCallback(lambda res: n.download_best_version())
744             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
745             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
746
747             d.addCallback(lambda res: n.modify(_none_modifier))
748             d.addCallback(lambda res: n.download_best_version())
749             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
750             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
751
752             d.addCallback(lambda res:
753                           self.shouldFail(ValueError, "error_modifier", None,
754                                           n.modify, _error_modifier))
755             d.addCallback(lambda res: n.download_best_version())
756             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
757             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
758
759
760             d.addCallback(lambda res: n.download_best_version())
761             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
762             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
763
764             d.addCallback(lambda res: n.modify(_ucw_error_modifier))
765             d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
766             d.addCallback(lambda res: n.download_best_version())
767             d.addCallback(lambda res: self.failUnlessEqual(res,
768                                                            "line1line2line3"))
769             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
770
771             def _reset_ucw_error_modifier(res):
772                 calls[:] = []
773                 return res
774             d.addCallback(_reset_ucw_error_modifier)
775
776             # in practice, this n.modify call should publish twice: the first
777             # one gets a UCWE, the second does not. But our test jig (in
778             # which the modifier raises the UCWE) skips over the first one,
779             # so in this test there will be only one publish, and the seqnum
780             # will only be one larger than the previous test, not two (i.e. 4
781             # instead of 5).
782             d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
783             d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
784             d.addCallback(lambda res: n.download_best_version())
785             d.addCallback(lambda res: self.failUnlessEqual(res,
786                                                            "line1line2line3"))
787             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
788             d.addCallback(lambda res: n.modify(_toobig_modifier))
789             return d
790         d.addCallback(_created)
791         return d
792
793
794     def test_modify_backoffer(self):
795         def _modifier(old_contents, servermap, first_time):
796             return old_contents + "line2"
797         calls = []
798         def _ucw_error_modifier(old_contents, servermap, first_time):
799             # simulate an UncoordinatedWriteError once
800             calls.append(1)
801             if len(calls) <= 1:
802                 raise UncoordinatedWriteError("simulated")
803             return old_contents + "line3"
804         def _always_ucw_error_modifier(old_contents, servermap, first_time):
805             raise UncoordinatedWriteError("simulated")
806         def _backoff_stopper(node, f):
807             return f
808         def _backoff_pauser(node, f):
809             d = defer.Deferred()
810             reactor.callLater(0.5, d.callback, None)
811             return d
812
813         # the give-up-er will hit its maximum retry count quickly
814         giveuper = BackoffAgent()
815         giveuper._delay = 0.1
816         giveuper.factor = 1
817
818         d = self.nodemaker.create_mutable_file(MutableData("line1"))
819         def _created(n):
820             d = n.modify(_modifier)
821             d.addCallback(lambda res: n.download_best_version())
822             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
823             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
824
825             d.addCallback(lambda res:
826                           self.shouldFail(UncoordinatedWriteError,
827                                           "_backoff_stopper", None,
828                                           n.modify, _ucw_error_modifier,
829                                           _backoff_stopper))
830             d.addCallback(lambda res: n.download_best_version())
831             d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
832             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
833
834             def _reset_ucw_error_modifier(res):
835                 calls[:] = []
836                 return res
837             d.addCallback(_reset_ucw_error_modifier)
838             d.addCallback(lambda res: n.modify(_ucw_error_modifier,
839                                                _backoff_pauser))
840             d.addCallback(lambda res: n.download_best_version())
841             d.addCallback(lambda res: self.failUnlessEqual(res,
842                                                            "line1line2line3"))
843             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
844
845             d.addCallback(lambda res:
846                           self.shouldFail(UncoordinatedWriteError,
847                                           "giveuper", None,
848                                           n.modify, _always_ucw_error_modifier,
849                                           giveuper.delay))
850             d.addCallback(lambda res: n.download_best_version())
851             d.addCallback(lambda res: self.failUnlessEqual(res,
852                                                            "line1line2line3"))
853             d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
854
855             return d
856         d.addCallback(_created)
857         return d
858
859     def test_upload_and_download_full_size_keys(self):
860         self.nodemaker.key_generator = client.KeyGenerator()
861         d = self.nodemaker.create_mutable_file()
862         def _created(n):
863             d = defer.succeed(None)
864             d.addCallback(lambda res: n.get_servermap(MODE_READ))
865             d.addCallback(lambda smap: smap.dump(StringIO()))
866             d.addCallback(lambda sio:
867                           self.failUnless("3-of-10" in sio.getvalue()))
868             d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
869             d.addCallback(lambda res: self.failUnlessIdentical(res, None))
870             d.addCallback(lambda res: n.download_best_version())
871             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
872             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
873             d.addCallback(lambda res: n.download_best_version())
874             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
875             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
876             d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
877             d.addCallback(lambda res: n.download_best_version())
878             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
879             d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
880             d.addCallback(lambda smap:
881                           n.download_version(smap,
882                                              smap.best_recoverable_version()))
883             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
884             return d
885         d.addCallback(_created)
886         return d
887
888
889     def test_size_after_servermap_update(self):
890         # a mutable file node should have something to say about how big
891         # it is after a servermap update is performed, since this tells
892         # us how large the best version of that mutable file is.
893         d = self.nodemaker.create_mutable_file()
894         def _created(n):
895             self.n = n
896             return n.get_servermap(MODE_READ)
897         d.addCallback(_created)
898         d.addCallback(lambda ignored:
899             self.failUnlessEqual(self.n.get_size(), 0))
900         d.addCallback(lambda ignored:
901             self.n.overwrite(MutableData("foobarbaz")))
902         d.addCallback(lambda ignored:
903             self.failUnlessEqual(self.n.get_size(), 9))
904         d.addCallback(lambda ignored:
905             self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
906         d.addCallback(_created)
907         d.addCallback(lambda ignored:
908             self.failUnlessEqual(self.n.get_size(), 9))
909         return d
910
911
912 class PublishMixin:
913     def publish_one(self):
914         # publish a file and create shares, which can then be manipulated
915         # later.
916         self.CONTENTS = "New contents go here" * 1000
917         self.uploadable = MutableData(self.CONTENTS)
918         self._storage = FakeStorage()
919         self._nodemaker = make_nodemaker(self._storage)
920         self._storage_broker = self._nodemaker.storage_broker
921         d = self._nodemaker.create_mutable_file(self.uploadable)
922         def _created(node):
923             self._fn = node
924             self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
925         d.addCallback(_created)
926         return d
927
928     def publish_mdmf(self):
929         # like publish_one, except that the result is guaranteed to be
930         # an MDMF file.
931         # self.CONTENTS should have more than one segment.
932         self.CONTENTS = "This is an MDMF file" * 100000
933         self.uploadable = MutableData(self.CONTENTS)
934         self._storage = FakeStorage()
935         self._nodemaker = make_nodemaker(self._storage)
936         self._storage_broker = self._nodemaker.storage_broker
937         d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
938         def _created(node):
939             self._fn = node
940             self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
941         d.addCallback(_created)
942         return d
943
944
945     def publish_sdmf(self):
946         # like publish_one, except that the result is guaranteed to be
947         # an SDMF file
948         self.CONTENTS = "This is an SDMF file" * 1000
949         self.uploadable = MutableData(self.CONTENTS)
950         self._storage = FakeStorage()
951         self._nodemaker = make_nodemaker(self._storage)
952         self._storage_broker = self._nodemaker.storage_broker
953         d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
954         def _created(node):
955             self._fn = node
956             self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
957         d.addCallback(_created)
958         return d
959
960
961     def publish_multiple(self, version=0):
962         self.CONTENTS = ["Contents 0",
963                          "Contents 1",
964                          "Contents 2",
965                          "Contents 3a",
966                          "Contents 3b"]
967         self.uploadables = [MutableData(d) for d in self.CONTENTS]
968         self._copied_shares = {}
969         self._storage = FakeStorage()
970         self._nodemaker = make_nodemaker(self._storage)
971         d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
972         def _created(node):
973             self._fn = node
974             # now create multiple versions of the same file, and accumulate
975             # their shares, so we can mix and match them later.
976             d = defer.succeed(None)
977             d.addCallback(self._copy_shares, 0)
978             d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
979             d.addCallback(self._copy_shares, 1)
980             d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
981             d.addCallback(self._copy_shares, 2)
982             d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
983             d.addCallback(self._copy_shares, 3)
984             # now we replace all the shares with version s3, and upload a new
985             # version to get s4b.
986             rollback = dict([(i,2) for i in range(10)])
987             d.addCallback(lambda res: self._set_versions(rollback))
988             d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
989             d.addCallback(self._copy_shares, 4)
990             # we leave the storage in state 4
991             return d
992         d.addCallback(_created)
993         return d
994
995
996     def _copy_shares(self, ignored, index):
997         shares = self._storage._peers
998         # we need a deep copy
999         new_shares = {}
1000         for peerid in shares:
1001             new_shares[peerid] = {}
1002             for shnum in shares[peerid]:
1003                 new_shares[peerid][shnum] = shares[peerid][shnum]
1004         self._copied_shares[index] = new_shares
1005
1006     def _set_versions(self, versionmap):
1007         # versionmap maps shnums to which version (0,1,2,3,4) we want the
1008         # share to be at. Any shnum which is left out of the map will stay at
1009         # its current version.
1010         shares = self._storage._peers
1011         oldshares = self._copied_shares
1012         for peerid in shares:
1013             for shnum in shares[peerid]:
1014                 if shnum in versionmap:
1015                     index = versionmap[shnum]
1016                     shares[peerid][shnum] = oldshares[index][peerid][shnum]
1017
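# A sketch of how a test mixes the copied versions (hypothetical call site,
# using _set_versions above): to roll shares 0-4 back to version 2 while
# shares 5-9 stay at version 3, a test inside the mixin would run:
#   self._set_versions(dict([(i, 2) for i in range(5)] +
#                           [(i, 3) for i in range(5, 10)]))
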
1018 class Servermap(unittest.TestCase, PublishMixin):
1019     def setUp(self):
1020         return self.publish_one()
1021
1022     def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
1023                        update_range=None):
1024         if fn is None:
1025             fn = self._fn
1026         if sb is None:
1027             sb = self._storage_broker
1028         smu = ServermapUpdater(fn, sb, Monitor(),
1029                                ServerMap(), mode, update_range=update_range)
1030         d = smu.update()
1031         return d
1032
1033     def update_servermap(self, oldmap, mode=MODE_CHECK):
1034         smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
1035                                oldmap, mode)
1036         d = smu.update()
1037         return d
1038
1039     def failUnlessOneRecoverable(self, sm, num_shares):
1040         self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1041         self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1042         best = sm.best_recoverable_version()
1043         self.failIfEqual(best, None)
1044         self.failUnlessEqual(sm.recoverable_versions(), set([best]))
1045         self.failUnlessEqual(len(sm.shares_available()), 1)
1046         self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
1047         shnum, servers = sm.make_sharemap().items()[0]
1048         server = list(servers)[0]
1049         self.failUnlessEqual(sm.version_on_server(server, shnum), best)
1050         self.failUnlessEqual(sm.version_on_server(server, 666), None)
1051         return sm
1052
1053     def test_basic(self):
1054         d = defer.succeed(None)
1055         ms = self.make_servermap
1056         us = self.update_servermap
1057
1058         d.addCallback(lambda res: ms(mode=MODE_CHECK))
1059         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1060         d.addCallback(lambda res: ms(mode=MODE_WRITE))
1061         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1062         d.addCallback(lambda res: ms(mode=MODE_READ))
1063         # this mode stops at k+epsilon, and epsilon=k, so 6 shares
1064         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1065         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1066         # this mode stops at 'k' shares
1067         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1068
1069         # and can we re-use the same servermap? Note that these are sorted in
1070         # increasing order of number of servers queried, since once a server
1071         # gets into the servermap, we'll always ask it for an update.
1072         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1073         d.addCallback(lambda sm: us(sm, mode=MODE_READ))
1074         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1075         d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
1076         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1077         d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
1078         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1079         d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
1080         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1081
1082         return d
1083
1084     def test_fetch_privkey(self):
1085         d = defer.succeed(None)
1086         # use the sibling filenode (which hasn't been used yet), and make
1087         # sure it can fetch the privkey. The file is small, so the privkey
1088         # will be fetched on the first (query) pass.
1089         d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
1090         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1091
1092         # create a new file, which is large enough to knock the privkey out
1093         # of the early part of the file
1094         LARGE = "These are Larger contents" * 200 # about 5KB
1095         LARGE_uploadable = MutableData(LARGE)
1096         d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
1097         def _created(large_fn):
1098             large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
1099             return self.make_servermap(MODE_WRITE, large_fn2)
1100         d.addCallback(_created)
1101         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1102         return d
1103
1104
1105     def test_mark_bad(self):
1106         d = defer.succeed(None)
1107         ms = self.make_servermap
1108
1109         d.addCallback(lambda res: ms(mode=MODE_READ))
1110         d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1111         def _made_map(sm):
1112             v = sm.best_recoverable_version()
1113             vm = sm.make_versionmap()
1114             shares = list(vm[v])
1115             self.failUnlessEqual(len(shares), 6)
1116             self._corrupted = set()
1117             # mark the first 5 shares as corrupt, then update the servermap.
1118             # The map should not have the marked shares in it any more, and
1119             # new shares should be found to replace the missing ones.
1120             for (shnum, server, timestamp) in shares:
1121                 if shnum < 5:
1122                     self._corrupted.add( (server, shnum) )
1123                     sm.mark_bad_share(server, shnum, "")
1124             return self.update_servermap(sm, MODE_WRITE)
1125         d.addCallback(_made_map)
1126         def _check_map(sm):
1127             # this should find all 5 shares that weren't marked bad
1128             v = sm.best_recoverable_version()
1129             vm = sm.make_versionmap()
1130             shares = list(vm[v])
1131             for (server, shnum) in self._corrupted:
1132                 server_shares = sm.debug_shares_on_server(server)
1133                 self.failIf(shnum in server_shares,
1134                             "%d was in %s" % (shnum, server_shares))
1135             self.failUnlessEqual(len(shares), 5)
1136         d.addCallback(_check_map)
1137         return d
1138
1139     def failUnlessNoneRecoverable(self, sm):
1140         self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1141         self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1142         best = sm.best_recoverable_version()
1143         self.failUnlessEqual(best, None)
1144         self.failUnlessEqual(len(sm.shares_available()), 0)
1145
1146     def test_no_shares(self):
1147         self._storage._peers = {} # delete all shares
1148         ms = self.make_servermap
1149         d = defer.succeed(None)
1150
1151         d.addCallback(lambda res: ms(mode=MODE_CHECK))
1152         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1153
1154         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1155         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1156
1157         d.addCallback(lambda res: ms(mode=MODE_WRITE))
1158         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1159
1160         d.addCallback(lambda res: ms(mode=MODE_READ))
1161         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1162
1163         return d
1164
1165     def failUnlessNotQuiteEnough(self, sm):
1166         self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1167         self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
1168         best = sm.best_recoverable_version()
1169         self.failUnlessEqual(best, None)
1170         self.failUnlessEqual(len(sm.shares_available()), 1)
1171         self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
1172         return sm
1173
1174     def test_not_quite_enough_shares(self):
1175         s = self._storage
1176         ms = self.make_servermap
1177         num_shares = len(s._peers)
1178         for peerid in s._peers:
1179             s._peers[peerid] = {}
1180             num_shares -= 1
1181             if num_shares == 2:
1182                 break
1183         # now there ought to be only two shares left
1184         assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
1185
1186         d = defer.succeed(None)
1187
1188         d.addCallback(lambda res: ms(mode=MODE_CHECK))
1189         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1190         d.addCallback(lambda sm:
1191                       self.failUnlessEqual(len(sm.make_sharemap()), 2))
1192         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1193         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1194         d.addCallback(lambda res: ms(mode=MODE_WRITE))
1195         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1196         d.addCallback(lambda res: ms(mode=MODE_READ))
1197         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1198
1199         return d
1200
1201
1202     def test_servermapupdater_finds_mdmf_files(self):
1203         # publish an MDMF file, then make sure that when we run the
1204         # ServermapUpdater, the file is reported to have one recoverable
1205         # version.
1206         d = defer.succeed(None)
1207         d.addCallback(lambda ignored:
1208             self.publish_mdmf())
1209         d.addCallback(lambda ignored:
1210             self.make_servermap(mode=MODE_CHECK))
1211         # Calling make_servermap also updates the servermap in the mode
1212         # that we specify, so we just need to see what it says.
1213         def _check_servermap(sm):
1214             self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1215         d.addCallback(_check_servermap)
1216         return d
1217
1218
1219     def test_fetch_update(self):
1220         d = defer.succeed(None)
1221         d.addCallback(lambda ignored:
1222             self.publish_mdmf())
1223         d.addCallback(lambda ignored:
1224             self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1225         def _check_servermap(sm):
1226             # 10 shares
1227             self.failUnlessEqual(len(sm.update_data), 10)
1228             # one version
1229             for data in sm.update_data.itervalues():
1230                 self.failUnlessEqual(len(data), 1)
1231         d.addCallback(_check_servermap)
1232         return d
1233
1234
1235     def test_servermapupdater_finds_sdmf_files(self):
1236         d = defer.succeed(None)
1237         d.addCallback(lambda ignored:
1238             self.publish_sdmf())
1239         d.addCallback(lambda ignored:
1240             self.make_servermap(mode=MODE_CHECK))
1241         d.addCallback(lambda servermap:
1242             self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1243         return d
1244
1245
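# A hedged sketch (not used above): test_no_shares and
# test_not_quite_enough_shares each run the same assertion against a
# servermap built in every mode. That sweep could be factored out like
# this, where make_map is a callable like the ms(mode=...) helper bound
# in those tests.
def sweep_servermap_modes(make_map, check):
    d = defer.succeed(None)
    for mode in (MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ):
        # rebind 'mode' as a default argument so each callback keeps its own
        d.addCallback(lambda res, mode=mode: make_map(mode=mode))
        d.addCallback(check)
    return d
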
1246 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1247     def setUp(self):
1248         return self.publish_one()
1249
1250     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1251         if oldmap is None:
1252             oldmap = ServerMap()
1253         if sb is None:
1254             sb = self._storage_broker
1255         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1256         d = smu.update()
1257         return d
1258
1259     def abbrev_verinfo(self, verinfo):
1260         if verinfo is None:
1261             return None
1262         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1263          offsets_tuple) = verinfo
1264         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1265
1266     def abbrev_verinfo_dict(self, verinfo_d):
1267         output = {}
1268         for verinfo,value in verinfo_d.items():
1269             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1270              offsets_tuple) = verinfo
1271             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1272         return output
1273
1274     def dump_servermap(self, servermap):
1275         print "SERVERMAP", servermap
1276         print "RECOVERABLE", [self.abbrev_verinfo(v)
1277                               for v in servermap.recoverable_versions()]
1278         print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1279         print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1280
1281     def do_download(self, servermap, version=None):
1282         if version is None:
1283             version = servermap.best_recoverable_version()
1284         r = Retrieve(self._fn, self._storage_broker, servermap, version)
1285         c = consumer.MemoryConsumer()
1286         d = r.download(consumer=c)
1287         d.addCallback(lambda mc: "".join(mc.chunks))
1288         return d
1289
1290
1291     def test_basic(self):
1292         d = self.make_servermap()
1293         def _do_retrieve(servermap):
1294             self._smap = servermap
1295             #self.dump_servermap(servermap)
1296             self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1297             return self.do_download(servermap)
1298         d.addCallback(_do_retrieve)
1299         def _retrieved(new_contents):
1300             self.failUnlessEqual(new_contents, self.CONTENTS)
1301         d.addCallback(_retrieved)
1302         # we should be able to re-use the same servermap, both with and
1303         # without updating it.
1304         d.addCallback(lambda res: self.do_download(self._smap))
1305         d.addCallback(_retrieved)
1306         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1307         d.addCallback(lambda res: self.do_download(self._smap))
1308         d.addCallback(_retrieved)
1309         # clobbering the pubkey should make the servermap updater re-fetch it
1310         def _clobber_pubkey(res):
1311             self._fn._pubkey = None
1312         d.addCallback(_clobber_pubkey)
1313         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1314         d.addCallback(lambda res: self.do_download(self._smap))
1315         d.addCallback(_retrieved)
1316         return d
1317
1318     def test_all_shares_vanished(self):
1319         d = self.make_servermap()
1320         def _remove_shares(servermap):
1321             for shares in self._storage._peers.values():
1322                 shares.clear()
1323             d1 = self.shouldFail(NotEnoughSharesError,
1324                                  "test_all_shares_vanished",
1325                                  "ran out of servers",
1326                                  self.do_download, servermap)
1327             return d1
1328         d.addCallback(_remove_shares)
1329         return d
1330
1331     def test_no_servers(self):
1332         sb2 = make_storagebroker(num_peers=0)
1333         # if there are no servers, then a MODE_READ servermap should come
1334         # back empty
1335         d = self.make_servermap(sb=sb2)
1336         def _check_servermap(servermap):
1337             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1338             self.failIf(servermap.recoverable_versions())
1339             self.failIf(servermap.unrecoverable_versions())
1340             self.failIf(servermap.all_servers())
1341         d.addCallback(_check_servermap)
1342         return d
1343
1344     def test_no_servers_download(self):
1345         sb2 = make_storagebroker(num_peers=0)
1346         self._fn._storage_broker = sb2
1347         d = self.shouldFail(UnrecoverableFileError,
1348                             "test_no_servers_download",
1349                             "no recoverable versions",
1350                             self._fn.download_best_version)
1351         def _restore(res):
1352             # a failed download that occurs while we aren't connected to
1353             # anybody should not prevent a subsequent download from working.
1354             # This isn't quite the webapi-driven test that #463 wants, but it
1355             # should be close enough.
1356             self._fn._storage_broker = self._storage_broker
1357             return self._fn.download_best_version()
1358         def _retrieved(new_contents):
1359             self.failUnlessEqual(new_contents, self.CONTENTS)
1360         d.addCallback(_restore)
1361         d.addCallback(_retrieved)
1362         return d
1363
1364
1365     def _test_corrupt_all(self, offset, substring,
1366                           should_succeed=False,
1367                           corrupt_early=True,
1368                           failure_checker=None,
1369                           fetch_privkey=False):
1370         d = defer.succeed(None)
1371         if corrupt_early:
1372             d.addCallback(corrupt, self._storage, offset)
1373         d.addCallback(lambda res: self.make_servermap())
1374         if not corrupt_early:
1375             d.addCallback(corrupt, self._storage, offset)
1376         def _do_retrieve(servermap):
1377             ver = servermap.best_recoverable_version()
1378             if ver is None and not should_succeed:
1379                 # no recoverable versions == not succeeding. The problem
1380                 # should be noted in the servermap's list of problems.
1381                 if substring:
1382                     allproblems = [str(f) for f in servermap.get_problems()]
1383                     self.failUnlessIn(substring, "".join(allproblems))
1384                 return servermap
1385             if should_succeed:
1386                 d1 = self._fn.download_version(servermap, ver,
1387                                                fetch_privkey)
1388                 d1.addCallback(lambda new_contents:
1389                                self.failUnlessEqual(new_contents, self.CONTENTS))
1390             else:
1391                 d1 = self.shouldFail(NotEnoughSharesError,
1392                                      "_corrupt_all(offset=%s)" % (offset,),
1393                                      substring,
1394                                      self._fn.download_version, servermap,
1395                                                                 ver,
1396                                                                 fetch_privkey)
1397             if failure_checker:
1398                 d1.addCallback(failure_checker)
1399             d1.addCallback(lambda res: servermap)
1400             return d1
1401         d.addCallback(_do_retrieve)
1402         return d
1403
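    # A hedged aside: the per-field tests below each reduce to a single
    # _test_corrupt_all call, so they could be generated from a table. The
    # offsets are the SDMF header positions the tests themselves assert
    # (seqnum=1, roothash=9, IV=41, k=57, N=58, segsize=59, datalen=67);
    # each of these fields is covered by the signature.
    SIGNED_FIELD_OFFSETS = {
        "seqnum": 1, "root_hash": 9, "IV": 41,
        "k": 57, "N": 58, "segsize": 59, "datalen": 67,
    }

    def _corrupt_signed_field(self, name):
        # corrupting any signed header field should invalidate the signature
        return self._test_corrupt_all(self.SIGNED_FIELD_OFFSETS[name],
                                      "signature is invalid")
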
1404     def test_corrupt_all_verbyte(self):
1405         # when the version byte is not 0 or 1, we hit an UnknownVersionError
1406         # error in unpack_share().
1407         d = self._test_corrupt_all(0, "UnknownVersionError")
1408         def _check_servermap(servermap):
1409             # and the dump should mention the problems
1410             s = StringIO()
1411             dump = servermap.dump(s).getvalue()
1412             self.failUnless("30 PROBLEMS" in dump, dump)
1413         d.addCallback(_check_servermap)
1414         return d
1415
1416     def test_corrupt_all_seqnum(self):
1417         # a corrupt sequence number will trigger a bad signature
1418         return self._test_corrupt_all(1, "signature is invalid")
1419
1420     def test_corrupt_all_R(self):
1421         # a corrupt root hash will trigger a bad signature
1422         return self._test_corrupt_all(9, "signature is invalid")
1423
1424     def test_corrupt_all_IV(self):
1425         # a corrupt salt/IV will trigger a bad signature
1426         return self._test_corrupt_all(41, "signature is invalid")
1427
1428     def test_corrupt_all_k(self):
1429         # a corrupt 'k' will trigger a bad signature
1430         return self._test_corrupt_all(57, "signature is invalid")
1431
1432     def test_corrupt_all_N(self):
1433         # a corrupt 'N' will trigger a bad signature
1434         return self._test_corrupt_all(58, "signature is invalid")
1435
1436     def test_corrupt_all_segsize(self):
1437         # a corrupt segsize will trigger a bad signature
1438         return self._test_corrupt_all(59, "signature is invalid")
1439
1440     def test_corrupt_all_datalen(self):
1441         # a corrupt data length will trigger a bad signature
1442         return self._test_corrupt_all(67, "signature is invalid")
1443
1444     def test_corrupt_all_pubkey(self):
1445         # a corrupt pubkey won't match the URI's fingerprint. We need to
1446         # remove the pubkey from the filenode, or else it won't bother trying
1447         # to update it.
1448         self._fn._pubkey = None
1449         return self._test_corrupt_all("pubkey",
1450                                       "pubkey doesn't match fingerprint")
1451
1452     def test_corrupt_all_sig(self):
1453         # a corrupted signature will fail to verify
1454         # the signature runs from about [543:799], depending upon the length
1455         # of the pubkey
1456         return self._test_corrupt_all("signature", "signature is invalid")
1457
1458     def test_corrupt_all_share_hash_chain_number(self):
1459         # a corrupt share hash chain entry will show up as a bad hash. If we
1460         # mangle the first byte, that will look like a bad hash number,
1461         # causing an IndexError
1462         return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1463
1464     def test_corrupt_all_share_hash_chain_hash(self):
1465         # a corrupt share hash chain entry will show up as a bad hash. If we
1466         # mangle a few bytes in, that will look like a bad hash.
1467         return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1468
1469     def test_corrupt_all_block_hash_tree(self):
1470         return self._test_corrupt_all("block_hash_tree",
1471                                       "block hash tree failure")
1472
1473     def test_corrupt_all_block(self):
1474         return self._test_corrupt_all("share_data", "block hash tree failure")
1475
1476     def test_corrupt_all_encprivkey(self):
1477         # a corrupted privkey won't even be noticed by the reader, only by a
1478         # writer.
1479         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1480
1481
1482     def test_corrupt_all_encprivkey_late(self):
1483         # this should work for the same reason as above, but we corrupt
1484         # after the servermap update, to exercise the error-handling
1485         # code.
1486         # We need to remove the privkey from the node, or the retrieve
1487         # process won't know to update it.
1488         self._fn._privkey = None
1489         return self._test_corrupt_all("enc_privkey",
1490                                       None, # this shouldn't fail
1491                                       should_succeed=True,
1492                                       corrupt_early=False,
1493                                       fetch_privkey=True)
1494
1495
1496     # disabled until retrieve tests checkstring on each blockfetch. I didn't
1497     # just use a .todo because the failing-but-ignored test emits about 30kB
1498     # of noise.
1499     def OFF_test_corrupt_all_seqnum_late(self):
1500         # corrupting the seqnum between mapupdate and retrieve should result
1501         # in NotEnoughSharesError, since each share will look invalid
1502         def _check(res):
1503             f = res[0]
1504             self.failUnless(f.check(NotEnoughSharesError))
1505             self.failUnless("uncoordinated write" in str(f))
1506         return self._test_corrupt_all(1, "ran out of servers",
1507                                       corrupt_early=False,
1508                                       failure_checker=_check)
1509
1510     def test_corrupt_all_block_hash_tree_late(self):
1511         def _check(res):
1512             f = res[0]
1513             self.failUnless(f.check(NotEnoughSharesError))
1514         return self._test_corrupt_all("block_hash_tree",
1515                                       "block hash tree failure",
1516                                       corrupt_early=False,
1517                                       failure_checker=_check)
1518
1519
1520     def test_corrupt_all_block_late(self):
1521         def _check(res):
1522             f = res[0]
1523             self.failUnless(f.check(NotEnoughSharesError))
1524         return self._test_corrupt_all("share_data", "block hash tree failure",
1525                                       corrupt_early=False,
1526                                       failure_checker=_check)
1527
1528
1529     def test_basic_pubkey_at_end(self):
1530         # we corrupt the pubkey in all but the last 'k' shares, allowing the
1531         # download to succeed but forcing a bunch of retries first. Note that
1532         # this is rather pessimistic: our Retrieve process will throw away
1533         # the whole share if the pubkey is bad, even though the rest of the
1534         # share might be good.
1535
1536         self._fn._pubkey = None
1537         k = self._fn.get_required_shares()
1538         N = self._fn.get_total_shares()
1539         d = defer.succeed(None)
1540         d.addCallback(corrupt, self._storage, "pubkey",
1541                       shnums_to_corrupt=range(0, N-k))
1542         d.addCallback(lambda res: self.make_servermap())
1543         def _do_retrieve(servermap):
1544             self.failUnless(servermap.get_problems())
1545             self.failUnless("pubkey doesn't match fingerprint"
1546                             in str(servermap.get_problems()[0]))
1547             ver = servermap.best_recoverable_version()
1548             r = Retrieve(self._fn, self._storage_broker, servermap, ver)
1549             c = consumer.MemoryConsumer()
1550             return r.download(c)
1551         d.addCallback(_do_retrieve)
1552         d.addCallback(lambda mc: "".join(mc.chunks))
1553         d.addCallback(lambda new_contents:
1554                       self.failUnlessEqual(new_contents, self.CONTENTS))
1555         return d
1556
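    @staticmethod
    def _shnums_leaving_k_good(k, N):
        # a hedged sketch of the arithmetic in test_basic_pubkey_at_end:
        # corrupting shnums 0..N-k-1 leaves exactly k intact shares, the
        # minimum needed for recovery (range(0, 7) for 3-of-10, leaving
        # shares 7, 8 and 9 with good pubkeys)
        return range(0, N - k)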
1557
1558     def _test_corrupt_some(self, offset, mdmf=False):
1559         if mdmf:
1560             d = self.publish_mdmf()
1561         else:
1562             d = defer.succeed(None)
1563         d.addCallback(lambda ignored:
1564             corrupt(None, self._storage, offset, range(5)))
1565         d.addCallback(lambda ignored:
1566             self.make_servermap())
1567         def _do_retrieve(servermap):
1568             ver = servermap.best_recoverable_version()
1569             self.failUnless(ver)
1570             return self._fn.download_best_version()
1571         d.addCallback(_do_retrieve)
1572         d.addCallback(lambda new_contents:
1573             self.failUnlessEqual(new_contents, self.CONTENTS))
1574         return d
1575
1576
1577     def test_corrupt_some(self):
1578         # corrupt the data of the first five shares (so the servermap thinks
1579         # they're good but retrieve marks them as bad), so that the
1580         # MODE_READ set of 6 will be insufficient, forcing node.download to
1581         # retry with more servers.
1582         return self._test_corrupt_some("share_data")
1583
1584
1585     def test_download_fails(self):
1586         d = corrupt(None, self._storage, "signature")
1587         d.addCallback(lambda ignored:
1588             self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1589                             "no recoverable versions",
1590                             self._fn.download_best_version))
1591         return d
1592
1593
1594
1595     def test_corrupt_mdmf_block_hash_tree(self):
1596         d = self.publish_mdmf()
1597         d.addCallback(lambda ignored:
1598             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1599                                    "block hash tree failure",
1600                                    corrupt_early=False,
1601                                    should_succeed=False))
1602         return d
1603
1604
1605     def test_corrupt_mdmf_block_hash_tree_late(self):
1606         d = self.publish_mdmf()
1607         d.addCallback(lambda ignored:
1608             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1609                                    "block hash tree failure",
1610                                    corrupt_early=True,
1611                                    should_succeed=False))
1612         return d
1613
1614
1615     def test_corrupt_mdmf_share_data(self):
1616         d = self.publish_mdmf()
1617         d.addCallback(lambda ignored:
1618             # TODO: Find out what the block size is and corrupt a
1619             # specific block, rather than just guessing.
1620             self._test_corrupt_all(("share_data", 12 * 40),
1621                                     "block hash tree failure",
1622                                     corrupt_early=True,
1623                                     should_succeed=False))
1624         return d
1625
1626
1627     def test_corrupt_some_mdmf(self):
1628         return self._test_corrupt_some(("share_data", 12 * 40),
1629                                        mdmf=True)
1630
1631
1632 class CheckerMixin:
1633     def check_good(self, r, where):
1634         self.failUnless(r.is_healthy(), where)
1635         return r
1636
1637     def check_bad(self, r, where):
1638         self.failIf(r.is_healthy(), where)
1639         return r
1640
1641     def check_expected_failure(self, r, expected_exception, substring, where):
1642         for (peerid, storage_index, shnum, f) in r.problems:
1643             if f.check(expected_exception):
1644                 self.failUnless(substring in str(f),
1645                                 "%s: substring '%s' not in '%s'" %
1646                                 (where, substring, str(f)))
1647                 return
1648         self.fail("%s: didn't see expected exception %s in problems %s" %
1649                   (where, expected_exception, r.problems))
1650
1651
1652 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1653     def setUp(self):
1654         return self.publish_one()
1655
1656
1657     def test_check_good(self):
1658         d = self._fn.check(Monitor())
1659         d.addCallback(self.check_good, "test_check_good")
1660         return d
1661
1662     def test_check_mdmf_good(self):
1663         d = self.publish_mdmf()
1664         d.addCallback(lambda ignored:
1665             self._fn.check(Monitor()))
1666         d.addCallback(self.check_good, "test_check_mdmf_good")
1667         return d
1668
1669     def test_check_no_shares(self):
1670         for shares in self._storage._peers.values():
1671             shares.clear()
1672         d = self._fn.check(Monitor())
1673         d.addCallback(self.check_bad, "test_check_no_shares")
1674         return d
1675
1676     def test_check_mdmf_no_shares(self):
1677         d = self.publish_mdmf()
1678         def _then(ignored):
1679             for share in self._storage._peers.values():
1680                 share.clear()
1681         d.addCallback(_then)
1682         d.addCallback(lambda ignored:
1683             self._fn.check(Monitor()))
1684         d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1685         return d
1686
1687     def test_check_not_enough_shares(self):
1688         for shares in self._storage._peers.values():
1689             for shnum in shares.keys():
1690                 if shnum > 0:
1691                     del shares[shnum]
1692         d = self._fn.check(Monitor())
1693         d.addCallback(self.check_bad, "test_check_not_enough_shares")
1694         return d
1695
1696     def test_check_mdmf_not_enough_shares(self):
1697         d = self.publish_mdmf()
1698         def _then(ignored):
1699             for shares in self._storage._peers.values():
1700                 for shnum in shares.keys():
1701                     if shnum > 0:
1702                         del shares[shnum]
1703         d.addCallback(_then)
1704         d.addCallback(lambda ignored:
1705             self._fn.check(Monitor()))
1706         d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
1707         return d
1708
1709
1710     def test_check_all_bad_sig(self):
1711         d = corrupt(None, self._storage, 1) # bad sig
1712         d.addCallback(lambda ignored:
1713             self._fn.check(Monitor()))
1714         d.addCallback(self.check_bad, "test_check_all_bad_sig")
1715         return d
1716
1717     def test_check_mdmf_all_bad_sig(self):
1718         d = self.publish_mdmf()
1719         d.addCallback(lambda ignored:
1720             corrupt(None, self._storage, 1))
1721         d.addCallback(lambda ignored:
1722             self._fn.check(Monitor()))
1723         d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1724         return d
1725
1726     def test_verify_mdmf_all_bad_sharedata(self):
1727         d = self.publish_mdmf()
1728         # On 8 of the shares, corrupt the beginning of the share data.
1729         # The signature check during the servermap update won't catch this.
1730         d.addCallback(lambda ignored:
1731             corrupt(None, self._storage, "share_data", range(8)))
1732         # On 2 of the shares, corrupt the end of the share data.
1733         # The signature check during the servermap update won't catch
1734         # this either, and the retrieval process will have to process
1735         # all of the segments before it notices.
1736         d.addCallback(lambda ignored:
1737             # the block hash tree comes right after the share data, so if we
1738             # corrupt a little before the block hash tree, we'll corrupt in the
1739             # last block of each share.
1740             corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
1741         d.addCallback(lambda ignored:
1742             self._fn.check(Monitor(), verify=True))
1743         # The verifier should flag the file as unhealthy, and should
1744         # list all 10 shares as bad.
1745         d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
1746         def _check_num_bad(r):
1747             self.failIf(r.is_recoverable())
1748             smap = r.get_servermap()
1749             self.failUnlessEqual(len(smap.get_bad_shares()), 10)
1750         d.addCallback(_check_num_bad)
1751         return d
1752
1753     def test_check_all_bad_blocks(self):
1754         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1755         # the Checker won't notice this: it doesn't look at actual data
1756         d.addCallback(lambda ignored:
1757             self._fn.check(Monitor()))
1758         d.addCallback(self.check_good, "test_check_all_bad_blocks")
1759         return d
1760
1761
1762     def test_check_mdmf_all_bad_blocks(self):
1763         d = self.publish_mdmf()
1764         d.addCallback(lambda ignored:
1765             corrupt(None, self._storage, "share_data"))
1766         d.addCallback(lambda ignored:
1767             self._fn.check(Monitor()))
1768         d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1769         return d
1770
1771     def test_verify_good(self):
1772         d = self._fn.check(Monitor(), verify=True)
1773         d.addCallback(self.check_good, "test_verify_good")
1774         return d
1775
1776     def test_verify_all_bad_sig(self):
1777         d = corrupt(None, self._storage, 1) # bad sig
1778         d.addCallback(lambda ignored:
1779             self._fn.check(Monitor(), verify=True))
1780         d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1781         return d
1782
1783     def test_verify_one_bad_sig(self):
1784         d = corrupt(None, self._storage, 1, [9]) # bad sig
1785         d.addCallback(lambda ignored:
1786             self._fn.check(Monitor(), verify=True))
1787         d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1788         return d
1789
1790     def test_verify_one_bad_block(self):
1791         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1792         # the Verifier *will* notice this, since it examines every byte
1793         d.addCallback(lambda ignored:
1794             self._fn.check(Monitor(), verify=True))
1795         d.addCallback(self.check_bad, "test_verify_one_bad_block")
1796         d.addCallback(self.check_expected_failure,
1797                       CorruptShareError, "block hash tree failure",
1798                       "test_verify_one_bad_block")
1799         return d
1800
1801     def test_verify_one_bad_sharehash(self):
1802         d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1803         d.addCallback(lambda ignored:
1804             self._fn.check(Monitor(), verify=True))
1805         d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1806         d.addCallback(self.check_expected_failure,
1807                       CorruptShareError, "corrupt hashes",
1808                       "test_verify_one_bad_sharehash")
1809         return d
1810
1811     def test_verify_one_bad_encprivkey(self):
1812         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1813         d.addCallback(lambda ignored:
1814             self._fn.check(Monitor(), verify=True))
1815         d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1816         d.addCallback(self.check_expected_failure,
1817                       CorruptShareError, "invalid privkey",
1818                       "test_verify_one_bad_encprivkey")
1819         return d
1820
1821     def test_verify_one_bad_encprivkey_uncheckable(self):
1822         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1823         readonly_fn = self._fn.get_readonly()
1824         # a read-only node has no way to validate the privkey
1825         d.addCallback(lambda ignored:
1826             readonly_fn.check(Monitor(), verify=True))
1827         d.addCallback(self.check_good,
1828                       "test_verify_one_bad_encprivkey_uncheckable")
1829         return d
1830
1831
1832     def test_verify_mdmf_good(self):
1833         d = self.publish_mdmf()
1834         d.addCallback(lambda ignored:
1835             self._fn.check(Monitor(), verify=True))
1836         d.addCallback(self.check_good, "test_verify_mdmf_good")
1837         return d
1838
1839
1840     def test_verify_mdmf_one_bad_block(self):
1841         d = self.publish_mdmf()
1842         d.addCallback(lambda ignored:
1843             corrupt(None, self._storage, "share_data", [1]))
1844         d.addCallback(lambda ignored:
1845             self._fn.check(Monitor(), verify=True))
1846         # We should find one bad block here
1847         d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1848         d.addCallback(self.check_expected_failure,
1849                       CorruptShareError, "block hash tree failure",
1850                       "test_verify_mdmf_one_bad_block")
1851         return d
1852
1853
1854     def test_verify_mdmf_bad_encprivkey(self):
1855         d = self.publish_mdmf()
1856         d.addCallback(lambda ignored:
1857             corrupt(None, self._storage, "enc_privkey", [0]))
1858         d.addCallback(lambda ignored:
1859             self._fn.check(Monitor(), verify=True))
1860         d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1861         d.addCallback(self.check_expected_failure,
1862                       CorruptShareError, "privkey",
1863                       "test_verify_mdmf_bad_encprivkey")
1864         return d
1865
1866
1867     def test_verify_mdmf_bad_sig(self):
1868         d = self.publish_mdmf()
1869         d.addCallback(lambda ignored:
1870             corrupt(None, self._storage, 1, [1]))
1871         d.addCallback(lambda ignored:
1872             self._fn.check(Monitor(), verify=True))
1873         d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
1874         return d
1875
1876
1877     def test_verify_mdmf_bad_encprivkey_uncheckable(self):
1878         d = self.publish_mdmf()
1879         d.addCallback(lambda ignored:
1880             corrupt(None, self._storage, "enc_privkey", [1]))
1881         d.addCallback(lambda ignored:
1882             self._fn.get_readonly())
1883         d.addCallback(lambda fn:
1884             fn.check(Monitor(), verify=True))
1885         d.addCallback(self.check_good,
1886                       "test_verify_mdmf_bad_encprivkey_uncheckable")
1887         return d
1888
1889
1890 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1891
1892     def get_shares(self, s):
1893         all_shares = {} # maps (peerid, shnum) to share data
1894         for peerid in s._peers:
1895             shares = s._peers[peerid]
1896             for shnum in shares:
1897                 data = shares[shnum]
1898                 all_shares[ (peerid, shnum) ] = data
1899         return all_shares
1900
1901     def copy_shares(self, ignored=None):
1902         self.old_shares.append(self.get_shares(self._storage))
1903
1904     def test_repair_nop(self):
1905         self.old_shares = []
1906         d = self.publish_one()
1907         d.addCallback(self.copy_shares)
1908         d.addCallback(lambda res: self._fn.check(Monitor()))
1909         d.addCallback(lambda check_results: self._fn.repair(check_results))
1910         def _check_results(rres):
1911             self.failUnless(IRepairResults.providedBy(rres))
1912             self.failUnless(rres.get_successful())
1913             # TODO: examine results
1914
1915             self.copy_shares()
1916
1917             initial_shares = self.old_shares[0]
1918             new_shares = self.old_shares[1]
1919             # TODO: this really shouldn't change anything. When we implement
1920             # a "minimal-bandwidth" repairer, change this test to assert:
1921             #self.failUnlessEqual(new_shares, initial_shares)
1922
1923             # all shares should be in the same place as before
1924             self.failUnlessEqual(set(initial_shares.keys()),
1925                                  set(new_shares.keys()))
1926             # but they should all be at a newer seqnum. The IV will be
1927             # different, so the roothash will be too.
1928             for key in initial_shares:
1929                 (version0,
1930                  seqnum0,
1931                  root_hash0,
1932                  IV0,
1933                  k0, N0, segsize0, datalen0,
1934                  o0) = unpack_header(initial_shares[key])
1935                 (version1,
1936                  seqnum1,
1937                  root_hash1,
1938                  IV1,
1939                  k1, N1, segsize1, datalen1,
1940                  o1) = unpack_header(new_shares[key])
1941                 self.failUnlessEqual(version0, version1)
1942                 self.failUnlessEqual(seqnum0+1, seqnum1)
1943                 self.failUnlessEqual(k0, k1)
1944                 self.failUnlessEqual(N0, N1)
1945                 self.failUnlessEqual(segsize0, segsize1)
1946                 self.failUnlessEqual(datalen0, datalen1)
1947         d.addCallback(_check_results)
1948         return d
1949
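    # A hedged sketch: pull just the sequence number out of a raw share,
    # using the same unpack_header layout that test_repair_nop compares
    # field-by-field above.
    @staticmethod
    def _share_seqnum(share_data):
        (version, seqnum, root_hash, IV,
         k, N, segsize, datalen, offsets) = unpack_header(share_data)
        return seqnum
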
1950     def failIfSharesChanged(self, ignored=None):
1951         old_shares = self.old_shares[-2]
1952         current_shares = self.old_shares[-1]
1953         self.failUnlessEqual(old_shares, current_shares)
1954
1955
1956     def test_unrepairable_0shares(self):
1957         d = self.publish_one()
1958         def _delete_all_shares(ign):
1959             shares = self._storage._peers
1960             for peerid in shares:
1961                 shares[peerid] = {}
1962         d.addCallback(_delete_all_shares)
1963         d.addCallback(lambda ign: self._fn.check(Monitor()))
1964         d.addCallback(lambda check_results: self._fn.repair(check_results))
1965         def _check(crr):
1966             self.failUnlessEqual(crr.get_successful(), False)
1967         d.addCallback(_check)
1968         return d
1969
1970     def test_mdmf_unrepairable_0shares(self):
1971         d = self.publish_mdmf()
1972         def _delete_all_shares(ign):
1973             shares = self._storage._peers
1974             for peerid in shares:
1975                 shares[peerid] = {}
1976         d.addCallback(_delete_all_shares)
1977         d.addCallback(lambda ign: self._fn.check(Monitor()))
1978         d.addCallback(lambda check_results: self._fn.repair(check_results))
1979         d.addCallback(lambda crr: self.failIf(crr.get_successful()))
1980         return d
1981
1982
1983     def test_unrepairable_1share(self):
1984         d = self.publish_one()
1985         def _delete_all_but_one_share(ign):
1986             shares = self._storage._peers
1987             for peerid in shares:
1988                 for shnum in list(shares[peerid]):
1989                     if shnum > 0:
1990                         del shares[peerid][shnum]
1991         d.addCallback(_delete_all_but_one_share)
1992         d.addCallback(lambda ign: self._fn.check(Monitor()))
1993         d.addCallback(lambda check_results: self._fn.repair(check_results))
1994         def _check(crr):
1995             self.failUnlessEqual(crr.get_successful(), False)
1996         d.addCallback(_check)
1997         return d
1998
1999     def test_mdmf_unrepairable_1share(self):
2000         d = self.publish_mdmf()
2001         def _delete_all_but_one_share(ign):
2002             shares = self._storage._peers
2003             for peerid in shares:
2004                 for shnum in list(shares[peerid]):
2005                     if shnum > 0:
2006                         del shares[peerid][shnum]
2007         d.addCallback(_delete_all_but_one_share)
2008         d.addCallback(lambda ign: self._fn.check(Monitor()))
2009         d.addCallback(lambda check_results: self._fn.repair(check_results))
2010         def _check(crr):
2011             self.failUnlessEqual(crr.get_successful(), False)
2012         d.addCallback(_check)
2013         return d
2014
2015     def test_repairable_5shares(self):
2016         d = self.publish_mdmf()
2017         def _delete_some_shares(ign):
2018             shares = self._storage._peers
2019             for peerid in shares:
2020                 for shnum in list(shares[peerid]):
2021                     if shnum > 4:
2022                         del shares[peerid][shnum]
2023         d.addCallback(_delete_some_shares)
2024         d.addCallback(lambda ign: self._fn.check(Monitor()))
2025         d.addCallback(lambda check_results: self._fn.repair(check_results))
2026         def _check(crr):
2027             self.failUnlessEqual(crr.get_successful(), True)
2028         d.addCallback(_check)
2029         return d
2030
2031     def test_mdmf_repairable_5shares(self):
2032         d = self.publish_mdmf()
2033         def _delete_some_shares(ign):
2034             shares = self._storage._peers
2035             for peerid in shares:
2036                 for shnum in list(shares[peerid]):
2037                     if shnum > 5:
2038                         del shares[peerid][shnum]
2039         d.addCallback(_delete_some_shares)
2040         d.addCallback(lambda ign: self._fn.check(Monitor()))
2041         def _check(cr):
2042             self.failIf(cr.is_healthy())
2043             self.failUnless(cr.is_recoverable())
2044             return cr
2045         d.addCallback(_check)
2046         d.addCallback(lambda check_results: self._fn.repair(check_results))
2047         def _check1(crr):
2048             self.failUnlessEqual(crr.get_successful(), True)
2049         d.addCallback(_check1)
2050         return d
2051
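    # A hedged sketch: the share-deletion callbacks above all share one
    # shape, so they could come from a single factory. keep_shnum is a
    # predicate; shares whose numbers fail it are dropped from the
    # FakeStorage peers dict, just as the inline helpers do.
    def _make_share_trimmer(self, keep_shnum):
        def _trim(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if not keep_shnum(shnum):
                        del shares[peerid][shnum]
        return _trim
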
2052
2053     def test_merge(self):
2054         self.old_shares = []
2055         d = self.publish_multiple()
2056         # repair will refuse to merge multiple highest seqnums unless you
2057         # pass force=True
2058         d.addCallback(lambda res:
2059                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2060                                           1:4,3:4,5:4,7:4,9:4}))
2061         d.addCallback(self.copy_shares)
2062         d.addCallback(lambda res: self._fn.check(Monitor()))
2063         def _try_repair(check_results):
2064             ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2065             d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2066                                  self._fn.repair, check_results)
2067             d2.addCallback(self.copy_shares)
2068             d2.addCallback(self.failIfSharesChanged)
2069             d2.addCallback(lambda res: check_results)
2070             return d2
2071         d.addCallback(_try_repair)
2072         d.addCallback(lambda check_results:
2073                       self._fn.repair(check_results, force=True))
2074         # this should give us 10 shares of the highest roothash
2075         def _check_repair_results(rres):
2076             self.failUnless(rres.get_successful())
2077             pass # TODO
2078         d.addCallback(_check_repair_results)
2079         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2080         def _check_smap(smap):
2081             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2082             self.failIf(smap.unrecoverable_versions())
2083             # now, which should have won?
2084             roothash_s4a = self.get_roothash_for(3)
2085             roothash_s4b = self.get_roothash_for(4)
2086             if roothash_s4b > roothash_s4a:
2087                 expected_contents = self.CONTENTS[4]
2088             else:
2089                 expected_contents = self.CONTENTS[3]
2090             new_versionid = smap.best_recoverable_version()
2091             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2092             d2 = self._fn.download_version(smap, new_versionid)
2093             d2.addCallback(self.failUnlessEqual, expected_contents)
2094             return d2
2095         d.addCallback(_check_smap)
2096         return d
2097
2098     def test_non_merge(self):
2099         self.old_shares = []
2100         d = self.publish_multiple()
2101         # repair should not refuse a repair that doesn't need to merge. In
2102         # this case, we combine v2 with v3. The repair should ignore v2 and
2103         # copy v3 into a new v5.
2104         d.addCallback(lambda res:
2105                       self._set_versions({0:2,2:2,4:2,6:2,8:2,
2106                                           1:3,3:3,5:3,7:3,9:3}))
2107         d.addCallback(lambda res: self._fn.check(Monitor()))
2108         d.addCallback(lambda check_results: self._fn.repair(check_results))
2109         # this should give us 10 shares of v3
2110         def _check_repair_results(rres):
2111             self.failUnless(rres.get_successful())
2112             pass # TODO
2113         d.addCallback(_check_repair_results)
2114         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2115         def _check_smap(smap):
2116             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2117             self.failIf(smap.unrecoverable_versions())
2118             # now, which should have won?
2119             expected_contents = self.CONTENTS[3]
2120             new_versionid = smap.best_recoverable_version()
2121             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2122             d2 = self._fn.download_version(smap, new_versionid)
2123             d2.addCallback(self.failUnlessEqual, expected_contents)
2124             return d2
2125         d.addCallback(_check_smap)
2126         return d
2127
2128     def get_roothash_for(self, index):
2129         # return the roothash for the first share we see in the saved set
2130         shares = self._copied_shares[index]
2131         for peerid in shares:
2132             for shnum in shares[peerid]:
2133                 share = shares[peerid][shnum]
2134                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2135                           unpack_header(share)
2136                 return root_hash
2137
2138     def test_check_and_repair_readcap(self):
2139         # we can't currently repair from a mutable readcap: #625
2140         self.old_shares = []
2141         d = self.publish_one()
2142         d.addCallback(self.copy_shares)
2143         def _get_readcap(res):
2144             self._fn3 = self._fn.get_readonly()
2145             # also delete some shares
2146             for peerid,shares in self._storage._peers.items():
2147                 shares.pop(0, None)
2148         d.addCallback(_get_readcap)
2149         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2150         def _check_results(crr):
2151             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2152             # we should detect the unhealthy file, but skip mutable-readcap
2153             # repairs until #625 is fixed
2154             self.failIf(crr.get_pre_repair_results().is_healthy())
2155             self.failIf(crr.get_repair_attempted())
2156             self.failIf(crr.get_post_repair_results().is_healthy())
2157         d.addCallback(_check_results)
2158         return d
2159
2160 class DevNullDictionary(dict):
2161     def __setitem__(self, key, value):
2162         return
2163
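def _demo_devnull_dictionary():
    # a hedged usage sketch: MultipleEncodings swaps a DevNullDictionary in
    # for the nodemaker's node cache, so create_from_cap() always builds a
    # fresh node instead of returning a cached one. Writes simply vanish:
    d = DevNullDictionary()
    d["key"] = "value"
    assert "key" not in d
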
2164 class MultipleEncodings(unittest.TestCase):
2165     def setUp(self):
2166         self.CONTENTS = "New contents go here"
2167         self.uploadable = MutableData(self.CONTENTS)
2168         self._storage = FakeStorage()
2169         self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2170         self._storage_broker = self._nodemaker.storage_broker
2171         d = self._nodemaker.create_mutable_file(self.uploadable)
2172         def _created(node):
2173             self._fn = node
2174         d.addCallback(_created)
2175         return d
2176
2177     def _encode(self, k, n, data, version=SDMF_VERSION):
2178         # encode 'data' into a peerid->shares dict.
2179
2180         fn = self._fn
2181         # disable the nodecache, since for these tests we explicitly need
2182         # multiple nodes pointing at the same file
2183         self._nodemaker._node_cache = DevNullDictionary()
2184         fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2185         # then we copy over other fields that are normally fetched from the
2186         # existing shares
2187         fn2._pubkey = fn._pubkey
2188         fn2._privkey = fn._privkey
2189         fn2._encprivkey = fn._encprivkey
2190         # and set the encoding parameters to something completely different
2191         fn2._required_shares = k
2192         fn2._total_shares = n
2193
2194         s = self._storage
2195         s._peers = {} # clear existing storage
2196         p2 = Publish(fn2, self._storage_broker, None)
2197         uploadable = MutableData(data)
2198         d = p2.publish(uploadable)
2199         def _published(res):
2200             shares = s._peers
2201             s._peers = {}
2202             return shares
2203         d.addCallback(_published)
2204         return d
2205
2206     def make_servermap(self, mode=MODE_READ, oldmap=None):
2207         if oldmap is None:
2208             oldmap = ServerMap()
2209         smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
2210                                oldmap, mode)
2211         d = smu.update()
2212         return d
2213
2214     def test_multiple_encodings(self):
2215         # we encode the same file in two different ways (3-of-10 and 4-of-9),
2216         # then mix up the shares, to make sure that download survives seeing
2217         # a variety of encodings. This is actually kind of tricky to set up.
2218
2219         contents1 = "Contents for encoding 1 (3-of-10) go here"
2220         contents2 = "Contents for encoding 2 (4-of-9) go here"
2221         contents3 = "Contents for encoding 3 (4-of-7) go here"
2222
2223         # we make a retrieval object that doesn't know what encoding
2224         # parameters to use
2225         fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2226
2227         # now we upload a file through fn1, and grab its shares
2228         d = self._encode(3, 10, contents1)
2229         def _encoded_1(shares):
2230             self._shares1 = shares
2231         d.addCallback(_encoded_1)
2232         d.addCallback(lambda res: self._encode(4, 9, contents2))
2233         def _encoded_2(shares):
2234             self._shares2 = shares
2235         d.addCallback(_encoded_2)
2236         d.addCallback(lambda res: self._encode(4, 7, contents3))
2237         def _encoded_3(shares):
2238             self._shares3 = shares
2239         d.addCallback(_encoded_3)
2240
2241         def _merge(res):
2242             log.msg("merging sharelists")
2243             # we merge the shares from the two sets, leaving each shnum in
2244             # its original location, but using a share from set1 or set2
2245             # according to the following sequence:
2246             #
2247             #  4-of-9  a  s2
2248             #  4-of-9  b  s2
2249             #  4-of-7  c   s3
2250             #  4-of-9  d  s2
2251             #  3-of-10 e s1
2252             #  3-of-10 f s1
2253             #  3-of-10 g s1
2254             #  4-of-9  h  s2
2255             #
2256             # so that neither form can be recovered until fetch [f], at which
2257             # point version-s1 (the 3-of-10 form) should be recoverable. If
2258             # the implementation latches on to the first version it sees,
2259             # then s2 will be recoverable at fetch [g].
2260
2261             # Later, when we implement code that handles multiple versions,
2262             # we can use this framework to assert that all recoverable
2263             # versions are retrieved, and test that 'epsilon' does its job.
2264
2265             places = [2, 2, 3, 2, 1, 1, 1, 2]
2266
2267             sharemap = {}
2268             sb = self._storage_broker
2269
2270             for peerid in sorted(sb.get_all_serverids()):
2271                 for shnum in self._shares1.get(peerid, {}):
2272                     if shnum < len(places):
2273                         which = places[shnum]
2274                     else:
2275                         which = "x"
2276                     self._storage._peers[peerid] = peers = {}
2277                     in_1 = shnum in self._shares1[peerid]
2278                     in_2 = shnum in self._shares2.get(peerid, {})
2279                     in_3 = shnum in self._shares3.get(peerid, {})
2280                     if which == 1:
2281                         if in_1:
2282                             peers[shnum] = self._shares1[peerid][shnum]
2283                             sharemap[shnum] = peerid
2284                     elif which == 2:
2285                         if in_2:
2286                             peers[shnum] = self._shares2[peerid][shnum]
2287                             sharemap[shnum] = peerid
2288                     elif which == 3:
2289                         if in_3:
2290                             peers[shnum] = self._shares3[peerid][shnum]
2291                             sharemap[shnum] = peerid
2292
2293             # we don't bother placing any other shares
2294             # now sort the sequence so that share 0 is returned first
2295             new_sequence = [sharemap[shnum]
2296                             for shnum in sorted(sharemap.keys())]
2297             self._storage._sequence = new_sequence
2298             log.msg("merge done")
2299         d.addCallback(_merge)
2300         d.addCallback(lambda res: fn3.download_best_version())
2301         def _retrieved(new_contents):
2302             # the current specified behavior is "first version recoverable"
2303             self.failUnlessEqual(new_contents, contents1)
2304         d.addCallback(_retrieved)
2305         return d
2306
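# A hedged sketch of the arithmetic behind the placement table in
# test_multiple_encodings: given the 'places' list and an encoding's k,
# find the 0-based fetch index at which that version first has k shares.
def first_recoverable_fetch(places, which, k):
    seen = 0
    for i, p in enumerate(places):
        if p == which:
            seen += 1
            if seen == k:
                return i
    return None

# e.g. first_recoverable_fetch([2, 2, 3, 2, 1, 1, 1, 2], which=1, k=3) -> 6
# and  first_recoverable_fetch([2, 2, 3, 2, 1, 1, 1, 2], which=2, k=4) -> 7
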
2307
2308 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
2309
2310     def setUp(self):
2311         return self.publish_multiple()
2312
2313     def test_multiple_versions(self):
2314         # if we see a mix of versions in the grid, download_best_version
2315         # should get the latest one
2316         self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2317         d = self._fn.download_best_version()
2318         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2319         # and the checker should report problems
2320         d.addCallback(lambda res: self._fn.check(Monitor()))
2321         d.addCallback(self.check_bad, "test_multiple_versions")
2322
2323         # but if everything is at version 2, that's what we should download
2324         d.addCallback(lambda res:
2325                       self._set_versions(dict([(i,2) for i in range(10)])))
2326         d.addCallback(lambda res: self._fn.download_best_version())
2327         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2328         # if exactly one share is at version 3, we should still get v2
2329         d.addCallback(lambda res:
2330                       self._set_versions({0:3}))
2331         d.addCallback(lambda res: self._fn.download_best_version())
2332         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2333         # but the servermap should see the unrecoverable version. This
2334         # depends upon the single newer share being queried early.
2335         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2336         def _check_smap(smap):
2337             self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2338             newer = smap.unrecoverable_newer_versions()
2339             self.failUnlessEqual(len(newer), 1)
2340             verinfo, health = newer.items()[0]
2341             self.failUnlessEqual(verinfo[0], 4)
2342             self.failUnlessEqual(health, (1,3))
2343             self.failIf(smap.needs_merge())
2344         d.addCallback(_check_smap)
2345         # if we have a mix of two parallel versions (s4a and s4b), we could
2346         # recover either
2347         d.addCallback(lambda res:
2348                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2349                                           1:4,3:4,5:4,7:4,9:4}))
2350         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2351         def _check_smap_mixed(smap):
2352             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2353             newer = smap.unrecoverable_newer_versions()
2354             self.failUnlessEqual(len(newer), 0)
2355             self.failUnless(smap.needs_merge())
2356         d.addCallback(_check_smap_mixed)
2357         d.addCallback(lambda res: self._fn.download_best_version())
2358         d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2359                                                   res == self.CONTENTS[4]))
2360         return d
2361
2362     def test_replace(self):
2363         # if we see a mix of versions in the grid, we should be able to
2364         # replace them all with a newer version
2365
2366         # if exactly one share is at version 3, we should download (and
2367         # replace) v2, and the result should be v4. Note that the index we
2368         # give to _set_versions is different than the sequence number.
2369         target = dict([(i,2) for i in range(10)]) # seqnum3
2370         target[0] = 3 # seqnum4
2371         self._set_versions(target)
2372
2373         def _modify(oldversion, servermap, first_time):
2374             return oldversion + " modified"
2375         d = self._fn.modify(_modify)
2376         d.addCallback(lambda res: self._fn.download_best_version())
2377         expected = self.CONTENTS[2] + " modified"
2378         d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2379         # and the servermap should indicate that the outlier was replaced too
2380         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2381         def _check_smap(smap):
2382             self.failUnlessEqual(smap.highest_seqnum(), 5)
2383             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2384             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2385         d.addCallback(_check_smap)
2386         return d
2387
2388
2389 class Utils(unittest.TestCase):
2390     def test_cache(self):
2391         c = ResponseCache()
2392         # xdata = base62.b2a(os.urandom(100))[:100]
2393         xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2394         ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
2395         c.add("v1", 1, 0, xdata)
2396         c.add("v1", 1, 2000, ydata)
2397         self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2398         self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2399         self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2400         self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2401         self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2402         self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2403         self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2404         self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2405         self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
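        # the span [1990:2000) was never cached (xdata occupies [0:100),
        # ydata [2000:2100)), so every read touching it misses, even when
        # the tail of the span reaches into the cached ydata region: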
2406         self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2407         self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2408         self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2409         self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2410         self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2411         self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2412         self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2413         self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2414         self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
2415
2416         # test joining fragments
2417         c = ResponseCache()
2418         c.add("v1", 1, 0, xdata[:10])
2419         c.add("v1", 1, 10, xdata[10:20])
2420         self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2421
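# A minimal, illustrative sketch (not invoked by the suite) of the
# ResponseCache contract the test above relies upon: adjacent fragments
# can be read across as one span, but a read that touches any uncached
# byte returns None.
def _response_cache_sketch():
    c = ResponseCache()
    c.add("verinfo", 0, 0, "abcde")   # cache bytes [0:5) of share 0
    c.add("verinfo", 0, 5, "fghij")   # adjacent fragment: [5:10)
    assert c.read("verinfo", 0, 2, 6) == "cdefgh" # spans both fragments
    assert c.read("verinfo", 0, 8, 5) is None     # runs past cached data
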
2422 class Exceptions(unittest.TestCase):
2423     def test_repr(self):
2424         nmde = NeedMoreDataError(100, 50, 100)
2425         self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde))
2426         ucwe = UncoordinatedWriteError()
2427         self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
2428
2429 class SameKeyGenerator:
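    # always hands back the same pre-generated RSA keypair, so a test can
    # compute the storage index (and thus the server permutation) before
    # the mutable file is actually created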
2430     def __init__(self, pubkey, privkey):
2431         self.pubkey = pubkey
2432         self.privkey = privkey
2433     def generate(self, keysize=None):
2434         return defer.succeed( (self.pubkey, self.privkey) )
2435
2436 class FirstServerGetsKilled:
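    # a post_call_notifier: the grid invokes notify() after each remote
    # call. The first call completes normally; we then mark that server's
    # wrapper as broken, so all of its later calls fail.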
2437     done = False
2438     def notify(self, retval, wrapper, methname):
2439         if not self.done:
2440             wrapper.broken = True
2441             self.done = True
2442         return retval
2443
2444 class FirstServerGetsDeleted:
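    # like FirstServerGetsKilled, but gentler: after the first server's
    # first successful call, its subsequent slot_testv_and_readv_and_writev
    # responses are replaced with (True, {}), i.e. "no shares here"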
2445     def __init__(self):
2446         self.done = False
2447         self.silenced = None
2448     def notify(self, retval, wrapper, methname):
2449         if not self.done:
2450             # this query will work, but later queries should think the share
2451             # has been deleted
2452             self.done = True
2453             self.silenced = wrapper
2454             return retval
2455         if wrapper == self.silenced:
2456             assert methname == "slot_testv_and_readv_and_writev"
2457             return (True, {})
2458         return retval
2459
2460 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2461     def do_publish_surprise(self, version):
2462         self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
2463         self.set_up_grid()
2464         nm = self.g.clients[0].nodemaker
2465         d = nm.create_mutable_file(MutableData("contents 1"),
2466                                     version=version)
2467         def _created(n):
2468             d = defer.succeed(None)
2469             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2470             def _got_smap1(smap):
2471                 # stash the old state of the file
2472                 self.old_map = smap
2473             d.addCallback(_got_smap1)
2474             # then modify the file, leaving the old map untouched
2475             d.addCallback(lambda res: log.msg("starting winning write"))
2476             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2477             # now attempt to modify the file with the old servermap. This
2478             # will look just like an uncoordinated write, in which every
2479             # single share got updated between our mapupdate and our publish
2480             d.addCallback(lambda res: log.msg("starting doomed write"))
2481             d.addCallback(lambda res:
2482                           self.shouldFail(UncoordinatedWriteError,
2483                                           "test_publish_surprise", None,
2484                                           n.upload,
2485                                           MutableData("contents 2a"), self.old_map))
2486             return d
2487         d.addCallback(_created)
2488         return d
2489
2490     def test_publish_surprise_sdmf(self):
2491         return self.do_publish_surprise(SDMF_VERSION)
2492
2493     def test_publish_surprise_mdmf(self):
2494         return self.do_publish_surprise(MDMF_VERSION)
2495
2496     def test_retrieve_surprise(self):
2497         self.basedir = "mutable/Problems/test_retrieve_surprise"
2498         self.set_up_grid()
2499         nm = self.g.clients[0].nodemaker
2500         d = nm.create_mutable_file(MutableData("contents 1"))
2501         def _created(n):
2502             d = defer.succeed(None)
2503             d.addCallback(lambda res: n.get_servermap(MODE_READ))
2504             def _got_smap1(smap):
2505                 # stash the old state of the file
2506                 self.old_map = smap
2507             d.addCallback(_got_smap1)
2508             # then modify the file, leaving the old map untouched
2509             d.addCallback(lambda res: log.msg("starting winning write"))
2510             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2511             # now attempt to retrieve the old version with the old servermap.
2512             # This will look like someone has changed the file since we
2513             # updated the servermap.
2514             d.addCallback(lambda res: n._cache._clear())
2515             d.addCallback(lambda res: log.msg("starting doomed read"))
2516             d.addCallback(lambda res:
2517                           self.shouldFail(NotEnoughSharesError,
2518                                           "test_retrieve_surprise",
2519                                           "ran out of servers: have 0 of 1",
2520                                           n.download_version,
2521                                           self.old_map,
2522                                           self.old_map.best_recoverable_version(),
2523                                           ))
2524             return d
2525         d.addCallback(_created)
2526         return d
2527
2528
2529     def test_unexpected_shares(self):
2530         # upload the file, take a servermap, shut down one of the servers,
2531         # upload it again (causing shares to appear on a new server), then
2532         # upload using the old servermap. The last upload should fail with an
2533         # UncoordinatedWriteError, because of the shares that didn't appear
2534         # in the servermap.
2535         self.basedir = "mutable/Problems/test_unexpected_shares"
2536         self.set_up_grid()
2537         nm = self.g.clients[0].nodemaker
2538         d = nm.create_mutable_file(MutableData("contents 1"))
2539         def _created(n):
2540             d = defer.succeed(None)
2541             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2542             def _got_smap1(smap):
2543                 # stash the old state of the file
2544                 self.old_map = smap
2545                 # now shut down one of the servers
2546                 peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
2547                 self.g.remove_server(peer0)
2548                 # then modify the file, leaving the old map untouched
2549                 log.msg("starting winning write")
2550                 return n.overwrite(MutableData("contents 2"))
2551             d.addCallback(_got_smap1)
2552             # now attempt to modify the file with the old servermap. This
2553             # will look just like an uncoordinated write, in which every
2554             # single share got updated between our mapupdate and our publish
2555             d.addCallback(lambda res: log.msg("starting doomed write"))
2556             d.addCallback(lambda res:
2557                           self.shouldFail(UncoordinatedWriteError,
2558                                           "test_surprise", None,
2559                                           n.upload,
2560                                           MutableData("contents 2a"), self.old_map))
2561             return d
2562         d.addCallback(_created)
2563         return d
2564
2565     def test_multiply_placed_shares(self):
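        # If copies of a share end up on more than one server (here because
        # a server is removed and later restored), an overwrite must update
        # every copy, not just one per share number.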
2566         self.basedir = "mutable/Problems/test_multiply_placed_shares"
2567         self.set_up_grid()
2568         nm = self.g.clients[0].nodemaker
2569         d = nm.create_mutable_file(MutableData("contents 1"))
2570         # remove one of the servers and reupload the file.
2571         def _created(n):
2572             self._node = n
2573
2574             servers = self.g.get_all_serverids()
2575             self.ss = self.g.remove_server(servers[-1])
2576
2577             new_server = self.g.make_server(len(servers)-1)
2578             self.g.add_server(len(servers)-1, new_server)
2579
2580             return self._node.download_best_version()
2581         d.addCallback(_created)
2582         d.addCallback(lambda data: MutableData(data))
2583         d.addCallback(lambda data: self._node.overwrite(data))
2584
2585         # restore the server we removed earlier, then download+upload
2586         # the file again
2587         def _overwritten(ign):
2588             self.g.add_server(len(self.g.servers_by_number), self.ss)
2589             return self._node.download_best_version()
2590         d.addCallback(_overwritten)
2591         d.addCallback(lambda data: MutableData(data))
2592         d.addCallback(lambda data: self._node.overwrite(data))
2593         d.addCallback(lambda ignored:
2594             self._node.get_servermap(MODE_CHECK))
2595         def _overwritten_again(smap):
2596             # Make sure that all shares were updated by checking that
2597             # no other versions remain in the servermap.
2598             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2599             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2600         d.addCallback(_overwritten_again)
2601         return d
2602
2603     def test_bad_server(self):
2604         # Break one server, then create the file: the initial publish should
2605         # complete with an alternate server. Breaking a second server should
2606         # not prevent an update from succeeding either.
2607         self.basedir = "mutable/Problems/test_bad_server"
2608         self.set_up_grid()
2609         nm = self.g.clients[0].nodemaker
2610
2611         # to make sure that one of the initial peers is broken, we have to
2612         # get creative. We create an RSA key and compute its storage-index.
2613         # Then we make a KeyGenerator that always returns that one key, and
2614         # use it to create the mutable file. This will get easier when we can
2615         # use #467 static-server-selection to disable permutation and force
2616         # the choice of server for share[0].
2617
2618         d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2619         def _got_key( (pubkey, privkey) ):
2620             nm.key_generator = SameKeyGenerator(pubkey, privkey)
2621             pubkey_s = pubkey.serialize()
2622             privkey_s = privkey.serialize()
2623             u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2624                                         ssk_pubkey_fingerprint_hash(pubkey_s))
2625             self._storage_index = u.get_storage_index()
2626         d.addCallback(_got_key)
2627         def _break_peer0(res):
2628             si = self._storage_index
2629             servers = nm.storage_broker.get_servers_for_psi(si)
2630             self.g.break_server(servers[0].get_serverid())
2631             self.server1 = servers[1]
2632         d.addCallback(_break_peer0)
2633         # now "create" the file, using the pre-established key, and let the
2634         # initial publish finally happen
2635         d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2636         # that ought to work
2637         def _got_node(n):
2638             d = n.download_best_version()
2639             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2640             # now break the second peer
2641             def _break_peer1(res):
2642                 self.g.break_server(self.server1.get_serverid())
2643             d.addCallback(_break_peer1)
2644             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2645             # that ought to work too
2646             d.addCallback(lambda res: n.download_best_version())
2647             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2648             def _explain_error(f):
2649                 print f
2650                 if f.check(NotEnoughServersError):
2651                     print "first_error:", f.value.first_error
2652                 return f
2653             d.addErrback(_explain_error)
2654             return d
2655         d.addCallback(_got_node)
2656         return d
2657
2658     def test_bad_server_overlap(self):
2659         # like test_bad_server, but with no extra unused servers to fall back
2660         # upon. This means that we must re-use a server which we've already
2661         # used. If we don't remember the fact that we sent them one share
2662         # already, we'll mistakenly think we're experiencing an
2663         # UncoordinatedWriteError.
2664
2665         # Break one server, then create the file: the initial publish should
2666         # complete with an alternate server. Breaking a second server should
2667         # not prevent an update from succeeding either.
2668         self.basedir = "mutable/Problems/test_bad_server_overlap"
2669         self.set_up_grid()
2670         nm = self.g.clients[0].nodemaker
2671         sb = nm.storage_broker
2672
2673         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2674         self.g.break_server(peerids[0])
2675
2676         d = nm.create_mutable_file(MutableData("contents 1"))
2677         def _created(n):
2678             d = n.download_best_version()
2679             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2680             # now break one of the remaining servers
2681             def _break_second_server(res):
2682                 self.g.break_server(peerids[1])
2683             d.addCallback(_break_second_server)
2684             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2685             # that ought to work too
2686             d.addCallback(lambda res: n.download_best_version())
2687             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2688             return d
2689         d.addCallback(_created)
2690         return d
2691
2692     def test_publish_all_servers_bad(self):
2693         # Break all servers: the publish should fail
2694         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2695         self.set_up_grid()
2696         nm = self.g.clients[0].nodemaker
2697         for s in nm.storage_broker.get_connected_servers():
2698             s.get_rref().broken = True
2699
2700         d = self.shouldFail(NotEnoughServersError,
2701                             "test_publish_all_servers_bad",
2702                             "ran out of good servers",
2703                             nm.create_mutable_file, MutableData("contents"))
2704         return d
2705
2706     def test_publish_no_servers(self):
2707         # no servers at all: the publish should fail
2708         self.basedir = "mutable/Problems/test_publish_no_servers"
2709         self.set_up_grid(num_servers=0)
2710         nm = self.g.clients[0].nodemaker
2711
2712         d = self.shouldFail(NotEnoughServersError,
2713                             "test_publish_no_servers",
2714                             "Ran out of non-bad servers",
2715                             nm.create_mutable_file, MutableData("contents"))
2716         return d
2717
2718
2719     def test_privkey_query_error(self):
2720         # when a servermap is updated with MODE_WRITE, it tries to get the
2721         # privkey. Something might go wrong during this query attempt.
2722         # Exercise the code in _privkey_query_failed which tries to handle
2723         # such an error.
2724         self.basedir = "mutable/Problems/test_privkey_query_error"
2725         self.set_up_grid(num_servers=20)
2726         nm = self.g.clients[0].nodemaker
2727         nm._node_cache = DevNullDictionary() # disable the nodecache
2728
2729         # we need some contents that are large enough to push the privkey out
2730         # of the early part of the file
2731         LARGE = "These are Larger contents" * 2000 # about 50KB
2732         LARGE_uploadable = MutableData(LARGE)
2733         d = nm.create_mutable_file(LARGE_uploadable)
2734         def _created(n):
2735             self.uri = n.get_uri()
2736             self.n2 = nm.create_from_cap(self.uri)
2737
2738             # When a mapupdate is performed on a node that doesn't yet know
2739             # the privkey, a short read is sent to a batch of servers, to get
2740             # the verinfo and (hopefully, if the file is short enough) the
2741             # encprivkey. Our file is too large to let this first read
2742             # contain the encprivkey. Each non-encprivkey-bearing response
2743             # that arrives (until the node gets the encprivkey) will trigger
2744             # a second read to specifically read the encprivkey.
2745             #
2746             # So, to exercise this case:
2747             #  1. notice which server gets a read() call first
2748             #  2. tell that server to start throwing errors
2749             killer = FirstServerGetsKilled()
2750             for s in nm.storage_broker.get_connected_servers():
2751                 s.get_rref().post_call_notifier = killer.notify
2752         d.addCallback(_created)
2753
2754         # now we update a servermap from a new node (which doesn't have the
2755         # privkey yet, forcing it to use a separate privkey query). Note that
2756         # the map-update will succeed, since we'll just get a copy from one
2757         # of the other shares.
2758         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
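        # the test passes if that mapupdate Deferred fires cleanly; the
        # erroring server exercises _privkey_query_failed along the way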
2759
2760         return d
2761
2762     def test_privkey_query_missing(self):
2763         # like test_privkey_query_error, but the shares are deleted by the
2764         # second query, instead of raising an exception.
2765         self.basedir = "mutable/Problems/test_privkey_query_missing"
2766         self.set_up_grid(num_servers=20)
2767         nm = self.g.clients[0].nodemaker
2768         LARGE = "These are Larger contents" * 2000 # about 50KB
2769         LARGE_uploadable = MutableData(LARGE)
2770         nm._node_cache = DevNullDictionary() # disable the nodecache
2771
2772         d = nm.create_mutable_file(LARGE_uploadable)
2773         def _created(n):
2774             self.uri = n.get_uri()
2775             self.n2 = nm.create_from_cap(self.uri)
2776             deleter = FirstServerGetsDeleted()
2777             for s in nm.storage_broker.get_connected_servers():
2778                 s.get_rref().post_call_notifier = deleter.notify
2779         d.addCallback(_created)
2780         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2781         return d
2782
2783
2784     def test_block_and_hash_query_error(self):
2785         # This tests what happens when a query to a remote server
2786         # fails in either the hash-validation step or the block-fetching
2787         # step (because of batching, these are the same actual query).
2788         # We need the storage server to behave normally up until the
2789         # point that its prefix is validated, then suddenly die. This
2790         # exercises some exception handling code in Retrieve.
2791         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2792         self.set_up_grid(num_servers=20)
2793         nm = self.g.clients[0].nodemaker
2794         CONTENTS = "contents" * 2000
2795         CONTENTS_uploadable = MutableData(CONTENTS)
2796         d = nm.create_mutable_file(CONTENTS_uploadable)
2797         def _created(node):
2798             self._node = node
2799         d.addCallback(_created)
2800         d.addCallback(lambda ignored:
2801             self._node.get_servermap(MODE_READ))
2802         def _then(servermap):
2803             # we have our servermap. Now we set up the servers like the
2804             # tests above -- the first one that gets a read call should
2805             # start throwing errors, but only after returning its prefix
2806             # for validation. Since we'll download without fetching the
2807             # private key, the next query to the remote server will be
2808             # for either a block and salt or for hashes, either of which
2809             # will exercise the error handling code.
2810             killer = FirstServerGetsKilled()
2811             for s in nm.storage_broker.get_connected_servers():
2812                 s.get_rref().post_call_notifier = killer.notify
2813             ver = servermap.best_recoverable_version()
2814             assert ver
2815             return self._node.download_version(servermap, ver)
2816         d.addCallback(_then)
2817         d.addCallback(lambda data:
2818             self.failUnlessEqual(data, CONTENTS))
2819         return d
2820
2821     def test_1654(self):
2822         # test that the Retrieve object unconditionally verifies the block
2823         # hash tree root for mutable shares. The failure mode is that
2824         # carefully crafted shares can cause undetected corruption (the
2825         # retrieve appears to finish successfully, but the result is
2826         # corrupted). When fixed, these shares always cause a
2827         # CorruptShareError, which results in NotEnoughSharesError in this
2828         # 2-of-2 file.
2829         self.basedir = "mutable/Problems/test_1654"
2830         self.set_up_grid(num_servers=2)
2831         cap = uri.from_string(TEST_1654_CAP)
2832         si = cap.get_storage_index()
2833
2834         for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
2835             sharedata = base64.b64decode(share)
2836             storedir = self.get_serverdir(shnum)
2837             storage_path = os.path.join(storedir, "shares",
2838                                         storage_index_to_dir(si))
2839             fileutil.make_dirs(storage_path)
2840             fileutil.write(os.path.join(storage_path, "%d" % shnum),
2841                            sharedata)
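        # the crafted shares are written straight into each server's
        # storage directory, bypassing the publish path, so the corruption
        # arrives on disk intact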
2842
2843         nm = self.g.clients[0].nodemaker
2844         n = nm.create_from_cap(TEST_1654_CAP)
2845         # to exercise the problem correctly, we must ensure that sh0 is
2846         # processed first, and sh1 second. NoNetworkGrid has facilities to
2847         # stall the first request from a single server, but it's not
2848         # currently easy to extend that to stall the second request (mutable
2849         # retrievals will see two: first the mapupdate, then the fetch).
2850         # However, repeated executions of this test without the #1654 fix
2851         # suggest that we fail reliably even without explicit stalls,
2852         # probably because the servers are queried in a fixed order. So I'm
2853         # ok with relying upon that.
2854         d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
2855                             "ran out of servers",
2856                             n.download_best_version)
2857         return d
2858
2859
2860 TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"
2861
2862 TEST_1654_SH0 = """\
2863 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
2864 AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
2865 AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
2866 UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
2867 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2868 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2869 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2870 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2871 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2872 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2873 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2874 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2875 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2876 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2877 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2878 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2879 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2880 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2881 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2882 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2883 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2884 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2885 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2886 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2887 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
2888 bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
2889 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2890 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2891 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2892 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2893 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2894 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2895 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2896 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2897 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2898 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2899 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2900 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2901 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2902 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2903 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2904 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2905 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2906 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2907 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2908 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2909 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2910 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2911 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2912 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2913 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2914 72mXGlqyLyWYuAAAAAA="""
2915
2916 TEST_1654_SH1 = """\
2917 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
2918 9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
2919 AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
2920 MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
2921 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2922 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2923 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2924 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2925 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2926 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2927 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2928 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2929 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2930 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2931 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2932 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2933 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2934 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2935 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2936 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2937 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2938 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2939 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2940 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2941 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
2942 98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
2943 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2944 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2945 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2946 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2947 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2948 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2949 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2950 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2951 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2952 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2953 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2954 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2955 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2956 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2957 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2958 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2959 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2960 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2961 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2962 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2963 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2964 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2965 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2966 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2967 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2968 72mXGlqyLyWYuAAAAAA="""
2969
2970
2971 class FileHandle(unittest.TestCase):
2972     def setUp(self):
2973         self.test_data = "Test Data" * 50000
2974         self.sio = StringIO(self.test_data)
2975         self.uploadable = MutableFileHandle(self.sio)
2976
2977
2978     def test_filehandle_read(self):
2979         self.basedir = "mutable/FileHandle/test_filehandle_read"
2980         chunk_size = 10
2981         for i in xrange(0, len(self.test_data), chunk_size):
2982             data = self.uploadable.read(chunk_size)
2983             data = "".join(data)
2984             start = i
2985             end = i + chunk_size
2986             self.failUnlessEqual(data, self.test_data[start:end])
2987
2988
2989     def test_filehandle_get_size(self):
2990         self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2991         actual_size = len(self.test_data)
2992         size = self.uploadable.get_size()
2993         self.failUnlessEqual(size, actual_size)
2994
2995
2996     def test_filehandle_get_size_out_of_order(self):
2997         # We should be able to call get_size whenever we want without
2998         # disturbing the location of the seek pointer.
2999         chunk_size = 100
3000         data = self.uploadable.read(chunk_size)
3001         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
3002
3003         # Now get the size.
3004         size = self.uploadable.get_size()
3005         self.failUnlessEqual(size, len(self.test_data))
3006
3007         # Now get more data. We should be right where we left off.
3008         more_data = self.uploadable.read(chunk_size)
3009         start = chunk_size
3010         end = chunk_size * 2
3011         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
3012
3013
3014     def test_filehandle_file(self):
3015         # Make sure that the MutableFileHandle works on a file as well
3016         # as a StringIO object, since in some cases it will be asked to
3017         # deal with files.
3018         self.basedir = self.mktemp()
3019         # mktemp() only returns a path; it does not create the directory
3020         os.mkdir(self.basedir)
3021         f_path = os.path.join(self.basedir, "test_file")
3022         f = open(f_path, "w")
3023         f.write(self.test_data)
3024         f.close()
3025         f = open(f_path, "r")
3026
3027         uploadable = MutableFileHandle(f)
3028
3029         data = uploadable.read(len(self.test_data))
3030         self.failUnlessEqual("".join(data), self.test_data)
3031         size = uploadable.get_size()
3032         self.failUnlessEqual(size, len(self.test_data))
3033
3034
3035     def test_close(self):
3036         # Make sure that the MutableFileHandle closes its handle when
3037         # told to do so.
3038         self.uploadable.close()
3039         self.failUnless(self.sio.closed)
3040
3041
3042 class DataHandle(unittest.TestCase):
3043     def setUp(self):
3044         self.test_data = "Test Data" * 50000
3045         self.uploadable = MutableData(self.test_data)
3046
3047
3048     def test_datahandle_read(self):
3049         chunk_size = 10
3050         for i in xrange(0, len(self.test_data), chunk_size):
3051             data = self.uploadable.read(chunk_size)
3052             data = "".join(data)
3053             start = i
3054             end = i + chunk_size
3055             self.failUnlessEqual(data, self.test_data[start:end])
3056
3057
3058     def test_datahandle_get_size(self):
3059         actual_size = len(self.test_data)
3060         size = self.uploadable.get_size()
3061         self.failUnlessEqual(size, actual_size)
3062
3063
3064     def test_datahandle_get_size_out_of_order(self):
3065         # We should be able to call get_size whenever we want without
3066         # disturbing the location of the seek pointer.
3067         chunk_size = 100
3068         data = self.uploadable.read(chunk_size)
3069         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
3070
3071         # Now get the size.
3072         size = self.uploadable.get_size()
3073         self.failUnlessEqual(size, len(self.test_data))
3074
3075         # Now get more data. We should be right where we left off.
3076         more_data = self.uploadable.read(chunk_size)
3077         start = chunk_size
3078         end = chunk_size * 2
3079         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
3080
3081
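# A short, illustrative sketch (not run by the suite) of the uploadable
# pattern both handle classes above exercise: read(n) returns a *list* of
# byte-chunks rather than one string, and get_size() never disturbs the
# read position.
def _uploadable_sketch():
    u = MutableData("abcdef")
    assert u.get_size() == 6
    first = "".join(u.read(3))  # "abc"
    assert u.get_size() == 6    # size query leaves the cursor alone
    rest = "".join(u.read(3))   # "def": resumes where read() left off
    return first + rest
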
3082 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
3083               PublishMixin):
3084     def setUp(self):
3085         GridTestMixin.setUp(self)
3086         self.basedir = self.mktemp()
3087         self.set_up_grid()
3088         self.c = self.g.clients[0]
3089         self.nm = self.c.nodemaker
3090         self.data = "test data" * 100000 # about 900 KB; MDMF
3091         self.small_data = "test data" * 10 # about 90 B; SDMF
3092
3093
3094     def do_upload_mdmf(self):
3095         d = self.nm.create_mutable_file(MutableData(self.data),
3096                                         version=MDMF_VERSION)
3097         def _then(n):
3098             assert isinstance(n, MutableFileNode)
3099             assert n._protocol_version == MDMF_VERSION
3100             self.mdmf_node = n
3101             return n
3102         d.addCallback(_then)
3103         return d
3104
3105     def do_upload_sdmf(self):
3106         d = self.nm.create_mutable_file(MutableData(self.small_data))
3107         def _then(n):
3108             assert isinstance(n, MutableFileNode)
3109             assert n._protocol_version == SDMF_VERSION
3110             self.sdmf_node = n
3111             return n
3112         d.addCallback(_then)
3113         return d
3114
3115     def do_upload_empty_sdmf(self):
3116         d = self.nm.create_mutable_file(MutableData(""))
3117         def _then(n):
3118             assert isinstance(n, MutableFileNode)
3119             self.sdmf_zero_length_node = n
3120             assert n._protocol_version == SDMF_VERSION
3121             return n
3122         d.addCallback(_then)
3123         return d
3124
3125     def do_upload(self):
3126         d = self.do_upload_mdmf()
3127         d.addCallback(lambda ign: self.do_upload_sdmf())
3128         return d
3129
3130     def test_debug(self):
3131         d = self.do_upload_mdmf()
3132         def _debug(n):
3133             fso = debug.FindSharesOptions()
3134             storage_index = base32.b2a(n.get_storage_index())
3135             fso.si_s = storage_index
3136             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
3137                             for (i,ss,storedir)
3138                             in self.iterate_servers()]
3139             fso.stdout = StringIO()
3140             fso.stderr = StringIO()
3141             debug.find_shares(fso)
3142             sharefiles = fso.stdout.getvalue().splitlines()
3143             expected = self.nm.default_encoding_parameters["n"]
3144             self.failUnlessEqual(len(sharefiles), expected)
3145
3146             do = debug.DumpOptions()
3147             do["filename"] = sharefiles[0]
3148             do.stdout = StringIO()
3149             debug.dump_share(do)
3150             output = do.stdout.getvalue()
3151             lines = set(output.splitlines())
3152             self.failUnless("Mutable slot found:" in lines, output)
3153             self.failUnless(" share_type: MDMF" in lines, output)
3154             self.failUnless(" num_extra_leases: 0" in lines, output)
3155             self.failUnless(" MDMF contents:" in lines, output)
3156             self.failUnless("  seqnum: 1" in lines, output)
3157             self.failUnless("  required_shares: 3" in lines, output)
3158             self.failUnless("  total_shares: 10" in lines, output)
3159             self.failUnless("  segsize: 131073" in lines, output)
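            # (131073 == mathutil.next_multiple(128*1024, 3): MDMF pads the
            #  segment size up to a multiple of required_shares)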
3160             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
3161             vcap = n.get_verify_cap().to_string()
3162             self.failUnless("  verify-cap: %s" % vcap in lines, output)
3163
3164             cso = debug.CatalogSharesOptions()
3165             cso.nodedirs = fso.nodedirs
3166             cso.stdout = StringIO()
3167             cso.stderr = StringIO()
3168             debug.catalog_shares(cso)
3169             shares = cso.stdout.getvalue().splitlines()
3170             oneshare = shares[0] # all shares should be MDMF
3171             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3172             self.failUnless(oneshare.startswith("MDMF"), oneshare)
3173             fields = oneshare.split()
3174             self.failUnlessEqual(fields[0], "MDMF")
3175             self.failUnlessEqual(fields[1], storage_index)
3176             self.failUnlessEqual(fields[2], "3/10")
3177             self.failUnlessEqual(fields[3], "%d" % len(self.data))
3178             self.failUnless(fields[4].startswith("#1:"), fields[4])
3179             # the rest of fields[4] is the roothash, which depends upon
3180             # encryption salts and is not constant. fields[5] is the
3181             # remaining time on the longest lease, which is timing dependent.
3182             # The rest of the line is the quoted pathname to the share.
3183         d.addCallback(_debug)
3184         return d
3185
3186     def test_get_sequence_number(self):
3187         d = self.do_upload()
3188         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3189         d.addCallback(lambda bv:
3190             self.failUnlessEqual(bv.get_sequence_number(), 1))
3191         d.addCallback(lambda ignored:
3192             self.sdmf_node.get_best_readable_version())
3193         d.addCallback(lambda bv:
3194             self.failUnlessEqual(bv.get_sequence_number(), 1))
3195         # Now update. After the update, the sequence number in both
3196         # cases should be 2.
3197         def _do_update(ignored):
3198             new_data = MutableData("foo bar baz" * 100000)
3199             new_small_data = MutableData("foo bar baz" * 10)
3200             d1 = self.mdmf_node.overwrite(new_data)
3201             d2 = self.sdmf_node.overwrite(new_small_data)
3202             dl = gatherResults([d1, d2])
3203             return dl
3204         d.addCallback(_do_update)
3205         d.addCallback(lambda ignored:
3206             self.mdmf_node.get_best_readable_version())
3207         d.addCallback(lambda bv:
3208             self.failUnlessEqual(bv.get_sequence_number(), 2))
3209         d.addCallback(lambda ignored:
3210             self.sdmf_node.get_best_readable_version())
3211         d.addCallback(lambda bv:
3212             self.failUnlessEqual(bv.get_sequence_number(), 2))
3213         return d
3214
3215
3216     def test_cap_after_upload(self):
3217         # If we create a new mutable file as MDMF and upload data to it,
3218         # we should get an MDMF cap back from that file and should be
3219         # able to use it. Check that both the write cap and the readonly
3220         # cap parse as MDMF URIs.
3221         d = self.do_upload_mdmf()
3222         def _then(ign):
3223             mdmf_uri = self.mdmf_node.get_uri()
3224             cap = uri.from_string(mdmf_uri)
3225             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3226             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3227             cap = uri.from_string(readonly_mdmf_uri)
3228             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3229         d.addCallback(_then)
3230         return d
3231
3232     def test_mutable_version(self):
3233         # assert that getting parameters from the IMutableVersion object
3234         # gives us the same data as getting them from the filenode itself
3235         d = self.do_upload()
3236         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3237         def _check_mdmf(bv):
3238             n = self.mdmf_node
3239             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3240             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3241             self.failIf(bv.is_readonly())
3242         d.addCallback(_check_mdmf)
3243         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3244         def _check_sdmf(bv):
3245             n = self.sdmf_node
3246             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3247             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3248             self.failIf(bv.is_readonly())
3249         d.addCallback(_check_sdmf)
3250         return d
3251
3252
3253     def test_get_readonly_version(self):
3254         d = self.do_upload()
3255         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3256         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3257
3258         # Attempting to get a mutable version of a mutable file from a
3259         # filenode initialized with a readcap should return a readonly
3260         # version of that same node.
3261         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3262         d.addCallback(lambda ro: ro.get_best_mutable_version())
3263         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3264
3265         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3266         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3267
3268         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3269         d.addCallback(lambda ro: ro.get_best_mutable_version())
3270         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3271         return d
3272
3273
3274     def test_toplevel_overwrite(self):
3275         new_data = MutableData("foo bar baz" * 100000)
3276         new_small_data = MutableData("foo bar baz" * 10)
3277         d = self.do_upload()
3278         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3279         d.addCallback(lambda ignored:
3280             self.mdmf_node.download_best_version())
3281         d.addCallback(lambda data:
3282             self.failUnlessEqual(data, "foo bar baz" * 100000))
3283         d.addCallback(lambda ignored:
3284             self.sdmf_node.overwrite(new_small_data))
3285         d.addCallback(lambda ignored:
3286             self.sdmf_node.download_best_version())
3287         d.addCallback(lambda data:
3288             self.failUnlessEqual(data, "foo bar baz" * 10))
3289         return d
3290
3291
3292     def test_toplevel_modify(self):
3293         d = self.do_upload()
3294         def modifier(old_contents, servermap, first_time):
3295             return old_contents + "modified"
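        # (modify() downloads the current contents, applies the modifier,
        # and publishes the result; on a write collision it retries the
        # modifier against fresh contents with first_time=False)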
3296         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3297         d.addCallback(lambda ignored:
3298             self.mdmf_node.download_best_version())
3299         d.addCallback(lambda data:
3300             self.failUnlessIn("modified", data))
3301         d.addCallback(lambda ignored:
3302             self.sdmf_node.modify(modifier))
3303         d.addCallback(lambda ignored:
3304             self.sdmf_node.download_best_version())
3305         d.addCallback(lambda data:
3306             self.failUnlessIn("modified", data))
3307         return d
3308
3309
3310     def test_version_modify(self):
3311         # TODO: When we can publish multiple versions, alter this test
3312         # to modify a version other than the best usable version, then
3313         # check that the best recoverable version is the modified one.
3314         d = self.do_upload()
3315         def modifier(old_contents, servermap, first_time):
3316             return old_contents + "modified"
3317         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3318         d.addCallback(lambda ignored:
3319             self.mdmf_node.download_best_version())
3320         d.addCallback(lambda data:
3321             self.failUnlessIn("modified", data))
3322         d.addCallback(lambda ignored:
3323             self.sdmf_node.modify(modifier))
3324         d.addCallback(lambda ignored:
3325             self.sdmf_node.download_best_version())
3326         d.addCallback(lambda data:
3327             self.failUnlessIn("modified", data))
3328         return d
3329
3330
3331     def test_download_version(self):
3332         d = self.publish_multiple()
3333         # We want to have two recoverable versions on the grid.
3334         d.addCallback(lambda res:
3335                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3336                                           1:1,3:1,5:1,7:1,9:1}))
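        # even-numbered shares hold version 0 and odd-numbered shares hold
        # version 1; with k=3, five shares apiece makes both recoverable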
3337         # Now try to download each version. We should get the plaintext
3338         # associated with that version.
3339         d.addCallback(lambda ignored:
3340             self._fn.get_servermap(mode=MODE_READ))
3341         def _got_servermap(smap):
3342             versions = smap.recoverable_versions()
3343             assert len(versions) == 2
3344
3345             self.servermap = smap
3346             self.version1, self.version2 = versions
3347             assert self.version1 != self.version2
3348
3349             self.version1_seqnum = self.version1[0]
3350             self.version2_seqnum = self.version2[0]
3351             self.version1_index = self.version1_seqnum - 1
3352             self.version2_index = self.version2_seqnum - 1
3353
3354         d.addCallback(_got_servermap)
3355         d.addCallback(lambda ignored:
3356             self._fn.download_version(self.servermap, self.version1))
3357         d.addCallback(lambda results:
3358             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3359                                  results))
3360         d.addCallback(lambda ignored:
3361             self._fn.download_version(self.servermap, self.version2))
3362         d.addCallback(lambda results:
3363             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3364                                  results))
3365         return d
3366
3367
3368     def test_download_nonexistent_version(self):
3369         d = self.do_upload_mdmf()
3370         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3371         def _set_servermap(servermap):
3372             self.servermap = servermap
3373         d.addCallback(_set_servermap)
3374         d.addCallback(lambda ignored:
3375            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3376                            None,
3377                            self.mdmf_node.download_version, self.servermap,
3378                            "not a version"))
3379         return d
3380
3381
3382     def test_partial_read(self):
3383         d = self.do_upload_mdmf()
3384         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3385         modes = [("start_on_segment_boundary",
3386                   mathutil.next_multiple(128 * 1024, 3), 50),
3387                  ("ending_one_byte_after_segment_boundary",
3388                   mathutil.next_multiple(128 * 1024, 3)-50, 51),
3389                  ("zero_length_at_start", 0, 0),
3390                  ("zero_length_in_middle", 50, 0),
3391                  ("zero_length_at_segment_boundary",
3392                   mathutil.next_multiple(128 * 1024, 3), 0),
3393                  ]
3394         for (name, offset, length) in modes:
3395             d.addCallback(self._do_partial_read, name, offset, length)
3396         # then read only a few bytes at a time, and see that the results are
3397         # what we expect.
3398         def _read_data(version):
3399             c = consumer.MemoryConsumer()
3400             d2 = defer.succeed(None)
3401             for i in xrange(0, len(self.data), 10000):
3402                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3403             d2.addCallback(lambda ignored:
3404                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3405             return d2
3406         d.addCallback(_read_data)
3407         return d
3408     def _do_partial_read(self, version, name, offset, length):
3409         c = consumer.MemoryConsumer()
3410         d = version.read(c, offset, length)
3411         expected = self.data[offset:offset+length]
3412         d.addCallback(lambda ignored: "".join(c.chunks))
3413         def _check(results):
3414             if results != expected:
3415                 print
3416                 print "got: %s ... %s" % (results[:20], results[-20:])
3417                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3418                 self.fail("results[%s] != expected" % name)
3419             return version # daisy-chained to next call
3420         d.addCallback(_check)
3421         return d
3422
3423
3424     def _test_read_and_download(self, node, expected):
3425         d = node.get_best_readable_version()
3426         def _read_data(version):
3427             c = consumer.MemoryConsumer()
3428             d2 = defer.succeed(None)
3429             d2.addCallback(lambda ignored: version.read(c))
3430             d2.addCallback(lambda ignored:
3431                 self.failUnlessEqual(expected, "".join(c.chunks)))
3432             return d2
3433         d.addCallback(_read_data)
3434         d.addCallback(lambda ignored: node.download_best_version())
3435         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3436         return d
3437
3438     def test_read_and_download_mdmf(self):
3439         d = self.do_upload_mdmf()
3440         d.addCallback(self._test_read_and_download, self.data)
3441         return d
3442
3443     def test_read_and_download_sdmf(self):
3444         d = self.do_upload_sdmf()
3445         d.addCallback(self._test_read_and_download, self.small_data)
3446         return d
3447
3448     def test_read_and_download_sdmf_zero_length(self):
3449         d = self.do_upload_empty_sdmf()
3450         d.addCallback(self._test_read_and_download, "")
3451         return d
3452
3453
3454 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3455     timeout = 400 # these tests are too big, 120s is not enough on slow
3456                   # platforms
3457     def setUp(self):
3458         GridTestMixin.setUp(self)
3459         self.basedir = self.mktemp()
3460         self.set_up_grid()
3461         self.c = self.g.clients[0]
3462         self.nm = self.c.nodemaker
3463         self.data = "testdata " * 100000 # about 900 KB; MDMF
3464         self.small_data = "test data" * 10 # about 90 B; SDMF
3465
3466
3467     def do_upload_sdmf(self):
3468         d = self.nm.create_mutable_file(MutableData(self.small_data))
3469         def _then(n):
3470             assert isinstance(n, MutableFileNode)
3471             self.sdmf_node = n
3472             # Make SDMF node that has 255 shares.
3473             self.nm.default_encoding_parameters['n'] = 255
3474             self.nm.default_encoding_parameters['k'] = 127
3475             return self.nm.create_mutable_file(MutableData(self.small_data))
3476         d.addCallback(_then)
3477         def _then2(n):
3478             assert isinstance(n, MutableFileNode)
3479             self.sdmf_max_shares_node = n
3480         d.addCallback(_then2)
3481         return d
3482
3483     def do_upload_mdmf(self):
3484         d = self.nm.create_mutable_file(MutableData(self.data),
3485                                         version=MDMF_VERSION)
3486         def _then(n):
3487             assert isinstance(n, MutableFileNode)
3488             self.mdmf_node = n
3489             # Make MDMF node that has 255 shares.
3490             self.nm.default_encoding_parameters['n'] = 255
3491             self.nm.default_encoding_parameters['k'] = 127
3492             return self.nm.create_mutable_file(MutableData(self.data),
3493                                                version=MDMF_VERSION)
3494         d.addCallback(_then)
3495         def _then2(n):
3496             assert isinstance(n, MutableFileNode)
3497             self.mdmf_max_shares_node = n
3498         d.addCallback(_then2)
3499         return d
3500
3501     def _test_replace(self, offset, new_data):
3502         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
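        # splice new_data over the old contents at 'offset'; if the write
        # extends past the end of the file, the tail slice is empty and the
        # file simply grows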
3503         d0 = self.do_upload_mdmf()
3504         def _run(ign):
3505             d = defer.succeed(None)
3506             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3507                 # close over 'node'.
3508                 d.addCallback(lambda ign, node=node:
3509                               node.get_best_mutable_version())
3510                 d.addCallback(lambda mv:
3511                               mv.update(MutableData(new_data), offset))
3512                 d.addCallback(lambda ign, node=node:
3513                               node.download_best_version())
3514                 def _check(results):
3515                     if results != expected:
3516                         print
3517                         print "got: %s ... %s" % (results[:20], results[-20:])
3518                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3519                         self.fail("results != expected")
3520                 d.addCallback(_check)
3521             return d
3522         d0.addCallback(_run)
3523         return d0
3524
3525     def test_append(self):
3526         # We should be able to append data to a mutable file and get
3527         # what we expect.
3528         return self._test_replace(len(self.data), "appended")
3529
3530     def test_replace_middle(self):
3531         # We should be able to replace data in the middle of a mutable
3532         # file and get what we expect back.
3533         return self._test_replace(100, "replaced")
3534
3535     def test_replace_beginning(self):
3536         # We should be able to replace data at the beginning of the file
3537         # without truncating the file
3538         return self._test_replace(0, "beginning")
3539
3540     def test_replace_segstart1(self):
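             # offset one byte past the first 128 KiB segment boundary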
3541         return self._test_replace(128*1024+1, "NNNN")
3542
3543     def test_replace_zero_length_beginning(self):
3544         return self._test_replace(0, "")
3545
3546     def test_replace_zero_length_middle(self):
3547         return self._test_replace(50, "")
3548
3549     def test_replace_zero_length_segstart1(self):
3550         return self._test_replace(128*1024+1, "")
3551
3552     def test_replace_and_extend(self):
3553         # We should be able to replace data in the middle of a mutable
3554         # file, extend it past its old length, and get what we expect.
3555         return self._test_replace(100, "modified " * 100000)
3556
3557
3558     def _check_differences(self, got, expected):
3559         # displaying arbitrary file corruption is tricky for a
3560         # ~1 MB file of repeating data, so look for likely problem
3561         # spots and display them separately
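             # (the replacements written by these tests are uppercase
             # letters while the base data is all lowercase, so runs of
             # [A-Z]+ mark exactly the modified regions)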
3562         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3563         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3564         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3565                     for (start,end) in gotmods]
3566         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3567                     for (start,end) in expmods]
3568         #print "expecting: %s" % expspans
3569
3570         SEGSIZE = 128*1024
3571         if got != expected:
3572             print "differences:"
3573             for segnum in range(len(expected)//SEGSIZE):
3574                 start = segnum * SEGSIZE
3575                 end = (segnum+1) * SEGSIZE
3576                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3577                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3578                 if got_ends != exp_ends:
3579                     print "expected[%d]: %s" % (start, exp_ends)
3580                     print "got     [%d]: %s" % (start, got_ends)
3581             if expspans != gotspans:
3582                 print "expected: %s" % expspans
3583                 print "got     : %s" % gotspans
3584             fileutil.write("EXPECTED", expected)
3585             fileutil.write("GOT", got)
3586             print "wrote data to EXPECTED and GOT"
3587             self.fail("didn't get expected data")
3588
3589
3590     def test_replace_locations(self):
3591         # exercise fencepost conditions
3592         SEGSIZE = 128*1024
3593         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
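             # each two-byte write thus lands entirely before, flush
             # against, straddling, or entirely after a 128 KiB segment
             # boundary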
3594         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3595         d0 = self.do_upload_mdmf()
3596         def _run(ign):
3597             expected = self.data
3598             d = defer.succeed(None)
3599             for offset in suspects:
3600                 new_data = letters.next()*2 # "AA", then "BB", etc
3601                 expected = expected[:offset]+new_data+expected[offset+2:]
3602                 d.addCallback(lambda ign:
3603                               self.mdmf_node.get_best_mutable_version())
3604                 def _modify(mv, offset=offset, new_data=new_data):
3605                     # close over 'offset','new_data'
3606                     md = MutableData(new_data)
3607                     return mv.update(md, offset)
3608                 d.addCallback(_modify)
3609                 d.addCallback(lambda ignored:
3610                               self.mdmf_node.download_best_version())
3611                 d.addCallback(self._check_differences, expected)
3612             return d
3613         d0.addCallback(_run)
3614         return d0
3615
3616     def test_replace_locations_max_shares(self):
3617         # exercise fencepost conditions
3618         SEGSIZE = 128*1024
3619         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3620         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3621         d0 = self.do_upload_mdmf()
3622         def _run(ign):
3623             expected = self.data
3624             d = defer.succeed(None)
3625             for offset in suspects:
3626                 new_data = letters.next()*2 # "AA", then "BB", etc
3627                 expected = expected[:offset]+new_data+expected[offset+2:]
3628                 d.addCallback(lambda ign:
3629                               self.mdmf_max_shares_node.get_best_mutable_version())
3630                 def _modify(mv, offset=offset, new_data=new_data):
3631                     # close over 'offset','new_data'
3632                     md = MutableData(new_data)
3633                     return mv.update(md, offset)
3634                 d.addCallback(_modify)
3635                 d.addCallback(lambda ignored:
3636                               self.mdmf_max_shares_node.download_best_version())
3637                 d.addCallback(self._check_differences, expected)
3638             return d
3639         d0.addCallback(_run)
3640         return d0
3641
3642
3643     def test_append_power_of_two(self):
3644         # If we attempt to extend a mutable file so that its segment
3645         # count crosses a power-of-two boundary, the update operation
3646         # should know how to reencode the file.
3647
3648         # Note that the data populating self.mdmf_node is about 900 KiB
3649         # long -- that is 7 segments at the default segment size. So we
3650         # need to add 2 segments worth of data to push it over a
3651         # power-of-two boundary.
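             # (7 + 2 = 9 segments, which crosses the 8 == 2**3 boundary)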
3652         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3653         new_data = self.data + (segment * 2)
3654         d0 = self.do_upload_mdmf()
3655         def _run(ign):
3656             d = defer.succeed(None)
3657             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3658                 # close over 'node'.
3659                 d.addCallback(lambda ign, node=node:
3660                               node.get_best_mutable_version())
3661                 d.addCallback(lambda mv:
3662                               mv.update(MutableData(segment * 2), len(self.data)))
3663                 d.addCallback(lambda ign, node=node:
3664                               node.download_best_version())
3665                 d.addCallback(lambda results:
3666                               self.failUnlessEqual(results, new_data))
3667             return d
3668         d0.addCallback(_run)
3669         return d0
3670
3671     def test_update_sdmf(self):
3672         # Running update on a single-segment file should still work.
3673         new_data = self.small_data + "appended"
3674         d0 = self.do_upload_sdmf()
3675         def _run(ign):
3676             d = defer.succeed(None)
3677             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3678                 # close over 'node'.
3679                 d.addCallback(lambda ign, node=node:
3680                               node.get_best_mutable_version())
3681                 d.addCallback(lambda mv:
3682                               mv.update(MutableData("appended"), len(self.small_data)))
3683                 d.addCallback(lambda ign, node=node:
3684                               node.download_best_version())
3685                 d.addCallback(lambda results:
3686                               self.failUnlessEqual(results, new_data))
3687             return d
3688         d0.addCallback(_run)
3689         return d0
3690
3691     def test_replace_in_last_segment(self):
3692         # The wrapper should know how to handle the tail segment
3693         # appropriately.
3694         replace_offset = len(self.data) - 100
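             # 100 bytes before EOF, so the 8-byte replacement lands
             # entirely inside the short tail segment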
3695         new_data = self.data[:replace_offset] + "replaced"
3696         rest_offset = replace_offset + len("replaced")
3697         new_data += self.data[rest_offset:]
3698         d0 = self.do_upload_mdmf()
3699         def _run(ign):
3700             d = defer.succeed(None)
3701             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3702                 # close over 'node'.
3703                 d.addCallback(lambda ign, node=node:
3704                               node.get_best_mutable_version())
3705                 d.addCallback(lambda mv:
3706                               mv.update(MutableData("replaced"), replace_offset))
3707                 d.addCallback(lambda ign, node=node:
3708                               node.download_best_version())
3709                 d.addCallback(lambda results:
3710                               self.failUnlessEqual(results, new_data))
3711             return d
3712         d0.addCallback(_run)
3713         return d0
3714
3715     def test_multiple_segment_replace(self):
3716         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3717         new_data = self.data[:replace_offset]
3718         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3719         new_data += 2 * new_segment
3720         new_data += "replaced"
3721         rest_offset = len(new_data)
3722         new_data += self.data[rest_offset:]
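             # the single update() below rewrites two full segments plus
             # the start of a third, exercising the multi-segment write
             # path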
3723         d0 = self.do_upload_mdmf()
3724         def _run(ign):
3725             d = defer.succeed(None)
3726             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3727                 # close over 'node'.
3728                 d.addCallback(lambda ign, node=node:
3729                               node.get_best_mutable_version())
3730                 d.addCallback(lambda mv:
3731                               mv.update(MutableData((2 * new_segment) + "replaced"),
3732                                         replace_offset))
3733                 d.addCallback(lambda ignored, node=node:
3734                               node.download_best_version())
3735                 d.addCallback(lambda results:
3736                               self.failUnlessEqual(results, new_data))
3737             return d
3738         d0.addCallback(_run)
3739         return d0
3740
3741 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3742     sdmf_old_shares = {}
3743     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3744     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3745     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3746     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3747     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3748     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3749     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3750     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3751     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3752     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3753     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3754     sdmf_old_contents = "This is a test file.\n"
3755     def copy_sdmf_shares(self):
3756         # We'll basically be short-circuiting the upload process.
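             # Rather than publishing through a client, we base64-decode
             # the canned shares and write one share file directly into
             # each server's storage directory.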
3757         servernums = self.g.servers_by_number.keys()
3758         assert len(servernums) == 10
3759
3760         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3761         # Get the storage index.
3762         cap = uri.from_string(self.sdmf_old_cap)
3763         si = cap.get_storage_index()
3764
3765         # Now execute each assignment by writing the storage.
3766         for (share, servernum) in assignments:
3767             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3768             storedir = self.get_serverdir(servernum)
3769             storage_path = os.path.join(storedir, "shares",
3770                                         storage_index_to_dir(si))
3771             fileutil.make_dirs(storage_path)
3772             fileutil.write(os.path.join(storage_path, "%d" % share),
3773                            sharedata)
3774         # ...and verify that the shares are there.
3775         shares = self.find_uri_shares(self.sdmf_old_cap)
3776         assert len(shares) == 10
3777
3778     def test_new_downloader_can_read_old_shares(self):
3779         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3780         self.set_up_grid()
3781         self.copy_sdmf_shares()
3782         nm = self.g.clients[0].nodemaker
3783         n = nm.create_from_cap(self.sdmf_old_cap)
3784         d = n.download_best_version()
3785         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3786         return d
3787
3788 class DifferentEncoding(unittest.TestCase):
3789     def setUp(self):
3790         self._storage = s = FakeStorage()
3791         self.nodemaker = make_nodemaker(s)
3792
3793     def test_filenode(self):
3794         # create a file with 3-of-20, then modify it with a client configured
3795         # to do 3-of-10. #1510 tracks a failure here
3796         self.nodemaker.default_encoding_parameters["n"] = 20
3797         d = self.nodemaker.create_mutable_file("old contents")
3798         def _created(n):
3799             filecap = n.get_cap().to_string()
3800             del n # we want a new object, not the cached one
3801             self.nodemaker.default_encoding_parameters["n"] = 10
3802             n2 = self.nodemaker.create_from_cap(filecap)
3803             return n2
3804         d.addCallback(_created)
3805         def modifier(old_contents, servermap, first_time):
3806             return "new contents"
3807         d.addCallback(lambda n: n.modify(modifier))
3808         return d