import os, re, base64
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.internet import defer, reactor
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32, consumer, fileutil, mathutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
     ssk_pubkey_fingerprint_hash
from allmydata.util.consumer import MemoryConsumer
from allmydata.util.deferredutil import gatherResults
from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
     NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
from allmydata.monitor import Monitor
from allmydata.test.common import ShouldFailMixin
from allmydata.test.no_network import GridTestMixin
from foolscap.api import eventually, fireEventually
from foolscap.logging import log
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.common import storage_index_to_dir
from allmydata.scripts import debug

from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
from allmydata.mutable.common import ResponseCache, \
     MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
     NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
     NotEnoughServersError, CorruptShareError
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.publish import Publish, MutableFileHandle, \
                                      MutableData, \
                                      DEFAULT_MAX_SEGMENT_SIZE
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
from allmydata.mutable.repairer import MustForceRepairError

import allmydata.test.common_util as testutil
from allmydata.test.common import TEST_RSA_KEY_SIZE
from allmydata.test.test_download import PausingConsumer, \
     PausingAndStoppingConsumer, StoppingConsumer, \
     ImmediatelyStoppingConsumer

# fire the result only after several eventual-send turns, to flush out code
# that incorrectly assumes synchronous responses
def eventuaaaaaly(res=None):
    d = fireEventually(res)
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    return d


# this "FakeStorage" exists to put the share data in RAM and avoid using real
# network connections, both to speed up the tests and to reduce the amount of
# non-mutable.py code being exercised.

class FakeStorage:
    # this class replaces the collection of storage servers, allowing the
    # tests to examine and manipulate the published shares. It also lets us
    # control the order in which read queries are answered, to exercise more
    # of the error-handling code in Retrieve.
    #
    # Note that we ignore the storage index: this FakeStorage instance can
    # only be used for a single storage index.

    def __init__(self):
        self._peers = {}
        # _sequence is used to cause the responses to occur in a specific
        # order. If it is in use, then we will defer queries instead of
        # answering them right away, accumulating the Deferreds in a dict. We
        # don't know exactly how many queries we'll get, so exactly one
        # second after the first query arrives, we will release them all (in
        # order).
        self._sequence = None
        self._pending = {}
        self._pending_timer = None

    def read(self, peerid, storage_index):
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return eventuaaaaaly(shares)
        d = defer.Deferred()
        if not self._pending:
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        if peerid not in self._pending:
            self._pending[peerid] = []
        self._pending[peerid].append( (d, shares) )
        return d

    def _fire_readers(self):
        self._pending_timer = None
        pending = self._pending
        self._pending = {}
        for peerid in self._sequence:
            if peerid in pending:
                for (d, shares) in pending.pop(peerid):
                    eventually(d.callback, shares)
        for peerid in pending:
            for (d, shares) in pending[peerid]:
                eventually(d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        f = StringIO()
        f.write(shares.get(shnum, ""))
        f.seek(offset)
        f.write(data)
        shares[shnum] = f.getvalue()


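# Hedged usage sketch (illustrative, not used verbatim by the tests below):
# to force FakeStorage to answer read queries in a fixed order, assign a
# list of peerids to _sequence before the reads arrive; queries are then
# parked in _pending and released in that order one second after the first
# query shows up.
#
#   s = FakeStorage()
#   s._sequence = sorted(s._peers.keys())   # answer in sorted-peerid order
#   d = s.read(peerid, storage_index)       # now deferred until release

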
class FakeStorageServer:
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0

    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        # fire-and-forget variant: the caller gets no Deferred back
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        pass

    def slot_readv(self, storage_index, shnums, readv):
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to the caller.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)


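# Sketch of how the retrieval code is expected to drive this fake (the
# method name and read-vector shape mirror slot_readv() above; the 2000-byte
# read matches the default mapupdate readsize mentioned in the tests below):
#
#   server = FakeStorageServer(peerid, storage)
#   d = server.callRemote("slot_readv", storage_index, [], [(0, 2000)])
#   # ...fires with {shnum: ["<first 2000 bytes of the share>"]} per share

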
def flip_bit(original, byte_offset):
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x01) +
            original[byte_offset+1:])

def add_two(original, byte_offset):
    # It isn't enough to simply flip the low bit of the version number,
    # because 1 is a valid version number. So we XOR with 0x02, which for
    # the version bytes we care about (0 and 1) is the same as adding two.
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x02) +
            original[byte_offset+1:])

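# For example, flip_bit("\x00abc", 0) yields "\x01abc", while add_two turns
# an SDMF verbyte (0) into 2 and an MDMF verbyte (1) into 3, neither of
# which is a valid version number.
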
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum, shares):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum, shares)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl

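# Hedged usage sketch: corrupt() is written to be chained onto a Deferred,
# passing `res` through untouched. The offset may be a named share section
# (a key of the offsets dict `o` above), a raw byte offset, or 0 for the
# verbyte:
#
#   d.addCallback(corrupt, self._storage, "share_data", [0])
#   # flips one bit inside the share-data section of share 0 on every peer
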
def make_storagebroker(s=None, num_peers=10):
    if not s:
        s = FakeStorage()
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
               "permutation-seed-base32": base32.b2a(peerid) }
        storage_broker.test_add_rref(peerid, fss, ann)
    return storage_broker

def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    if keysize:
        keygen.set_default_keysize(keysize)
    nodemaker = NodeMaker(storage_broker, sh, None,
                          None, None,
                          {"k": 3, "n": 10}, SDMF_VERSION, keygen)
    return nodemaker

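# Minimal sketch of the fixture these helpers give each test class
# (assumption: mirrors the setUp() methods below):
#
#   s = FakeStorage()
#   nodemaker = make_nodemaker(s)        # ten fake servers, k=3, n=10, SDMF
#   d = nodemaker.create_mutable_file()  # publishes shares into s._peers
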
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    # this limit used to live in Publish, but we removed it. Some of these
    # tests check that the new code correctly allows files larger than the
    # old limit.
    OLD_MAX_SEGMENT_SIZE = 3500000

    def setUp(self):
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_create(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d


    def test_create_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d

    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            d.addCallback(lambda ignored, v=v:
                self.nodemaker.create_mutable_file(version=v))
            def _created(n):
                self.failUnless(isinstance(n, MutableFileNode))
                self._node = n
                return n
            d.addCallback(_created)
            d.addCallback(lambda n:
                n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                self._node.download_best_version())
            d.addCallback(lambda contents:
                self.failUnlessEqual(contents, "Contents" * 50000))
        return d

    def test_max_shares(self):
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys())
                              for x in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
            self.failUnlessEqual("contents" * 50000, contents))
        return d

    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys())
                              for x in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        d.addCallback(lambda contents:
            self.failUnlessEqual(contents, "contents" * 50000))
        return d

    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            cap = n.get_cap()
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))

            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)
        return d


    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)

            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
        return d


    def test_serialize(self):
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        return d

    def test_upload_and_download(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d


    def test_upload_and_download_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 kB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9 MB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d


    def test_retrieve_producer_mdmf(self):
        # We should make sure that the retriever is able to pause and stop
        # correctly.
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=MDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "MDMF", data)
        return d

    # note: SDMF has only one big segment, so we can't use the usual
    # after-the-first-write() trick to pause or stop the download.
    # Disabled until we find a better approach.
    def OFF_test_retrieve_producer_sdmf(self):
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=SDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "SDMF", data)
        return d

    def _test_retrieve_producer(self, version, kind, data):
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        d = version.read(c)
        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))

        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_pause_stop",
                                      "our Consumer called stopProducing()",
                                      version.read, c2))

        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop",
                                      "our Consumer called stopProducing()",
                                      version.read, c3))

        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop_imm",
                                      "our Consumer called stopProducing()",
                                      version.read, c4))

        def _then(ign):
            c5 = MemoryConsumer()
            d1 = version.read(c5)
            c5.producer.stopProducing()
            return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                                   "our Consumer called stopProducing()",
                                   lambda: d1)
        d.addCallback(_then)
        return d

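    # Sketch of the consumer protocol exercised above (assumption: matches
    # the consumers imported from test_download): version.read(consumer)
    # registers a producer with the consumer and streams data into
    # consumer.write(); a consumer that calls producer.stopProducing()
    # makes the read errback with DownloadStopped, while pauseProducing()
    # and resumeProducing() merely throttle delivery.
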
    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.uri = node.get_uri()
            # also confirm that the cap has no extension fields
            pieces = self.uri.split(":")
            self.failUnlessEqual(len(pieces), 4)

            return node.overwrite(MutableData("contents1" * 100000))
        def _then(ignored):
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_then)
        d.addCallback(_downloaded)
        return d


    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF.
        upload = MutableData("MDMF" * 100000) # about 400 kB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
        return d


    def test_create_with_initial_contents(self):
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            return d
        d.addCallback(_created)
        return d


    def test_create_mdmf_with_initial_contents(self):
        initial_contents = "foobarbaz" * 131072 # about 1.1 MiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents +
                                           "foobarbaz"))
            return d
        d.addCallback(_created)
        return d


    def test_response_cache_memory_leak(self):
        d = self.nodemaker.create_mutable_file("contents")
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the
                # second download; in fact the cache contents should be
                # identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
                return d2
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
            return d
        d.addCallback(_created)
        return d

    def test_create_with_initial_contents_function(self):
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
        def _created(n):
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
        return d


    def test_create_mdmf_with_initial_contents_function(self):
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
            n.download_best_version())
        d.addCallback(lambda data2:
            self.failUnlessEqual(data2, data))
        return d


    def test_create_with_too_large_contents(self):
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
        def _created(n):
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
            return d
        d.addCallback(_created)
        return d

    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        return d

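    # (verinfo[0] is the version's sequence number: each successful publish
    # of a new version bumps it by one, which is what the seqnum checks in
    # test_modify below rely on.)
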
    def test_modify(self):
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d


    def test_modify_backoffer(self):
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            return f
        def _backoff_pauser(node, f):
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d

    def test_upload_and_download_full_size_keys(self):
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d


    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.n = n
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
            self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
            self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        return d


class PublishMixin:
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS should be big enough to span multiple segments.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_empty_sdmf(self):
        self.CONTENTS = ""
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, keysize=None)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable,
                                                version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_multiple(self, version=0):
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d


    def _copy_shares(self, ignored, index):
        shares = self._storage._peers
        # we need a deep copy
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]

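    # Hedged example (indices refer to self._copied_shares as filled in by
    # publish_multiple above): to roll shares 0-4 back to "Contents 1" while
    # leaving shares 5-9 at the current version:
    #
    #   self._set_versions({0: 1, 1: 1, 2: 1, 3: 1, 4: 1})
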
class Servermap(unittest.TestCase, PublishMixin):
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

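    # Sketch of a typical probe built from these helpers (MODE_CHECK asks
    # every server, so it should find all ten shares):
    #
    #   d = self.make_servermap(mode=MODE_CHECK)
    #   d.addCallback(lambda sm: sm.best_recoverable_version())
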
    def failUnlessOneRecoverable(self, sm, num_shares):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, servers = sm.make_sharemap().items()[0]
        server = list(servers)[0]
        self.failUnlessEqual(sm.version_on_server(server, shnum), best)
        self.failUnlessEqual(sm.version_on_server(server, 666), None)
        return sm

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5kB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares in it any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, server, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (server, shnum) )
                    sm.mark_bad_share(server, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (server, shnum) in self._corrupted:
                server_shares = sm.debug_shares_on_server(server)
                self.failIf(shnum in server_shares,
                            "%d was in %s" % (shnum, server_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

    def failUnlessNoneRecoverable(self, sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

1186     def failUnlessNotQuiteEnough(self, sm):
1187         self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1188         self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
1189         best = sm.best_recoverable_version()
1190         self.failUnlessEqual(best, None)
1191         self.failUnlessEqual(len(sm.shares_available()), 1)
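        # shares_available() maps each verinfo to a (num_distinct_shares, k, N)
        # tuple; the two remaining shares of this 3-of-10 encoding show up
        # as (2,3,10)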
1192         self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
1193         return sm
1194
1195     def test_not_quite_enough_shares(self):
1196         s = self._storage
1197         ms = self.make_servermap
1198         num_shares = len(s._peers)
1199         for peerid in s._peers:
1200             s._peers[peerid] = {}
1201             num_shares -= 1
1202             if num_shares == 2:
1203                 break
1204         # now there ought to be only two shares left
1205         assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
1206
1207         d = defer.succeed(None)
1208
1209         d.addCallback(lambda res: ms(mode=MODE_CHECK))
1210         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1211         d.addCallback(lambda sm:
1212                       self.failUnlessEqual(len(sm.make_sharemap()), 2))
1213         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1214         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1215         d.addCallback(lambda res: ms(mode=MODE_WRITE))
1216         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1217         d.addCallback(lambda res: ms(mode=MODE_READ))
1218         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1219
1220         return d
1221
1222
1223     def test_servermapupdater_finds_mdmf_files(self):
1224         # setUp already published an MDMF file for us. We just need to
1225         # make sure that when we run the ServermapUpdater, the file is
1226         # reported to have one recoverable version.
1227         d = defer.succeed(None)
1228         d.addCallback(lambda ignored:
1229             self.publish_mdmf())
1230         d.addCallback(lambda ignored:
1231             self.make_servermap(mode=MODE_CHECK))
1232         # Calling make_servermap also updates the servermap in the mode
1233         # that we specify, so we just need to see what it says.
1234         def _check_servermap(sm):
1235             self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1236         d.addCallback(_check_servermap)
1237         return d
1238
1239
1240     def test_fetch_update(self):
1241         d = defer.succeed(None)
1242         d.addCallback(lambda ignored:
1243             self.publish_mdmf())
1244         d.addCallback(lambda ignored:
1245             self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1246         def _check_servermap(sm):
1247             # 10 shares
1248             self.failUnlessEqual(len(sm.update_data), 10)
1249             # one version
1250             for data in sm.update_data.itervalues():
1251                 self.failUnlessEqual(len(data), 1)
1252         d.addCallback(_check_servermap)
1253         return d
1254
1255
1256     def test_servermapupdater_finds_sdmf_files(self):
1257         d = defer.succeed(None)
1258         d.addCallback(lambda ignored:
1259             self.publish_sdmf())
1260         d.addCallback(lambda ignored:
1261             self.make_servermap(mode=MODE_CHECK))
1262         d.addCallback(lambda servermap:
1263             self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1264         return d
1265
1266
1267 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1268     def setUp(self):
1269         return self.publish_one()
1270
1271     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1272         if oldmap is None:
1273             oldmap = ServerMap()
1274         if sb is None:
1275             sb = self._storage_broker
1276         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1277         d = smu.update()
1278         return d
1279
1280     def abbrev_verinfo(self, verinfo):
1281         if verinfo is None:
1282             return None
1283         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1284          offsets_tuple) = verinfo
1285         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1286
1287     def abbrev_verinfo_dict(self, verinfo_d):
1288         output = {}
1289         for verinfo,value in verinfo_d.items():
1290             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1291              offsets_tuple) = verinfo
1292             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1293         return output
1294
1295     def dump_servermap(self, servermap):
1296         print "SERVERMAP", servermap
1297         print "RECOVERABLE", [self.abbrev_verinfo(v)
1298                               for v in servermap.recoverable_versions()]
1299         print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1300         print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1301
1302     def do_download(self, servermap, version=None):
1303         if version is None:
1304             version = servermap.best_recoverable_version()
1305         r = Retrieve(self._fn, self._storage_broker, servermap, version)
1306         c = consumer.MemoryConsumer()
1307         d = r.download(consumer=c)
1308         d.addCallback(lambda mc: "".join(mc.chunks))
1309         return d
1310
1311
1312     def test_basic(self):
1313         d = self.make_servermap()
1314         def _do_retrieve(servermap):
1315             self._smap = servermap
1316             #self.dump_servermap(servermap)
1317             self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1318             return self.do_download(servermap)
1319         d.addCallback(_do_retrieve)
1320         def _retrieved(new_contents):
1321             self.failUnlessEqual(new_contents, self.CONTENTS)
1322         d.addCallback(_retrieved)
1323         # we should be able to re-use the same servermap, both with and
1324         # without updating it.
1325         d.addCallback(lambda res: self.do_download(self._smap))
1326         d.addCallback(_retrieved)
1327         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1328         d.addCallback(lambda res: self.do_download(self._smap))
1329         d.addCallback(_retrieved)
1330         # clobbering the pubkey should make the servermap updater re-fetch it
1331         def _clobber_pubkey(res):
1332             self._fn._pubkey = None
1333         d.addCallback(_clobber_pubkey)
1334         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1335         d.addCallback(lambda res: self.do_download(self._smap))
1336         d.addCallback(_retrieved)
1337         return d
1338
1339     def test_all_shares_vanished(self):
1340         d = self.make_servermap()
1341         def _remove_shares(servermap):
1342             for shares in self._storage._peers.values():
1343                 shares.clear()
1344             d1 = self.shouldFail(NotEnoughSharesError,
1345                                  "test_all_shares_vanished",
1346                                  "ran out of servers",
1347                                  self.do_download, servermap)
1348             return d1
1349         d.addCallback(_remove_shares)
1350         return d
1351
1352     def test_no_servers(self):
1353         sb2 = make_storagebroker(num_peers=0)
1354         # if there are no servers, then a MODE_READ servermap should come
1355         # back empty
1356         d = self.make_servermap(sb=sb2)
1357         def _check_servermap(servermap):
1358             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1359             self.failIf(servermap.recoverable_versions())
1360             self.failIf(servermap.unrecoverable_versions())
1361             self.failIf(servermap.all_servers())
1362         d.addCallback(_check_servermap)
1363         return d
1364
1365     def test_no_servers_download(self):
1366         sb2 = make_storagebroker(num_peers=0)
1367         self._fn._storage_broker = sb2
1368         d = self.shouldFail(UnrecoverableFileError,
1369                             "test_no_servers_download",
1370                             "no recoverable versions",
1371                             self._fn.download_best_version)
1372         def _restore(res):
1373             # a failed download that occurs while we aren't connected to
1374             # anybody should not prevent a subsequent download from working.
1375             # This isn't quite the webapi-driven test that #463 wants, but it
1376             # should be close enough.
1377             self._fn._storage_broker = self._storage_broker
1378             return self._fn.download_best_version()
1379         def _retrieved(new_contents):
1380             self.failUnlessEqual(new_contents, self.CONTENTS)
1381         d.addCallback(_restore)
1382         d.addCallback(_retrieved)
1383         return d
1384
1385
1386     def _test_corrupt_all(self, offset, substring,
1387                           should_succeed=False,
1388                           corrupt_early=True,
1389                           failure_checker=None,
1390                           fetch_privkey=False):
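        # offset tells corrupt() which share field (or byte offset) to damage;
        # substring is expected to appear in the resulting problems/failure;
        # corrupt_early corrupts before the servermap update instead of after
        # it; failure_checker, if given, is run on the failure; fetch_privkey
        # is passed through to download_version().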
1391         d = defer.succeed(None)
1392         if corrupt_early:
1393             d.addCallback(corrupt, self._storage, offset)
1394         d.addCallback(lambda res: self.make_servermap())
1395         if not corrupt_early:
1396             d.addCallback(corrupt, self._storage, offset)
1397         def _do_retrieve(servermap):
1398             ver = servermap.best_recoverable_version()
1399             if ver is None and not should_succeed:
1400                 # no recoverable versions == not succeeding. The problem
1401                 # should be noted in the servermap's list of problems.
1402                 if substring:
1403                     allproblems = [str(f) for f in servermap.get_problems()]
1404                     self.failUnlessIn(substring, "".join(allproblems))
1405                 return servermap
1406             if should_succeed:
1407                 d1 = self._fn.download_version(servermap, ver,
1408                                                fetch_privkey)
1409                 d1.addCallback(lambda new_contents:
1410                                self.failUnlessEqual(new_contents, self.CONTENTS))
1411             else:
1412                 d1 = self.shouldFail(NotEnoughSharesError,
1413                                      "_corrupt_all(offset=%s)" % (offset,),
1414                                      substring,
1415                                      self._fn.download_version, servermap,
1416                                                                 ver,
1417                                                                 fetch_privkey)
1418             if failure_checker:
1419                 d1.addCallback(failure_checker)
1420             d1.addCallback(lambda res: servermap)
1421             return d1
1422         d.addCallback(_do_retrieve)
1423         return d
1424
1425     def test_corrupt_all_verbyte(self):
1426         # when the version byte is not 0 or 1, we hit an UnknownVersionError
1427         # in unpack_share().
1428         d = self._test_corrupt_all(0, "UnknownVersionError")
1429         def _check_servermap(servermap):
1430             # and the dump should mention the problems
1431             s = StringIO()
1432             dump = servermap.dump(s).getvalue()
1433             self.failUnless("30 PROBLEMS" in dump, dump)
1434         d.addCallback(_check_servermap)
1435         return d
1436
1437     def test_corrupt_all_seqnum(self):
1438         # a corrupt sequence number will trigger a bad signature
1439         return self._test_corrupt_all(1, "signature is invalid")
1440
1441     def test_corrupt_all_R(self):
1442         # a corrupt root hash will trigger a bad signature
1443         return self._test_corrupt_all(9, "signature is invalid")
1444
1445     def test_corrupt_all_IV(self):
1446         # a corrupt salt/IV will trigger a bad signature
1447         return self._test_corrupt_all(41, "signature is invalid")
1448
1449     def test_corrupt_all_k(self):
1450         # a corrupt 'k' will trigger a bad signature
1451         return self._test_corrupt_all(57, "signature is invalid")
1452
1453     def test_corrupt_all_N(self):
1454         # a corrupt 'N' will trigger a bad signature
1455         return self._test_corrupt_all(58, "signature is invalid")
1456
1457     def test_corrupt_all_segsize(self):
1458         # a corrupt segsize will trigger a bad signature
1459         return self._test_corrupt_all(59, "signature is invalid")
1460
1461     def test_corrupt_all_datalen(self):
1462         # a corrupt data length will trigger a bad signature
1463         return self._test_corrupt_all(67, "signature is invalid")
1464
1465     def test_corrupt_all_pubkey(self):
1466         # a corrupt pubkey won't match the URI's fingerprint. We need to
1467         # remove the pubkey from the filenode, or else it won't bother trying
1468         # to update it.
1469         self._fn._pubkey = None
1470         return self._test_corrupt_all("pubkey",
1471                                       "pubkey doesn't match fingerprint")
1472
1473     def test_corrupt_all_sig(self):
1474         # a corrupt signature is a bad one
1475         # the signature runs from about [543:799], depending upon the length
1476         # of the pubkey
1477         return self._test_corrupt_all("signature", "signature is invalid")
1478
1479     def test_corrupt_all_share_hash_chain_number(self):
1480         # a corrupt share hash chain entry will show up as a bad hash. If we
1481         # mangle the first byte, that will look like a bad hash number,
1482         # causing an IndexError
1483         return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1484
1485     def test_corrupt_all_share_hash_chain_hash(self):
1486         # a corrupt share hash chain entry will show up as a bad hash. If we
1487         # mangle a few bytes in, that will look like a bad hash.
1488         return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1489
1490     def test_corrupt_all_block_hash_tree(self):
1491         return self._test_corrupt_all("block_hash_tree",
1492                                       "block hash tree failure")
1493
1494     def test_corrupt_all_block(self):
1495         return self._test_corrupt_all("share_data", "block hash tree failure")
1496
1497     def test_corrupt_all_encprivkey(self):
1498         # a corrupted privkey won't even be noticed by the reader, only by a
1499         # writer.
1500         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1501
1502
1503     def test_corrupt_all_encprivkey_late(self):
1504         # this should work for the same reason as above, but we corrupt
1505         # after the servermap update to exercise the error handling
1506         # code.
1507         # We need to remove the privkey from the node, or the retrieve
1508         # process won't know to update it.
1509         self._fn._privkey = None
1510         return self._test_corrupt_all("enc_privkey",
1511                                       None, # this shouldn't fail
1512                                       should_succeed=True,
1513                                       corrupt_early=False,
1514                                       fetch_privkey=True)
1515
1516
1517     # disabled until retrieve tests checkstring on each blockfetch. I didn't
1518     # just use a .todo because the failing-but-ignored test emits about 30kB
1519     # of noise.
1520     def OFF_test_corrupt_all_seqnum_late(self):
1521         # corrupting the seqnum between mapupdate and retrieve should result
1522         # in NotEnoughSharesError, since each share will look invalid
1523         def _check(res):
1524             f = res[0]
1525             self.failUnless(f.check(NotEnoughSharesError))
1526             self.failUnless("uncoordinated write" in str(f))
1527         return self._test_corrupt_all(1, "ran out of servers",
1528                                       corrupt_early=False,
1529                                       failure_checker=_check)
1530
1531     def test_corrupt_all_block_hash_tree_late(self):
1532         def _check(res):
1533             f = res[0]
1534             self.failUnless(f.check(NotEnoughSharesError))
1535         return self._test_corrupt_all("block_hash_tree",
1536                                       "block hash tree failure",
1537                                       corrupt_early=False,
1538                                       failure_checker=_check)
1539
1540
1541     def test_corrupt_all_block_late(self):
1542         def _check(res):
1543             f = res[0]
1544             self.failUnless(f.check(NotEnoughSharesError))
1545         return self._test_corrupt_all("share_data", "block hash tree failure",
1546                                       corrupt_early=False,
1547                                       failure_checker=_check)
1548
1549
1550     def test_basic_pubkey_at_end(self):
1551         # we corrupt the pubkey in all but the last 'k' shares, allowing the
1552         # download to succeed but forcing a bunch of retries first. Note that
1553         # this is rather pessimistic: our Retrieve process will throw away
1554         # the whole share if the pubkey is bad, even though the rest of the
1555         # share might be good.
1556
1557         self._fn._pubkey = None
1558         k = self._fn.get_required_shares()
1559         N = self._fn.get_total_shares()
1560         d = defer.succeed(None)
1561         d.addCallback(corrupt, self._storage, "pubkey",
1562                       shnums_to_corrupt=range(0, N-k))
1563         d.addCallback(lambda res: self.make_servermap())
1564         def _do_retrieve(servermap):
1565             self.failUnless(servermap.get_problems())
1566             self.failUnless("pubkey doesn't match fingerprint"
1567                             in str(servermap.get_problems()[0]))
1568             ver = servermap.best_recoverable_version()
1569             r = Retrieve(self._fn, self._storage_broker, servermap, ver)
1570             c = consumer.MemoryConsumer()
1571             return r.download(c)
1572         d.addCallback(_do_retrieve)
1573         d.addCallback(lambda mc: "".join(mc.chunks))
1574         d.addCallback(lambda new_contents:
1575                       self.failUnlessEqual(new_contents, self.CONTENTS))
1576         return d
1577
1578
1579     def _test_corrupt_some(self, offset, mdmf=False):
1580         if mdmf:
1581             d = self.publish_mdmf()
1582         else:
1583             d = defer.succeed(None)
1584         d.addCallback(lambda ignored:
1585             corrupt(None, self._storage, offset, range(5)))
1586         d.addCallback(lambda ignored:
1587             self.make_servermap())
1588         def _do_retrieve(servermap):
1589             ver = servermap.best_recoverable_version()
1590             self.failUnless(ver)
1591             return self._fn.download_best_version()
1592         d.addCallback(_do_retrieve)
1593         d.addCallback(lambda new_contents:
1594             self.failUnlessEqual(new_contents, self.CONTENTS))
1595         return d
1596
1597
1598     def test_corrupt_some(self):
1599         # corrupt the data of the first five shares (so the servermap thinks
1600         # they're good but retrieve marks them as bad), so that the
1601         # MODE_READ set of 6 will be insufficient, forcing node.download to
1602         # retry with more servers.
1603         return self._test_corrupt_some("share_data")
1604
1605
1606     def test_download_fails(self):
1607         d = corrupt(None, self._storage, "signature")
1608         d.addCallback(lambda ignored:
1609             self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1610                             "no recoverable versions",
1611                             self._fn.download_best_version))
1612         return d
1613
1614
1615
1616     def test_corrupt_mdmf_block_hash_tree(self):
1617         d = self.publish_mdmf()
1618         d.addCallback(lambda ignored:
1619             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1620                                    "block hash tree failure",
1621                                    corrupt_early=True,
1622                                    should_succeed=False))
1623         return d
1624
1625
1626     def test_corrupt_mdmf_block_hash_tree_late(self):
1627         d = self.publish_mdmf()
1628         d.addCallback(lambda ignored:
1629             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1630                                    "block hash tree failure",
1631                                    corrupt_early=False,
1632                                    should_succeed=False))
1633         return d
1634
1635
1636     def test_corrupt_mdmf_share_data(self):
1637         d = self.publish_mdmf()
1638         d.addCallback(lambda ignored:
1639             # TODO: Find out what the block size is and corrupt a
1640             # specific block, rather than just guessing.
1641             self._test_corrupt_all(("share_data", 12 * 40),
1642                                     "block hash tree failure",
1643                                     corrupt_early=True,
1644                                     should_succeed=False))
1645         return d
1646
1647
1648     def test_corrupt_some_mdmf(self):
1649         return self._test_corrupt_some(("share_data", 12 * 40),
1650                                        mdmf=True)
1651
1652
1653 class CheckerMixin:
1654     def check_good(self, r, where):
1655         self.failUnless(r.is_healthy(), where)
1656         return r
1657
1658     def check_bad(self, r, where):
1659         self.failIf(r.is_healthy(), where)
1660         return r
1661
1662     def check_expected_failure(self, r, expected_exception, substring, where):
1663         for (peerid, storage_index, shnum, f) in r.get_share_problems():
1664             if f.check(expected_exception):
1665                 self.failUnless(substring in str(f),
1666                                 "%s: substring '%s' not in '%s'" %
1667                                 (where, substring, str(f)))
1668                 return
1669         self.fail("%s: didn't see expected exception %s in problems %s" %
1670                   (where, expected_exception, r.get_share_problems()))
1671
1672
1673 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1674     def setUp(self):
1675         return self.publish_one()
1676
1677
1678     def test_check_good(self):
1679         d = self._fn.check(Monitor())
1680         d.addCallback(self.check_good, "test_check_good")
1681         return d
1682
1683     def test_check_mdmf_good(self):
1684         d = self.publish_mdmf()
1685         d.addCallback(lambda ignored:
1686             self._fn.check(Monitor()))
1687         d.addCallback(self.check_good, "test_check_mdmf_good")
1688         return d
1689
1690     def test_check_no_shares(self):
1691         for shares in self._storage._peers.values():
1692             shares.clear()
1693         d = self._fn.check(Monitor())
1694         d.addCallback(self.check_bad, "test_check_no_shares")
1695         return d
1696
1697     def test_check_mdmf_no_shares(self):
1698         d = self.publish_mdmf()
1699         def _then(ignored):
1700             for shares in self._storage._peers.values():
1701                 shares.clear()
1702         d.addCallback(_then)
1703         d.addCallback(lambda ignored:
1704             self._fn.check(Monitor()))
1705         d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1706         return d
1707
1708     def test_check_not_enough_shares(self):
1709         for shares in self._storage._peers.values():
1710             for shnum in shares.keys():
1711                 if shnum > 0:
1712                     del shares[shnum]
1713         d = self._fn.check(Monitor())
1714         d.addCallback(self.check_bad, "test_check_not_enough_shares")
1715         return d
1716
1717     def test_check_mdmf_not_enough_shares(self):
1718         d = self.publish_mdmf()
1719         def _then(ignored):
1720             for shares in self._storage._peers.values():
1721                 for shnum in shares.keys():
1722                     if shnum > 0:
1723                         del shares[shnum]
1724         d.addCallback(_then)
1725         d.addCallback(lambda ignored:
1726             self._fn.check(Monitor()))
1727         d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
1728         return d
1729
1730
1731     def test_check_all_bad_sig(self):
1732         d = corrupt(None, self._storage, 1) # bad sig
1733         d.addCallback(lambda ignored:
1734             self._fn.check(Monitor()))
1735         d.addCallback(self.check_bad, "test_check_all_bad_sig")
1736         return d
1737
1738     def test_check_mdmf_all_bad_sig(self):
1739         d = self.publish_mdmf()
1740         d.addCallback(lambda ignored:
1741             corrupt(None, self._storage, 1))
1742         d.addCallback(lambda ignored:
1743             self._fn.check(Monitor()))
1744         d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1745         return d
1746
1747     def test_verify_mdmf_all_bad_sharedata(self):
1748         d = self.publish_mdmf()
1749         # On 8 of the shares, corrupt the beginning of the share data.
1750         # The signature check during the servermap update won't catch this.
1751         d.addCallback(lambda ignored:
1752             corrupt(None, self._storage, "share_data", range(8)))
1753         # On 2 of the shares, corrupt the end of the share data.
1754         # The signature check during the servermap update won't catch
1755         # this either, and the retrieval process will have to process
1756         # all of the segments before it notices.
1757         d.addCallback(lambda ignored:
1758             # the block hash tree comes right after the share data, so if we
1759             # corrupt a little before the block hash tree, we'll corrupt in the
1760             # last block of each share.
1761             corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
1762         d.addCallback(lambda ignored:
1763             self._fn.check(Monitor(), verify=True))
1764         # The verifier should flag the file as unhealthy, and should
1765         # list all 10 shares as bad.
1766         d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
1767         def _check_num_bad(r):
1768             self.failIf(r.is_recoverable())
1769             smap = r.get_servermap()
1770             self.failUnlessEqual(len(smap.get_bad_shares()), 10)
1771         d.addCallback(_check_num_bad)
1772         return d
1773
1774     def test_check_all_bad_blocks(self):
1775         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1776         # the Checker won't notice this.. it doesn't look at actual data
1777         d.addCallback(lambda ignored:
1778             self._fn.check(Monitor()))
1779         d.addCallback(self.check_good, "test_check_all_bad_blocks")
1780         return d
1781
1782
1783     def test_check_mdmf_all_bad_blocks(self):
1784         d = self.publish_mdmf()
1785         d.addCallback(lambda ignored:
1786             corrupt(None, self._storage, "share_data"))
1787         d.addCallback(lambda ignored:
1788             self._fn.check(Monitor()))
1789         d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1790         return d
1791
1792     def test_verify_good(self):
1793         d = self._fn.check(Monitor(), verify=True)
1794         d.addCallback(self.check_good, "test_verify_good")
1795         return d
1796
1797     def test_verify_all_bad_sig(self):
1798         d = corrupt(None, self._storage, 1) # bad sig
1799         d.addCallback(lambda ignored:
1800             self._fn.check(Monitor(), verify=True))
1801         d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1802         return d
1803
1804     def test_verify_one_bad_sig(self):
1805         d = corrupt(None, self._storage, 1, [9]) # bad sig
1806         d.addCallback(lambda ignored:
1807             self._fn.check(Monitor(), verify=True))
1808         d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1809         return d
1810
1811     def test_verify_one_bad_block(self):
1812         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1813         # the Verifier *will* notice this, since it examines every byte
1814         d.addCallback(lambda ignored:
1815             self._fn.check(Monitor(), verify=True))
1816         d.addCallback(self.check_bad, "test_verify_one_bad_block")
1817         d.addCallback(self.check_expected_failure,
1818                       CorruptShareError, "block hash tree failure",
1819                       "test_verify_one_bad_block")
1820         return d
1821
1822     def test_verify_one_bad_sharehash(self):
1823         d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1824         d.addCallback(lambda ignored:
1825             self._fn.check(Monitor(), verify=True))
1826         d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1827         d.addCallback(self.check_expected_failure,
1828                       CorruptShareError, "corrupt hashes",
1829                       "test_verify_one_bad_sharehash")
1830         return d
1831
1832     def test_verify_one_bad_encprivkey(self):
1833         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1834         d.addCallback(lambda ignored:
1835             self._fn.check(Monitor(), verify=True))
1836         d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1837         d.addCallback(self.check_expected_failure,
1838                       CorruptShareError, "invalid privkey",
1839                       "test_verify_one_bad_encprivkey")
1840         return d
1841
1842     def test_verify_one_bad_encprivkey_uncheckable(self):
1843         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1844         readonly_fn = self._fn.get_readonly()
1845         # a read-only node has no way to validate the privkey
1846         d.addCallback(lambda ignored:
1847             readonly_fn.check(Monitor(), verify=True))
1848         d.addCallback(self.check_good,
1849                       "test_verify_one_bad_encprivkey_uncheckable")
1850         return d
1851
1852
1853     def test_verify_mdmf_good(self):
1854         d = self.publish_mdmf()
1855         d.addCallback(lambda ignored:
1856             self._fn.check(Monitor(), verify=True))
1857         d.addCallback(self.check_good, "test_verify_mdmf_good")
1858         return d
1859
1860
1861     def test_verify_mdmf_one_bad_block(self):
1862         d = self.publish_mdmf()
1863         d.addCallback(lambda ignored:
1864             corrupt(None, self._storage, "share_data", [1]))
1865         d.addCallback(lambda ignored:
1866             self._fn.check(Monitor(), verify=True))
1867         # We should find one bad block here
1868         d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1869         d.addCallback(self.check_expected_failure,
1870                       CorruptShareError, "block hash tree failure",
1871                       "test_verify_mdmf_one_bad_block")
1872         return d
1873
1874
1875     def test_verify_mdmf_bad_encprivkey(self):
1876         d = self.publish_mdmf()
1877         d.addCallback(lambda ignored:
1878             corrupt(None, self._storage, "enc_privkey", [0]))
1879         d.addCallback(lambda ignored:
1880             self._fn.check(Monitor(), verify=True))
1881         d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1882         d.addCallback(self.check_expected_failure,
1883                       CorruptShareError, "privkey",
1884                       "test_verify_mdmf_bad_encprivkey")
1885         return d
1886
1887
1888     def test_verify_mdmf_bad_sig(self):
1889         d = self.publish_mdmf()
1890         d.addCallback(lambda ignored:
1891             corrupt(None, self._storage, 1, [1]))
1892         d.addCallback(lambda ignored:
1893             self._fn.check(Monitor(), verify=True))
1894         d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
1895         return d
1896
1897
1898     def test_verify_mdmf_bad_encprivkey_uncheckable(self):
1899         d = self.publish_mdmf()
1900         d.addCallback(lambda ignored:
1901             corrupt(None, self._storage, "enc_privkey", [1]))
1902         d.addCallback(lambda ignored:
1903             self._fn.get_readonly())
1904         d.addCallback(lambda fn:
1905             fn.check(Monitor(), verify=True))
1906         d.addCallback(self.check_good,
1907                       "test_verify_mdmf_bad_encprivkey_uncheckable")
1908         return d
1909
1910
1911 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1912
1913     def get_shares(self, s):
1914         all_shares = {} # maps (peerid, shnum) to share data
1915         for peerid in s._peers:
1916             shares = s._peers[peerid]
1917             for shnum in shares:
1918                 data = shares[shnum]
1919                 all_shares[ (peerid, shnum) ] = data
1920         return all_shares
1921
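    # snapshot the current share contents into self.old_shares; tests compare
    # successive snapshots (see failIfSharesChanged below)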
1922     def copy_shares(self, ignored=None):
1923         self.old_shares.append(self.get_shares(self._storage))
1924
1925     def test_repair_nop(self):
1926         self.old_shares = []
1927         d = self.publish_one()
1928         d.addCallback(self.copy_shares)
1929         d.addCallback(lambda res: self._fn.check(Monitor()))
1930         d.addCallback(lambda check_results: self._fn.repair(check_results))
1931         def _check_results(rres):
1932             self.failUnless(IRepairResults.providedBy(rres))
1933             self.failUnless(rres.get_successful())
1934             # TODO: examine results
1935
1936             self.copy_shares()
1937
1938             initial_shares = self.old_shares[0]
1939             new_shares = self.old_shares[1]
1940             # TODO: this really shouldn't change anything. When we implement
1941             # a "minimal-bandwidth" repairer, change this test to assert:
1942             #self.failUnlessEqual(new_shares, initial_shares)
1943
1944             # all shares should be in the same place as before
1945             self.failUnlessEqual(set(initial_shares.keys()),
1946                                  set(new_shares.keys()))
1947             # but they should all be at a newer seqnum. The IV will be
1948             # different, so the roothash will be too.
1949             for key in initial_shares:
1950                 (version0,
1951                  seqnum0,
1952                  root_hash0,
1953                  IV0,
1954                  k0, N0, segsize0, datalen0,
1955                  o0) = unpack_header(initial_shares[key])
1956                 (version1,
1957                  seqnum1,
1958                  root_hash1,
1959                  IV1,
1960                  k1, N1, segsize1, datalen1,
1961                  o1) = unpack_header(new_shares[key])
1962                 self.failUnlessEqual(version0, version1)
1963                 self.failUnlessEqual(seqnum0+1, seqnum1)
1964                 self.failUnlessEqual(k0, k1)
1965                 self.failUnlessEqual(N0, N1)
1966                 self.failUnlessEqual(segsize0, segsize1)
1967                 self.failUnlessEqual(datalen0, datalen1)
1968         d.addCallback(_check_results)
1969         return d
1970
1971     def failIfSharesChanged(self, ignored=None):
1972         old_shares = self.old_shares[-2]
1973         current_shares = self.old_shares[-1]
1974         self.failUnlessEqual(old_shares, current_shares)
1975
1976
1977     def _test_whether_repairable(self, publisher, nshares, expected_result):
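        # publish a file, delete every share numbered >= nshares, then check
        # and repair: is_recoverable() and the repair outcome should both
        # equal expected_result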
1978         d = publisher()
1979         def _delete_some_shares(ign):
1980             shares = self._storage._peers
1981             for peerid in shares:
1982                 for shnum in list(shares[peerid]):
1983                     if shnum >= nshares:
1984                         del shares[peerid][shnum]
1985         d.addCallback(_delete_some_shares)
1986         d.addCallback(lambda ign: self._fn.check(Monitor()))
1987         def _check(cr):
1988             self.failIf(cr.is_healthy())
1989             self.failUnlessEqual(cr.is_recoverable(), expected_result)
1990             return cr
1991         d.addCallback(_check)
1992         d.addCallback(lambda check_results: self._fn.repair(check_results))
1993         d.addCallback(lambda crr: self.failUnlessEqual(crr.get_successful(), expected_result))
1994         return d
1995
1996     def test_unrepairable_0shares(self):
1997         return self._test_whether_repairable(self.publish_one, 0, False)
1998
1999     def test_mdmf_unrepairable_0shares(self):
2000         return self._test_whether_repairable(self.publish_mdmf, 0, False)
2001
2002     def test_unrepairable_1share(self):
2003         return self._test_whether_repairable(self.publish_one, 1, False)
2004
2005     def test_mdmf_unrepairable_1share(self):
2006         return self._test_whether_repairable(self.publish_mdmf, 1, False)
2007
2008     def test_repairable_5shares(self):
2009         return self._test_whether_repairable(self.publish_one, 5, True)
2010
2011     def test_mdmf_repairable_5shares(self):
2012         return self._test_whether_repairable(self.publish_mdmf, 5, True)
2013
2014     def _test_whether_checkandrepairable(self, publisher, nshares, expected_result):
2015         """
2016         Like the _test_whether_repairable tests, but invoking check_and_repair
2017         instead of invoking check and then invoking repair.
2018         """
2019         d = publisher()
2020         def _delete_some_shares(ign):
2021             shares = self._storage._peers
2022             for peerid in shares:
2023                 for shnum in list(shares[peerid]):
2024                     if shnum >= nshares:
2025                         del shares[peerid][shnum]
2026         d.addCallback(_delete_some_shares)
2027         d.addCallback(lambda ign: self._fn.check_and_repair(Monitor()))
2028         d.addCallback(lambda crr: self.failUnlessEqual(crr.get_repair_successful(), expected_result))
2029         return d
2030
2031     def test_unrepairable_0shares_checkandrepair(self):
2032         return self._test_whether_checkandrepairable(self.publish_one, 0, False)
2033
2034     def test_mdmf_unrepairable_0shares_checkandrepair(self):
2035         return self._test_whether_checkandrepairable(self.publish_mdmf, 0, False)
2036
2037     def test_unrepairable_1share_checkandrepair(self):
2038         return self._test_whether_checkandrepairable(self.publish_one, 1, False)
2039
2040     def test_mdmf_unrepairable_1share_checkandrepair(self):
2041         return self._test_whether_checkandrepairable(self.publish_mdmf, 1, False)
2042
2043     def test_repairable_5shares_checkandrepair(self):
2044         return self._test_whether_checkandrepairable(self.publish_one, 5, True)
2045
2046     def test_mdmf_repairable_5shares_checkandrepair(self):
2047         return self._test_whether_checkandrepairable(self.publish_mdmf, 5, True)
2048
2049
2050     def test_merge(self):
2051         self.old_shares = []
2052         d = self.publish_multiple()
2053         # repair will refuse to merge multiple highest seqnums unless you
2054         # pass force=True
2055         d.addCallback(lambda res:
2056                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2057                                           1:4,3:4,5:4,7:4,9:4}))
2058         d.addCallback(self.copy_shares)
2059         d.addCallback(lambda res: self._fn.check(Monitor()))
2060         def _try_repair(check_results):
2061             ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2062             d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2063                                  self._fn.repair, check_results)
2064             d2.addCallback(self.copy_shares)
2065             d2.addCallback(self.failIfSharesChanged)
2066             d2.addCallback(lambda res: check_results)
2067             return d2
2068         d.addCallback(_try_repair)
2069         d.addCallback(lambda check_results:
2070                       self._fn.repair(check_results, force=True))
2071         # this should give us 10 shares of the highest roothash
2072         def _check_repair_results(rres):
2073             self.failUnless(rres.get_successful())
2074             pass # TODO
2075         d.addCallback(_check_repair_results)
2076         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2077         def _check_smap(smap):
2078             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2079             self.failIf(smap.unrecoverable_versions())
2080             # now, which should have won?
2081             roothash_s4a = self.get_roothash_for(3)
2082             roothash_s4b = self.get_roothash_for(4)
2083             if roothash_s4b > roothash_s4a:
2084                 expected_contents = self.CONTENTS[4]
2085             else:
2086                 expected_contents = self.CONTENTS[3]
2087             new_versionid = smap.best_recoverable_version()
2088             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2089             d2 = self._fn.download_version(smap, new_versionid)
2090             d2.addCallback(self.failUnlessEqual, expected_contents)
2091             return d2
2092         d.addCallback(_check_smap)
2093         return d
2094
2095     def test_non_merge(self):
2096         self.old_shares = []
2097         d = self.publish_multiple()
2098         # repair should not refuse a repair that doesn't need to merge. In
2099         # this case, we combine v2 with v3. The repair should ignore v2 and
2100         # copy v3 into a new v5.
2101         d.addCallback(lambda res:
2102                       self._set_versions({0:2,2:2,4:2,6:2,8:2,
2103                                           1:3,3:3,5:3,7:3,9:3}))
2104         d.addCallback(lambda res: self._fn.check(Monitor()))
2105         d.addCallback(lambda check_results: self._fn.repair(check_results))
2106         # this should give us 10 shares of v3
2107         def _check_repair_results(rres):
2108             self.failUnless(rres.get_successful())
2109             pass # TODO
2110         d.addCallback(_check_repair_results)
2111         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2112         def _check_smap(smap):
2113             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2114             self.failIf(smap.unrecoverable_versions())
2115             # now, which should have won?
2116             expected_contents = self.CONTENTS[3]
2117             new_versionid = smap.best_recoverable_version()
2118             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2119             d2 = self._fn.download_version(smap, new_versionid)
2120             d2.addCallback(self.failUnlessEqual, expected_contents)
2121             return d2
2122         d.addCallback(_check_smap)
2123         return d
2124
2125     def get_roothash_for(self, index):
2126         # return the roothash for the first share we see in the saved set
2127         shares = self._copied_shares[index]
2128         for peerid in shares:
2129             for shnum in shares[peerid]:
2130                 share = shares[peerid][shnum]
2131                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2132                           unpack_header(share)
2133                 return root_hash
2134
2135     def test_check_and_repair_readcap(self):
2136         # we can't currently repair from a mutable readcap: #625
2137         self.old_shares = []
2138         d = self.publish_one()
2139         d.addCallback(self.copy_shares)
2140         def _get_readcap(res):
2141             self._fn3 = self._fn.get_readonly()
2142             # also delete some shares
2143             for peerid,shares in self._storage._peers.items():
2144                 shares.pop(0, None)
2145         d.addCallback(_get_readcap)
2146         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2147         def _check_results(crr):
2148             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2149             # we should detect the unhealthy, but skip over mutable-readcap
2150             # repairs until #625 is fixed
2151             self.failIf(crr.get_pre_repair_results().is_healthy())
2152             self.failIf(crr.get_repair_attempted())
2153             self.failIf(crr.get_post_repair_results().is_healthy())
2154         d.addCallback(_check_results)
2155         return d
2156
2157     def test_repair_empty(self):
2158         # bug 1689: delete one share of an empty mutable file, then repair.
2159         # In the buggy version, the check that precedes the retrieve+publish
2160         # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
2161         # privkey that repair needs.
2162         d = self.publish_empty_sdmf()
2163         def _delete_one_share(ign):
2164             shares = self._storage._peers
2165             for peerid in shares:
2166                 for shnum in list(shares[peerid]):
2167                     if shnum == 0:
2168                         del shares[peerid][shnum]
2169         d.addCallback(_delete_one_share)
2170         d.addCallback(lambda ign: self._fn2.check(Monitor()))
2171         d.addCallback(lambda check_results: self._fn2.repair(check_results))
2172         def _check(crr):
2173             self.failUnlessEqual(crr.get_successful(), True)
2174         d.addCallback(_check)
2175         return d
2176
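# a dict that silently discards all writes. Installing one as the nodemaker's
# node cache (see MultipleEncodings._encode) lets a test create several
# distinct filenodes for the same cap.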
2177 class DevNullDictionary(dict):
2178     def __setitem__(self, key, value):
2179         return
2180
2181 class MultipleEncodings(unittest.TestCase):
2182     def setUp(self):
2183         self.CONTENTS = "New contents go here"
2184         self.uploadable = MutableData(self.CONTENTS)
2185         self._storage = FakeStorage()
2186         self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2187         self._storage_broker = self._nodemaker.storage_broker
2188         d = self._nodemaker.create_mutable_file(self.uploadable)
2189         def _created(node):
2190             self._fn = node
2191         d.addCallback(_created)
2192         return d
2193
2194     def _encode(self, k, n, data, version=SDMF_VERSION):
2195         # encode 'data' into a peerid->shares dict.
2196
2197         fn = self._fn
2198         # disable the nodecache, since for these tests we explicitly need
2199         # multiple nodes pointing at the same file
2200         self._nodemaker._node_cache = DevNullDictionary()
2201         fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2202         # then we copy over other fields that are normally fetched from the
2203         # existing shares
2204         fn2._pubkey = fn._pubkey
2205         fn2._privkey = fn._privkey
2206         fn2._encprivkey = fn._encprivkey
2207         # and set the encoding parameters to something completely different
2208         fn2._required_shares = k
2209         fn2._total_shares = n
2210
2211         s = self._storage
2212         s._peers = {} # clear existing storage
2213         p2 = Publish(fn2, self._storage_broker, None)
2214         uploadable = MutableData(data)
2215         d = p2.publish(uploadable)
2216         def _published(res):
2217             shares = s._peers
2218             s._peers = {}
2219             return shares
2220         d.addCallback(_published)
2221         return d
2222
2223     def make_servermap(self, mode=MODE_READ, oldmap=None):
2224         if oldmap is None:
2225             oldmap = ServerMap()
2226         smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
2227                                oldmap, mode)
2228         d = smu.update()
2229         return d
2230
2231     def test_multiple_encodings(self):
2232         # we encode the same file in two different ways (3-of-10 and 4-of-9),
2233         # then mix up the shares, to make sure that download survives seeing
2234         # a variety of encodings. This is actually kind of tricky to set up.
2235
2236         contents1 = "Contents for encoding 1 (3-of-10) go here"
2237         contents2 = "Contents for encoding 2 (4-of-9) go here"
2238         contents3 = "Contents for encoding 3 (4-of-7) go here"
2239
2240         # we make a retrieval object that doesn't know what encoding
2241         # parameters to use
2242         fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2243
2244         # now we upload a file through fn1, and grab its shares
2245         d = self._encode(3, 10, contents1)
2246         def _encoded_1(shares):
2247             self._shares1 = shares
2248         d.addCallback(_encoded_1)
2249         d.addCallback(lambda res: self._encode(4, 9, contents2))
2250         def _encoded_2(shares):
2251             self._shares2 = shares
2252         d.addCallback(_encoded_2)
2253         d.addCallback(lambda res: self._encode(4, 7, contents3))
2254         def _encoded_3(shares):
2255             self._shares3 = shares
2256         d.addCallback(_encoded_3)
2257
2258         def _merge(res):
2259             log.msg("merging sharelists")
2260             # we merge the shares from the two sets, leaving each shnum in
2261             # its original location, but using a share from set1 or set2
2262             # according to the following sequence:
2263             #
2264             #  4-of-9  a  s2
2265             #  4-of-9  b  s2
2266             #  4-of-7  c   s3
2267             #  4-of-9  d  s2
2268             #  3-of-10 e s1
2269             #  3-of-10 f s1
2270             #  3-of-10 g s1
2271             #  4-of-9  h  s2
2272             #
2273             # so that neither form can be recovered until fetch [f], at which
2274             # point version-s1 (the 3-of-10 form) should be recoverable. If
2275             # the implementation latches on to the first version it sees,
2276             # then s2 will be recoverable at fetch [g].
2277
2278             # Later, when we implement code that handles multiple versions,
2279             # we can use this framework to assert that all recoverable
2280             # versions are retrieved, and test that 'epsilon' does its job
2281
2282             places = [2, 2, 3, 2, 1, 1, 1, 2]
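            # places[shnum] names the share-set (1, 2, or 3) from the table
            # above; shnums past the end of the list are not placed at all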
2283
2284             sharemap = {}
2285             sb = self._storage_broker
2286
2287             for peerid in sorted(sb.get_all_serverids()):
2288                 self._storage._peers[peerid] = peers = {}
2289                 for shnum in self._shares1.get(peerid, {}):
2290                     if shnum < len(places):
2291                         which = places[shnum]
2292                     else:
2293                         which = "x"
2294                     in_1 = shnum in self._shares1[peerid]
2295                     in_2 = shnum in self._shares2.get(peerid, {})
2296                     in_3 = shnum in self._shares3.get(peerid, {})
2297                     if which == 1:
2298                         if in_1:
2299                             peers[shnum] = self._shares1[peerid][shnum]
2300                             sharemap[shnum] = peerid
2301                     elif which == 2:
2302                         if in_2:
2303                             peers[shnum] = self._shares2[peerid][shnum]
2304                             sharemap[shnum] = peerid
2305                     elif which == 3:
2306                         if in_3:
2307                             peers[shnum] = self._shares3[peerid][shnum]
2308                             sharemap[shnum] = peerid
2309
2310             # we don't bother placing any other shares
2311             # now sort the sequence so that share 0 is returned first
2312             new_sequence = [sharemap[shnum]
2313                             for shnum in sorted(sharemap.keys())]
2314             self._storage._sequence = new_sequence
2315             log.msg("merge done")
2316         d.addCallback(_merge)
2317         d.addCallback(lambda res: fn3.download_best_version())
2318         def _retrieved(new_contents):
2319             # the current specified behavior is "first version recoverable"
2320             self.failUnlessEqual(new_contents, contents1)
2321         d.addCallback(_retrieved)
2322         return d
2323
2324
2325 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
2326
2327     def setUp(self):
2328         return self.publish_multiple()
2329
2330     def test_multiple_versions(self):
2331         # if we see a mix of versions in the grid, download_best_version
2332         # should get the latest one
2333         self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2334         d = self._fn.download_best_version()
2335         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2336         # and the checker should report problems
2337         d.addCallback(lambda res: self._fn.check(Monitor()))
2338         d.addCallback(self.check_bad, "test_multiple_versions")
2339
2340         # but if everything is at version 2, that's what we should download
2341         d.addCallback(lambda res:
2342                       self._set_versions(dict([(i,2) for i in range(10)])))
2343         d.addCallback(lambda res: self._fn.download_best_version())
2344         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2345         # if exactly one share is at version 3, we should still get v2
2346         d.addCallback(lambda res:
2347                       self._set_versions({0:3}))
2348         d.addCallback(lambda res: self._fn.download_best_version())
2349         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2350         # but the servermap should see the unrecoverable version. This
2351         # depends upon the single newer share being queried early.
2352         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2353         def _check_smap(smap):
2354             self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2355             newer = smap.unrecoverable_newer_versions()
2356             self.failUnlessEqual(len(newer), 1)
2357             verinfo, health = newer.items()[0]
2358             self.failUnlessEqual(verinfo[0], 4)
2359             self.failUnlessEqual(health, (1,3))
2360             self.failIf(smap.needs_merge())
2361         d.addCallback(_check_smap)
2362         # if we have a mix of two parallel versions (s4a and s4b), we could
2363         # recover either
2364         d.addCallback(lambda res:
2365                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2366                                           1:4,3:4,5:4,7:4,9:4}))
2367         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2368         def _check_smap_mixed(smap):
2369             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2370             newer = smap.unrecoverable_newer_versions()
2371             self.failUnlessEqual(len(newer), 0)
2372             self.failUnless(smap.needs_merge())
2373         d.addCallback(_check_smap_mixed)
2374         d.addCallback(lambda res: self._fn.download_best_version())
2375         d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2376                                                   res == self.CONTENTS[4]))
2377         return d
2378
2379     def test_replace(self):
2380         # if we see a mix of versions in the grid, we should be able to
2381         # replace them all with a newer version
2382
2383         # if exactly one share is at version 3, we should download (and
2384         # replace) v2, and the result should be v4. Note that the index we
2385         # give to _set_versions is different than the sequence number.
2386         target = dict([(i,2) for i in range(10)]) # seqnum3
2387         target[0] = 3 # seqnum4
2388         self._set_versions(target)
2389
2390         def _modify(oldversion, servermap, first_time):
2391             return oldversion + " modified"
2392         d = self._fn.modify(_modify)
2393         d.addCallback(lambda res: self._fn.download_best_version())
2394         expected = self.CONTENTS[2] + " modified"
2395         d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2396         # and the servermap should indicate that the outlier was replaced too
2397         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2398         def _check_smap(smap):
2399             self.failUnlessEqual(smap.highest_seqnum(), 5)
2400             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2401             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2402         d.addCallback(_check_smap)
2403         return d
2404
2405
2406 class Utils(unittest.TestCase):
2407     def test_cache(self):
2408         c = ResponseCache()
2409         # xdata = base62.b2a(os.urandom(100))[:100]
2410         xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2411         ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
2412         c.add("v1", 1, 0, xdata)
2413         c.add("v1", 1, 2000, ydata)
2414         self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2415         self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2416         self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2417         self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2418         self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2419         self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2420         self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2421         self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2422         self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
2423         self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2424         self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2425         self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2426         self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2427         self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2428         self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2429         self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2430         self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2431         self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
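        # (the reads at offsets 1990..1999 can never be satisfied: xdata
        # covers bytes 0..99 and ydata covers 2000..2099, so these ranges
        # all touch uncached bytes, and a partially-cached read returns None)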
2432
2433         # test joining fragments
2434         c = ResponseCache()
2435         c.add("v1", 1, 0, xdata[:10])
2436         c.add("v1", 1, 10, xdata[10:20])
2437         self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2438
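# For reference, a minimal sketch of the ResponseCache behavior exercised
# above (assuming only the add(verinfo, shnum, offset, data) and
# read(verinfo, shnum, offset, length) signatures used in test_cache):
#
#     c = ResponseCache()
#     c.add("v1", 1, 0, "0123456789")          # cache bytes 0..9 of share 1
#     assert c.read("v1", 1, 3, 4) == "3456"   # fully cached: data returned
#     assert c.read("v1", 1, 8, 4) is None     # runs past the cache: None
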
2439 class Exceptions(unittest.TestCase):
2440     def test_repr(self):
2441         nmde = NeedMoreDataError(100, 50, 100)
2442         self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde))
2443         ucwe = UncoordinatedWriteError()
2444         self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
2445
2446 class SameKeyGenerator:
2447     def __init__(self, pubkey, privkey):
2448         self.pubkey = pubkey
2449         self.privkey = privkey
2450     def generate(self, keysize=None):
2451         return defer.succeed( (self.pubkey, self.privkey) )
2452
2453 class FirstServerGetsKilled:
2454     done = False
2455     def notify(self, retval, wrapper, methname):
2456         if not self.done:
2457             wrapper.broken = True
2458             self.done = True
2459         return retval
2460
2461 class FirstServerGetsDeleted:
2462     def __init__(self):
2463         self.done = False
2464         self.silenced = None
2465     def notify(self, retval, wrapper, methname):
2466         if not self.done:
2467             # this query will work, but later queries should think the share
2468             # has been deleted
2469             self.done = True
2470             self.silenced = wrapper
2471             return retval
2472         if wrapper == self.silenced:
2473             assert methname == "slot_testv_and_readv_and_writev"
2474             return (True, {})
2475         return retval
2476
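# Both helpers above are installed as post-call hooks on the fake storage
# servers; the tests below wire them up like this:
#
#     killer = FirstServerGetsKilled()
#     for s in nm.storage_broker.get_connected_servers():
#         s.get_rref().post_call_notifier = killer.notify
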
2477 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2478     def do_publish_surprise(self, version):
2479         self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
2480         self.set_up_grid()
2481         nm = self.g.clients[0].nodemaker
2482         d = nm.create_mutable_file(MutableData("contents 1"),
2483                                     version=version)
2484         def _created(n):
2485             d = defer.succeed(None)
2486             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2487             def _got_smap1(smap):
2488                 # stash the old state of the file
2489                 self.old_map = smap
2490             d.addCallback(_got_smap1)
2491             # then modify the file, leaving the old map untouched
2492             d.addCallback(lambda res: log.msg("starting winning write"))
2493             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2494             # now attempt to modify the file with the old servermap. This
2495             # will look just like an uncoordinated write, in which every
2496             # single share got updated between our mapupdate and our publish
2497             d.addCallback(lambda res: log.msg("starting doomed write"))
2498             d.addCallback(lambda res:
2499                           self.shouldFail(UncoordinatedWriteError,
2500                                           "test_publish_surprise", None,
2501                                           n.upload,
2502                                           MutableData("contents 2a"), self.old_map))
2503             return d
2504         d.addCallback(_created)
2505         return d
2506
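    # (the pattern above -- publish with a stale servermap and expect
    # UncoordinatedWriteError -- is reused by test_unexpected_shares below)
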
2507     def test_publish_surprise_sdmf(self):
2508         return self.do_publish_surprise(SDMF_VERSION)
2509
2510     def test_publish_surprise_mdmf(self):
2511         return self.do_publish_surprise(MDMF_VERSION)
2512
2513     def test_retrieve_surprise(self):
2514         self.basedir = "mutable/Problems/test_retrieve_surprise"
2515         self.set_up_grid()
2516         nm = self.g.clients[0].nodemaker
2517         d = nm.create_mutable_file(MutableData("contents 1"))
2518         def _created(n):
2519             d = defer.succeed(None)
2520             d.addCallback(lambda res: n.get_servermap(MODE_READ))
2521             def _got_smap1(smap):
2522                 # stash the old state of the file
2523                 self.old_map = smap
2524             d.addCallback(_got_smap1)
2525             # then modify the file, leaving the old map untouched
2526             d.addCallback(lambda res: log.msg("starting winning write"))
2527             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2528             # now attempt to retrieve the old version with the old servermap.
2529             # This will look like someone has changed the file since we
2530             # updated the servermap.
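            # (clearing the node's response cache first forces the doomed
            # read to actually query the servers rather than being served
            # from cached share data)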
2531             d.addCallback(lambda res: n._cache._clear())
2532             d.addCallback(lambda res: log.msg("starting doomed read"))
2533             d.addCallback(lambda res:
2534                           self.shouldFail(NotEnoughSharesError,
2535                                           "test_retrieve_surprise",
2536                                           "ran out of servers: have 0 of 1",
2537                                           n.download_version,
2538                                           self.old_map,
2539                                           self.old_map.best_recoverable_version(),
2540                                           ))
2541             return d
2542         d.addCallback(_created)
2543         return d
2544
2545
2546     def test_unexpected_shares(self):
2547         # upload the file, take a servermap, shut down one of the servers,
2548         # upload it again (causing shares to appear on a new server), then
2549         # upload using the old servermap. The last upload should fail with an
2550         # UncoordinatedWriteError, because of the shares that didn't appear
2551         # in the servermap.
2552         self.basedir = "mutable/Problems/test_unexpected_shares"
2553         self.set_up_grid()
2554         nm = self.g.clients[0].nodemaker
2555         d = nm.create_mutable_file(MutableData("contents 1"))
2556         def _created(n):
2557             d = defer.succeed(None)
2558             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2559             def _got_smap1(smap):
2560                 # stash the old state of the file
2561                 self.old_map = smap
2562                 # now shut down one of the servers
2563                 peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
2564                 self.g.remove_server(peer0)
2565                 # then modify the file, leaving the old map untouched
2566                 log.msg("starting winning write")
2567                 return n.overwrite(MutableData("contents 2"))
2568             d.addCallback(_got_smap1)
2569             # now attempt to modify the file with the old servermap. This
2570             # will look just like an uncoordinated write, in which every
2571             # single share got updated between our mapupdate and our publish
2572             d.addCallback(lambda res: log.msg("starting doomed write"))
2573             d.addCallback(lambda res:
2574                           self.shouldFail(UncoordinatedWriteError,
2575                                           "test_surprise", None,
2576                                           n.upload,
2577                                           MutableData("contents 2a"), self.old_map))
2578             return d
2579         d.addCallback(_created)
2580         return d
2581
2582     def test_multiply_placed_shares(self):
2583         self.basedir = "mutable/Problems/test_multiply_placed_shares"
2584         self.set_up_grid()
2585         nm = self.g.clients[0].nodemaker
2586         d = nm.create_mutable_file(MutableData("contents 1"))
2587         # remove one of the servers and reupload the file.
2588         def _created(n):
2589             self._node = n
2590
2591             servers = self.g.get_all_serverids()
2592             self.ss = self.g.remove_server(servers[len(servers)-1])
2593
2594             new_server = self.g.make_server(len(servers)-1)
2595             self.g.add_server(len(servers)-1, new_server)
2596
2597             return self._node.download_best_version()
2598         d.addCallback(_created)
2599         d.addCallback(lambda data: MutableData(data))
2600         d.addCallback(lambda data: self._node.overwrite(data))
2601
2602         # restore the server we removed earlier, then download+upload
2603         # the file again
2604         def _overwritten(ign):
2605             self.g.add_server(len(self.g.servers_by_number), self.ss)
2606             return self._node.download_best_version()
2607         d.addCallback(_overwritten)
2608         d.addCallback(lambda data: MutableData(data))
2609         d.addCallback(lambda data: self._node.overwrite(data))
2610         d.addCallback(lambda ignored:
2611             self._node.get_servermap(MODE_CHECK))
2612         def _overwritten_again(smap):
2613             # Make sure that all shares were updated by making sure that
2614             # there aren't any other versions in the sharemap.
2615             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2616             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2617         d.addCallback(_overwritten_again)
2618         return d
2619
2620     def test_bad_server(self):
2621         # Break one server, then create the file: the initial publish should
2622         # complete with an alternate server. Breaking a second server should
2623         # not prevent an update from succeeding either.
2624         self.basedir = "mutable/Problems/test_bad_server"
2625         self.set_up_grid()
2626         nm = self.g.clients[0].nodemaker
2627
2628         # to make sure that one of the initial peers is broken, we have to
2629         # get creative. We create an RSA key and compute its storage-index.
2630         # Then we make a KeyGenerator that always returns that one key, and
2631         # use it to create the mutable file. This will get easier when we can
2632         # use #467 static-server-selection to disable permutation and force
2633         # the choice of server for share[0].
2634
2635         d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2636         def _got_key( (pubkey, privkey) ):
2637             nm.key_generator = SameKeyGenerator(pubkey, privkey)
2638             pubkey_s = pubkey.serialize()
2639             privkey_s = privkey.serialize()
2640             u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2641                                         ssk_pubkey_fingerprint_hash(pubkey_s))
2642             self._storage_index = u.get_storage_index()
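            # (for reference: the writekey is a hash of the private key, the
            # fingerprint a hash of the public key, and get_storage_index()
            # presumably derives the SI from the writekey via the readkey)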
2643         d.addCallback(_got_key)
2644         def _break_peer0(res):
2645             si = self._storage_index
2646             servers = nm.storage_broker.get_servers_for_psi(si)
2647             self.g.break_server(servers[0].get_serverid())
2648             self.server1 = servers[1]
2649         d.addCallback(_break_peer0)
2650         # now "create" the file, using the pre-established key, and let the
2651         # initial publish finally happen
2652         d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2653         # that ought to work
2654         def _got_node(n):
2655             d = n.download_best_version()
2656             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2657             # now break the second peer
2658             def _break_peer1(res):
2659                 self.g.break_server(self.server1.get_serverid())
2660             d.addCallback(_break_peer1)
2661             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2662             # that ought to work too
2663             d.addCallback(lambda res: n.download_best_version())
2664             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2665             def _explain_error(f):
2666                 print f
2667                 if f.check(NotEnoughServersError):
2668                     print "first_error:", f.value.first_error
2669                 return f
2670             d.addErrback(_explain_error)
2671             return d
2672         d.addCallback(_got_node)
2673         return d
2674
2675     def test_bad_server_overlap(self):
2676         # like test_bad_server, but with no extra unused servers to fall back
2677         # upon. This means that we must re-use a server which we've already
2678         # used. If we don't remember the fact that we sent them one share
2679         # already, we'll mistakenly think we're experiencing an
2680         # UncoordinatedWriteError.
2681
2682         # Break one server, then create the file: the initial publish should
2683         # complete with an alternate server. Breaking a second server should
2684         # not prevent an update from succeeding either.
2685         self.basedir = "mutable/Problems/test_bad_server_overlap"
2686         self.set_up_grid()
2687         nm = self.g.clients[0].nodemaker
2688         sb = nm.storage_broker
2689
2690         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2691         self.g.break_server(peerids[0])
2692
2693         d = nm.create_mutable_file(MutableData("contents 1"))
2694         def _created(n):
2695             d = n.download_best_version()
2696             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2697             # now break one of the remaining servers
2698             def _break_second_server(res):
2699                 self.g.break_server(peerids[1])
2700             d.addCallback(_break_second_server)
2701             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2702             # that ought to work too
2703             d.addCallback(lambda res: n.download_best_version())
2704             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2705             return d
2706         d.addCallback(_created)
2707         return d
2708
2709     def test_publish_all_servers_bad(self):
2710         # Break all servers: the publish should fail
2711         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2712         self.set_up_grid()
2713         nm = self.g.clients[0].nodemaker
2714         for s in nm.storage_broker.get_connected_servers():
2715             s.get_rref().broken = True
2716
2717         d = self.shouldFail(NotEnoughServersError,
2718                             "test_publish_all_servers_bad",
2719                             "ran out of good servers",
2720                             nm.create_mutable_file, MutableData("contents"))
2721         return d
2722
2723     def test_publish_no_servers(self):
2724         # no servers at all: the publish should fail
2725         self.basedir = "mutable/Problems/test_publish_no_servers"
2726         self.set_up_grid(num_servers=0)
2727         nm = self.g.clients[0].nodemaker
2728
2729         d = self.shouldFail(NotEnoughServersError,
2730                             "test_publish_no_servers",
2731                             "Ran out of non-bad servers",
2732                             nm.create_mutable_file, MutableData("contents"))
2733         return d
2734
2735
2736     def test_privkey_query_error(self):
2737         # when a servermap is updated with MODE_WRITE, it tries to get the
2738         # privkey. Something might go wrong during this query attempt.
2739         # Exercise the code in _privkey_query_failed which tries to handle
2740         # such an error.
2741         self.basedir = "mutable/Problems/test_privkey_query_error"
2742         self.set_up_grid(num_servers=20)
2743         nm = self.g.clients[0].nodemaker
2744         nm._node_cache = DevNullDictionary() # disable the nodecache
2745
2746         # we need some contents that are large enough to push the privkey out
2747         # of the early part of the file
2748         LARGE = "These are Larger contents" * 2000 # about 50KB
2749         LARGE_uploadable = MutableData(LARGE)
2750         d = nm.create_mutable_file(LARGE_uploadable)
2751         def _created(n):
2752             self.uri = n.get_uri()
2753             self.n2 = nm.create_from_cap(self.uri)
2754
2755             # When a mapupdate is performed on a node that doesn't yet know
2756             # the privkey, a short read is sent to a batch of servers, to get
2757             # the verinfo and (hopefully, if the file is short enough) the
2758             # encprivkey. Our file is too large to let this first read
2759             # contain the encprivkey. Each non-encprivkey-bearing response
2760             # that arrives (until the node gets the encprivkey) will trigger
2761             # a second read to specifically read the encprivkey.
2762             #
2763             # So, to exercise this case:
2764             #  1. notice which server gets a read() call first
2765             #  2. tell that server to start throwing errors
2766             killer = FirstServerGetsKilled()
2767             for s in nm.storage_broker.get_connected_servers():
2768                 s.get_rref().post_call_notifier = killer.notify
2769         d.addCallback(_created)
2770
2771         # now we update a servermap from a new node (which doesn't have the
2772         # privkey yet, forcing it to use a separate privkey query). Note that
2773         # the map-update will succeed, since we'll just get a copy from one
2774         # of the other shares.
2775         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2776
2777         return d
2778
2779     def test_privkey_query_missing(self):
2780         # like test_privkey_query_error, but the shares are deleted by the
2781         # second query, instead of raising an exception.
2782         self.basedir = "mutable/Problems/test_privkey_query_missing"
2783         self.set_up_grid(num_servers=20)
2784         nm = self.g.clients[0].nodemaker
2785         LARGE = "These are Larger contents" * 2000 # about 50KiB
2786         LARGE_uploadable = MutableData(LARGE)
2787         nm._node_cache = DevNullDictionary() # disable the nodecache
2788
2789         d = nm.create_mutable_file(LARGE_uploadable)
2790         def _created(n):
2791             self.uri = n.get_uri()
2792             self.n2 = nm.create_from_cap(self.uri)
2793             deleter = FirstServerGetsDeleted()
2794             for s in nm.storage_broker.get_connected_servers():
2795                 s.get_rref().post_call_notifier = deleter.notify
2796         d.addCallback(_created)
2797         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2798         return d
2799
2800
2801     def test_block_and_hash_query_error(self):
2802         # This tests for what happens when a query to a remote server
2803         # fails in either the hash validation step or the block getting
2804         # step (because of batching, this is the same actual query).
2805         # We need to have the storage server persist up until the point
2806         # that its prefix is validated, then suddenly die. This
2807         # exercises some exception handling code in Retrieve.
2808         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2809         self.set_up_grid(num_servers=20)
2810         nm = self.g.clients[0].nodemaker
2811         CONTENTS = "contents" * 2000
2812         CONTENTS_uploadable = MutableData(CONTENTS)
2813         d = nm.create_mutable_file(CONTENTS_uploadable)
2814         def _created(node):
2815             self._node = node
2816         d.addCallback(_created)
2817         d.addCallback(lambda ignored:
2818             self._node.get_servermap(MODE_READ))
2819         def _then(servermap):
2820             # we have our servermap. Now we set up the servers like the
2821             # tests above -- the first one that gets a read call should
2822             # start throwing errors, but only after returning its prefix
2823             # for validation. Since we'll download without fetching the
2824             # private key, the next query to the remote server will be
2825             # for either a block and salt or for hashes, either of which
2826             # will exercise the error handling code.
2827             killer = FirstServerGetsKilled()
2828             for s in nm.storage_broker.get_connected_servers():
2829                 s.get_rref().post_call_notifier = killer.notify
2830             ver = servermap.best_recoverable_version()
2831             assert ver
2832             return self._node.download_version(servermap, ver)
2833         d.addCallback(_then)
2834         d.addCallback(lambda data:
2835             self.failUnlessEqual(data, CONTENTS))
2836         return d
2837
2838     def test_1654(self):
2839         # test that the Retrieve object unconditionally verifies the block
2840         # hash tree root for mutable shares. The failure mode is that
2841         # carefully crafted shares can cause undetected corruption (the
2842         # retrieve appears to finish successfully, but the result is
2843         # corrupted). When fixed, these shares always cause a
2844         # CorruptShareError, which results in NotEnoughSharesError in this
2845         # 2-of-2 file.
2846         self.basedir = "mutable/Problems/test_1654"
2847         self.set_up_grid(num_servers=2)
2848         cap = uri.from_string(TEST_1654_CAP)
2849         si = cap.get_storage_index()
2850
2851         for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
2852             sharedata = base64.b64decode(share)
2853             storedir = self.get_serverdir(shnum)
2854             storage_path = os.path.join(storedir, "shares",
2855                                         storage_index_to_dir(si))
2856             fileutil.make_dirs(storage_path)
2857             fileutil.write(os.path.join(storage_path, "%d" % shnum),
2858                            sharedata)
2859
2860         nm = self.g.clients[0].nodemaker
2861         n = nm.create_from_cap(TEST_1654_CAP)
2862         # to exercise the problem correctly, we must ensure that sh0 is
2863         # processed first, and sh1 second. NoNetworkGrid has facilities to
2864         # stall the first request from a single server, but it's not
2865         # currently easy to extend that to stall the second request (mutable
2866         # retrievals will see two: first the mapupdate, then the fetch).
2867         # However, repeated executions of this test without the #1654 fix
2868         # suggest that we're failing reliably even without explicit stalls,
2869         # probably because the servers are queried in a fixed order. So I'm
2870         # ok with relying upon that.
2871         d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
2872                             "ran out of servers",
2873                             n.download_best_version)
2874         return d
2875
2876
2877 TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"
2878
2879 TEST_1654_SH0 = """\
2880 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
2881 AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
2882 AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
2883 UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
2884 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2885 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2886 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2887 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2888 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2889 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2890 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2891 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2892 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2893 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2894 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2895 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2896 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2897 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2898 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2899 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2900 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2901 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2902 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2903 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2904 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
2905 bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
2906 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2907 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2908 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2909 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2910 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2911 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2912 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2913 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2914 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2915 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2916 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2917 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2918 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2919 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2920 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2921 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2922 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2923 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2924 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2925 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2926 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2927 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2928 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2929 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2930 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2931 72mXGlqyLyWYuAAAAAA="""
2932
2933 TEST_1654_SH1 = """\
2934 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
2935 9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
2936 AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
2937 MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
2938 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2939 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2940 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2941 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2942 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2943 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2944 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2945 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2946 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2947 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2948 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2949 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2950 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2951 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2952 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2953 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2954 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2955 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2956 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2957 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2958 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
2959 98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
2960 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2961 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2962 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2963 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2964 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2965 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2966 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2967 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2968 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2969 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2970 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2971 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2972 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2973 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2974 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2975 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2976 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2977 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2978 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2979 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2980 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2981 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2982 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2983 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2984 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2985 72mXGlqyLyWYuAAAAAA="""
2986
2987
2988 class FileHandle(unittest.TestCase):
2989     def setUp(self):
2990         self.test_data = "Test Data" * 50000
2991         self.sio = StringIO(self.test_data)
2992         self.uploadable = MutableFileHandle(self.sio)
2993
2994
2995     def test_filehandle_read(self):
2996         self.basedir = "mutable/FileHandle/test_filehandle_read"
2997         chunk_size = 10
2998         for i in xrange(0, len(self.test_data), chunk_size):
2999             data = self.uploadable.read(chunk_size)
3000             data = "".join(data)
3001             start = i
3002             end = i + chunk_size
3003             self.failUnlessEqual(data, self.test_data[start:end])
3004
3005
3006     def test_filehandle_get_size(self):
3007         self.basedir = "mutable/FileHandle/test_filehandle_get_size"
3008         actual_size = len(self.test_data)
3009         size = self.uploadable.get_size()
3010         self.failUnlessEqual(size, actual_size)
3011
3012
3013     def test_filehandle_get_size_out_of_order(self):
3014         # We should be able to call get_size whenever we want without
3015         # disturbing the location of the seek pointer.
3016         chunk_size = 100
3017         data = self.uploadable.read(chunk_size)
3018         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
3019
3020         # Now get the size.
3021         size = self.uploadable.get_size()
3022         self.failUnlessEqual(size, len(self.test_data))
3023
3024         # Now get more data. We should be right where we left off.
3025         more_data = self.uploadable.read(chunk_size)
3026         start = chunk_size
3027         end = chunk_size * 2
3028         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
3029
3030
3031     def test_filehandle_file(self):
3032         # Make sure that the MutableFileHandle works on a file as well
3033         # as a StringIO object, since in some cases it will be asked to
3034         # deal with files.
3035         self.basedir = self.mktemp()
3036         # self.mktemp() only returns a pathname, so create the directory here
3037         os.mkdir(self.basedir)
3038         f_path = os.path.join(self.basedir, "test_file")
3039         f = open(f_path, "w")
3040         f.write(self.test_data)
3041         f.close()
3042         f = open(f_path, "r")
3043
3044         uploadable = MutableFileHandle(f)
3045
3046         data = uploadable.read(len(self.test_data))
3047         self.failUnlessEqual("".join(data), self.test_data)
3048         size = uploadable.get_size()
3049         self.failUnlessEqual(size, len(self.test_data))
3050
3051
3052     def test_close(self):
3053         # Make sure that the MutableFileHandle closes its handle when
3054         # told to do so.
3055         self.uploadable.close()
3056         self.failUnless(self.sio.closed)
3057
3058
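# (judging by the tests above, MutableFileHandle only needs a file-like
# object with read/seek/tell/close, which is why both StringIO objects
# and real files work)
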
3059 class DataHandle(unittest.TestCase):
3060     def setUp(self):
3061         self.test_data = "Test Data" * 50000
3062         self.uploadable = MutableData(self.test_data)
3063
3064
3065     def test_datahandle_read(self):
3066         chunk_size = 10
3067         for i in xrange(0, len(self.test_data), chunk_size):
3068             data = self.uploadable.read(chunk_size)
3069             data = "".join(data)
3070             start = i
3071             end = i + chunk_size
3072             self.failUnlessEqual(data, self.test_data[start:end])
3073
3074
3075     def test_datahandle_get_size(self):
3076         actual_size = len(self.test_data)
3077         size = self.uploadable.get_size()
3078         self.failUnlessEqual(size, actual_size)
3079
3080
3081     def test_datahandle_get_size_out_of_order(self):
3082         # We should be able to call get_size whenever we want without
3083         # disturbing the location of the seek pointer.
3084         chunk_size = 100
3085         data = self.uploadable.read(chunk_size)
3086         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
3087
3088         # Now get the size.
3089         size = self.uploadable.get_size()
3090         self.failUnlessEqual(size, len(self.test_data))
3091
3092         # Now get more data. We should be right where we left off.
3093         more_data = self.uploadable.read(chunk_size)
3094         start = chunk_size
3095         end = chunk_size * 2
3096         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
3097
3098
3099 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
3100               PublishMixin):
3101     def setUp(self):
3102         GridTestMixin.setUp(self)
3103         self.basedir = self.mktemp()
3104         self.set_up_grid()
3105         self.c = self.g.clients[0]
3106         self.nm = self.c.nodemaker
3107         self.data = "test data" * 100000 # about 900 KiB; MDMF
3108         self.small_data = "test data" * 10 # about 90 B; SDMF
3109
3110
3111     def do_upload_mdmf(self):
3112         d = self.nm.create_mutable_file(MutableData(self.data),
3113                                         version=MDMF_VERSION)
3114         def _then(n):
3115             assert isinstance(n, MutableFileNode)
3116             assert n._protocol_version == MDMF_VERSION
3117             self.mdmf_node = n
3118             return n
3119         d.addCallback(_then)
3120         return d
3121
3122     def do_upload_sdmf(self):
3123         d = self.nm.create_mutable_file(MutableData(self.small_data))
3124         def _then(n):
3125             assert isinstance(n, MutableFileNode)
3126             assert n._protocol_version == SDMF_VERSION
3127             self.sdmf_node = n
3128             return n
3129         d.addCallback(_then)
3130         return d
3131
3132     def do_upload_empty_sdmf(self):
3133         d = self.nm.create_mutable_file(MutableData(""))
3134         def _then(n):
3135             assert isinstance(n, MutableFileNode)
3136             self.sdmf_zero_length_node = n
3137             assert n._protocol_version == SDMF_VERSION
3138             return n
3139         d.addCallback(_then)
3140         return d
3141
3142     def do_upload(self):
3143         d = self.do_upload_mdmf()
3144         d.addCallback(lambda ign: self.do_upload_sdmf())
3145         return d
3146
3147     def test_debug(self):
3148         d = self.do_upload_mdmf()
3149         def _debug(n):
3150             fso = debug.FindSharesOptions()
3151             storage_index = base32.b2a(n.get_storage_index())
3152             fso.si_s = storage_index
3153             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
3154                             for (i,ss,storedir)
3155                             in self.iterate_servers()]
3156             fso.stdout = StringIO()
3157             fso.stderr = StringIO()
3158             debug.find_shares(fso)
3159             sharefiles = fso.stdout.getvalue().splitlines()
3160             expected = self.nm.default_encoding_parameters["n"]
3161             self.failUnlessEqual(len(sharefiles), expected)
3162
3163             do = debug.DumpOptions()
3164             do["filename"] = sharefiles[0]
3165             do.stdout = StringIO()
3166             debug.dump_share(do)
3167             output = do.stdout.getvalue()
3168             lines = set(output.splitlines())
3169             self.failUnless("Mutable slot found:" in lines, output)
3170             self.failUnless(" share_type: MDMF" in lines, output)
3171             self.failUnless(" num_extra_leases: 0" in lines, output)
3172             self.failUnless(" MDMF contents:" in lines, output)
3173             self.failUnless("  seqnum: 1" in lines, output)
3174             self.failUnless("  required_shares: 3" in lines, output)
3175             self.failUnless("  total_shares: 10" in lines, output)
3176             self.failUnless("  segsize: 131073" in lines, output)
3177             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
3178             vcap = n.get_verify_cap().to_string()
3179             self.failUnless("  verify-cap: %s" % vcap in lines, output)
3180
3181             cso = debug.CatalogSharesOptions()
3182             cso.nodedirs = fso.nodedirs
3183             cso.stdout = StringIO()
3184             cso.stderr = StringIO()
3185             debug.catalog_shares(cso)
3186             shares = cso.stdout.getvalue().splitlines()
3187             oneshare = shares[0] # all shares should be MDMF
3188             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3189             self.failUnless(oneshare.startswith("MDMF"), oneshare)
3190             fields = oneshare.split()
3191             self.failUnlessEqual(fields[0], "MDMF")
3192             self.failUnlessEqual(fields[1], storage_index)
3193             self.failUnlessEqual(fields[2], "3/10")
3194             self.failUnlessEqual(fields[3], "%d" % len(self.data))
3195             self.failUnless(fields[4].startswith("#1:"), fields[4])
3196             # the rest of fields[4] is the roothash, which depends upon
3197             # encryption salts and is not constant. fields[5] is the
3198             # remaining time on the longest lease, which is timing dependent.
3199             # The rest of the line is the quoted pathname to the share.
3200         d.addCallback(_debug)
3201         return d
3202
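    # The FindSharesOptions / DumpOptions / CatalogSharesOptions sequence
    # above is the in-process equivalent of the debug CLI, roughly:
    #
    #     tahoe debug find-shares <storage-index> <nodedir>...
    #     tahoe debug dump-share <sharefile>
    #     tahoe debug catalog-shares <nodedir>...
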
3203     def test_get_sequence_number(self):
3204         d = self.do_upload()
3205         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3206         d.addCallback(lambda bv:
3207             self.failUnlessEqual(bv.get_sequence_number(), 1))
3208         d.addCallback(lambda ignored:
3209             self.sdmf_node.get_best_readable_version())
3210         d.addCallback(lambda bv:
3211             self.failUnlessEqual(bv.get_sequence_number(), 1))
3212         # Now update. After the overwrite, the sequence number in both
3213         # cases should be 2.
3214         def _do_update(ignored):
3215             new_data = MutableData("foo bar baz" * 100000)
3216             new_small_data = MutableData("foo bar baz" * 10)
3217             d1 = self.mdmf_node.overwrite(new_data)
3218             d2 = self.sdmf_node.overwrite(new_small_data)
3219             dl = gatherResults([d1, d2])
3220             return dl
3221         d.addCallback(_do_update)
3222         d.addCallback(lambda ignored:
3223             self.mdmf_node.get_best_readable_version())
3224         d.addCallback(lambda bv:
3225             self.failUnlessEqual(bv.get_sequence_number(), 2))
3226         d.addCallback(lambda ignored:
3227             self.sdmf_node.get_best_readable_version())
3228         d.addCallback(lambda bv:
3229             self.failUnlessEqual(bv.get_sequence_number(), 2))
3230         return d
3231
3232
3233     def test_cap_after_upload(self):
3234         # If we create a new mutable file and upload things to it, and
3235         # it's an MDMF file, we should get an MDMF cap back from that
3236         # file and should be able to use that.
3237         # In other words, an MDMF filenode should hand back MDMF caps.
3238         d = self.do_upload_mdmf()
3239         def _then(ign):
3240             mdmf_uri = self.mdmf_node.get_uri()
3241             cap = uri.from_string(mdmf_uri)
3242             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3243             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3244             cap = uri.from_string(readonly_mdmf_uri)
3245             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3246         d.addCallback(_then)
3247         return d
3248
3249     def test_mutable_version(self):
3250         # assert that getting parameters from the IMutableVersion object
3251         # gives us the same data as getting them from the filenode itself
3252         d = self.do_upload()
3253         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3254         def _check_mdmf(bv):
3255             n = self.mdmf_node
3256             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3257             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3258             self.failIf(bv.is_readonly())
3259         d.addCallback(_check_mdmf)
3260         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3261         def _check_sdmf(bv):
3262             n = self.sdmf_node
3263             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3264             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3265             self.failIf(bv.is_readonly())
3266         d.addCallback(_check_sdmf)
3267         return d
3268
3269
3270     def test_get_readonly_version(self):
3271         d = self.do_upload()
3272         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3273         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3274
3275         # Attempting to get a mutable version of a mutable file from a
3276         # filenode initialized with a readcap should return a readonly
3277         # version of that same node.
3278         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3279         d.addCallback(lambda ro: ro.get_best_mutable_version())
3280         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3281
3282         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3283         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3284
3285         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3286         d.addCallback(lambda ro: ro.get_best_mutable_version())
3287         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3288         return d
3289
3290
3291     def test_toplevel_overwrite(self):
3292         new_data = MutableData("foo bar baz" * 100000)
3293         new_small_data = MutableData("foo bar baz" * 10)
3294         d = self.do_upload()
3295         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3296         d.addCallback(lambda ignored:
3297             self.mdmf_node.download_best_version())
3298         d.addCallback(lambda data:
3299             self.failUnlessEqual(data, "foo bar baz" * 100000))
3300         d.addCallback(lambda ignored:
3301             self.sdmf_node.overwrite(new_small_data))
3302         d.addCallback(lambda ignored:
3303             self.sdmf_node.download_best_version())
3304         d.addCallback(lambda data:
3305             self.failUnlessEqual(data, "foo bar baz" * 10))
3306         return d
3307
3308
3309     def test_toplevel_modify(self):
3310         d = self.do_upload()
3311         def modifier(old_contents, servermap, first_time):
3312             return old_contents + "modified"
3313         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3314         d.addCallback(lambda ignored:
3315             self.mdmf_node.download_best_version())
3316         d.addCallback(lambda data:
3317             self.failUnlessIn("modified", data))
3318         d.addCallback(lambda ignored:
3319             self.sdmf_node.modify(modifier))
3320         d.addCallback(lambda ignored:
3321             self.sdmf_node.download_best_version())
3322         d.addCallback(lambda data:
3323             self.failUnlessIn("modified", data))
3324         return d
3325
3326
3327     def test_version_modify(self):
3328         # TODO: When we can publish multiple versions, alter this test
3329         # to modify a version other than the best usable version, then
3330         # check that the best recoverable version is the one we modified.
3331         d = self.do_upload()
3332         def modifier(old_contents, servermap, first_time):
3333             return old_contents + "modified"
3334         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3335         d.addCallback(lambda ignored:
3336             self.mdmf_node.download_best_version())
3337         d.addCallback(lambda data:
3338             self.failUnlessIn("modified", data))
3339         d.addCallback(lambda ignored:
3340             self.sdmf_node.modify(modifier))
3341         d.addCallback(lambda ignored:
3342             self.sdmf_node.download_best_version())
3343         d.addCallback(lambda data:
3344             self.failUnlessIn("modified", data))
3345         return d
3346
3347
3348     def test_download_version(self):
3349         d = self.publish_multiple()
3350         # We want to have two recoverable versions on the grid.
3351         d.addCallback(lambda res:
3352                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3353                                           1:1,3:1,5:1,7:1,9:1}))
3354         # Now try to download each version. We should get the plaintext
3355         # associated with that version.
3356         d.addCallback(lambda ignored:
3357             self._fn.get_servermap(mode=MODE_READ))
3358         def _got_servermap(smap):
3359             versions = smap.recoverable_versions()
3360             assert len(versions) == 2
3361
3362             self.servermap = smap
3363             self.version1, self.version2 = versions
3364             assert self.version1 != self.version2
3365
3366             self.version1_seqnum = self.version1[0]
3367             self.version2_seqnum = self.version2[0]
3368             self.version1_index = self.version1_seqnum - 1
3369             self.version2_index = self.version2_seqnum - 1
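            # (publish_multiple publishes CONTENTS[i] with seqnum i+1, so
            # each version's CONTENTS index is its seqnum minus one)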
3370
3371         d.addCallback(_got_servermap)
3372         d.addCallback(lambda ignored:
3373             self._fn.download_version(self.servermap, self.version1))
3374         d.addCallback(lambda results:
3375             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3376                                  results))
3377         d.addCallback(lambda ignored:
3378             self._fn.download_version(self.servermap, self.version2))
3379         d.addCallback(lambda results:
3380             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3381                                  results))
3382         return d
3383
3384
3385     def test_download_nonexistent_version(self):
3386         d = self.do_upload_mdmf()
3387         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3388         def _set_servermap(servermap):
3389             self.servermap = servermap
3390         d.addCallback(_set_servermap)
3391         d.addCallback(lambda ignored:
3392            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3393                            None,
3394                            self.mdmf_node.download_version, self.servermap,
3395                            "not a version"))
3396         return d
3397
3398
3399     def test_partial_read(self):
3400         d = self.do_upload_mdmf()
3401         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3402         modes = [("start_on_segment_boundary",
3403                   mathutil.next_multiple(128 * 1024, 3), 50),
3404                  ("ending_one_byte_after_segment_boundary",
3405                   mathutil.next_multiple(128 * 1024, 3)-50, 51),
3406                  ("zero_length_at_start", 0, 0),
3407                  ("zero_length_in_middle", 50, 0),
3408                  ("zero_length_at_segment_boundary",
3409                   mathutil.next_multiple(128 * 1024, 3), 0),
3410                  ]
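        # (the odd-looking offsets land exactly on an MDMF segment boundary:
        # next_multiple(128*1024, 3) == 131073, which matches the
        # "segsize: 131073" asserted in test_debug)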
3411         for (name, offset, length) in modes:
3412             d.addCallback(self._do_partial_read, name, offset, length)
3413         # then read only a few bytes at a time, and see that the results are
3414         # what we expect.
3415         def _read_data(version):
3416             c = consumer.MemoryConsumer()
3417             d2 = defer.succeed(None)
3418             for i in xrange(0, len(self.data), 10000):
3419                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3420             d2.addCallback(lambda ignored:
3421                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3422             return d2
3423         d.addCallback(_read_data)
3424         return d

3425     def _do_partial_read(self, version, name, offset, length):
3426         c = consumer.MemoryConsumer()
3427         d = version.read(c, offset, length)
3428         expected = self.data[offset:offset+length]
3429         d.addCallback(lambda ignored: "".join(c.chunks))
3430         def _check(results):
3431             if results != expected:
3432                 print
3433                 print "got: %s ... %s" % (results[:20], results[-20:])
3434                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3435                 self.fail("results[%s] != expected" % name)
3436             return version # daisy-chained to next call
3437         d.addCallback(_check)
3438         return d
3439
3440
3441     def _test_read_and_download(self, node, expected):
3442         d = node.get_best_readable_version()
3443         def _read_data(version):
3444             c = consumer.MemoryConsumer()
3445             d2 = defer.succeed(None)
3446             d2.addCallback(lambda ignored: version.read(c))
3447             d2.addCallback(lambda ignored:
3448                 self.failUnlessEqual(expected, "".join(c.chunks)))
3449             return d2
3450         d.addCallback(_read_data)
3451         d.addCallback(lambda ignored: node.download_best_version())
3452         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3453         return d
3454
3455     def test_read_and_download_mdmf(self):
3456         d = self.do_upload_mdmf()
3457         d.addCallback(self._test_read_and_download, self.data)
3458         return d
3459
3460     def test_read_and_download_sdmf(self):
3461         d = self.do_upload_sdmf()
3462         d.addCallback(self._test_read_and_download, self.small_data)
3463         return d
3464
3465     def test_read_and_download_sdmf_zero_length(self):
3466         d = self.do_upload_empty_sdmf()
3467         d.addCallback(self._test_read_and_download, "")
3468         return d
3469
3470
3471 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3472     timeout = 400 # these tests are too big, 120s is not enough on slow
3473                   # platforms
3474     def setUp(self):
3475         GridTestMixin.setUp(self)
3476         self.basedir = self.mktemp()
3477         self.set_up_grid()
3478         self.c = self.g.clients[0]
3479         self.nm = self.c.nodemaker
3480         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3481         self.small_data = "test data" * 10 # about 90 B; SDMF
3482
3483
3484     def do_upload_sdmf(self):
3485         d = self.nm.create_mutable_file(MutableData(self.small_data))
3486         def _then(n):
3487             assert isinstance(n, MutableFileNode)
3488             self.sdmf_node = n
3489             # Make SDMF node that has 255 shares.
3490             self.nm.default_encoding_parameters['n'] = 255
3491             self.nm.default_encoding_parameters['k'] = 127
3492             return self.nm.create_mutable_file(MutableData(self.small_data))
3493         d.addCallback(_then)
3494         def _then2(n):
3495             assert isinstance(n, MutableFileNode)
3496             self.sdmf_max_shares_node = n
3497         d.addCallback(_then2)
3498         return d
3499
3500     def do_upload_mdmf(self):
3501         d = self.nm.create_mutable_file(MutableData(self.data),
3502                                         version=MDMF_VERSION)
3503         def _then(n):
3504             assert isinstance(n, MutableFileNode)
3505             self.mdmf_node = n
3506             # Make MDMF node that has 255 shares.
3507             self.nm.default_encoding_parameters['n'] = 255
3508             self.nm.default_encoding_parameters['k'] = 127
3509             return self.nm.create_mutable_file(MutableData(self.data),
3510                                                version=MDMF_VERSION)
3511         d.addCallback(_then)
3512         def _then2(n):
3513             assert isinstance(n, MutableFileNode)
3514             self.mdmf_max_shares_node = n
3515         d.addCallback(_then2)
3516         return d
3517
3518     def _test_replace(self, offset, new_data):
3519         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3520         d0 = self.do_upload_mdmf()
3521         def _run(ign):
3522             d = defer.succeed(None)
3523             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3524                 # close over 'node'.
3525                 d.addCallback(lambda ign, node=node:
3526                               node.get_best_mutable_version())
3527                 d.addCallback(lambda mv:
3528                               mv.update(MutableData(new_data), offset))
3529                 d.addCallback(lambda ign, node=node:
3530                               node.download_best_version())
3531                 def _check(results):
3532                     if results != expected:
3533                         print
3534                         print "got: %s ... %s" % (results[:20], results[-20:])
3535                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3536                         self.fail("results != expected")
3537                 d.addCallback(_check)
3538             return d
3539         d0.addCallback(_run)
3540         return d0
3541
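     # A minimal model of the overwrite semantics _test_replace() expects:
     # update(MutableData(new_data), offset) overwrites len(new_data) bytes
     # in place and only extends the file when the write runs past the old
     # end. Illustrative helper, not used by the tests below.
     def _expected_after_update(self, original, new_data, offset):
         return original[:offset] + new_data + original[offset+len(new_data):]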
3542     def test_append(self):
3543         # We should be able to append data to a mutable file and get
3544         # what we expect.
3545         return self._test_replace(len(self.data), "appended")
3546
3547     def test_replace_middle(self):
3548         # We should be able to replace data in the middle of a mutable
3549         # file and get what we expect back.
3550         return self._test_replace(100, "replaced")
3551
3552     def test_replace_beginning(self):
3553         # We should be able to replace data at the beginning of a mutable
3554         # file without truncating it.
3555         return self._test_replace(0, "beginning")
3556
3557     def test_replace_segstart1(self):
3558         return self._test_replace(128*1024+1, "NNNN")
3559
3560     def test_replace_zero_length_beginning(self):
3561         return self._test_replace(0, "")
3562
3563     def test_replace_zero_length_middle(self):
3564         return self._test_replace(50, "")
3565
3566     def test_replace_zero_length_segstart1(self):
3567         return self._test_replace(128*1024+1, "")
3568
3569     def test_replace_and_extend(self):
3570         # We should be able to replace data in the middle of a mutable
3571         # file, extend that file, and get what we expect.
3572         return self._test_replace(100, "modified " * 100000)
3573
3574
3575     def _check_differences(self, got, expected):
3576         # displaying arbitrary file corruption is tricky for a
3577         # 1MB file of repeating data, so look for likely places
3578         # with problems and display them separately
3579         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3580         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3581         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3582                     for (start,end) in gotmods]
3583         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3584                     for (start,end) in expmods]
3585         #print "expecting: %s" % expspans
3586
3587         SEGSIZE = 128*1024
3588         if got != expected:
3589             print "differences:"
3590             for segnum in range(len(expected)//SEGSIZE):
3591                 start = segnum * SEGSIZE
3592                 end = (segnum+1) * SEGSIZE
3593                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3594                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3595                 if got_ends != exp_ends:
3596                     print "expected[%d]: %s" % (start, exp_ends)
3597                     print "got     [%d]: %s" % (start, got_ends)
3598             if expspans != gotspans:
3599                 print "expected: %s" % expspans
3600                 print "got     : %s" % gotspans
3601             fileutil.write("EXPECTED", expected)
3602             fileutil.write("GOT", got)
3603             print "wrote data to EXPECTED and GOT"
3604             self.fail("didn't get expected data")
3605
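     # _check_differences() relies on the base data being all lowercase
     # while the injected test writes are uppercase, so a regex suffices
     # to locate every modified span. The same idea in isolation
     # (illustrative only):
     def _find_modified_spans(self, data):
         # spans of injected (uppercase) bytes in otherwise-lowercase data
         return [mo.span() for mo in re.finditer('[A-Z]+', data)]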
3606
3607     def test_replace_locations(self):
3608         # exercise fencepost conditions
3609         SEGSIZE = 128*1024
3610         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3611         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3612         d0 = self.do_upload_mdmf()
3613         def _run(ign):
3614             expected = self.data
3615             d = defer.succeed(None)
3616             for offset in suspects:
3617                 new_data = letters.next()*2 # "AA", then "BB", etc
3618                 expected = expected[:offset]+new_data+expected[offset+2:]
3619                 d.addCallback(lambda ign:
3620                               self.mdmf_node.get_best_mutable_version())
3621                 def _modify(mv, offset=offset, new_data=new_data):
3622                     # close over 'offset' and 'new_data'.
3623                     md = MutableData(new_data)
3624                     return mv.update(md, offset)
3625                 d.addCallback(_modify)
3626                 d.addCallback(lambda ignored:
3627                               self.mdmf_node.download_best_version())
3628                 d.addCallback(self._check_differences, expected)
3629             return d
3630         d0.addCallback(_run)
3631         return d0
3632
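     # The 'suspects' offsets above bracket the first two segment
     # boundaries (SEGSIZE-3 through SEGSIZE, and likewise around
     # 2*SEGSIZE), so each two-byte write lands entirely before,
     # straddling, or starting exactly at a boundary. A sketch of the
     # same generator, assuming the bracket width of 3 used above
     # (illustrative only):
     def _boundary_offsets(self, segsize, boundaries=(1, 2), width=3):
         offsets = []
         for b in boundaries:
             offsets.extend(range(b*segsize - width, b*segsize + 1))
         return offsets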
3633     def test_replace_locations_max_shares(self):
3634         # exercise fencepost conditions
3635         SEGSIZE = 128*1024
3636         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3637         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3638         d0 = self.do_upload_mdmf()
3639         def _run(ign):
3640             expected = self.data
3641             d = defer.succeed(None)
3642             for offset in suspects:
3643                 new_data = letters.next()*2 # "AA", then "BB", etc
3644                 expected = expected[:offset]+new_data+expected[offset+2:]
3645                 d.addCallback(lambda ign:
3646                               self.mdmf_max_shares_node.get_best_mutable_version())
3647                 def _modify(mv, offset=offset, new_data=new_data):
3648                     # close over 'offset' and 'new_data'.
3649                     md = MutableData(new_data)
3650                     return mv.update(md, offset)
3651                 d.addCallback(_modify)
3652                 d.addCallback(lambda ignored:
3653                               self.mdmf_max_shares_node.download_best_version())
3654                 d.addCallback(self._check_differences, expected)
3655             return d
3656         d0.addCallback(_run)
3657         return d0
3658
3659
3660     def test_append_power_of_two(self):
3661         # If we attempt to extend a mutable file so that its segment
3662         # count crosses a power-of-two boundary, the update operation
3663         # should know how to reencode the file.
3664
3665         # Note that the data populating self.mdmf_node is about 900 KiB
3666         # long -- this is 7 segments at the default segment size. So we
3667         # need to add 2 segments worth of data to push it over a
3668         # power-of-two boundary.
3669         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3670         new_data = self.data + (segment * 2)
3671         d0 = self.do_upload_mdmf()
3672         def _run(ign):
3673             d = defer.succeed(None)
3674             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3675                 # close over 'node'.
3676                 d.addCallback(lambda ign, node=node:
3677                               node.get_best_mutable_version())
3678                 d.addCallback(lambda mv:
3679                               mv.update(MutableData(segment * 2), len(self.data)))
3680                 d.addCallback(lambda ign, node=node:
3681                               node.download_best_version())
3682                 d.addCallback(lambda results:
3683                               self.failUnlessEqual(results, new_data))
3684             return d
3685         d0.addCallback(_run)
3686         return d0
3687
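     # Sketch of the segment arithmetic behind the test above, written as
     # plain ceiling division (mathutil.div_ceil expresses the same
     # thing): 900 KiB at the 128 KiB default segment size is 7 segments,
     # and appending two more segments' worth of data crosses the
     # 8-segment power-of-two boundary, forcing a re-encode. Illustrative
     # helper only.
     def _segment_count(self, data_len, segsize=DEFAULT_MAX_SEGMENT_SIZE):
         return (data_len + segsize - 1) // segsize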
3688     def test_update_sdmf(self):
3689         # Running update on a single-segment file should still work.
3690         new_data = self.small_data + "appended"
3691         d0 = self.do_upload_sdmf()
3692         def _run(ign):
3693             d = defer.succeed(None)
3694             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3695                 # close over 'node'.
3696                 d.addCallback(lambda ign, node=node:
3697                               node.get_best_mutable_version())
3698                 d.addCallback(lambda mv:
3699                               mv.update(MutableData("appended"), len(self.small_data)))
3700                 d.addCallback(lambda ign, node=node:
3701                               node.download_best_version())
3702                 d.addCallback(lambda results:
3703                               self.failUnlessEqual(results, new_data))
3704             return d
3705         d0.addCallback(_run)
3706         return d0
3707
3708     def test_replace_in_last_segment(self):
3709         # The wrapper should know how to handle the tail segment
3710         # appropriately.
3711         replace_offset = len(self.data) - 100
3712         new_data = self.data[:replace_offset] + "replaced"
3713         rest_offset = replace_offset + len("replaced")
3714         new_data += self.data[rest_offset:]
3715         d0 = self.do_upload_mdmf()
3716         def _run(ign):
3717             d = defer.succeed(None)
3718             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3719                 # close over 'node'.
3720                 d.addCallback(lambda ign, node=node:
3721                               node.get_best_mutable_version())
3722                 d.addCallback(lambda mv:
3723                               mv.update(MutableData("replaced"), replace_offset))
3724                 d.addCallback(lambda ign, node=node:
3725                               node.download_best_version())
3726                 d.addCallback(lambda results:
3727                               self.failUnlessEqual(results, new_data))
3728             return d
3729         d0.addCallback(_run)
3730         return d0
3731
3732     def test_multiple_segment_replace(self):
3733         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3734         new_data = self.data[:replace_offset]
3735         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3736         new_data += 2 * new_segment
3737         new_data += "replaced"
3738         rest_offset = len(new_data)
3739         new_data += self.data[rest_offset:]
3740         d0 = self.do_upload_mdmf()
3741         def _run(ign):
3742             d = defer.succeed(None)
3743             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3744                 # close over 'node'.
3745                 d.addCallback(lambda ign, node=node:
3746                               node.get_best_mutable_version())
3747                 d.addCallback(lambda mv:
3748                               mv.update(MutableData((2 * new_segment) + "replaced"),
3749                                         replace_offset))
3750                 d.addCallback(lambda ignored, node=node:
3751                               node.download_best_version())
3752                 d.addCallback(lambda results:
3753                               self.failUnlessEqual(results, new_data))
3754             return d
3755         d0.addCallback(_run)
3756         return d0
3757
3758 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3759     sdmf_old_shares = {}
3760     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3761     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3762     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3763     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3764     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3765     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3766     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3767     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3768     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3769     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3770     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3771     sdmf_old_contents = "This is a test file.\n"
3772     def copy_sdmf_shares(self):
3773         # We short-circuit the upload process by writing the share files directly.
3774         servernums = self.g.servers_by_number.keys()
3775         assert len(servernums) == 10
3776
3777         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3778         # Get the storage index.
3779         cap = uri.from_string(self.sdmf_old_cap)
3780         si = cap.get_storage_index()
3781
3782         # Now execute each assignment by writing the storage.
3783         for (share, servernum) in assignments:
3784             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3785             storedir = self.get_serverdir(servernum)
3786             storage_path = os.path.join(storedir, "shares",
3787                                         storage_index_to_dir(si))
3788             fileutil.make_dirs(storage_path)
3789             fileutil.write(os.path.join(storage_path, "%d" % share),
3790                            sharedata)
3791         # ...and verify that the shares are there.
3792         shares = self.find_uri_shares(self.sdmf_old_cap)
3793         assert len(shares) == 10
3794
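     # A minimal sketch of the inverse operation: reading one of the
     # hand-written shares back off disk. It assumes only the layout that
     # copy_sdmf_shares() itself wrote (shares/<si-dir>/<shnum>);
     # illustrative, not used by the test below.
     def _read_share(self, servernum, shnum):
         si = uri.from_string(self.sdmf_old_cap).get_storage_index()
         sharefile = os.path.join(self.get_serverdir(servernum), "shares",
                                  storage_index_to_dir(si), "%d" % shnum)
         f = open(sharefile, "rb")
         try:
             return f.read()
         finally:
             f.close()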
3795     def test_new_downloader_can_read_old_shares(self):
3796         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3797         self.set_up_grid()
3798         self.copy_sdmf_shares()
3799         nm = self.g.clients[0].nodemaker
3800         n = nm.create_from_cap(self.sdmf_old_cap)
3801         d = n.download_best_version()
3802         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3803         return d
3804
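     # For reference, sdmf_old_cap above parses into a writeable SSK cap;
     # its storage index is what copy_sdmf_shares() uses to pick the
     # on-disk directory. A small illustrative helper, not called by any
     # test:
     def _parse_old_cap_example():
         cap = uri.from_string(Interoperability.sdmf_old_cap)
         assert isinstance(cap, uri.WriteableSSKFileURI)
         return base32.b2a(cap.get_storage_index())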
3805 class DifferentEncoding(unittest.TestCase):
3806     def setUp(self):
3807         self._storage = s = FakeStorage()
3808         self.nodemaker = make_nodemaker(s)
3809
3810     def test_filenode(self):
3811         # create a file with 3-of-20, then modify it with a client configured
3812         # to do 3-of-10. #1510 tracks a failure here
3813         self.nodemaker.default_encoding_parameters["n"] = 20
3814         d = self.nodemaker.create_mutable_file("old contents")
3815         def _created(n):
3816             filecap = n.get_cap().to_string()
3817             del n # we want a new object, not the cached one
3818             self.nodemaker.default_encoding_parameters["n"] = 10
3819             n2 = self.nodemaker.create_from_cap(filecap)
3820             return n2
3821         d.addCallback(_created)
3822         def modifier(old_contents, servermap, first_time):
3823             return "new contents"
3824         d.addCallback(lambda n: n.modify(modifier))
3825         return d
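     # The modifier signature used above -- (old_contents, servermap,
     # first_time) -- is the general n.modify() contract: return the new
     # contents to publish (returning None leaves the file unchanged). A
     # hypothetical appending modifier for comparison; illustrative, not
     # a test.
     def _append_modifier_example(self, n):
         def appender(old_contents, servermap, first_time):
             return old_contents + " (appended)"
         return n.modify(appender)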