import os, re, base64
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.internet import defer, reactor
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32, consumer, fileutil, mathutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
     ssk_pubkey_fingerprint_hash
from allmydata.util.consumer import MemoryConsumer
from allmydata.util.deferredutil import gatherResults
from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
     NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
from allmydata.monitor import Monitor
from allmydata.test.common import ShouldFailMixin
from allmydata.test.no_network import GridTestMixin
from foolscap.api import eventually, fireEventually
from foolscap.logging import log
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.common import storage_index_to_dir
from allmydata.scripts import debug

from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
from allmydata.mutable.common import \
     MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
     NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
     NotEnoughServersError, CorruptShareError
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.publish import Publish, MutableFileHandle, \
                                      MutableData, \
                                      DEFAULT_MAX_SEGMENT_SIZE
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
from allmydata.mutable.repairer import MustForceRepairError

import allmydata.test.common_util as testutil
from allmydata.test.common import TEST_RSA_KEY_SIZE
from allmydata.test.test_download import PausingConsumer, \
     PausingAndStoppingConsumer, StoppingConsumer, \
     ImmediatelyStoppingConsumer

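# fires its result only after three trips through the reactor, roughly
# simulating the latency of a remote round trip (the misspelling of
# "eventually" appears to be deliberate: eventually, eventually, eventually)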
def eventuaaaaaly(res=None):
    d = fireEventually(res)
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    return d


# this "FakeStorage" exists to put the share data in RAM and avoid using real
# network connections, both to speed up the tests and to reduce the amount of
# non-mutable.py code being exercised.

class FakeStorage:
    # this class replaces the collection of storage servers, allowing the
    # tests to examine and manipulate the published shares. It also lets us
    # control the order in which read queries are answered, to exercise more
    # of the error-handling code in Retrieve.
    #
    # Note that we ignore the storage index: this FakeStorage instance can
    # only be used for a single storage index.


    def __init__(self):
        self._peers = {}
        # _sequence is used to cause the responses to occur in a specific
        # order. If it is in use, then we will defer queries instead of
        # answering them right away, accumulating the Deferreds in a dict. We
        # don't know exactly how many queries we'll get, so exactly one
        # second after the first query arrives, we will release them all (in
        # order).
        self._sequence = None
        self._pending = {}
        self._pending_timer = None

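    # Hypothetical usage sketch: a test that wants a deterministic answer
    # order can set, after publishing,
    #
    #     s._sequence = sorted(s._peers.keys())
    #
    # after which reads are parked in _pending and released in that order,
    # one second after the first read arrives.
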
    def read(self, peerid, storage_index):
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return eventuaaaaaly(shares)
        d = defer.Deferred()
        if not self._pending:
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        if peerid not in self._pending:
            self._pending[peerid] = []
        self._pending[peerid].append( (d, shares) )
        return d

    def _fire_readers(self):
        self._pending_timer = None
        pending = self._pending
        self._pending = {}
        for peerid in self._sequence:
            if peerid in pending:
                for (d, shares) in pending.pop(peerid):
                    eventually(d.callback, shares)
        for peerid in pending:
            for (d, shares) in pending[peerid]:
                eventually(d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        f = StringIO()
        f.write(shares.get(shnum, ""))
        f.seek(offset)
        f.write(data)
        shares[shnum] = f.getvalue()


class FakeStorageServer:
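    # stands in for a foolscap RemoteReference: callRemote() takes an extra
    # trip through the reactor before invoking the named method locally, so
    # callers see the same asynchrony a real remote call would have.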
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0
    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        pass

    def slot_readv(self, storage_index, shnums, readv):
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)


def flip_bit(original, byte_offset):
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x01) +
            original[byte_offset+1:])

def add_two(original, byte_offset):
    # It isn't enough to simply flip the low bit of the version number,
    # because 1 is a valid version number. So we flip the 0x02 bit instead,
    # which turns 0 into 2 and 1 into 3, both invalid.
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x02) +
            original[byte_offset+1:])

def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum, shares):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum, shares)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl

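# A sketch of how corrupt() is typically chained into a test's Deferred
# chain (the arguments here are illustrative: 'offset' may be a named share
# field such as "signature", a raw byte offset, or a (name, delta) tuple):
#
#     d.addCallback(corrupt, self._storage, "signature")
#     d.addCallback(corrupt, self._storage, 0, shnums_to_corrupt=[0])
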
def make_storagebroker(s=None, num_peers=10):
    if not s:
        s = FakeStorage()
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
               "permutation-seed-base32": base32.b2a(peerid) }
        storage_broker.test_add_rref(peerid, fss, ann)
    return storage_broker

def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    if keysize:
        keygen.set_default_keysize(keysize)
    nodemaker = NodeMaker(storage_broker, sh, None,
                          None, None,
                          {"k": 3, "n": 10}, SDMF_VERSION, keygen)
    return nodemaker

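# The usual bootstrap for the test classes below, mirroring their setUp()
# methods:
#
#     s = FakeStorage()
#     nodemaker = make_nodemaker(s)
#     d = nodemaker.create_mutable_file(MutableData("contents"))
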
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    # this used to be in Publish, but we removed the limit. Some of
    # these tests test whether the new code correctly allows files
    # larger than the limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
    def setUp(self):
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_create(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d


    def test_create_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d

    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            d.addCallback(lambda ignored, v=v:
                self.nodemaker.create_mutable_file(version=v))
            def _created(n):
                self.failUnless(isinstance(n, MutableFileNode))
                self._node = n
                return n
            d.addCallback(_created)
            d.addCallback(lambda n:
                n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                self._node.download_best_version())
            d.addCallback(lambda contents:
                self.failUnlessEqual(contents, "Contents" * 50000))
        return d

    def test_max_shares(self):
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
            self.failUnlessEqual("contents" * 50000, contents))
        return d

    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        d.addCallback(lambda contents:
            self.failUnlessEqual(contents, "contents" * 50000))
        return d

    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            cap = n.get_cap()
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))

            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)
        return d


    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)

            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
        return d


    def test_serialize(self):
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        return d

    def test_upload_and_download(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d


    def test_upload_and_download_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9 MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d


    def test_retrieve_producer_mdmf(self):
        # We should make sure that the retriever is able to pause and stop
        # correctly.
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=MDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "MDMF", data)
        return d

    # note: SDMF has only one big segment, so we can't use the usual
    # after-the-first-write() trick to pause or stop the download.
    # Disabled until we find a better approach.
    def OFF_test_retrieve_producer_sdmf(self):
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=SDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "SDMF", data)
        return d

    def _test_retrieve_producer(self, version, kind, data):
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        d = version.read(c)
        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))

        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_pause_stop",
                                      "our Consumer called stopProducing()",
                                      version.read, c2))

        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop",
                                      "our Consumer called stopProducing()",
                                      version.read, c3))

        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop_imm",
                                      "our Consumer called stopProducing()",
                                      version.read, c4))

        def _then(ign):
            c5 = MemoryConsumer()
            d1 = version.read(c5)
            c5.producer.stopProducing()
            return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                                   "our Consumer called stopProducing()",
                                   lambda: d1)
        d.addCallback(_then)
        return d

    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.uri = node.get_uri()
            # also confirm that the cap has no extension fields
            pieces = self.uri.split(":")
            self.failUnlessEqual(len(pieces), 4)

            return node.overwrite(MutableData("contents1" * 100000))
        def _then(ignored):
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_then)
        d.addCallback(_downloaded)
        return d


    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
        return d


    def test_create_with_initial_contents(self):
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            return d
        d.addCallback(_created)
        return d


    def test_create_mdmf_with_initial_contents(self):
        initial_contents = "foobarbaz" * 131072 # about 1.1 MiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents +
                                           "foobarbaz"))
            return d
        d.addCallback(_created)
        return d

    def test_create_with_initial_contents_function(self):
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
        def _created(n):
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
        return d


    def test_create_mdmf_with_initial_contents_function(self):
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
            n.download_best_version())
        d.addCallback(lambda data2:
            self.failUnlessEqual(data2, data))
        return d


    def test_create_with_too_large_contents(self):
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
        def _created(n):
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
            return d
        d.addCallback(_created)
        return d

    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        return d

    def test_modify(self):
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d


    def test_modify_backoffer(self):
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            return f
        def _backoff_pauser(node, f):
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d

    def test_upload_and_download_full_size_keys(self):
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d


    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.n = n
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
            self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
            self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        return d


class PublishMixin:
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS should have more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_empty_sdmf(self):
        self.CONTENTS = ""
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, keysize=None)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable,
                                                version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_multiple(self, version=0):
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d


    def _copy_shares(self, ignored, index):
        shares = self._storage._peers
        # we need a deep copy
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
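        # For example, _set_versions(dict([(i,2) for i in range(10)])) rolls
        # every share back to the copy that _copy_shares captured for version
        # index 2, exactly as publish_multiple does above.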
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]

class Servermap(unittest.TestCase, PublishMixin):
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, servers = sm.make_sharemap().items()[0]
        server = list(servers)[0]
        self.failUnlessEqual(sm.version_on_server(server, shnum), best)
        self.failUnlessEqual(sm.version_on_server(server, 666), None)
        return sm

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should no longer contain the marked shares, and new
            # shares should be found to replace the missing ones.
            for (shnum, server, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (server, shnum) )
                    sm.mark_bad_share(server, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (server, shnum) in self._corrupted:
                server_shares = sm.debug_shares_on_server(server)
                self.failIf(shnum in server_shares,
                            "%d was in %s" % (shnum, server_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

    def failUnlessNoneRecoverable(self, sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)
        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

    def failUnlessNotQuiteEnough(self, sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        return sm

    def test_not_quite_enough_shares(self):
        s = self._storage
        ms = self.make_servermap
        num_shares = len(s._peers)
        for peerid in s._peers:
            s._peers[peerid] = {}
            num_shares -= 1
            if num_shares == 2:
                break
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))

        return d


    def test_servermapupdater_finds_mdmf_files(self):
1205         # publish an MDMF file, then make sure that when we run the
1206         # ServermapUpdater, the file is reported to have one recoverable
1207         # version.
1208         d = defer.succeed(None)
1209         d.addCallback(lambda ignored:
1210             self.publish_mdmf())
1211         d.addCallback(lambda ignored:
1212             self.make_servermap(mode=MODE_CHECK))
1213         # Calling make_servermap also updates the servermap in the mode
1214         # that we specify, so we just need to see what it says.
1215         def _check_servermap(sm):
1216             self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1217         d.addCallback(_check_servermap)
1218         return d
1219
1220
1221     def test_fetch_update(self):
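             # a sketch of what this exercises (assumed from the assertions
             # below): update_range asks the mapupdate to also fetch the
             # cached data needed for a later in-place update of bytes 1..2,
             # which lands in servermap.update_data, one entry per share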
1222         d = defer.succeed(None)
1223         d.addCallback(lambda ignored:
1224             self.publish_mdmf())
1225         d.addCallback(lambda ignored:
1226             self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1227         def _check_servermap(sm):
1228             # 10 shares
1229             self.failUnlessEqual(len(sm.update_data), 10)
1230             # one version
1231             for data in sm.update_data.itervalues():
1232                 self.failUnlessEqual(len(data), 1)
1233         d.addCallback(_check_servermap)
1234         return d
1235
1236
1237     def test_servermapupdater_finds_sdmf_files(self):
1238         d = defer.succeed(None)
1239         d.addCallback(lambda ignored:
1240             self.publish_sdmf())
1241         d.addCallback(lambda ignored:
1242             self.make_servermap(mode=MODE_CHECK))
1243         d.addCallback(lambda servermap:
1244             self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1245         return d
1246
1247
1248 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1249     def setUp(self):
1250         return self.publish_one()
1251
1252     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1253         if oldmap is None:
1254             oldmap = ServerMap()
1255         if sb is None:
1256             sb = self._storage_broker
1257         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1258         d = smu.update()
1259         return d
1260
1261     def abbrev_verinfo(self, verinfo):
1262         if verinfo is None:
1263             return None
1264         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1265          offsets_tuple) = verinfo
1266         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1267
1268     def abbrev_verinfo_dict(self, verinfo_d):
1269         output = {}
1270         for verinfo,value in verinfo_d.items():
1271             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1272              offsets_tuple) = verinfo
1273             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1274         return output
1275
1276     def dump_servermap(self, servermap):
1277         print "SERVERMAP", servermap
1278         print "RECOVERABLE", [self.abbrev_verinfo(v)
1279                               for v in servermap.recoverable_versions()]
1280         print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1281         print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1282
1283     def do_download(self, servermap, version=None):
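             # helper: retrieve the given version (default: best recoverable)
             # into a MemoryConsumer and return the plaintext as one string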
1284         if version is None:
1285             version = servermap.best_recoverable_version()
1286         r = Retrieve(self._fn, self._storage_broker, servermap, version)
1287         c = consumer.MemoryConsumer()
1288         d = r.download(consumer=c)
1289         d.addCallback(lambda mc: "".join(mc.chunks))
1290         return d
1291
1292
1293     def test_basic(self):
1294         d = self.make_servermap()
1295         def _do_retrieve(servermap):
1296             self._smap = servermap
1297             #self.dump_servermap(servermap)
1298             self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1299             return self.do_download(servermap)
1300         d.addCallback(_do_retrieve)
1301         def _retrieved(new_contents):
1302             self.failUnlessEqual(new_contents, self.CONTENTS)
1303         d.addCallback(_retrieved)
1304         # we should be able to re-use the same servermap, both with and
1305         # without updating it.
1306         d.addCallback(lambda res: self.do_download(self._smap))
1307         d.addCallback(_retrieved)
1308         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1309         d.addCallback(lambda res: self.do_download(self._smap))
1310         d.addCallback(_retrieved)
1311         # clobbering the pubkey should make the servermap updater re-fetch it
1312         def _clobber_pubkey(res):
1313             self._fn._pubkey = None
1314         d.addCallback(_clobber_pubkey)
1315         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1316         d.addCallback(lambda res: self.do_download(self._smap))
1317         d.addCallback(_retrieved)
1318         return d
1319
1320     def test_all_shares_vanished(self):
1321         d = self.make_servermap()
1322         def _remove_shares(servermap):
1323             for shares in self._storage._peers.values():
1324                 shares.clear()
1325             d1 = self.shouldFail(NotEnoughSharesError,
1326                                  "test_all_shares_vanished",
1327                                  "ran out of servers",
1328                                  self.do_download, servermap)
1329             return d1
1330         d.addCallback(_remove_shares)
1331         return d
1332
1333     def test_all_but_two_shares_vanished_updated_servermap(self):
1334         # tests error reporting for ticket #1742
1335         d = self.make_servermap()
1336         def _remove_shares(servermap):
1337             self._version = servermap.best_recoverable_version()
1338             for shares in self._storage._peers.values()[2:]:
1339                 shares.clear()
1340             return self.make_servermap(oldmap=servermap)
1341         d.addCallback(_remove_shares)
1342         def _check(updated_servermap):
1343             d1 = self.shouldFail(NotEnoughSharesError,
1344                                  "test_all_but_two_shares_vanished_updated_servermap",
1345                                  "ran out of servers",
1346                                  self.do_download, updated_servermap, version=self._version)
1347             return d1
1348         d.addCallback(_check)
1349         return d
1350
1351     def test_no_servers(self):
1352         sb2 = make_storagebroker(num_peers=0)
1353         # if there are no servers, then a MODE_READ servermap should come
1354         # back empty
1355         d = self.make_servermap(sb=sb2)
1356         def _check_servermap(servermap):
1357             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1358             self.failIf(servermap.recoverable_versions())
1359             self.failIf(servermap.unrecoverable_versions())
1360             self.failIf(servermap.all_servers())
1361         d.addCallback(_check_servermap)
1362         return d
1363
1364     def test_no_servers_download(self):
1365         sb2 = make_storagebroker(num_peers=0)
1366         self._fn._storage_broker = sb2
1367         d = self.shouldFail(UnrecoverableFileError,
1368                             "test_no_servers_download",
1369                             "no recoverable versions",
1370                             self._fn.download_best_version)
1371         def _restore(res):
1372             # a failed download that occurs while we aren't connected to
1373             # anybody should not prevent a subsequent download from working.
1374             # This isn't quite the webapi-driven test that #463 wants, but it
1375             # should be close enough.
1376             self._fn._storage_broker = self._storage_broker
1377             return self._fn.download_best_version()
1378         def _retrieved(new_contents):
1379             self.failUnlessEqual(new_contents, self.CONTENTS)
1380         d.addCallback(_restore)
1381         d.addCallback(_retrieved)
1382         return d
1383
1384
1385     def _test_corrupt_all(self, offset, substring,
1386                           should_succeed=False,
1387                           corrupt_early=True,
1388                           failure_checker=None,
1389                           fetch_privkey=False):
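             # corrupt every share at 'offset' (the module-level corrupt()
             # helper accepts an integer byte offset into the share, the name
             # of a packed field, or a (field, offset-within-field) tuple),
             # then update the servermap and attempt a download.
             # 'corrupt_early' controls whether the damage lands before or
             # after the mapupdate; on failure, 'substring' must appear in
             # the recorded problems or in the download failure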
1390         d = defer.succeed(None)
1391         if corrupt_early:
1392             d.addCallback(corrupt, self._storage, offset)
1393         d.addCallback(lambda res: self.make_servermap())
1394         if not corrupt_early:
1395             d.addCallback(corrupt, self._storage, offset)
1396         def _do_retrieve(servermap):
1397             ver = servermap.best_recoverable_version()
1398             if ver is None and not should_succeed:
1399                 # no recoverable versions == not succeeding. The problem
1400                 # should be noted in the servermap's list of problems.
1401                 if substring:
1402                     allproblems = [str(f) for f in servermap.get_problems()]
1403                     self.failUnlessIn(substring, "".join(allproblems))
1404                 return servermap
1405             if should_succeed:
1406                 d1 = self._fn.download_version(servermap, ver,
1407                                                fetch_privkey)
1408                 d1.addCallback(lambda new_contents:
1409                                self.failUnlessEqual(new_contents, self.CONTENTS))
1410             else:
1411                 d1 = self.shouldFail(NotEnoughSharesError,
1412                                      "_corrupt_all(offset=%s)" % (offset,),
1413                                      substring,
1414                                      self._fn.download_version, servermap,
1415                                                                 ver,
1416                                                                 fetch_privkey)
1417             if failure_checker:
1418                 d1.addCallback(failure_checker)
1419             d1.addCallback(lambda res: servermap)
1420             return d1
1421         d.addCallback(_do_retrieve)
1422         return d
1423
1424     def test_corrupt_all_verbyte(self):
1425         # when the version byte is not 0 or 1, we hit an
1426         # UnknownVersionError in unpack_share().
1427         d = self._test_corrupt_all(0, "UnknownVersionError")
1428         def _check_servermap(servermap):
1429             # and the dump should mention the problems
1430             s = StringIO()
1431             dump = servermap.dump(s).getvalue()
1432             self.failUnless("30 PROBLEMS" in dump, dump)
1433         d.addCallback(_check_servermap)
1434         return d
1435
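         # The integer offsets used below index into the SDMF share header; a
         # sketch of the assumed packing (consistent with each test's own
         # comment): 0 = version byte, 1 = seqnum (8 bytes), 9 = root hash
         # (32 bytes), 41 = salt/IV (16 bytes), 57 = k, 58 = N, 59 = segsize
         # (8 bytes), 67 = data length (8 bytes).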
1436     def test_corrupt_all_seqnum(self):
1437         # a corrupt sequence number will trigger a bad signature
1438         return self._test_corrupt_all(1, "signature is invalid")
1439
1440     def test_corrupt_all_R(self):
1441         # a corrupt root hash will trigger a bad signature
1442         return self._test_corrupt_all(9, "signature is invalid")
1443
1444     def test_corrupt_all_IV(self):
1445         # a corrupt salt/IV will trigger a bad signature
1446         return self._test_corrupt_all(41, "signature is invalid")
1447
1448     def test_corrupt_all_k(self):
1449         # a corrupt 'k' will trigger a bad signature
1450         return self._test_corrupt_all(57, "signature is invalid")
1451
1452     def test_corrupt_all_N(self):
1453         # a corrupt 'N' will trigger a bad signature
1454         return self._test_corrupt_all(58, "signature is invalid")
1455
1456     def test_corrupt_all_segsize(self):
1457         # a corrupt segsize will trigger a bad signature
1458         return self._test_corrupt_all(59, "signature is invalid")
1459
1460     def test_corrupt_all_datalen(self):
1461         # a corrupt data length will trigger a bad signature
1462         return self._test_corrupt_all(67, "signature is invalid")
1463
1464     def test_corrupt_all_pubkey(self):
1465         # a corrupt pubkey won't match the URI's fingerprint. We need to
1466         # remove the pubkey from the filenode, or else it won't bother trying
1467         # to update it.
1468         self._fn._pubkey = None
1469         return self._test_corrupt_all("pubkey",
1470                                       "pubkey doesn't match fingerprint")
1471
1472     def test_corrupt_all_sig(self):
1473         # a corrupt signature is a bad one
1474         # the signature runs from about [543:799], depending upon the length
1475         # of the pubkey
1476         return self._test_corrupt_all("signature", "signature is invalid")
1477
1478     def test_corrupt_all_share_hash_chain_number(self):
1479         # a corrupt share hash chain entry will show up as a bad hash. If we
1480         # mangle the first byte, that will look like a bad hash number,
1481         # causing an IndexError
1482         return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1483
1484     def test_corrupt_all_share_hash_chain_hash(self):
1485         # a corrupt share hash chain entry will show up as a bad hash. If we
1486         # mangle a few bytes in, that will look like a bad hash.
1487         return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1488
1489     def test_corrupt_all_block_hash_tree(self):
1490         return self._test_corrupt_all("block_hash_tree",
1491                                       "block hash tree failure")
1492
1493     def test_corrupt_all_block(self):
1494         return self._test_corrupt_all("share_data", "block hash tree failure")
1495
1496     def test_corrupt_all_encprivkey(self):
1497         # a corrupted privkey won't even be noticed by the reader, only by a
1498         # writer.
1499         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1500
1501
1502     def test_corrupt_all_encprivkey_late(self):
1503         # this should work for the same reason as above, but we corrupt
1504         # after the servermap update to exercise the error handling
1505         # code.
1506         # We need to remove the privkey from the node, or the retrieve
1507         # process won't know to update it.
1508         self._fn._privkey = None
1509         return self._test_corrupt_all("enc_privkey",
1510                                       None, # this shouldn't fail
1511                                       should_succeed=True,
1512                                       corrupt_early=False,
1513                                       fetch_privkey=True)
1514
1515
1516     # disabled until retrieve tests checkstring on each blockfetch. I didn't
1517     # just use a .todo because the failing-but-ignored test emits about 30kB
1518     # of noise.
1519     def OFF_test_corrupt_all_seqnum_late(self):
1520         # corrupting the seqnum between mapupdate and retrieve should result
1521         # in NotEnoughSharesError, since each share will look invalid
1522         def _check(res):
1523             f = res[0]
1524             self.failUnless(f.check(NotEnoughSharesError))
1525             self.failUnless("uncoordinated write" in str(f))
1526         return self._test_corrupt_all(1, "ran out of servers",
1527                                       corrupt_early=False,
1528                                       failure_checker=_check)
1529
1530
1531     def test_corrupt_all_block_late(self):
1532         def _check(res):
1533             f = res[0]
1534             self.failUnless(f.check(NotEnoughSharesError))
1535         return self._test_corrupt_all("share_data", "block hash tree failure",
1536                                       corrupt_early=False,
1537                                       failure_checker=_check)
1538
1539
1540     def test_basic_pubkey_at_end(self):
1541         # we corrupt the pubkey in all but the last 'k' shares, allowing the
1542         # download to succeed but forcing a bunch of retries first. Note that
1543         # this is rather pessimistic: our Retrieve process will throw away
1544         # the whole share if the pubkey is bad, even though the rest of the
1545         # share might be good.
1546
1547         self._fn._pubkey = None
1548         k = self._fn.get_required_shares()
1549         N = self._fn.get_total_shares()
1550         d = defer.succeed(None)
1551         d.addCallback(corrupt, self._storage, "pubkey",
1552                       shnums_to_corrupt=range(0, N-k))
1553         d.addCallback(lambda res: self.make_servermap())
1554         def _do_retrieve(servermap):
1555             self.failUnless(servermap.get_problems())
1556             self.failUnless("pubkey doesn't match fingerprint"
1557                             in str(servermap.get_problems()[0]))
1558             ver = servermap.best_recoverable_version()
1559             r = Retrieve(self._fn, self._storage_broker, servermap, ver)
1560             c = consumer.MemoryConsumer()
1561             return r.download(c)
1562         d.addCallback(_do_retrieve)
1563         d.addCallback(lambda mc: "".join(mc.chunks))
1564         d.addCallback(lambda new_contents:
1565                       self.failUnlessEqual(new_contents, self.CONTENTS))
1566         return d
1567
1568
1569     def _test_corrupt_some(self, offset, mdmf=False):
1570         if mdmf:
1571             d = self.publish_mdmf()
1572         else:
1573             d = defer.succeed(None)
1574         d.addCallback(lambda ignored:
1575             corrupt(None, self._storage, offset, range(5)))
1576         d.addCallback(lambda ignored:
1577             self.make_servermap())
1578         def _do_retrieve(servermap):
1579             ver = servermap.best_recoverable_version()
1580             self.failUnless(ver)
1581             return self._fn.download_best_version()
1582         d.addCallback(_do_retrieve)
1583         d.addCallback(lambda new_contents:
1584             self.failUnlessEqual(new_contents, self.CONTENTS))
1585         return d
1586
1587
1588     def test_corrupt_some(self):
1589         # corrupt the data of first five shares (so the servermap thinks
1590         # they're good but retrieve marks them as bad), so that the
1591         # MODE_READ set of 6 will be insufficient, forcing node.download to
1592         # retry with more servers.
1593         return self._test_corrupt_some("share_data")
1594
1595
1596     def test_download_fails(self):
1597         d = corrupt(None, self._storage, "signature")
1598         d.addCallback(lambda ignored:
1599             self.shouldFail(UnrecoverableFileError, "test_download_fails",
1600                             "no recoverable versions",
1601                             self._fn.download_best_version))
1602         return d
1603
1604
1605
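         # In the MDMF variants below, the offsets are tuples: ("field", n)
         # corrupts n bytes into that field. 12 * 32 presumably skips twelve
         # 32-byte hash entries into the block hash tree; the share_data
         # offset is, per the TODO below, just a guess at a later block.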
1606     def test_corrupt_mdmf_block_hash_tree(self):
1607         d = self.publish_mdmf()
1608         d.addCallback(lambda ignored:
1609             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1610                                    "block hash tree failure",
1611                                    corrupt_early=True,
1612                                    should_succeed=False))
1613         return d
1614
1615
1616     def test_corrupt_mdmf_block_hash_tree_late(self):
1617         # Note - there is no SDMF counterpart to this test, as the SDMF
1618         # files are guaranteed to have exactly one block, and therefore
1619         # the block hash tree fits within the initial read (#1240).
1620         d = self.publish_mdmf()
1621         d.addCallback(lambda ignored:
1622             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1623                                    "block hash tree failure",
1624                                    corrupt_early=False,
1625                                    should_succeed=False))
1626         return d
1627
1628
1629     def test_corrupt_mdmf_share_data(self):
1630         d = self.publish_mdmf()
1631         d.addCallback(lambda ignored:
1632             # TODO: Find out what the block size is and corrupt a
1633             # specific block, rather than just guessing.
1634             self._test_corrupt_all(("share_data", 12 * 40),
1635                                     "block hash tree failure",
1636                                     corrupt_early=True,
1637                                     should_succeed=False))
1638         return d
1639
1640
1641     def test_corrupt_some_mdmf(self):
1642         return self._test_corrupt_some(("share_data", 12 * 40),
1643                                        mdmf=True)
1644
1645
1646 class CheckerMixin:
1647     def check_good(self, r, where):
1648         self.failUnless(r.is_healthy(), where)
1649         return r
1650
1651     def check_bad(self, r, where):
1652         self.failIf(r.is_healthy(), where)
1653         return r
1654
1655     def check_expected_failure(self, r, expected_exception, substring, where):
1656         for (peerid, storage_index, shnum, f) in r.get_share_problems():
1657             if f.check(expected_exception):
1658                 self.failUnless(substring in str(f),
1659                                 "%s: substring '%s' not in '%s'" %
1660                                 (where, substring, str(f)))
1661                 return
1662         self.fail("%s: didn't see expected exception %s in problems %s" %
1663                   (where, expected_exception, r.get_share_problems()))
1664
1665
1666 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1667     def setUp(self):
1668         return self.publish_one()
1669
1670
1671     def test_check_good(self):
1672         d = self._fn.check(Monitor())
1673         d.addCallback(self.check_good, "test_check_good")
1674         return d
1675
1676     def test_check_mdmf_good(self):
1677         d = self.publish_mdmf()
1678         d.addCallback(lambda ignored:
1679             self._fn.check(Monitor()))
1680         d.addCallback(self.check_good, "test_check_mdmf_good")
1681         return d
1682
1683     def test_check_no_shares(self):
1684         for shares in self._storage._peers.values():
1685             shares.clear()
1686         d = self._fn.check(Monitor())
1687         d.addCallback(self.check_bad, "test_check_no_shares")
1688         return d
1689
1690     def test_check_mdmf_no_shares(self):
1691         d = self.publish_mdmf()
1692         def _then(ignored):
1693             for shares in self._storage._peers.values():
1694                 shares.clear()
1695         d.addCallback(_then)
1696         d.addCallback(lambda ignored:
1697             self._fn.check(Monitor()))
1698         d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1699         return d
1700
1701     def test_check_not_enough_shares(self):
1702         for shares in self._storage._peers.values():
1703             for shnum in shares.keys():
1704                 if shnum > 0:
1705                     del shares[shnum]
1706         d = self._fn.check(Monitor())
1707         d.addCallback(self.check_bad, "test_check_not_enough_shares")
1708         return d
1709
1710     def test_check_mdmf_not_enough_shares(self):
1711         d = self.publish_mdmf()
1712         def _then(ignored):
1713             for shares in self._storage._peers.values():
1714                 for shnum in shares.keys():
1715                     if shnum > 0:
1716                         del shares[shnum]
1717         d.addCallback(_then)
1718         d.addCallback(lambda ignored:
1719             self._fn.check(Monitor()))
1720         d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
1721         return d
1722
1723
1724     def test_check_all_bad_sig(self):
1725         d = corrupt(None, self._storage, 1) # bad sig
1726         d.addCallback(lambda ignored:
1727             self._fn.check(Monitor()))
1728         d.addCallback(self.check_bad, "test_check_all_bad_sig")
1729         return d
1730
1731     def test_check_mdmf_all_bad_sig(self):
1732         d = self.publish_mdmf()
1733         d.addCallback(lambda ignored:
1734             corrupt(None, self._storage, 1))
1735         d.addCallback(lambda ignored:
1736             self._fn.check(Monitor()))
1737         d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1738         return d
1739
1740     def test_verify_mdmf_all_bad_sharedata(self):
1741         d = self.publish_mdmf()
1742         # On 8 of the shares, corrupt the beginning of the share data.
1743         # The signature check during the servermap update won't catch this.
1744         d.addCallback(lambda ignored:
1745             corrupt(None, self._storage, "share_data", range(8)))
1746         # On 2 of the shares, corrupt the end of the share data.
1747         # The signature check during the servermap update won't catch
1748         # this either, and the retrieval process will have to process
1749         # all of the segments before it notices.
1750         d.addCallback(lambda ignored:
1751             # the block hash tree comes right after the share data, so if we
1752             # corrupt a little before the block hash tree, we'll corrupt in the
1753             # last block of each share.
1754             corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
1755         d.addCallback(lambda ignored:
1756             self._fn.check(Monitor(), verify=True))
1757         # The verifier should flag the file as unhealthy, and should
1758         # list all 10 shares as bad.
1759         d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
1760         def _check_num_bad(r):
1761             self.failIf(r.is_recoverable())
1762             smap = r.get_servermap()
1763             self.failUnlessEqual(len(smap.get_bad_shares()), 10)
1764         d.addCallback(_check_num_bad)
1765         return d
1766
1767     def test_check_all_bad_blocks(self):
1768         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1769         # the Checker won't notice this.. it doesn't look at actual data
1770         d.addCallback(lambda ignored:
1771             self._fn.check(Monitor()))
1772         d.addCallback(self.check_good, "test_check_all_bad_blocks")
1773         return d
1774
1775
1776     def test_check_mdmf_all_bad_blocks(self):
1777         d = self.publish_mdmf()
1778         d.addCallback(lambda ignored:
1779             corrupt(None, self._storage, "share_data"))
1780         d.addCallback(lambda ignored:
1781             self._fn.check(Monitor()))
1782         d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1783         return d
1784
1785     def test_verify_good(self):
1786         d = self._fn.check(Monitor(), verify=True)
1787         d.addCallback(self.check_good, "test_verify_good")
1788         return d
1789
1790     def test_verify_all_bad_sig(self):
1791         d = corrupt(None, self._storage, 1) # bad sig
1792         d.addCallback(lambda ignored:
1793             self._fn.check(Monitor(), verify=True))
1794         d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1795         return d
1796
1797     def test_verify_one_bad_sig(self):
1798         d = corrupt(None, self._storage, 1, [9]) # bad sig
1799         d.addCallback(lambda ignored:
1800             self._fn.check(Monitor(), verify=True))
1801         d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1802         return d
1803
1804     def test_verify_one_bad_block(self):
1805         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1806         # the Verifier *will* notice this, since it examines every byte
1807         d.addCallback(lambda ignored:
1808             self._fn.check(Monitor(), verify=True))
1809         d.addCallback(self.check_bad, "test_verify_one_bad_block")
1810         d.addCallback(self.check_expected_failure,
1811                       CorruptShareError, "block hash tree failure",
1812                       "test_verify_one_bad_block")
1813         return d
1814
1815     def test_verify_one_bad_sharehash(self):
1816         d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1817         d.addCallback(lambda ignored:
1818             self._fn.check(Monitor(), verify=True))
1819         d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1820         d.addCallback(self.check_expected_failure,
1821                       CorruptShareError, "corrupt hashes",
1822                       "test_verify_one_bad_sharehash")
1823         return d
1824
1825     def test_verify_one_bad_encprivkey(self):
1826         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1827         d.addCallback(lambda ignored:
1828             self._fn.check(Monitor(), verify=True))
1829         d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1830         d.addCallback(self.check_expected_failure,
1831                       CorruptShareError, "invalid privkey",
1832                       "test_verify_one_bad_encprivkey")
1833         return d
1834
1835     def test_verify_one_bad_encprivkey_uncheckable(self):
1836         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1837         readonly_fn = self._fn.get_readonly()
1838         # a read-only node has no way to validate the privkey
1839         d.addCallback(lambda ignored:
1840             readonly_fn.check(Monitor(), verify=True))
1841         d.addCallback(self.check_good,
1842                       "test_verify_one_bad_encprivkey_uncheckable")
1843         return d
1844
1845
1846     def test_verify_mdmf_good(self):
1847         d = self.publish_mdmf()
1848         d.addCallback(lambda ignored:
1849             self._fn.check(Monitor(), verify=True))
1850         d.addCallback(self.check_good, "test_verify_mdmf_good")
1851         return d
1852
1853
1854     def test_verify_mdmf_one_bad_block(self):
1855         d = self.publish_mdmf()
1856         d.addCallback(lambda ignored:
1857             corrupt(None, self._storage, "share_data", [1]))
1858         d.addCallback(lambda ignored:
1859             self._fn.check(Monitor(), verify=True))
1860         # We should find one bad block here
1861         d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1862         d.addCallback(self.check_expected_failure,
1863                       CorruptShareError, "block hash tree failure",
1864                       "test_verify_mdmf_one_bad_block")
1865         return d
1866
1867
1868     def test_verify_mdmf_bad_encprivkey(self):
1869         d = self.publish_mdmf()
1870         d.addCallback(lambda ignored:
1871             corrupt(None, self._storage, "enc_privkey", [0]))
1872         d.addCallback(lambda ignored:
1873             self._fn.check(Monitor(), verify=True))
1874         d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1875         d.addCallback(self.check_expected_failure,
1876                       CorruptShareError, "privkey",
1877                       "test_verify_mdmf_bad_encprivkey")
1878         return d
1879
1880
1881     def test_verify_mdmf_bad_sig(self):
1882         d = self.publish_mdmf()
1883         d.addCallback(lambda ignored:
1884             corrupt(None, self._storage, 1, [1]))
1885         d.addCallback(lambda ignored:
1886             self._fn.check(Monitor(), verify=True))
1887         d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
1888         return d
1889
1890
1891     def test_verify_mdmf_bad_encprivkey_uncheckable(self):
1892         d = self.publish_mdmf()
1893         d.addCallback(lambda ignored:
1894             corrupt(None, self._storage, "enc_privkey", [1]))
1895         d.addCallback(lambda ignored:
1896             self._fn.get_readonly())
1897         d.addCallback(lambda fn:
1898             fn.check(Monitor(), verify=True))
1899         d.addCallback(self.check_good,
1900                       "test_verify_mdmf_bad_encprivkey_uncheckable")
1901         return d
1902
1903
1904 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1905
1906     def get_shares(self, s):
1907         all_shares = {} # maps (peerid, shnum) to share data
1908         for peerid in s._peers:
1909             shares = s._peers[peerid]
1910             for shnum in shares:
1911                 data = shares[shnum]
1912                 all_shares[ (peerid, shnum) ] = data
1913         return all_shares
1914
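         # snapshot the current share set onto self.old_shares, so later
         # steps can compare pre- and post-repair shares (failIfSharesChanged)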
1915     def copy_shares(self, ignored=None):
1916         self.old_shares.append(self.get_shares(self._storage))
1917
1918     def test_repair_nop(self):
1919         self.old_shares = []
1920         d = self.publish_one()
1921         d.addCallback(self.copy_shares)
1922         d.addCallback(lambda res: self._fn.check(Monitor()))
1923         d.addCallback(lambda check_results: self._fn.repair(check_results))
1924         def _check_results(rres):
1925             self.failUnless(IRepairResults.providedBy(rres))
1926             self.failUnless(rres.get_successful())
1927             # TODO: examine results
1928
1929             self.copy_shares()
1930
1931             initial_shares = self.old_shares[0]
1932             new_shares = self.old_shares[1]
1933             # TODO: this really shouldn't change anything. When we implement
1934             # a "minimal-bandwidth" repairer, change this test to assert:
1935             #self.failUnlessEqual(new_shares, initial_shares)
1936
1937             # all shares should be in the same place as before
1938             self.failUnlessEqual(set(initial_shares.keys()),
1939                                  set(new_shares.keys()))
1940             # but they should all be at a newer seqnum. The IV will be
1941             # different, so the roothash will be too.
1942             for key in initial_shares:
1943                 (version0,
1944                  seqnum0,
1945                  root_hash0,
1946                  IV0,
1947                  k0, N0, segsize0, datalen0,
1948                  o0) = unpack_header(initial_shares[key])
1949                 (version1,
1950                  seqnum1,
1951                  root_hash1,
1952                  IV1,
1953                  k1, N1, segsize1, datalen1,
1954                  o1) = unpack_header(new_shares[key])
1955                 self.failUnlessEqual(version0, version1)
1956                 self.failUnlessEqual(seqnum0+1, seqnum1)
1957                 self.failUnlessEqual(k0, k1)
1958                 self.failUnlessEqual(N0, N1)
1959                 self.failUnlessEqual(segsize0, segsize1)
1960                 self.failUnlessEqual(datalen0, datalen1)
1961         d.addCallback(_check_results)
1962         return d
1963
1964     def failIfSharesChanged(self, ignored=None):
1965         old_shares = self.old_shares[-2]
1966         current_shares = self.old_shares[-1]
1967         self.failUnlessEqual(old_shares, current_shares)
1968
1969
1970     def _test_whether_repairable(self, publisher, nshares, expected_result):
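             # keep only shnums 0..nshares-1, then check and repair. With the
             # 3-of-10 encoding these tests appear to use, fewer than k=3
             # surviving share numbers should be unrecoverable, hence
             # unrepairable.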
1971         d = publisher()
1972         def _delete_some_shares(ign):
1973             shares = self._storage._peers
1974             for peerid in shares:
1975                 for shnum in list(shares[peerid]):
1976                     if shnum >= nshares:
1977                         del shares[peerid][shnum]
1978         d.addCallback(_delete_some_shares)
1979         d.addCallback(lambda ign: self._fn.check(Monitor()))
1980         def _check(cr):
1981             self.failIf(cr.is_healthy())
1982             self.failUnlessEqual(cr.is_recoverable(), expected_result)
1983             return cr
1984         d.addCallback(_check)
1985         d.addCallback(lambda check_results: self._fn.repair(check_results))
1986         d.addCallback(lambda crr: self.failUnlessEqual(crr.get_successful(), expected_result))
1987         return d
1988
1989     def test_unrepairable_0shares(self):
1990         return self._test_whether_repairable(self.publish_one, 0, False)
1991
1992     def test_mdmf_unrepairable_0shares(self):
1993         return self._test_whether_repairable(self.publish_mdmf, 0, False)
1994
1995     def test_unrepairable_1share(self):
1996         return self._test_whether_repairable(self.publish_one, 1, False)
1997
1998     def test_mdmf_unrepairable_1share(self):
1999         return self._test_whether_repairable(self.publish_mdmf, 1, False)
2000
2001     def test_repairable_5shares(self):
2002         return self._test_whether_repairable(self.publish_one, 5, True)
2003
2004     def test_mdmf_repairable_5shares(self):
2005         return self._test_whether_repairable(self.publish_mdmf, 5, True)
2006
2007     def _test_whether_checkandrepairable(self, publisher, nshares, expected_result):
2008         """
2009         Like the _test_whether_repairable tests, but invoking check_and_repair
2010         instead of invoking check and then invoking repair.
2011         """
2012         d = publisher()
2013         def _delete_some_shares(ign):
2014             shares = self._storage._peers
2015             for peerid in shares:
2016                 for shnum in list(shares[peerid]):
2017                     if shnum >= nshares:
2018                         del shares[peerid][shnum]
2019         d.addCallback(_delete_some_shares)
2020         d.addCallback(lambda ign: self._fn.check_and_repair(Monitor()))
2021         d.addCallback(lambda crr: self.failUnlessEqual(crr.get_repair_successful(), expected_result))
2022         return d
2023
2024     def test_unrepairable_0shares_checkandrepair(self):
2025         return self._test_whether_checkandrepairable(self.publish_one, 0, False)
2026
2027     def test_mdmf_unrepairable_0shares_checkandrepair(self):
2028         return self._test_whether_checkandrepairable(self.publish_mdmf, 0, False)
2029
2030     def test_unrepairable_1share_checkandrepair(self):
2031         return self._test_whether_checkandrepairable(self.publish_one, 1, False)
2032
2033     def test_mdmf_unrepairable_1share_checkandrepair(self):
2034         return self._test_whether_checkandrepairable(self.publish_mdmf, 1, False)
2035
2036     def test_repairable_5shares_checkandrepair(self):
2037         return self._test_whether_checkandrepairable(self.publish_one, 5, True)
2038
2039     def test_mdmf_repairable_5shares_checkandrepair(self):
2040         return self._test_whether_checkandrepairable(self.publish_mdmf, 5, True)
2041
2042
2043     def test_merge(self):
2044         self.old_shares = []
2045         d = self.publish_multiple()
2046         # repair will refuse to merge multiple highest seqnums unless you
2047         # pass force=True
2048         d.addCallback(lambda res:
2049                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2050                                           1:4,3:4,5:4,7:4,9:4}))
2051         d.addCallback(self.copy_shares)
2052         d.addCallback(lambda res: self._fn.check(Monitor()))
2053         def _try_repair(check_results):
2054             ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2055             d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2056                                  self._fn.repair, check_results)
2057             d2.addCallback(self.copy_shares)
2058             d2.addCallback(self.failIfSharesChanged)
2059             d2.addCallback(lambda res: check_results)
2060             return d2
2061         d.addCallback(_try_repair)
2062         d.addCallback(lambda check_results:
2063                       self._fn.repair(check_results, force=True))
2064         # this should give us 10 shares of the highest roothash
2065         def _check_repair_results(rres):
2066             self.failUnless(rres.get_successful())
2067             # TODO
2068         d.addCallback(_check_repair_results)
2069         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2070         def _check_smap(smap):
2071             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2072             self.failIf(smap.unrecoverable_versions())
2073             # now, which should have won?
2074             roothash_s4a = self.get_roothash_for(3)
2075             roothash_s4b = self.get_roothash_for(4)
2076             if roothash_s4b > roothash_s4a:
2077                 expected_contents = self.CONTENTS[4]
2078             else:
2079                 expected_contents = self.CONTENTS[3]
2080             new_versionid = smap.best_recoverable_version()
2081             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2082             d2 = self._fn.download_version(smap, new_versionid)
2083             d2.addCallback(self.failUnlessEqual, expected_contents)
2084             return d2
2085         d.addCallback(_check_smap)
2086         return d
2087
2088     def test_non_merge(self):
2089         self.old_shares = []
2090         d = self.publish_multiple()
2091         # repair should not refuse a repair that doesn't need to merge. In
2092         # this case, we combine v2 with v3. The repair should ignore v2 and
2093         # copy v3 into a new v5.
2094         d.addCallback(lambda res:
2095                       self._set_versions({0:2,2:2,4:2,6:2,8:2,
2096                                           1:3,3:3,5:3,7:3,9:3}))
2097         d.addCallback(lambda res: self._fn.check(Monitor()))
2098         d.addCallback(lambda check_results: self._fn.repair(check_results))
2099         # this should give us 10 shares of v3
2100         def _check_repair_results(rres):
2101             self.failUnless(rres.get_successful())
2102             # TODO
2103         d.addCallback(_check_repair_results)
2104         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2105         def _check_smap(smap):
2106             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2107             self.failIf(smap.unrecoverable_versions())
2108             # now, which should have won?
2109             expected_contents = self.CONTENTS[3]
2110             new_versionid = smap.best_recoverable_version()
2111             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2112             d2 = self._fn.download_version(smap, new_versionid)
2113             d2.addCallback(self.failUnlessEqual, expected_contents)
2114             return d2
2115         d.addCallback(_check_smap)
2116         return d
2117
2118     def get_roothash_for(self, index):
2119         # return the roothash for the first share we see in the saved set
2120         shares = self._copied_shares[index]
2121         for peerid in shares:
2122             for shnum in shares[peerid]:
2123                 share = shares[peerid][shnum]
2124                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2125                           unpack_header(share)
2126                 return root_hash
2127
2128     def test_check_and_repair_readcap(self):
2129         # we can't currently repair from a mutable readcap: #625
2130         self.old_shares = []
2131         d = self.publish_one()
2132         d.addCallback(self.copy_shares)
2133         def _get_readcap(res):
2134             self._fn3 = self._fn.get_readonly()
2135             # also delete some shares
2136             for peerid,shares in self._storage._peers.items():
2137                 shares.pop(0, None)
2138         d.addCallback(_get_readcap)
2139         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2140         def _check_results(crr):
2141             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2142             # we should detect that the file is unhealthy, but skip over mutable-readcap
2143             # repairs until #625 is fixed
2144             self.failIf(crr.get_pre_repair_results().is_healthy())
2145             self.failIf(crr.get_repair_attempted())
2146             self.failIf(crr.get_post_repair_results().is_healthy())
2147         d.addCallback(_check_results)
2148         return d
2149
2150     def test_repair_empty(self):
2151         # bug 1689: delete one share of an empty mutable file, then repair.
2152         # In the buggy version, the check that precedes the retrieve+publish
2153         # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
2154         # privkey that repair needs.
2155         d = self.publish_empty_sdmf()
2156         def _delete_one_share(ign):
2157             shares = self._storage._peers
2158             for peerid in shares:
2159                 for shnum in list(shares[peerid]):
2160                     if shnum == 0:
2161                         del shares[peerid][shnum]
2162         d.addCallback(_delete_one_share)
2163         d.addCallback(lambda ign: self._fn2.check(Monitor()))
2164         d.addCallback(lambda check_results: self._fn2.repair(check_results))
2165         def _check(crr):
2166             self.failUnlessEqual(crr.get_successful(), True)
2167         d.addCallback(_check)
2168         return d
2169
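     # a dict that silently discards every write: installing it as the
     # nodemaker's node cache (below) makes create_from_cap() return a fresh
     # node every time instead of a cached one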
2170 class DevNullDictionary(dict):
2171     def __setitem__(self, key, value):
2172         return
2173
2174 class MultipleEncodings(unittest.TestCase):
2175     def setUp(self):
2176         self.CONTENTS = "New contents go here"
2177         self.uploadable = MutableData(self.CONTENTS)
2178         self._storage = FakeStorage()
2179         self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2180         self._storage_broker = self._nodemaker.storage_broker
2181         d = self._nodemaker.create_mutable_file(self.uploadable)
2182         def _created(node):
2183             self._fn = node
2184         d.addCallback(_created)
2185         return d
2186
2187     def _encode(self, k, n, data, version=SDMF_VERSION):
2188         # encode 'data' into a peerid->shares dict.
2189
2190         fn = self._fn
2191         # disable the nodecache, since for these tests we explicitly need
2192         # multiple nodes pointing at the same file
2193         self._nodemaker._node_cache = DevNullDictionary()
2194         fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2195         # then we copy over other fields that are normally fetched from the
2196         # existing shares
2197         fn2._pubkey = fn._pubkey
2198         fn2._privkey = fn._privkey
2199         fn2._encprivkey = fn._encprivkey
2200         # and set the encoding parameters to something completely different
2201         fn2._required_shares = k
2202         fn2._total_shares = n
2203
2204         s = self._storage
2205         s._peers = {} # clear existing storage
2206         p2 = Publish(fn2, self._storage_broker, None)
2207         uploadable = MutableData(data)
2208         d = p2.publish(uploadable)
2209         def _published(res):
2210             shares = s._peers
2211             s._peers = {}
2212             return shares
2213         d.addCallback(_published)
2214         return d
2215
2216     def make_servermap(self, mode=MODE_READ, oldmap=None):
2217         if oldmap is None:
2218             oldmap = ServerMap()
2219         smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
2220                                oldmap, mode)
2221         d = smu.update()
2222         return d
2223
2224     def test_multiple_encodings(self):
2225         # we encode the same file in three ways (3-of-10, 4-of-9, and
2226         # 4-of-7), then mix up the shares, to make sure that download
2227         # survives seeing a variety of encodings. This is tricky to set up.
2228
2229         contents1 = "Contents for encoding 1 (3-of-10) go here"*1000
2230         contents2 = "Contents for encoding 2 (4-of-9) go here"*1000
2231         contents3 = "Contents for encoding 3 (4-of-7) go here"*1000
2232
2233         # we make a retrieval object that doesn't know what encoding
2234         # parameters to use
2235         fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2236
2237         # now we upload a file through fn1, and grab its shares
2238         d = self._encode(3, 10, contents1)
2239         def _encoded_1(shares):
2240             self._shares1 = shares
2241         d.addCallback(_encoded_1)
2242         d.addCallback(lambda res: self._encode(4, 9, contents2))
2243         def _encoded_2(shares):
2244             self._shares2 = shares
2245         d.addCallback(_encoded_2)
2246         d.addCallback(lambda res: self._encode(4, 7, contents3))
2247         def _encoded_3(shares):
2248             self._shares3 = shares
2249         d.addCallback(_encoded_3)
2250
2251         def _merge(res):
2252             log.msg("merging sharelists")
2253             # we merge the shares from the three sets, leaving each shnum
2254             # in its original location, but using a share from set1, set2,
2255             # or set3 according to the following sequence:
2256             #
2257             #  4-of-9  a  s2
2258             #  4-of-9  b  s2
2259             #  4-of-7  c   s3
2260             #  4-of-9  d  s2
2261             #  3-of-10 e s1
2262             #  3-of-10 f s1
2263             #  3-of-10 g s1
2264             #  4-of-9  h  s2
2265             #
2266             # so that neither form can be recovered until fetch [f], at which
2267             # point version-s1 (the 3-of-10 form) should be recoverable. If
2268             # the implementation latches on to the first version it sees,
2269             # then s2 will be recoverable at fetch [g].
2270
2271             # Later, when we implement code that handles multiple versions,
2272             # we can use this framework to assert that all recoverable
2273             # versions are retrieved, and test that 'epsilon' does its job
2274
2275             places = [2, 2, 3, 2, 1, 1, 1, 2]
2276
2277             sharemap = {}
2278             sb = self._storage_broker
2279
2280             for peerid in sorted(sb.get_all_serverids()):
2281                 for shnum in self._shares1.get(peerid, {}):
2282                     if shnum < len(places):
2283                         which = places[shnum]
2284                     else:
2285                         which = "x"
2286                     self._storage._peers[peerid] = peers = {}
2287                     in_1 = shnum in self._shares1[peerid]
2288                     in_2 = shnum in self._shares2.get(peerid, {})
2289                     in_3 = shnum in self._shares3.get(peerid, {})
2290                     if which == 1:
2291                         if in_1:
2292                             peers[shnum] = self._shares1[peerid][shnum]
2293                             sharemap[shnum] = peerid
2294                     elif which == 2:
2295                         if in_2:
2296                             peers[shnum] = self._shares2[peerid][shnum]
2297                             sharemap[shnum] = peerid
2298                     elif which == 3:
2299                         if in_3:
2300                             peers[shnum] = self._shares3[peerid][shnum]
2301                             sharemap[shnum] = peerid
2302
2303             # we don't bother placing any other shares
2304             # now sort the sequence so that share 0 is returned first
2305             new_sequence = [sharemap[shnum]
2306                             for shnum in sorted(sharemap.keys())]
2307             self._storage._sequence = new_sequence
2308             log.msg("merge done")
2309         d.addCallback(_merge)
2310         d.addCallback(lambda res: fn3.download_best_version())
2311         def _retrieved(new_contents):
2312             # the current specified behavior is "first version recoverable"
2313             self.failUnlessEqual(new_contents, contents1)
2314         d.addCallback(_retrieved)
2315         return d
2316
2317
2318 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
2319
2320     def setUp(self):
2321         return self.publish_multiple()
2322
2323     def test_multiple_versions(self):
2324         # if we see a mix of versions in the grid, download_best_version
2325         # should get the latest one
2326         self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2327         d = self._fn.download_best_version()
2328         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2329         # and the checker should report problems
2330         d.addCallback(lambda res: self._fn.check(Monitor()))
2331         d.addCallback(self.check_bad, "test_multiple_versions")
2332
2333         # but if everything is at version 2, that's what we should download
2334         d.addCallback(lambda res:
2335                       self._set_versions(dict([(i,2) for i in range(10)])))
2336         d.addCallback(lambda res: self._fn.download_best_version())
2337         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2338         # if exactly one share is at version 3, we should still get v2
2339         d.addCallback(lambda res:
2340                       self._set_versions({0:3}))
2341         d.addCallback(lambda res: self._fn.download_best_version())
2342         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2343         # but the servermap should see the unrecoverable version. This
2344         # depends upon the single newer share being queried early.
2345         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2346         def _check_smap(smap):
2347             self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2348             newer = smap.unrecoverable_newer_versions()
2349             self.failUnlessEqual(len(newer), 1)
2350             verinfo, health = newer.items()[0]
2351             self.failUnlessEqual(verinfo[0], 4)
2352             self.failUnlessEqual(health, (1,3))
2353             self.failIf(smap.needs_merge())
2354         d.addCallback(_check_smap)
2355         # if we have a mix of two parallel versions (s4a and s4b), we could
2356         # recover either
2357         d.addCallback(lambda res:
2358                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2359                                           1:4,3:4,5:4,7:4,9:4}))
2360         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2361         def _check_smap_mixed(smap):
2362             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2363             newer = smap.unrecoverable_newer_versions()
2364             self.failUnlessEqual(len(newer), 0)
2365             self.failUnless(smap.needs_merge())
2366         d.addCallback(_check_smap_mixed)
2367         d.addCallback(lambda res: self._fn.download_best_version())
2368         d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2369                                                   res == self.CONTENTS[4]))
2370         return d
2371
2372     def test_replace(self):
2373         # if we see a mix of versions in the grid, we should be able to
2374         # replace them all with a newer version
2375
2376         # if exactly one share is at version 3, we should download (and
2377         # replace) v2, and the result should be v4. Note that the index we
2378         # give to _set_versions is different than the sequence number.
2379         target = dict([(i,2) for i in range(10)]) # seqnum3
2380         target[0] = 3 # seqnum4
2381         self._set_versions(target)
2382
2383         def _modify(oldversion, servermap, first_time):
2384             return oldversion + " modified"
2385         d = self._fn.modify(_modify)
2386         d.addCallback(lambda res: self._fn.download_best_version())
2387         expected = self.CONTENTS[2] + " modified"
2388         d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2389         # and the servermap should indicate that the outlier was replaced too
2390         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2391         def _check_smap(smap):
2392             self.failUnlessEqual(smap.highest_seqnum(), 5)
2393             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2394             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2395         d.addCallback(_check_smap)
2396         return d
2397
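# MutableFileNode.modify() calls the supplied function as
# modifier(old_contents, servermap, first_time), and may call it again if
# an uncoordinated write forces a retry, so modifiers should be
# idempotent. A minimal sketch of a retry-safe modifier (illustrative
# only; test_replace's _modify above shows the real usage):

def _append_once(old_contents, servermap, first_time):
    marker = " modified"
    if old_contents.endswith(marker):
        # a retried invocation: the marker is already present
        return old_contents
    return old_contents + marker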
2398
2399 class Exceptions(unittest.TestCase):
2400     def test_repr(self):
2401         nmde = NeedMoreDataError(100, 50, 100)
2402         self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde))
2403         ucwe = UncoordinatedWriteError()
2404         self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
2405
2406
2407 class SameKeyGenerator:
2408     def __init__(self, pubkey, privkey):
2409         self.pubkey = pubkey
2410         self.privkey = privkey
2411     def generate(self, keysize=None):
2412         return defer.succeed( (self.pubkey, self.privkey) )
2413
2414 class FirstServerGetsKilled:
2415     done = False
2416     def notify(self, retval, wrapper, methname):
2417         if not self.done:
2418             wrapper.broken = True
2419             self.done = True
2420         return retval
2421
2422 class FirstServerGetsDeleted:
2423     def __init__(self):
2424         self.done = False
2425         self.silenced = None
2426     def notify(self, retval, wrapper, methname):
2427         if not self.done:
2428             # this query will work, but later queries should think the share
2429             # has been deleted
2430             self.done = True
2431             self.silenced = wrapper
2432             return retval
2433         if wrapper == self.silenced:
2434             assert methname == "slot_testv_and_readv_and_writev"
2435             return (True, {})
2436         return retval
2437
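# Both helpers above plug into the no_network server wrappers (the objects
# returned by get_rref()) via the post_call_notifier hook: after each
# remote call completes, the wrapper invokes notify(retval, wrapper,
# methname) and forwards whatever notify returns to the original caller.
# As a minimal sketch of the same hook (illustrative only; the tests below
# use the killer/deleter variants above), a notifier that merely counts
# calls per method would look like:

class CallCountingNotifier:
    def __init__(self):
        self.counts = {}
    def notify(self, retval, wrapper, methname):
        self.counts[methname] = self.counts.get(methname, 0) + 1
        return retval # always pass the result through unchanged
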
2438 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2439     def do_publish_surprise(self, version):
2440         self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
2441         self.set_up_grid()
2442         nm = self.g.clients[0].nodemaker
2443         d = nm.create_mutable_file(MutableData("contents 1"),
2444                                     version=version)
2445         def _created(n):
2446             d = defer.succeed(None)
2447             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2448             def _got_smap1(smap):
2449                 # stash the old state of the file
2450                 self.old_map = smap
2451             d.addCallback(_got_smap1)
2452             # then modify the file, leaving the old map untouched
2453             d.addCallback(lambda res: log.msg("starting winning write"))
2454             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2455             # now attempt to modify the file with the old servermap. This
2456             # will look just like an uncoordinated write, in which every
2457             # single share got updated between our mapupdate and our publish
2458             d.addCallback(lambda res: log.msg("starting doomed write"))
2459             d.addCallback(lambda res:
2460                           self.shouldFail(UncoordinatedWriteError,
2461                                           "test_publish_surprise", None,
2462                                           n.upload,
2463                                           MutableData("contents 2a"), self.old_map))
2464             return d
2465         d.addCallback(_created)
2466         return d
2467
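    # (for reference: shouldFail(expected_failure, which, substring,
    # callable, *args): 'which' labels the assertion in failure reports,
    # and 'substring', when not None, must appear in the failure message)
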
2468     def test_publish_surprise_sdmf(self):
2469         return self.do_publish_surprise(SDMF_VERSION)
2470
2471     def test_publish_surprise_mdmf(self):
2472         return self.do_publish_surprise(MDMF_VERSION)
2473
2474     def test_retrieve_surprise(self):
2475         self.basedir = "mutable/Problems/test_retrieve_surprise"
2476         self.set_up_grid()
2477         nm = self.g.clients[0].nodemaker
2478         d = nm.create_mutable_file(MutableData("contents 1"*4000))
2479         def _created(n):
2480             d = defer.succeed(None)
2481             d.addCallback(lambda res: n.get_servermap(MODE_READ))
2482             def _got_smap1(smap):
2483                 # stash the old state of the file
2484                 self.old_map = smap
2485             d.addCallback(_got_smap1)
2486             # then modify the file, leaving the old map untouched
2487             d.addCallback(lambda res: log.msg("starting winning write"))
2488             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2489             # now attempt to retrieve the old version with the old servermap.
2490             # This will look like someone has changed the file since we
2491             # updated the servermap.
2492             d.addCallback(lambda res: log.msg("starting doomed read"))
2493             d.addCallback(lambda res:
2494                           self.shouldFail(NotEnoughSharesError,
2495                                           "test_retrieve_surprise",
2496                                           "ran out of servers: have 0 of 1",
2497                                           n.download_version,
2498                                           self.old_map,
2499                                           self.old_map.best_recoverable_version(),
2500                                           ))
2501             return d
2502         d.addCallback(_created)
2503         return d
2504
2505
2506     def test_unexpected_shares(self):
2507         # upload the file, take a servermap, shut down one of the servers,
2508         # upload it again (causing shares to appear on a new server), then
2509         # upload using the old servermap. The last upload should fail with an
2510         # UncoordinatedWriteError, because of the shares that didn't appear
2511         # in the servermap.
2512         self.basedir = "mutable/Problems/test_unexpected_shares"
2513         self.set_up_grid()
2514         nm = self.g.clients[0].nodemaker
2515         d = nm.create_mutable_file(MutableData("contents 1"))
2516         def _created(n):
2517             d = defer.succeed(None)
2518             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2519             def _got_smap1(smap):
2520                 # stash the old state of the file
2521                 self.old_map = smap
2522                 # now shut down one of the servers
2523                 peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
2524                 self.g.remove_server(peer0)
2525                 # then modify the file, leaving the old map untouched
2526                 log.msg("starting winning write")
2527                 return n.overwrite(MutableData("contents 2"))
2528             d.addCallback(_got_smap1)
2529             # now attempt to modify the file with the old servermap. This
2530             # will look just like an uncoordinated write, in which every
2531             # single share got updated between our mapupdate and our publish
2532             d.addCallback(lambda res: log.msg("starting doomed write"))
2533             d.addCallback(lambda res:
2534                           self.shouldFail(UncoordinatedWriteError,
2535                                           "test_surprise", None,
2536                                           n.upload,
2537                                           MutableData("contents 2a"), self.old_map))
2538             return d
2539         d.addCallback(_created)
2540         return d
2541
2542     def test_multiply_placed_shares(self):
2543         self.basedir = "mutable/Problems/test_multiply_placed_shares"
2544         self.set_up_grid()
2545         nm = self.g.clients[0].nodemaker
2546         d = nm.create_mutable_file(MutableData("contents 1"))
2547         # remove one of the servers, swap in a new one, and reupload the file.
2548         def _created(n):
2549             self._node = n
2550
2551             servers = self.g.get_all_serverids()
2552             self.ss = self.g.remove_server(servers[len(servers)-1])
2553
2554             new_server = self.g.make_server(len(servers)-1)
2555             self.g.add_server(len(servers)-1, new_server)
2556
2557             return self._node.download_best_version()
2558         d.addCallback(_created)
2559         d.addCallback(lambda data: MutableData(data))
2560         d.addCallback(lambda data: self._node.overwrite(data))
2561
2562         # restore the server we removed earlier, then download+upload
2563         # the file again
2564         def _overwritten(ign):
2565             self.g.add_server(len(self.g.servers_by_number), self.ss)
2566             return self._node.download_best_version()
2567         d.addCallback(_overwritten)
2568         d.addCallback(lambda data: MutableData(data))
2569         d.addCallback(lambda data: self._node.overwrite(data))
2570         d.addCallback(lambda ignored:
2571             self._node.get_servermap(MODE_CHECK))
2572         def _overwritten_again(smap):
2573             # Make sure that all shares were updated by making sure that
2574             # there aren't any other versions in the sharemap.
2575             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2576             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2577         d.addCallback(_overwritten_again)
2578         return d
2579
2580     def test_bad_server(self):
2581         # Break one server, then create the file: the initial publish should
2582         # complete with an alternate server. Breaking a second server should
2583         # not prevent an update from succeeding either.
2584         self.basedir = "mutable/Problems/test_bad_server"
2585         self.set_up_grid()
2586         nm = self.g.clients[0].nodemaker
2587
2588         # to make sure that one of the initial peers is broken, we have to
2589         # get creative. We create an RSA key and compute its storage-index.
2590         # Then we make a KeyGenerator that always returns that one key, and
2591         # use it to create the mutable file. This will get easier when we can
2592         # use #467 static-server-selection to disable permutation and force
2593         # the choice of server for share[0].
2594
2595         d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2596         def _got_key( (pubkey, privkey) ):
2597             nm.key_generator = SameKeyGenerator(pubkey, privkey)
2598             pubkey_s = pubkey.serialize()
2599             privkey_s = privkey.serialize()
2600             u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2601                                         ssk_pubkey_fingerprint_hash(pubkey_s))
2602             self._storage_index = u.get_storage_index()
2603         d.addCallback(_got_key)
2604         def _break_peer0(res):
2605             si = self._storage_index
2606             servers = nm.storage_broker.get_servers_for_psi(si)
2607             self.g.break_server(servers[0].get_serverid())
2608             self.server1 = servers[1]
2609         d.addCallback(_break_peer0)
2610         # now "create" the file, using the pre-established key, and let the
2611         # initial publish finally happen
2612         d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2613         # that ought to work
2614         def _got_node(n):
2615             d = n.download_best_version()
2616             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2617             # now break the second peer
2618             def _break_peer1(res):
2619                 self.g.break_server(self.server1.get_serverid())
2620             d.addCallback(_break_peer1)
2621             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2622             # that ought to work too
2623             d.addCallback(lambda res: n.download_best_version())
2624             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2625             def _explain_error(f):
2626                 print f
2627                 if f.check(NotEnoughServersError):
2628                     print "first_error:", f.value.first_error
2629                 return f
2630             d.addErrback(_explain_error)
2631             return d
2632         d.addCallback(_got_node)
2633         return d
2634
2635     def test_bad_server_overlap(self):
2636         # like test_bad_server, but with no extra unused servers to fall back
2637         # upon. This means that we must re-use a server which we've already
2638         # used. If we don't remember that we already sent that server one
2639         # share, we'll mistakenly think we're experiencing an
2640         # UncoordinatedWriteError.
2641
2642         # Break one server, then create the file: the initial publish should
2643         # complete with an alternate server. Breaking a second server should
2644         # not prevent an update from succeeding either.
2645         self.basedir = "mutable/Problems/test_bad_server_overlap"
2646         self.set_up_grid()
2647         nm = self.g.clients[0].nodemaker
2648         sb = nm.storage_broker
2649
2650         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2651         self.g.break_server(peerids[0])
2652
2653         d = nm.create_mutable_file(MutableData("contents 1"))
2654         def _created(n):
2655             d = n.download_best_version()
2656             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2657             # now break one of the remaining servers
2658             def _break_second_server(res):
2659                 self.g.break_server(peerids[1])
2660             d.addCallback(_break_second_server)
2661             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2662             # that ought to work too
2663             d.addCallback(lambda res: n.download_best_version())
2664             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2665             return d
2666         d.addCallback(_created)
2667         return d
2668
2669     def test_publish_all_servers_bad(self):
2670         # Break all servers: the publish should fail
2671         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2672         self.set_up_grid()
2673         nm = self.g.clients[0].nodemaker
2674         for s in nm.storage_broker.get_connected_servers():
2675             s.get_rref().broken = True
2676
2677         d = self.shouldFail(NotEnoughServersError,
2678                             "test_publish_all_servers_bad",
2679                             "ran out of good servers",
2680                             nm.create_mutable_file, MutableData("contents"))
2681         return d
2682
2683     def test_publish_no_servers(self):
2684         # no servers at all: the publish should fail
2685         self.basedir = "mutable/Problems/test_publish_no_servers"
2686         self.set_up_grid(num_servers=0)
2687         nm = self.g.clients[0].nodemaker
2688
2689         d = self.shouldFail(NotEnoughServersError,
2690                             "test_publish_no_servers",
2691                             "Ran out of non-bad servers",
2692                             nm.create_mutable_file, MutableData("contents"))
2693         return d
2694
2695
2696     def test_privkey_query_error(self):
2697         # when a servermap is updated with MODE_WRITE, it tries to get the
2698         # privkey. Something might go wrong during this query attempt.
2699         # Exercise the code in _privkey_query_failed which tries to handle
2700         # such an error.
2701         self.basedir = "mutable/Problems/test_privkey_query_error"
2702         self.set_up_grid(num_servers=20)
2703         nm = self.g.clients[0].nodemaker
2704         nm._node_cache = DevNullDictionary() # disable the nodecache
2705
2706         # we need some contents that are large enough to push the privkey out
2707         # of the early part of the file
2708         LARGE = "These are Larger contents" * 2000 # about 50KB
2709         LARGE_uploadable = MutableData(LARGE)
2710         d = nm.create_mutable_file(LARGE_uploadable)
2711         def _created(n):
2712             self.uri = n.get_uri()
2713             self.n2 = nm.create_from_cap(self.uri)
2714
2715             # When a mapupdate is performed on a node that doesn't yet know
2716             # the privkey, a short read is sent to a batch of servers, to get
2717             # the verinfo and (hopefully, if the file is short enough) the
2718             # encprivkey. Our file is too large to let this first read
2719             # contain the encprivkey. Each non-encprivkey-bearing response
2720             # that arrives (until the node gets the encprivkey) will trigger
2721             # a second read to specifically read the encprivkey.
2722             #
2723             # So, to exercise this case:
2724             #  1. notice which server gets a read() call first
2725             #  2. tell that server to start throwing errors
2726             killer = FirstServerGetsKilled()
2727             for s in nm.storage_broker.get_connected_servers():
2728                 s.get_rref().post_call_notifier = killer.notify
2729         d.addCallback(_created)
2730
2731         # now we update a servermap from a new node (which doesn't have the
2732         # privkey yet, forcing it to use a separate privkey query). Note that
2733         # the map-update will succeed, since we'll just get a copy from one
2734         # of the other shares.
2735         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2736
2737         return d
2738
2739     def test_privkey_query_missing(self):
2740         # like test_privkey_query_error, but the second query finds the
2741         # share deleted, instead of hitting an exception.
2742         self.basedir = "mutable/Problems/test_privkey_query_missing"
2743         self.set_up_grid(num_servers=20)
2744         nm = self.g.clients[0].nodemaker
2745         LARGE = "These are Larger contents" * 2000 # about 50KB
2746         LARGE_uploadable = MutableData(LARGE)
2747         nm._node_cache = DevNullDictionary() # disable the nodecache
2748
2749         d = nm.create_mutable_file(LARGE_uploadable)
2750         def _created(n):
2751             self.uri = n.get_uri()
2752             self.n2 = nm.create_from_cap(self.uri)
2753             deleter = FirstServerGetsDeleted()
2754             for s in nm.storage_broker.get_connected_servers():
2755                 s.get_rref().post_call_notifier = deleter.notify
2756         d.addCallback(_created)
2757         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2758         return d
2759
2760
2761     def test_block_and_hash_query_error(self):
2762         # This tests what happens when a query to a remote server
2763         # fails in either the hash-validation step or the block-fetching
2764         # step (because of batching, these are the same actual query).
2765         # We need the storage server to survive up until the point
2766         # where its prefix is validated, then suddenly die. This
2767         # exercises some of the exception-handling code in Retrieve.
2768         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2769         self.set_up_grid(num_servers=20)
2770         nm = self.g.clients[0].nodemaker
2771         CONTENTS = "contents" * 2000
2772         CONTENTS_uploadable = MutableData(CONTENTS)
2773         d = nm.create_mutable_file(CONTENTS_uploadable)
2774         def _created(node):
2775             self._node = node
2776         d.addCallback(_created)
2777         d.addCallback(lambda ignored:
2778             self._node.get_servermap(MODE_READ))
2779         def _then(servermap):
2780             # we have our servermap. Now we set up the servers like the
2781             # tests above -- the first one that gets a read call should
2782             # start throwing errors, but only after returning its prefix
2783             # for validation. Since we'll download without fetching the
2784             # private key, the next query to the remote server will be
2785             # for either a block and salt or for hashes, either of which
2786             # will exercise the error handling code.
2787             killer = FirstServerGetsKilled()
2788             for s in nm.storage_broker.get_connected_servers():
2789                 s.get_rref().post_call_notifier = killer.notify
2790             ver = servermap.best_recoverable_version()
2791             assert ver
2792             return self._node.download_version(servermap, ver)
2793         d.addCallback(_then)
2794         d.addCallback(lambda data:
2795             self.failUnlessEqual(data, CONTENTS))
2796         return d
2797
2798     def test_1654(self):
2799         # test that the Retrieve object unconditionally verifies the block
2800         # hash tree root for mutable shares. The failure mode is that
2801         # carefully crafted shares can cause undetected corruption (the
2802         # retrieve appears to finish successfully, but the result is
2803         # corrupted). When fixed, these shares always cause a
2804         # CorruptShareError, which results in NotEnoughSharesError in this
2805         # 2-of-2 file.
2806         self.basedir = "mutable/Problems/test_1654"
2807         self.set_up_grid(num_servers=2)
2808         cap = uri.from_string(TEST_1654_CAP)
2809         si = cap.get_storage_index()
2810
2811         for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
2812             sharedata = base64.b64decode(share)
2813             storedir = self.get_serverdir(shnum)
2814             storage_path = os.path.join(storedir, "shares",
2815                                         storage_index_to_dir(si))
2816             fileutil.make_dirs(storage_path)
2817             fileutil.write(os.path.join(storage_path, "%d" % shnum),
2818                            sharedata)
2819
2820         nm = self.g.clients[0].nodemaker
2821         n = nm.create_from_cap(TEST_1654_CAP)
2822         # to exercise the problem correctly, we must ensure that sh0 is
2823         # processed first, and sh1 second. NoNetworkGrid has facilities to
2824         # stall the first request from a single server, but it's not
2825         # currently easy to extend that to stall the second request (mutable
2826         # retrievals will see two: first the mapupdate, then the fetch).
2827         # However, repeated executions of this test without the #1654 fix
2828         # suggest that it fails reliably even without explicit stalls,
2829         # probably because the servers are queried in a fixed order. So I'm
2830         # ok with relying upon that.
2831         d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
2832                             "ran out of servers",
2833                             n.download_best_version)
2834         return d
2835
2836
2837 TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"
2838
2839 TEST_1654_SH0 = """\
2840 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
2841 AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
2842 AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
2843 UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
2844 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2845 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2846 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2847 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2848 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2849 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2850 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2851 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2852 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2853 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2854 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2855 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2856 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2857 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2858 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2859 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2860 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2861 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2862 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2863 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2864 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
2865 bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
2866 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2867 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2868 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2869 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2870 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2871 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2872 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2873 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2874 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2875 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2876 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2877 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2878 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2879 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2880 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2881 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2882 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2883 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2884 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2885 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2886 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2887 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2888 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2889 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2890 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2891 72mXGlqyLyWYuAAAAAA="""
2892
2893 TEST_1654_SH1 = """\
2894 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
2895 9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
2896 AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
2897 MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
2898 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2899 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2900 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2901 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2902 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2903 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2904 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2905 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2906 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2907 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2908 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2909 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2910 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2911 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2912 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2913 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2914 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2915 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2916 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2917 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2918 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
2919 98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
2920 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2921 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2922 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2923 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2924 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2925 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2926 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2927 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2928 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2929 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2930 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2931 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2932 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2933 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2934 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2935 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2936 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2937 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2938 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2939 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2940 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2941 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2942 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2943 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2944 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2945 72mXGlqyLyWYuAAAAAA="""
2946
2947
2948 class FileHandle(unittest.TestCase):
2949     def setUp(self):
2950         self.test_data = "Test Data" * 50000
2951         self.sio = StringIO(self.test_data)
2952         self.uploadable = MutableFileHandle(self.sio)
2953
2954
2955     def test_filehandle_read(self):
2956         self.basedir = "mutable/FileHandle/test_filehandle_read"
2957         chunk_size = 10
2958         for i in xrange(0, len(self.test_data), chunk_size):
2959             data = self.uploadable.read(chunk_size)
2960             data = "".join(data)
2961             start = i
2962             end = i + chunk_size
2963             self.failUnlessEqual(data, self.test_data[start:end])
2964
2965
2966     def test_filehandle_get_size(self):
2967         self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2968         actual_size = len(self.test_data)
2969         size = self.uploadable.get_size()
2970         self.failUnlessEqual(size, actual_size)
2971
2972
2973     def test_filehandle_get_size_out_of_order(self):
2974         # We should be able to call get_size whenever we want without
2975         # disturbing the location of the seek pointer.
2976         chunk_size = 100
2977         data = self.uploadable.read(chunk_size)
2978         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2979
2980         # Now get the size.
2981         size = self.uploadable.get_size()
2982         self.failUnlessEqual(size, len(self.test_data))
2983
2984         # Now get more data. We should be right where we left off.
2985         more_data = self.uploadable.read(chunk_size)
2986         start = chunk_size
2987         end = chunk_size * 2
2988         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2989
2990
2991     def test_filehandle_file(self):
2992         # Make sure that the MutableFileHandle works on a file as well
2993         # as a StringIO object, since in some cases it will be asked to
2994         # deal with files.
2995         self.basedir = self.mktemp()
2996         # self.mktemp() only returns a pathname, so create the directory ourselves
2997         os.mkdir(self.basedir)
2998         f_path = os.path.join(self.basedir, "test_file")
2999         f = open(f_path, "w")
3000         f.write(self.test_data)
3001         f.close()
3002         f = open(f_path, "r")
3003
3004         uploadable = MutableFileHandle(f)
3005
3006         data = uploadable.read(len(self.test_data))
3007         self.failUnlessEqual("".join(data), self.test_data)
3008         size = uploadable.get_size()
3009         self.failUnlessEqual(size, len(self.test_data))
3010
3011
3012     def test_close(self):
3013         # Make sure that the MutableFileHandle closes its handle when
3014         # told to do so.
3015         self.uploadable.close()
3016         self.failUnless(self.sio.closed)
3017
3018
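# Both MutableFileHandle and MutableData return read() results as a list
# of byte-chunks rather than a single string, which is why these tests
# join the chunks. A small helper expressing that pattern (a sketch; it
# assumes the uploadable has not been read from yet):

def _drain_uploadable(uploadable, chunk_size=1000):
    remaining = uploadable.get_size()
    pieces = []
    while remaining > 0:
        chunks = uploadable.read(min(chunk_size, remaining))
        if not chunks:
            break # defensive: don't loop forever on a short read
        pieces.extend(chunks)
        remaining -= sum([len(c) for c in chunks])
    return "".join(pieces)
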
3019 class DataHandle(unittest.TestCase):
3020     def setUp(self):
3021         self.test_data = "Test Data" * 50000
3022         self.uploadable = MutableData(self.test_data)
3023
3024
3025     def test_datahandle_read(self):
3026         chunk_size = 10
3027         for i in xrange(0, len(self.test_data), chunk_size):
3028             data = self.uploadable.read(chunk_size)
3029             data = "".join(data)
3030             start = i
3031             end = i + chunk_size
3032             self.failUnlessEqual(data, self.test_data[start:end])
3033
3034
3035     def test_datahandle_get_size(self):
3036         actual_size = len(self.test_data)
3037         size = self.uploadable.get_size()
3038         self.failUnlessEqual(size, actual_size)
3039
3040
3041     def test_datahandle_get_size_out_of_order(self):
3042         # We should be able to call get_size whenever we want without
3043         # disturbing the location of the seek pointer.
3044         chunk_size = 100
3045         data = self.uploadable.read(chunk_size)
3046         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
3047
3048         # Now get the size.
3049         size = self.uploadable.get_size()
3050         self.failUnlessEqual(size, len(self.test_data))
3051
3052         # Now get more data. We should be right where we left off.
3053         more_data = self.uploadable.read(chunk_size)
3054         start = chunk_size
3055         end = chunk_size * 2
3056         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
3057
3058
3059 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
3060               PublishMixin):
3061     def setUp(self):
3062         GridTestMixin.setUp(self)
3063         self.basedir = self.mktemp()
3064         self.set_up_grid()
3065         self.c = self.g.clients[0]
3066         self.nm = self.c.nodemaker
3067         self.data = "test data" * 100000 # about 900 KiB; MDMF
3068         self.small_data = "test data" * 10 # about 90 B; SDMF
3069
3070
3071     def do_upload_mdmf(self):
3072         d = self.nm.create_mutable_file(MutableData(self.data),
3073                                         version=MDMF_VERSION)
3074         def _then(n):
3075             assert isinstance(n, MutableFileNode)
3076             assert n._protocol_version == MDMF_VERSION
3077             self.mdmf_node = n
3078             return n
3079         d.addCallback(_then)
3080         return d
3081
3082     def do_upload_sdmf(self):
3083         d = self.nm.create_mutable_file(MutableData(self.small_data))
3084         def _then(n):
3085             assert isinstance(n, MutableFileNode)
3086             assert n._protocol_version == SDMF_VERSION
3087             self.sdmf_node = n
3088             return n
3089         d.addCallback(_then)
3090         return d
3091
3092     def do_upload_empty_sdmf(self):
3093         d = self.nm.create_mutable_file(MutableData(""))
3094         def _then(n):
3095             assert isinstance(n, MutableFileNode)
3096             self.sdmf_zero_length_node = n
3097             assert n._protocol_version == SDMF_VERSION
3098             return n
3099         d.addCallback(_then)
3100         return d
3101
3102     def do_upload(self):
3103         d = self.do_upload_mdmf()
3104         d.addCallback(lambda ign: self.do_upload_sdmf())
3105         return d
3106
3107     def test_debug(self):
3108         d = self.do_upload_mdmf()
3109         def _debug(n):
3110             fso = debug.FindSharesOptions()
3111             storage_index = base32.b2a(n.get_storage_index())
3112             fso.si_s = storage_index
3113             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
3114                             for (i,ss,storedir)
3115                             in self.iterate_servers()]
3116             fso.stdout = StringIO()
3117             fso.stderr = StringIO()
3118             debug.find_shares(fso)
3119             sharefiles = fso.stdout.getvalue().splitlines()
3120             expected = self.nm.default_encoding_parameters["n"]
3121             self.failUnlessEqual(len(sharefiles), expected)
3122
3123             do = debug.DumpOptions()
3124             do["filename"] = sharefiles[0]
3125             do.stdout = StringIO()
3126             debug.dump_share(do)
3127             output = do.stdout.getvalue()
3128             lines = set(output.splitlines())
3129             self.failUnless("Mutable slot found:" in lines, output)
3130             self.failUnless(" share_type: MDMF" in lines, output)
3131             self.failUnless(" num_extra_leases: 0" in lines, output)
3132             self.failUnless(" MDMF contents:" in lines, output)
3133             self.failUnless("  seqnum: 1" in lines, output)
3134             self.failUnless("  required_shares: 3" in lines, output)
3135             self.failUnless("  total_shares: 10" in lines, output)
3136             self.failUnless("  segsize: 131073" in lines, output)
3137             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
3138             vcap = n.get_verify_cap().to_string()
3139             self.failUnless("  verify-cap: %s" % vcap in lines, output)
3140
3141             cso = debug.CatalogSharesOptions()
3142             cso.nodedirs = fso.nodedirs
3143             cso.stdout = StringIO()
3144             cso.stderr = StringIO()
3145             debug.catalog_shares(cso)
3146             shares = cso.stdout.getvalue().splitlines()
3147             oneshare = shares[0] # all shares should be MDMF
3148             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3149             self.failUnless(oneshare.startswith("MDMF"), oneshare)
3150             fields = oneshare.split()
3151             self.failUnlessEqual(fields[0], "MDMF")
3152             self.failUnlessEqual(fields[1], storage_index)
3153             self.failUnlessEqual(fields[2], "3/10")
3154             self.failUnlessEqual(fields[3], "%d" % len(self.data))
3155             self.failUnless(fields[4].startswith("#1:"), fields[4])
3156             # the rest of fields[4] is the roothash, which depends upon
3157             # encryption salts and is not constant. fields[5] is the
3158             # remaining time on the longest lease, which is timing dependent.
3159             # The rest of the line is the quoted pathname to the share.
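            # (a full catalog line therefore looks roughly like:
            #  MDMF <SI> 3/10 <datalen> #1:<roothash> <lease-time> <path>)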
3160         d.addCallback(_debug)
3161         return d
3162
3163     def test_get_sequence_number(self):
3164         d = self.do_upload()
3165         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3166         d.addCallback(lambda bv:
3167             self.failUnlessEqual(bv.get_sequence_number(), 1))
3168         d.addCallback(lambda ignored:
3169             self.sdmf_node.get_best_readable_version())
3170         d.addCallback(lambda bv:
3171             self.failUnlessEqual(bv.get_sequence_number(), 1))
3172         # Now update. After the update, the sequence number should be 2 in
3173         # both cases.
3174         def _do_update(ignored):
3175             new_data = MutableData("foo bar baz" * 100000)
3176             new_small_data = MutableData("foo bar baz" * 10)
3177             d1 = self.mdmf_node.overwrite(new_data)
3178             d2 = self.sdmf_node.overwrite(new_small_data)
3179             dl = gatherResults([d1, d2])
3180             return dl
3181         d.addCallback(_do_update)
3182         d.addCallback(lambda ignored:
3183             self.mdmf_node.get_best_readable_version())
3184         d.addCallback(lambda bv:
3185             self.failUnlessEqual(bv.get_sequence_number(), 2))
3186         d.addCallback(lambda ignored:
3187             self.sdmf_node.get_best_readable_version())
3188         d.addCallback(lambda bv:
3189             self.failUnlessEqual(bv.get_sequence_number(), 2))
3190         return d
3191
3192
3193     def test_cap_after_upload(self):
3194         # If we create a new mutable file and upload things to it, and
3195         # it's an MDMF file, we should get an MDMF cap back from that
3196         # file and should be able to use that.
3197         # In other words, an MDMF filenode should hand back MDMF caps.
3198         d = self.do_upload_mdmf()
3199         def _then(ign):
3200             mdmf_uri = self.mdmf_node.get_uri()
3201             cap = uri.from_string(mdmf_uri)
3202             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3203             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3204             cap = uri.from_string(readonly_mdmf_uri)
3205             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3206         d.addCallback(_then)
3207         return d
3208
3209     def test_mutable_version(self):
3210         # assert that getting parameters from the IMutableVersion object
3211         # gives us the same data as getting them from the filenode itself
3212         d = self.do_upload()
3213         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3214         def _check_mdmf(bv):
3215             n = self.mdmf_node
3216             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3217             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3218             self.failIf(bv.is_readonly())
3219         d.addCallback(_check_mdmf)
3220         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3221         def _check_sdmf(bv):
3222             n = self.sdmf_node
3223             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3224             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3225             self.failIf(bv.is_readonly())
3226         d.addCallback(_check_sdmf)
3227         return d
3228
3229
3230     def test_get_readonly_version(self):
3231         d = self.do_upload()
3232         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3233         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3234
3235         # Attempting to get a mutable version of a mutable file from a
3236         # filenode initialized with a readcap should return a readonly
3237         # version of that same node.
3238         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3239         d.addCallback(lambda ro: ro.get_best_mutable_version())
3240         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3241
3242         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3243         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3244
3245         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3246         d.addCallback(lambda ro: ro.get_best_mutable_version())
3247         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3248         return d
3249
3250
3251     def test_toplevel_overwrite(self):
3252         new_data = MutableData("foo bar baz" * 100000)
3253         new_small_data = MutableData("foo bar baz" * 10)
3254         d = self.do_upload()
3255         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3256         d.addCallback(lambda ignored:
3257             self.mdmf_node.download_best_version())
3258         d.addCallback(lambda data:
3259             self.failUnlessEqual(data, "foo bar baz" * 100000))
3260         d.addCallback(lambda ignored:
3261             self.sdmf_node.overwrite(new_small_data))
3262         d.addCallback(lambda ignored:
3263             self.sdmf_node.download_best_version())
3264         d.addCallback(lambda data:
3265             self.failUnlessEqual(data, "foo bar baz" * 10))
3266         return d
3267
3268
3269     def test_toplevel_modify(self):
3270         d = self.do_upload()
3271         def modifier(old_contents, servermap, first_time):
3272             return old_contents + "modified"
3273         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3274         d.addCallback(lambda ignored:
3275             self.mdmf_node.download_best_version())
3276         d.addCallback(lambda data:
3277             self.failUnlessIn("modified", data))
3278         d.addCallback(lambda ignored:
3279             self.sdmf_node.modify(modifier))
3280         d.addCallback(lambda ignored:
3281             self.sdmf_node.download_best_version())
3282         d.addCallback(lambda data:
3283             self.failUnlessIn("modified", data))
3284         return d
3285
3286
3287     def test_version_modify(self):
3288         # TODO: When we can publish multiple versions, alter this test
3289         # to modify a version other than the best usable version, then
3290         # verify that the best recoverable version is the modified one.
3291         d = self.do_upload()
3292         def modifier(old_contents, servermap, first_time):
3293             return old_contents + "modified"
3294         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3295         d.addCallback(lambda ignored:
3296             self.mdmf_node.download_best_version())
3297         d.addCallback(lambda data:
3298             self.failUnlessIn("modified", data))
3299         d.addCallback(lambda ignored:
3300             self.sdmf_node.modify(modifier))
3301         d.addCallback(lambda ignored:
3302             self.sdmf_node.download_best_version())
3303         d.addCallback(lambda data:
3304             self.failUnlessIn("modified", data))
3305         return d
3306
3307
3308     def test_download_version(self):
3309         d = self.publish_multiple()
3310         # We want to have two recoverable versions on the grid.
3311         d.addCallback(lambda res:
3312                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3313                                           1:1,3:1,5:1,7:1,9:1}))
3314         # Now try to download each version. We should get the plaintext
3315         # associated with that version.
3316         d.addCallback(lambda ignored:
3317             self._fn.get_servermap(mode=MODE_READ))
3318         def _got_servermap(smap):
3319             versions = smap.recoverable_versions()
3320             assert len(versions) == 2
3321
3322             self.servermap = smap
3323             self.version1, self.version2 = versions
3324             assert self.version1 != self.version2
3325
3326             self.version1_seqnum = self.version1[0]
3327             self.version2_seqnum = self.version2[0]
3328             self.version1_index = self.version1_seqnum - 1
3329             self.version2_index = self.version2_seqnum - 1
3330
3331         d.addCallback(_got_servermap)
3332         d.addCallback(lambda ignored:
3333             self._fn.download_version(self.servermap, self.version1))
3334         d.addCallback(lambda results:
3335             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3336                                  results))
3337         d.addCallback(lambda ignored:
3338             self._fn.download_version(self.servermap, self.version2))
3339         d.addCallback(lambda results:
3340             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3341                                  results))
3342         return d
3343
3344
3345     def test_download_nonexistent_version(self):
3346         d = self.do_upload_mdmf()
3347         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3348         def _set_servermap(servermap):
3349             self.servermap = servermap
3350         d.addCallback(_set_servermap)
3351         d.addCallback(lambda ignored:
3352            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3353                            None,
3354                            self.mdmf_node.download_version, self.servermap,
3355                            "not a version"))
3356         return d
3357
3358
3359     def test_partial_read(self):
3360         d = self.do_upload_mdmf()
3361         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3362         modes = [("start_on_segment_boundary",
3363                   mathutil.next_multiple(128 * 1024, 3), 50),
3364                  ("ending_one_byte_after_segment_boundary",
3365                   mathutil.next_multiple(128 * 1024, 3)-50, 51),
3366                  ("zero_length_at_start", 0, 0),
3367                  ("zero_length_in_middle", 50, 0),
3368                  ("zero_length_at_segment_boundary",
3369                   mathutil.next_multiple(128 * 1024, 3), 0),
3370                  ]
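        # (mathutil.next_multiple(128 * 1024, 3) == 131073, matching the
        # "segsize: 131073" line checked in test_debug, so these offsets
        # really do line up with a segment boundary)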
3371         for (name, offset, length) in modes:
3372             d.addCallback(self._do_partial_read, name, offset, length)
3373         # then read only a few bytes at a time, and see that the results are
3374         # what we expect.
3375         def _read_data(version):
3376             c = consumer.MemoryConsumer()
3377             d2 = defer.succeed(None)
3378             for i in xrange(0, len(self.data), 10000):
3379                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3380             d2.addCallback(lambda ignored:
3381                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3382             return d2
3383         d.addCallback(_read_data)
3384         return d

3385     def _do_partial_read(self, version, name, offset, length):
3386         c = consumer.MemoryConsumer()
3387         d = version.read(c, offset, length)
3388         expected = self.data[offset:offset+length]
3389         d.addCallback(lambda ignored: "".join(c.chunks))
3390         def _check(results):
3391             if results != expected:
3392                 print
3393                 print "got: %s ... %s" % (results[:20], results[-20:])
3394                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3395                 self.fail("results[%s] != expected" % name)
3396             return version # daisy-chained to next call
3397         d.addCallback(_check)
3398         return d
3399
3400
3401     def _test_read_and_download(self, node, expected):
3402         d = node.get_best_readable_version()
3403         def _read_data(version):
3404             c = consumer.MemoryConsumer()
3405             d2 = defer.succeed(None)
3406             d2.addCallback(lambda ignored: version.read(c))
3407             d2.addCallback(lambda ignored:
3408                 self.failUnlessEqual(expected, "".join(c.chunks)))
3409             return d2
3410         d.addCallback(_read_data)
3411         d.addCallback(lambda ignored: node.download_best_version())
3412         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3413         return d
3414
3415     def test_read_and_download_mdmf(self):
3416         d = self.do_upload_mdmf()
3417         d.addCallback(self._test_read_and_download, self.data)
3418         return d
3419
3420     def test_read_and_download_sdmf(self):
3421         d = self.do_upload_sdmf()
3422         d.addCallback(self._test_read_and_download, self.small_data)
3423         return d
3424
3425     def test_read_and_download_sdmf_zero_length(self):
3426         d = self.do_upload_empty_sdmf()
3427         d.addCallback(self._test_read_and_download, "")
3428         return d
3429
3430
3431 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3432     timeout = 400 # these tests are too big, 120s is not enough on slow
3433                   # platforms
3434     def setUp(self):
3435         GridTestMixin.setUp(self)
3436         self.basedir = self.mktemp()
3437         self.set_up_grid()
3438         self.c = self.g.clients[0]
3439         self.nm = self.c.nodemaker
3440         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3441         self.small_data = "test data" * 10 # about 90 B; SDMF
3442
3443
3444     def do_upload_sdmf(self):
3445         d = self.nm.create_mutable_file(MutableData(self.small_data))
3446         def _then(n):
3447             assert isinstance(n, MutableFileNode)
3448             self.sdmf_node = n
3449             # Make SDMF node that has 255 shares.
3450             self.nm.default_encoding_parameters['n'] = 255
3451             self.nm.default_encoding_parameters['k'] = 127
3452             return self.nm.create_mutable_file(MutableData(self.small_data))
3453         d.addCallback(_then)
3454         def _then2(n):
3455             assert isinstance(n, MutableFileNode)
3456             self.sdmf_max_shares_node = n
3457         d.addCallback(_then2)
3458         return d
3459
3460     def do_upload_mdmf(self):
3461         d = self.nm.create_mutable_file(MutableData(self.data),
3462                                         version=MDMF_VERSION)
3463         def _then(n):
3464             assert isinstance(n, MutableFileNode)
3465             self.mdmf_node = n
3466             # Make MDMF node that has 255 shares.
3467             self.nm.default_encoding_parameters['n'] = 255
3468             self.nm.default_encoding_parameters['k'] = 127
3469             return self.nm.create_mutable_file(MutableData(self.data),
3470                                                version=MDMF_VERSION)
3471         d.addCallback(_then)
3472         def _then2(n):
3473             assert isinstance(n, MutableFileNode)
3474             self.mdmf_max_shares_node = n
3475         d.addCallback(_then2)
3476         return d
3477
3478     def _test_replace(self, offset, new_data):
3479         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3480         d0 = self.do_upload_mdmf()
3481         def _run(ign):
3482             d = defer.succeed(None)
3483             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3484                 # close over 'node'.
3485                 d.addCallback(lambda ign, node=node:
3486                               node.get_best_mutable_version())
3487                 d.addCallback(lambda mv:
3488                               mv.update(MutableData(new_data), offset))
3489                 d.addCallback(lambda ign, node=node:
3490                               node.download_best_version())
3491                 def _check(results):
3492                     if results != expected:
3493                         print
3494                         print "got: %s ... %s" % (results[:20], results[-20:])
3495                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3496                         self.fail("results != expected")
3497                 d.addCallback(_check)
3498             return d
3499         d0.addCallback(_run)
3500         return d0
3501
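    # e.g. _test_replace(2, "XY") on data "abcdef" would expect "abXYef",
    # and offset == len(self.data) degenerates to a pure append, which is
    # what test_append below exercises.
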
3502     def test_append(self):
3503         # We should be able to append data to a mutable file and get
3504         # what we expect.
3505         return self._test_replace(len(self.data), "appended")
3506
3507     def test_replace_middle(self):
3508         # We should be able to replace data in the middle of a mutable
3509         # file and get what we expect back.
3510         return self._test_replace(100, "replaced")
3511
3512     def test_replace_beginning(self):
3513         # We should be able to replace data at the beginning of the file
3514         # without truncating the file
3515         return self._test_replace(0, "beginning")
3516
3517     def test_replace_segstart1(self):
3518         return self._test_replace(128*1024+1, "NNNN")
3519
3520     def test_replace_zero_length_beginning(self):
3521         return self._test_replace(0, "")
3522
3523     def test_replace_zero_length_middle(self):
3524         return self._test_replace(50, "")
3525
3526     def test_replace_zero_length_segstart1(self):
3527         return self._test_replace(128*1024+1, "")
3528
3529     def test_replace_and_extend(self):
3530         # We should be able to replace data in the middle of a mutable
3531         # file, extend it, and get back what we expect.
3532         return self._test_replace(100, "modified " * 100000)
3533
3534
3535     def _check_differences(self, got, expected):
3536         # displaying arbitrary corruption in a 1MB file of repeating
3537         # data is tricky, so look for the likely trouble spots and
3538         # display them separately
3539         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3540         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3541         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3542                     for (start,end) in gotmods]
3543         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3544                     for (start,end) in expmods]
3545         #print "expecting: %s" % expspans
3546
3547         SEGSIZE = 128*1024
3548         if got != expected:
3549             print "differences:"
3550             for segnum in range(len(expected)//SEGSIZE):
3551                 start = segnum * SEGSIZE
3552                 end = (segnum+1) * SEGSIZE
3553                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3554                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3555                 if got_ends != exp_ends:
3556                     print "expected[%d]: %s" % (start, exp_ends)
3557                     print "got     [%d]: %s" % (start, got_ends)
3558             if expspans != gotspans:
3559                 print "expected: %s" % expspans
3560                 print "got     : %s" % gotspans
3561             fileutil.write("EXPECTED", expected)
3562             fileutil.write("GOT", got)
3563             print "wrote data to EXPECTED and GOT"
3564             self.fail("didn't get expected data")
3565
3566
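    # A sketch of the span-extraction trick used by _check_differences
    # (illustrative only): the test data is lowercase, while the updates
    # exercised here write uppercase letters, so runs of [A-Z] pinpoint
    # exactly where modifications landed.
    @staticmethod
    def _uppercase_spans_example(s):
        # _uppercase_spans_example("xxAAxxBB") == ["2:4=AA", "6:8=BB"]
        return ["%d:%d=%s" % (mo.start(), mo.end(), mo.group())
                for mo in re.finditer('([A-Z]+)', s)]
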
3567     def test_replace_locations(self):
3568         # exercise fencepost conditions
3569         SEGSIZE = 128*1024
3570         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3571         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3572         d0 = self.do_upload_mdmf()
3573         def _run(ign):
3574             expected = self.data
3575             d = defer.succeed(None)
3576             for offset in suspects:
3577                 new_data = letters.next()*2 # "AA", then "BB", etc
3578                 expected = expected[:offset]+new_data+expected[offset+2:]
3579                 d.addCallback(lambda ign:
3580                               self.mdmf_node.get_best_mutable_version())
3581                 def _modify(mv, offset=offset, new_data=new_data):
3582                     # close over 'offset' and 'new_data'
3583                     md = MutableData(new_data)
3584                     return mv.update(md, offset)
3585                 d.addCallback(_modify)
3586                 d.addCallback(lambda ignored:
3587                               self.mdmf_node.download_best_version())
3588                 d.addCallback(self._check_differences, expected)
3589             return d
3590         d0.addCallback(_run)
3591         return d0
3592
3593     def test_replace_locations_max_shares(self):
3594         # exercise fencepost conditions
3595         SEGSIZE = 128*1024
3596         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3597         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3598         d0 = self.do_upload_mdmf()
3599         def _run(ign):
3600             expected = self.data
3601             d = defer.succeed(None)
3602             for offset in suspects:
3603                 new_data = letters.next()*2 # "AA", then "BB", etc
3604                 expected = expected[:offset]+new_data+expected[offset+2:]
3605                 d.addCallback(lambda ign:
3606                               self.mdmf_max_shares_node.get_best_mutable_version())
3607                 def _modify(mv, offset=offset, new_data=new_data):
3608                     # close over 'offset' and 'new_data'
3609                     md = MutableData(new_data)
3610                     return mv.update(md, offset)
3611                 d.addCallback(_modify)
3612                 d.addCallback(lambda ignored:
3613                               self.mdmf_max_shares_node.download_best_version())
3614                 d.addCallback(self._check_differences, expected)
3615             return d
3616         d0.addCallback(_run)
3617         return d0
3618
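    # The 'suspects' lists above cover every offset within a few bytes of
    # the first two segment boundaries, where off-by-one errors in the
    # update path are most likely to hide. A sketch of the same
    # construction, with a hypothetical helper name:
    @staticmethod
    def _boundary_suspects_example(segsize, nboundaries=2, slop=3):
        # offsets in [k*segsize - slop, k*segsize] for each boundary k
        offsets = []
        for k in range(1, nboundaries+1):
            offsets.extend(range(k*segsize - slop, k*segsize + 1))
        return offsets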
3619
3620     def test_append_power_of_two(self):
3621         # If we attempt to extend a mutable file so that its segment
3622         # count crosses a power-of-two boundary, the update operation
3623         # should know how to reencode the file.
3624
3625         # Note that the data populating self.mdmf_node is about 900 KiB
3626         # long, which is 7 segments at the default segment size (128 KiB),
3627         # so we need to add 2 segments' worth of data to push it past the
3628         # power-of-two boundary at 8 segments.
3629         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3630         new_data = self.data + (segment * 2)
3631         d0 = self.do_upload_mdmf()
3632         def _run(ign):
3633             d = defer.succeed(None)
3634             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3635                 # close over 'node'.
3636                 d.addCallback(lambda ign, node=node:
3637                               node.get_best_mutable_version())
3638                 d.addCallback(lambda mv:
3639                               mv.update(MutableData(segment * 2), len(self.data)))
3640                 d.addCallback(lambda ign, node=node:
3641                               node.download_best_version())
3642                 d.addCallback(lambda results:
3643                               self.failUnlessEqual(results, new_data))
3644             return d
3645         d0.addCallback(_run)
3646         return d0
3647
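    # A sketch of the arithmetic behind the boundary being crossed above
    # (illustrative only): 900 KiB occupies 7 segments, and appending two
    # 128 KiB segments yields 9, crossing the 8-segment power-of-two
    # boundary that triggers the re-encode.
    @staticmethod
    def _segment_count_example(datalen, segsize=DEFAULT_MAX_SEGMENT_SIZE):
        # mathutil.div_ceil is the ceiling-division helper imported at
        # the top of this file
        return mathutil.div_ceil(datalen, segsize)
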
3648     def test_update_sdmf(self):
3649         # Running update on a single-segment file should still work.
3650         new_data = self.small_data + "appended"
3651         d0 = self.do_upload_sdmf()
3652         def _run(ign):
3653             d = defer.succeed(None)
3654             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3655                 # close over 'node'.
3656                 d.addCallback(lambda ign, node=node:
3657                               node.get_best_mutable_version())
3658                 d.addCallback(lambda mv:
3659                               mv.update(MutableData("appended"), len(self.small_data)))
3660                 d.addCallback(lambda ign, node=node:
3661                               node.download_best_version())
3662                 d.addCallback(lambda results:
3663                               self.failUnlessEqual(results, new_data))
3664             return d
3665         d0.addCallback(_run)
3666         return d0
3667
3668     def test_replace_in_last_segment(self):
3669         # The wrapper should know how to handle the tail segment
3670         # appropriately.
3671         replace_offset = len(self.data) - 100
3672         new_data = self.data[:replace_offset] + "replaced"
3673         rest_offset = replace_offset + len("replaced")
3674         new_data += self.data[rest_offset:]
3675         d0 = self.do_upload_mdmf()
3676         def _run(ign):
3677             d = defer.succeed(None)
3678             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3679                 # close over 'node'.
3680                 d.addCallback(lambda ign, node=node:
3681                               node.get_best_mutable_version())
3682                 d.addCallback(lambda mv:
3683                               mv.update(MutableData("replaced"), replace_offset))
3684                 d.addCallback(lambda ign, node=node:
3685                               node.download_best_version())
3686                 d.addCallback(lambda results:
3687                               self.failUnlessEqual(results, new_data))
3688             return d
3689         d0.addCallback(_run)
3690         return d0
3691
3692     def test_multiple_segment_replace(self):
3693         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3694         new_data = self.data[:replace_offset]
3695         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3696         new_data += 2 * new_segment
3697         new_data += "replaced"
3698         rest_offset = len(new_data)
3699         new_data += self.data[rest_offset:]
3700         d0 = self.do_upload_mdmf()
3701         def _run(ign):
3702             d = defer.succeed(None)
3703             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3704                 # close over 'node'.
3705                 d.addCallback(lambda ign, node=node:
3706                               node.get_best_mutable_version())
3707                 d.addCallback(lambda mv:
3708                               mv.update(MutableData((2 * new_segment) + "replaced"),
3709                                         replace_offset))
3710                 d.addCallback(lambda ignored, node=node:
3711                               node.download_best_version())
3712                 d.addCallback(lambda results:
3713                               self.failUnlessEqual(results, new_data))
3714             return d
3715         d0.addCallback(_run)
3716         return d0
3717
3718 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3719     sdmf_old_shares = {}
3720     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3721     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3722     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3723     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3724     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3725     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3726     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3727     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3728     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3729     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3730     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3731     sdmf_old_contents = "This is a test file.\n"
3732     def copy_sdmf_shares(self):
3733         # We short-circuit the upload process: write the old share data directly to storage.
3734         servernums = self.g.servers_by_number.keys()
3735         assert len(servernums) == 10
3736
3737         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3738         # Get the storage index.
3739         cap = uri.from_string(self.sdmf_old_cap)
3740         si = cap.get_storage_index()
3741
3742         # Now execute each assignment by writing the share data to storage.
3743         for (share, servernum) in assignments:
3744             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3745             storedir = self.get_serverdir(servernum)
3746             storage_path = os.path.join(storedir, "shares",
3747                                         storage_index_to_dir(si))
3748             fileutil.make_dirs(storage_path)
3749             fileutil.write(os.path.join(storage_path, "%d" % share),
3750                            sharedata)
3751         # ...and verify that the shares are there.
3752         shares = self.find_uri_shares(self.sdmf_old_cap)
3753         assert len(shares) == 10
3754
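    # A sketch (with a hypothetical helper name) of the on-disk path each
    # share above lands at, mirroring the layout used by copy_sdmf_shares:
    @staticmethod
    def _share_path_example(storedir, si, sharenum):
        return os.path.join(storedir, "shares",
                            storage_index_to_dir(si), "%d" % sharenum)
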
3755     def test_new_downloader_can_read_old_shares(self):
3756         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3757         self.set_up_grid()
3758         self.copy_sdmf_shares()
3759         nm = self.g.clients[0].nodemaker
3760         n = nm.create_from_cap(self.sdmf_old_cap)
3761         d = n.download_best_version()
3762         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3763         return d
3764
3765 class DifferentEncoding(unittest.TestCase):
3766     def setUp(self):
3767         self._storage = s = FakeStorage()
3768         self.nodemaker = make_nodemaker(s)
3769
3770     def test_filenode(self):
3771         # create a file with 3-of-20, then modify it with a client configured
3772         # to do 3-of-10. Ticket #1510 tracks a failure here.
3773         self.nodemaker.default_encoding_parameters["n"] = 20
3774         d = self.nodemaker.create_mutable_file("old contents")
3775         def _created(n):
3776             filecap = n.get_cap().to_string()
3777             del n # we want a new object, not the cached one
3778             self.nodemaker.default_encoding_parameters["n"] = 10
3779             n2 = self.nodemaker.create_from_cap(filecap)
3780             return n2
3781         d.addCallback(_created)
3782         def modifier(old_contents, servermap, first_time):
3783             return "new contents"
3784         d.addCallback(lambda n: n.modify(modifier))
3785         return d