
import os, re, base64
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.internet import defer, reactor
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32, consumer, fileutil, mathutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
     ssk_pubkey_fingerprint_hash
from allmydata.util.consumer import MemoryConsumer
from allmydata.util.deferredutil import gatherResults
from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
     NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
from allmydata.monitor import Monitor
from allmydata.test.common import ShouldFailMixin
from allmydata.test.no_network import GridTestMixin
from foolscap.api import eventually, fireEventually
from foolscap.logging import log
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.common import storage_index_to_dir
from allmydata.scripts import debug

from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
from allmydata.mutable.common import ResponseCache, \
     MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
     NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
     NotEnoughServersError, CorruptShareError
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.publish import Publish, MutableFileHandle, \
                                      MutableData, \
                                      DEFAULT_MAX_SEGMENT_SIZE
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
from allmydata.mutable.repairer import MustForceRepairError

import allmydata.test.common_util as testutil
from allmydata.test.common import TEST_RSA_KEY_SIZE
from allmydata.test.test_download import PausingConsumer, \
     PausingAndStoppingConsumer, StoppingConsumer, \
     ImmediatelyStoppingConsumer


# this "FakeStorage" exists to put the share data in RAM and avoid using real
# network connections, both to speed up the tests and to reduce the amount of
# non-mutable.py code being exercised.

class FakeStorage:
    # this class replaces the collection of storage servers, allowing the
    # tests to examine and manipulate the published shares. It also lets us
    # control the order in which read queries are answered, to exercise more
    # of the error-handling code in Retrieve.
    #
    # Note that we ignore the storage index: this FakeStorage instance can
    # only be used for a single storage index.

    def __init__(self):
        self._peers = {}
        # _sequence is used to cause the responses to occur in a specific
        # order. If it is in use, then we will defer queries instead of
        # answering them right away, accumulating the Deferreds in a dict. We
        # don't know exactly how many queries we'll get, so exactly one
        # second after the first query arrives, we will release them all (in
        # order).
        self._sequence = None
        self._pending = {}
        self._pending_timer = None

    def read(self, peerid, storage_index):
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return defer.succeed(shares)
        d = defer.Deferred()
        if not self._pending:
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        if peerid not in self._pending:
            self._pending[peerid] = []
        self._pending[peerid].append( (d, shares) )
        return d

    def _fire_readers(self):
        self._pending_timer = None
        pending = self._pending
        self._pending = {}
        for peerid in self._sequence:
            if peerid in pending:
                for (d, shares) in pending.pop(peerid):
                    eventually(d.callback, shares)
        for peerid in pending:
            for (d, shares) in pending[peerid]:
                eventually(d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        f = StringIO()
        f.write(shares.get(shnum, ""))
        f.seek(offset)
        f.write(data)
        shares[shnum] = f.getvalue()

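# A sketch of how a test can force a particular response order (the peer ids
# here are hypothetical; the real tests use tagged_hash-derived ids):
#
#   s = FakeStorage()
#   s._sequence = [peer3, peer1, peer2]   # answer peer3's query first
#
# While _sequence is set, read() parks each query in _pending; one second
# after the first query arrives, _fire_readers() releases the parked queries
# in _sequence order, then any stragglers afterwards.
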

class FakeStorageServer:
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0
    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)
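        # no return value: like foolscap's callRemoteOnly(), this is
        # fire-and-forget, so callers must not wait on the result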

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        pass

    def slot_readv(self, storage_index, shnums, readv):
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
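        # the wire answer is (testv_success, read_data); this always-pass
        # stub unconditionally reports success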
        answer = (True, readv)
        return fireEventually(answer)


def flip_bit(original, byte_offset):
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x01) +
            original[byte_offset+1:])

def add_two(original, byte_offset):
    # It isn't enough to simply flip the low bit of the version number,
    # because 1 is a valid version number. So we flip the 0x02 bit instead,
    # which turns the valid version bytes 0 and 1 into the invalid 2 and 3.
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x02) +
            original[byte_offset+1:])

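# For example (a sketch; arguments are binary strs, as elsewhere in this
# file):
#   flip_bit("\x00\x00", 1) == "\x00\x01"
#   add_two("\x01", 0)      == "\x03"
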
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl

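# Typical use is as a Deferred callback, e.g. (a sketch):
#   d.addCallback(corrupt, self._storage, "enc_privkey", [0, 1])
# which flips one bit of the named share field in shares 0 and 1 and passes
# the incoming result through unchanged.
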
def make_storagebroker(s=None, num_peers=10):
    if not s:
        s = FakeStorage()
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        storage_broker.test_add_rref(peerid, fss)
    return storage_broker

def make_nodemaker(s=None, num_peers=10):
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    nodemaker = NodeMaker(storage_broker, sh, None,
                          None, None,
                          {"k": 3, "n": 10}, keygen)
    return nodemaker

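# The {"k": 3, "n": 10} default encoding parameters above are why the tests
# below expect "3-of-10" in servermap dumps and ten shares per publish.
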
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    # this used to be in Publish, but we removed the limit. Some of
    # these tests test whether the new code correctly allows files
    # larger than the limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
    def setUp(self):
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_create(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d


    def test_create_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d

    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            # bind v now (v=v): a bare closure would see only the final
            # loop value by the time the callback fires
            d.addCallback(lambda ignored, v=v:
                self.nodemaker.create_mutable_file(version=v))
            def _created(n):
                self.failUnless(isinstance(n, MutableFileNode))
                self._node = n
                return n
            d.addCallback(_created)
            d.addCallback(lambda n:
                n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                self._node.download_best_version())
            d.addCallback(lambda contents:
                self.failUnlessEqual(contents, "Contents" * 50000))
        return d

    def test_max_shares(self):
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
            self.failUnlessEqual("contents" * 50000, contents))
        return d

    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        d.addCallback(lambda contents:
            self.failUnlessEqual(contents, "contents" * 50000))
        return d

    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            cap = n.get_cap()
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))

            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)
        return d


    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)

            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
        return d


    def test_serialize(self):
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        return d

    def test_upload_and_download(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d


    def test_upload_and_download_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9 MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d


    def test_retrieve_producer_mdmf(self):
        # We should make sure that the retriever is able to pause and stop
        # correctly.
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=MDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "MDMF", data)
        return d

    # note: SDMF has only one big segment, so we can't use the usual
    # after-the-first-write() trick to pause or stop the download.
    # Disabled until we find a better approach.
    def OFF_test_retrieve_producer_sdmf(self):
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=SDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "SDMF", data)
        return d

    def _test_retrieve_producer(self, version, kind, data):
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        d = version.read(c)
        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))

        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_pause_stop",
                                      "our Consumer called stopProducing()",
                                      version.read, c2))

        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop",
                                      "our Consumer called stopProducing()",
                                      version.read, c3))

        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop_imm",
                                      "our Consumer called stopProducing()",
                                      version.read, c4))

        def _then(ign):
            c5 = MemoryConsumer()
            d1 = version.read(c5)
            c5.producer.stopProducing()
            return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                                   "our Consumer called stopProducing()",
                                   lambda: d1)
        d.addCallback(_then)
        return d

    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.uri = node.get_uri()
            # also confirm that the cap has no extension fields
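            # (a bare MDMF writecap is URI:MDMF:<writekey>:<fingerprint>,
            # i.e. exactly four colon-separated pieces)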
            pieces = self.uri.split(":")
            self.failUnlessEqual(len(pieces), 4)

            return node.overwrite(MutableData("contents1" * 100000))
        def _then(ignored):
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_then)
        d.addCallback(_downloaded)
        return d


    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF.
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
        return d


    def test_create_with_initial_contents(self):
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            return d
        d.addCallback(_created)
        return d


    def test_create_mdmf_with_initial_contents(self):
        initial_contents = "foobarbaz" * 131072 # about 1.1 MiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents +
                                           "foobarbaz"))
            return d
        d.addCallback(_created)
        return d


    def test_response_cache_memory_leak(self):
        d = self.nodemaker.create_mutable_file("contents")
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
                return d2
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
            return d
        d.addCallback(_created)
        return d

    def test_create_with_initial_contents_function(self):
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
        def _created(n):
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
        return d


    def test_create_mdmf_with_initial_contents_function(self):
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
            n.download_best_version())
        d.addCallback(lambda data2:
            self.failUnlessEqual(data2, data))
        return d


    def test_create_with_too_large_contents(self):
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
        def _created(n):
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
            return d
        d.addCallback(_created)
        return d

    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
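        # best_recoverable_version() yields a verinfo tuple whose first
        # element is the sequence number (cf. the unpacking in corrupt())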
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        return d

    def test_modify(self):
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

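        # n.modify(modifier) reads the current contents, passes them to the
        # modifier, and publishes whatever comes back. Judging by the seqnum
        # assertions below, returning None or the unchanged contents counts
        # as "no change" and skips the publish entirely.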
        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


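            # (with the old file-size limit gone, the former "too big"
            # failure checks reduce to confirming nothing changed; the
            # _toobig_modifier itself is exercised at the end of the chain)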
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d


    def test_modify_backoffer(self):
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            return f
        def _backoff_pauser(node, f):
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

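        # n.modify() accepts an optional backoffer: a callable
        # (node, failure) invoked after an UncoordinatedWriteError.
        # Returning the failure (as _backoff_stopper does) gives up, while
        # returning a Deferred that fires later (as _backoff_pauser does)
        # pauses and then retries.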
        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d

    def test_upload_and_download_full_size_keys(self):
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d


    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.n = n
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
            self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
            self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        return d


class PublishMixin:
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS should have more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_multiple(self, version=0):
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d

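    # After publish_multiple(), _copied_shares holds five snapshots:
    #   index 0: "Contents 0"  (seqnum 1)
    #   index 1: "Contents 1"  (seqnum 2)
    #   index 2: "Contents 2"  (seqnum 3)
    #   index 3: "Contents 3a" (seqnum 4)
    #   index 4: "Contents 3b" (seqnum 4, diverged from the same parent)
    # and the live storage is left holding snapshot 4.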

    def _copy_shares(self, ignored, index):
        shares = self._storage._peers
        # we need a deep copy
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]

class Servermap(unittest.TestCase, PublishMixin):
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
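        # shares_available() maps each version to (shares-found, k, N)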
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        return sm

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

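        # expected share counts per mode: MODE_CHECK and MODE_WRITE query
        # every server (10 shares), MODE_READ stops at k+epsilon (6, since
        # epsilon=k here), and MODE_ANYTHING stops at k (3)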
        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares in it any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, peerid, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (peerid, shnum) )
                    sm.mark_bad_share(peerid, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (peerid, shnum) in self._corrupted:
                peer_shares = sm.shares_on_peer(peerid)
                self.failIf(shnum in peer_shares,
                            "%d was in %s" % (shnum, peer_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

    def failUnlessNoneRecoverable(self, sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

    def failUnlessNotQuiteEnough(self, sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
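        # two shares were found, but k=3 of N=10 are needed for recovery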
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        return sm

    def test_not_quite_enough_shares(self):
        s = self._storage
        ms = self.make_servermap
        num_shares = len(s._peers)
        for peerid in s._peers:
            s._peers[peerid] = {}
            num_shares -= 1
            if num_shares == 2:
                break
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))

        return d


1201     def test_servermapupdater_finds_mdmf_files(self):
1202         # setUp already published an MDMF file for us. We just need to
1203         # make sure that when we run the ServermapUpdater, the file is
1204         # reported to have one recoverable version.
1205         d = defer.succeed(None)
1206         d.addCallback(lambda ignored:
1207             self.publish_mdmf())
1208         d.addCallback(lambda ignored:
1209             self.make_servermap(mode=MODE_CHECK))
1210         # Calling make_servermap also updates the servermap in the mode
1211         # that we specify, so we just need to see what it says.
1212         def _check_servermap(sm):
1213             self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1214         d.addCallback(_check_servermap)
1215         return d
1216
1217
1218     def test_fetch_update(self):
1219         d = defer.succeed(None)
1220         d.addCallback(lambda ignored:
1221             self.publish_mdmf())
1222         d.addCallback(lambda ignored:
1223             self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1224         def _check_servermap(sm):
1225             # 10 shares
1226             self.failUnlessEqual(len(sm.update_data), 10)
1227             # one version
1228             for data in sm.update_data.itervalues():
1229                 self.failUnlessEqual(len(data), 1)
1230         d.addCallback(_check_servermap)
1231         return d
1232
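         # (update_data, as checked above, is keyed by share number; each
         # entry presumably holds the per-version pieces that an in-place
         # MDMF update of the requested range needs, which is why passing
         # update_range= to make_servermap populates it.)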
1233
1234     def test_servermapupdater_finds_sdmf_files(self):
1235         d = defer.succeed(None)
1236         d.addCallback(lambda ignored:
1237             self.publish_sdmf())
1238         d.addCallback(lambda ignored:
1239             self.make_servermap(mode=MODE_CHECK))
1240         d.addCallback(lambda servermap:
1241             self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1242         return d
1243
1244
1245 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1246     def setUp(self):
1247         return self.publish_one()
1248
1249     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1250         if oldmap is None:
1251             oldmap = ServerMap()
1252         if sb is None:
1253             sb = self._storage_broker
1254         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1255         d = smu.update()
1256         return d
1257
1258     def abbrev_verinfo(self, verinfo):
1259         if verinfo is None:
1260             return None
1261         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1262          offsets_tuple) = verinfo
1263         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1264
1265     def abbrev_verinfo_dict(self, verinfo_d):
1266         output = {}
1267         for verinfo,value in verinfo_d.items():
1268             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1269              offsets_tuple) = verinfo
1270             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1271         return output
1272
1273     def dump_servermap(self, servermap):
1274         print "SERVERMAP", servermap
1275         print "RECOVERABLE", [self.abbrev_verinfo(v)
1276                               for v in servermap.recoverable_versions()]
1277         print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1278         print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1279
1280     def do_download(self, servermap, version=None):
1281         if version is None:
1282             version = servermap.best_recoverable_version()
1283         r = Retrieve(self._fn, servermap, version)
1284         c = consumer.MemoryConsumer()
1285         d = r.download(consumer=c)
1286         d.addCallback(lambda mc: "".join(mc.chunks))
1287         return d
1288
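         # The helpers above (ServermapUpdater to build a map, then Retrieve
         # feeding a MemoryConsumer) are roughly the same machinery the real
         # filenode download path uses; the tests below just drive it
         # directly so they can inspect the intermediate servermap.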
1289
1290     def test_basic(self):
1291         d = self.make_servermap()
1292         def _do_retrieve(servermap):
1293             self._smap = servermap
1294             #self.dump_servermap(servermap)
1295             self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1296             return self.do_download(servermap)
1297         d.addCallback(_do_retrieve)
1298         def _retrieved(new_contents):
1299             self.failUnlessEqual(new_contents, self.CONTENTS)
1300         d.addCallback(_retrieved)
1301         # we should be able to re-use the same servermap, both with and
1302         # without updating it.
1303         d.addCallback(lambda res: self.do_download(self._smap))
1304         d.addCallback(_retrieved)
1305         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1306         d.addCallback(lambda res: self.do_download(self._smap))
1307         d.addCallback(_retrieved)
1308         # clobbering the pubkey should make the servermap updater re-fetch it
1309         def _clobber_pubkey(res):
1310             self._fn._pubkey = None
1311         d.addCallback(_clobber_pubkey)
1312         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1313         d.addCallback(lambda res: self.do_download(self._smap))
1314         d.addCallback(_retrieved)
1315         return d
1316
1317     def test_all_shares_vanished(self):
1318         d = self.make_servermap()
1319         def _remove_shares(servermap):
1320             for shares in self._storage._peers.values():
1321                 shares.clear()
1322             d1 = self.shouldFail(NotEnoughSharesError,
1323                                  "test_all_shares_vanished",
1324                                  "ran out of peers",
1325                                  self.do_download, servermap)
1326             return d1
1327         d.addCallback(_remove_shares)
1328         return d
1329
1330     def test_no_servers(self):
1331         sb2 = make_storagebroker(num_peers=0)
1332         # if there are no servers, then a MODE_READ servermap should come
1333         # back empty
1334         d = self.make_servermap(sb=sb2)
1335         def _check_servermap(servermap):
1336             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1337             self.failIf(servermap.recoverable_versions())
1338             self.failIf(servermap.unrecoverable_versions())
1339             self.failIf(servermap.all_peers())
1340         d.addCallback(_check_servermap)
1341         return d
1342
1343     def test_no_servers_download(self):
1344         sb2 = make_storagebroker(num_peers=0)
1345         self._fn._storage_broker = sb2
1346         d = self.shouldFail(UnrecoverableFileError,
1347                             "test_no_servers_download",
1348                             "no recoverable versions",
1349                             self._fn.download_best_version)
1350         def _restore(res):
1351             # a failed download that occurs while we aren't connected to
1352             # anybody should not prevent a subsequent download from working.
1353             # This isn't quite the webapi-driven test that #463 wants, but it
1354             # should be close enough.
1355             self._fn._storage_broker = self._storage_broker
1356             return self._fn.download_best_version()
1357         def _retrieved(new_contents):
1358             self.failUnlessEqual(new_contents, self.CONTENTS)
1359         d.addCallback(_restore)
1360         d.addCallback(_retrieved)
1361         return d
1362
1363
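         # _test_corrupt_all corrupts the named field (or byte offset) in
         # every share, then either expects the download to still succeed
         # (should_succeed=True) or expects `substring` to appear in the
         # resulting failure. corrupt_early=True corrupts before the
         # servermap update; corrupt_early=False corrupts between mapupdate
         # and retrieve.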
1364     def _test_corrupt_all(self, offset, substring,
1365                           should_succeed=False,
1366                           corrupt_early=True,
1367                           failure_checker=None,
1368                           fetch_privkey=False):
1369         d = defer.succeed(None)
1370         if corrupt_early:
1371             d.addCallback(corrupt, self._storage, offset)
1372         d.addCallback(lambda res: self.make_servermap())
1373         if not corrupt_early:
1374             d.addCallback(corrupt, self._storage, offset)
1375         def _do_retrieve(servermap):
1376             ver = servermap.best_recoverable_version()
1377             if ver is None and not should_succeed:
1378                 # no recoverable versions == not succeeding. The problem
1379                 # should be noted in the servermap's list of problems.
1380                 if substring:
1381                     allproblems = [str(f) for f in servermap.problems]
1382                     self.failUnlessIn(substring, "".join(allproblems))
1383                 return servermap
1384             if should_succeed:
1385                 d1 = self._fn.download_version(servermap, ver,
1386                                                fetch_privkey)
1387                 d1.addCallback(lambda new_contents:
1388                                self.failUnlessEqual(new_contents, self.CONTENTS))
1389             else:
1390                 d1 = self.shouldFail(NotEnoughSharesError,
1391                                      "_corrupt_all(offset=%s)" % (offset,),
1392                                      substring,
1393                                      self._fn.download_version, servermap,
1394                                                                 ver,
1395                                                                 fetch_privkey)
1396             if failure_checker:
1397                 d1.addCallback(failure_checker)
1398             d1.addCallback(lambda res: servermap)
1399             return d1
1400         d.addCallback(_do_retrieve)
1401         return d
1402
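         # For reference, the small integer offsets used below index into
         # the SDMF share header (a sketch of the layout in
         # allmydata/mutable/layout.py; it may drift if the format changes):
         #
         #   offset  size  field
         #        0     1  version byte
         #        1     8  sequence number
         #        9    32  root hash
         #       41    16  IV (salt)
         #       57     1  k (required shares)
         #       58     1  N (total shares)
         #       59     8  segment size
         #       67     8  data length
         #
         # The whole [0:75] prefix is signed, which is why most of these
         # corruptions surface as "signature is invalid" (a bad version
         # byte fails even earlier, while unpacking).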
1403     def test_corrupt_all_verbyte(self):
1404         # when the version byte is not 0 or 1, we hit an UnknownVersionError
1405         # in unpack_share().
1406         d = self._test_corrupt_all(0, "UnknownVersionError")
1407         def _check_servermap(servermap):
1408             # and the dump should mention the problems
1409             s = StringIO()
1410             dump = servermap.dump(s).getvalue()
1411             self.failUnless("30 PROBLEMS" in dump, dump)
1412         d.addCallback(_check_servermap)
1413         return d
1414
1415     def test_corrupt_all_seqnum(self):
1416         # a corrupt sequence number will trigger a bad signature
1417         return self._test_corrupt_all(1, "signature is invalid")
1418
1419     def test_corrupt_all_R(self):
1420         # a corrupt root hash will trigger a bad signature
1421         return self._test_corrupt_all(9, "signature is invalid")
1422
1423     def test_corrupt_all_IV(self):
1424         # a corrupt salt/IV will trigger a bad signature
1425         return self._test_corrupt_all(41, "signature is invalid")
1426
1427     def test_corrupt_all_k(self):
1428         # a corrupt 'k' will trigger a bad signature
1429         return self._test_corrupt_all(57, "signature is invalid")
1430
1431     def test_corrupt_all_N(self):
1432         # a corrupt 'N' will trigger a bad signature
1433         return self._test_corrupt_all(58, "signature is invalid")
1434
1435     def test_corrupt_all_segsize(self):
1436         # a corrupt segsize will trigger a bad signature
1437         return self._test_corrupt_all(59, "signature is invalid")
1438
1439     def test_corrupt_all_datalen(self):
1440         # a corrupt data length will trigger a bad signature
1441         return self._test_corrupt_all(67, "signature is invalid")
1442
1443     def test_corrupt_all_pubkey(self):
1444         # a corrupt pubkey won't match the URI's fingerprint. We need to
1445         # remove the pubkey from the filenode, or else it won't bother trying
1446         # to update it.
1447         self._fn._pubkey = None
1448         return self._test_corrupt_all("pubkey",
1449                                       "pubkey doesn't match fingerprint")
1450
1451     def test_corrupt_all_sig(self):
1452         # a corrupt signature is a bad one
1453         # the signature runs from about [543:799], depending upon the length
1454         # of the pubkey
1455         return self._test_corrupt_all("signature", "signature is invalid")
1456
1457     def test_corrupt_all_share_hash_chain_number(self):
1458         # a corrupt share hash chain entry will show up as a bad hash. If we
1459         # mangle the first byte, that will look like a bad hash number,
1460         # causing an IndexError
1461         return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1462
1463     def test_corrupt_all_share_hash_chain_hash(self):
1464         # a corrupt share hash chain entry will show up as a bad hash. If we
1465         # mangle a few bytes in, that will look like a bad hash.
1466         return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1467
1468     def test_corrupt_all_block_hash_tree(self):
1469         return self._test_corrupt_all("block_hash_tree",
1470                                       "block hash tree failure")
1471
1472     def test_corrupt_all_block(self):
1473         return self._test_corrupt_all("share_data", "block hash tree failure")
1474
1475     def test_corrupt_all_encprivkey(self):
1476         # a corrupted privkey won't even be noticed by the reader, only by a
1477         # writer.
1478         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1479
1480
1481     def test_corrupt_all_encprivkey_late(self):
1482         # this should work for the same reason as above, but we corrupt 
1483         # after the servermap update to exercise the error handling
1484         # code.
1485         # We need to remove the privkey from the node, or the retrieve
1486         # process won't know to update it.
1487         self._fn._privkey = None
1488         return self._test_corrupt_all("enc_privkey",
1489                                       None, # this shouldn't fail
1490                                       should_succeed=True,
1491                                       corrupt_early=False,
1492                                       fetch_privkey=True)
1493
1494
1495     # disabled until retrieve tests checkstring on each blockfetch. I didn't
1496     # just use a .todo because the failing-but-ignored test emits about 30kB
1497     # of noise.
1498     def OFF_test_corrupt_all_seqnum_late(self):
1499         # corrupting the seqnum between mapupdate and retrieve should result
1500         # in NotEnoughSharesError, since each share will look invalid
1501         def _check(res):
1502             f = res[0]
1503             self.failUnless(f.check(NotEnoughSharesError))
1504             self.failUnless("uncoordinated write" in str(f))
1505         return self._test_corrupt_all(1, "ran out of peers",
1506                                       corrupt_early=False,
1507                                       failure_checker=_check)
1508
1509     def test_corrupt_all_block_hash_tree_late(self):
1510         def _check(res):
1511             f = res[0]
1512             self.failUnless(f.check(NotEnoughSharesError))
1513         return self._test_corrupt_all("block_hash_tree",
1514                                       "block hash tree failure",
1515                                       corrupt_early=False,
1516                                       failure_checker=_check)
1517
1518
1519     def test_corrupt_all_block_late(self):
1520         def _check(res):
1521             f = res[0]
1522             self.failUnless(f.check(NotEnoughSharesError))
1523         return self._test_corrupt_all("share_data", "block hash tree failure",
1524                                       corrupt_early=False,
1525                                       failure_checker=_check)
1526
1527
1528     def test_basic_pubkey_at_end(self):
1529         # we corrupt the pubkey in all but the last 'k' shares, allowing the
1530         # download to succeed but forcing a bunch of retries first. Note that
1531         # this is rather pessimistic: our Retrieve process will throw away
1532         # the whole share if the pubkey is bad, even though the rest of the
1533         # share might be good.
1534
1535         self._fn._pubkey = None
1536         k = self._fn.get_required_shares()
1537         N = self._fn.get_total_shares()
1538         d = defer.succeed(None)
1539         d.addCallback(corrupt, self._storage, "pubkey",
1540                       shnums_to_corrupt=range(0, N-k))
1541         d.addCallback(lambda res: self.make_servermap())
1542         def _do_retrieve(servermap):
1543             self.failUnless(servermap.problems)
1544             self.failUnless("pubkey doesn't match fingerprint"
1545                             in str(servermap.problems[0]))
1546             ver = servermap.best_recoverable_version()
1547             r = Retrieve(self._fn, servermap, ver)
1548             c = consumer.MemoryConsumer()
1549             return r.download(c)
1550         d.addCallback(_do_retrieve)
1551         d.addCallback(lambda mc: "".join(mc.chunks))
1552         d.addCallback(lambda new_contents:
1553                       self.failUnlessEqual(new_contents, self.CONTENTS))
1554         return d
1555
1556
1557     def _test_corrupt_some(self, offset, mdmf=False):
1558         if mdmf:
1559             d = self.publish_mdmf()
1560         else:
1561             d = defer.succeed(None)
1562         d.addCallback(lambda ignored:
1563             corrupt(None, self._storage, offset, range(5)))
1564         d.addCallback(lambda ignored:
1565             self.make_servermap())
1566         def _do_retrieve(servermap):
1567             ver = servermap.best_recoverable_version()
1568             self.failUnless(ver)
1569             return self._fn.download_best_version()
1570         d.addCallback(_do_retrieve)
1571         d.addCallback(lambda new_contents:
1572             self.failUnlessEqual(new_contents, self.CONTENTS))
1573         return d
1574
1575
1576     def test_corrupt_some(self):
1577         # corrupt the data of the first five shares (so the servermap thinks
1578         # they're good but retrieve marks them as bad), so that the
1579         # MODE_READ set of 6 will be insufficient, forcing node.download to
1580         # retry with more servers.
1581         return self._test_corrupt_some("share_data")
1582
1583
1584     def test_download_fails(self):
1585         d = corrupt(None, self._storage, "signature")
1586         d.addCallback(lambda ignored:
1587             self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1588                             "no recoverable versions",
1589                             self._fn.download_best_version))
1590         return d
1591
1592
1593
1594     def test_corrupt_mdmf_block_hash_tree(self):
1595         d = self.publish_mdmf()
1596         d.addCallback(lambda ignored:
1597             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1598                                    "block hash tree failure",
1599                                    corrupt_early=True,
1600                                    should_succeed=False))
1601         return d
1602
1603
1604     def test_corrupt_mdmf_block_hash_tree_late(self):
1605         d = self.publish_mdmf()
1606         d.addCallback(lambda ignored:
1607             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1608                                    "block hash tree failure",
1609                                    corrupt_early=False,
1610                                    should_succeed=False))
1611         return d
1612
1613
1614     def test_corrupt_mdmf_share_data(self):
1615         d = self.publish_mdmf()
1616         d.addCallback(lambda ignored:
1617             # TODO: Find out what the block size is and corrupt a
1618             # specific block, rather than just guessing.
1619             self._test_corrupt_all(("share_data", 12 * 40),
1620                                     "block hash tree failure",
1621                                     corrupt_early=True,
1622                                     should_succeed=False))
1623         return d
1624
1625
1626     def test_corrupt_some_mdmf(self):
1627         return self._test_corrupt_some(("share_data", 12 * 40),
1628                                        mdmf=True)
1629
1630
1631 class CheckerMixin:
1632     def check_good(self, r, where):
1633         self.failUnless(r.is_healthy(), where)
1634         return r
1635
1636     def check_bad(self, r, where):
1637         self.failIf(r.is_healthy(), where)
1638         return r
1639
1640     def check_expected_failure(self, r, expected_exception, substring, where):
1641         for (peerid, storage_index, shnum, f) in r.problems:
1642             if f.check(expected_exception):
1643                 self.failUnless(substring in str(f),
1644                                 "%s: substring '%s' not in '%s'" %
1645                                 (where, substring, str(f)))
1646                 return
1647         self.fail("%s: didn't see expected exception %s in problems %s" %
1648                   (where, expected_exception, r.problems))
1649
1650
1651 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1652     def setUp(self):
1653         return self.publish_one()
1654
1655
1656     def test_check_good(self):
1657         d = self._fn.check(Monitor())
1658         d.addCallback(self.check_good, "test_check_good")
1659         return d
1660
1661     def test_check_mdmf_good(self):
1662         d = self.publish_mdmf()
1663         d.addCallback(lambda ignored:
1664             self._fn.check(Monitor()))
1665         d.addCallback(self.check_good, "test_check_mdmf_good")
1666         return d
1667
1668     def test_check_no_shares(self):
1669         for shares in self._storage._peers.values():
1670             shares.clear()
1671         d = self._fn.check(Monitor())
1672         d.addCallback(self.check_bad, "test_check_no_shares")
1673         return d
1674
1675     def test_check_mdmf_no_shares(self):
1676         d = self.publish_mdmf()
1677         def _then(ignored):
1678             for share in self._storage._peers.values():
1679                 share.clear()
1680         d.addCallback(_then)
1681         d.addCallback(lambda ignored:
1682             self._fn.check(Monitor()))
1683         d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1684         return d
1685
1686     def test_check_not_enough_shares(self):
1687         for shares in self._storage._peers.values():
1688             for shnum in shares.keys():
1689                 if shnum > 0:
1690                     del shares[shnum]
1691         d = self._fn.check(Monitor())
1692         d.addCallback(self.check_bad, "test_check_not_enough_shares")
1693         return d
1694
1695     def test_check_mdmf_not_enough_shares(self):
1696         d = self.publish_mdmf()
1697         def _then(ignored):
1698             for shares in self._storage._peers.values():
1699                 for shnum in shares.keys():
1700                     if shnum > 0:
1701                         del shares[shnum]
1702         d.addCallback(_then)
1703         d.addCallback(lambda ignored:
1704             self._fn.check(Monitor()))
1705         d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
1706         return d
1707
1708
1709     def test_check_all_bad_sig(self):
1710         d = corrupt(None, self._storage, 1) # bad sig
1711         d.addCallback(lambda ignored:
1712             self._fn.check(Monitor()))
1713         d.addCallback(self.check_bad, "test_check_all_bad_sig")
1714         return d
1715
1716     def test_check_mdmf_all_bad_sig(self):
1717         d = self.publish_mdmf()
1718         d.addCallback(lambda ignored:
1719             corrupt(None, self._storage, 1))
1720         d.addCallback(lambda ignored:
1721             self._fn.check(Monitor()))
1722         d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1723         return d
1724
1725     def test_check_all_bad_blocks(self):
1726         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1727         # the Checker won't notice this; it doesn't look at actual data
1728         d.addCallback(lambda ignored:
1729             self._fn.check(Monitor()))
1730         d.addCallback(self.check_good, "test_check_all_bad_blocks")
1731         return d
1732
1733
1734     def test_check_mdmf_all_bad_blocks(self):
1735         d = self.publish_mdmf()
1736         d.addCallback(lambda ignored:
1737             corrupt(None, self._storage, "share_data"))
1738         d.addCallback(lambda ignored:
1739             self._fn.check(Monitor()))
1740         d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1741         return d
1742
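         # The remaining tests exercise verification. Roughly speaking (an
         # assumption about the division of labor, not a spec): plain
         # check() only looks at which shares and versions exist, while
         # check(verify=True) downloads each share and validates its hashes
         # and signature, so only the verifier can catch corrupt share data.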
1743     def test_verify_good(self):
1744         d = self._fn.check(Monitor(), verify=True)
1745         d.addCallback(self.check_good, "test_verify_good")
1746         return d
1747
1748     def test_verify_all_bad_sig(self):
1749         d = corrupt(None, self._storage, 1) # bad sig
1750         d.addCallback(lambda ignored:
1751             self._fn.check(Monitor(), verify=True))
1752         d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1753         return d
1754
1755     def test_verify_one_bad_sig(self):
1756         d = corrupt(None, self._storage, 1, [9]) # bad sig
1757         d.addCallback(lambda ignored:
1758             self._fn.check(Monitor(), verify=True))
1759         d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1760         return d
1761
1762     def test_verify_one_bad_block(self):
1763         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1764         # the Verifier *will* notice this, since it examines every byte
1765         d.addCallback(lambda ignored:
1766             self._fn.check(Monitor(), verify=True))
1767         d.addCallback(self.check_bad, "test_verify_one_bad_block")
1768         d.addCallback(self.check_expected_failure,
1769                       CorruptShareError, "block hash tree failure",
1770                       "test_verify_one_bad_block")
1771         return d
1772
1773     def test_verify_one_bad_sharehash(self):
1774         d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1775         d.addCallback(lambda ignored:
1776             self._fn.check(Monitor(), verify=True))
1777         d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1778         d.addCallback(self.check_expected_failure,
1779                       CorruptShareError, "corrupt hashes",
1780                       "test_verify_one_bad_sharehash")
1781         return d
1782
1783     def test_verify_one_bad_encprivkey(self):
1784         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1785         d.addCallback(lambda ignored:
1786             self._fn.check(Monitor(), verify=True))
1787         d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1788         d.addCallback(self.check_expected_failure,
1789                       CorruptShareError, "invalid privkey",
1790                       "test_verify_one_bad_encprivkey")
1791         return d
1792
1793     def test_verify_one_bad_encprivkey_uncheckable(self):
1794         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1795         readonly_fn = self._fn.get_readonly()
1796         # a read-only node has no way to validate the privkey
1797         d.addCallback(lambda ignored:
1798             readonly_fn.check(Monitor(), verify=True))
1799         d.addCallback(self.check_good,
1800                       "test_verify_one_bad_encprivkey_uncheckable")
1801         return d
1802
1803
1804     def test_verify_mdmf_good(self):
1805         d = self.publish_mdmf()
1806         d.addCallback(lambda ignored:
1807             self._fn.check(Monitor(), verify=True))
1808         d.addCallback(self.check_good, "test_verify_mdmf_good")
1809         return d
1810
1811
1812     def test_verify_mdmf_one_bad_block(self):
1813         d = self.publish_mdmf()
1814         d.addCallback(lambda ignored:
1815             corrupt(None, self._storage, "share_data", [1]))
1816         d.addCallback(lambda ignored:
1817             self._fn.check(Monitor(), verify=True))
1818         # We should find one bad block here
1819         d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1820         d.addCallback(self.check_expected_failure,
1821                       CorruptShareError, "block hash tree failure",
1822                       "test_verify_mdmf_one_bad_block")
1823         return d
1824
1825
1826     def test_verify_mdmf_bad_encprivkey(self):
1827         d = self.publish_mdmf()
1828         d.addCallback(lambda ignored:
1829             corrupt(None, self._storage, "enc_privkey", [0]))
1830         d.addCallback(lambda ignored:
1831             self._fn.check(Monitor(), verify=True))
1832         d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1833         d.addCallback(self.check_expected_failure,
1834                       CorruptShareError, "privkey",
1835                       "test_verify_mdmf_bad_encprivkey")
1836         return d
1837
1838
1839     def test_verify_mdmf_bad_sig(self):
1840         d = self.publish_mdmf()
1841         d.addCallback(lambda ignored:
1842             corrupt(None, self._storage, 1, [1]))
1843         d.addCallback(lambda ignored:
1844             self._fn.check(Monitor(), verify=True))
1845         d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
1846         return d
1847
1848
1849     def test_verify_mdmf_bad_encprivkey_uncheckable(self):
1850         d = self.publish_mdmf()
1851         d.addCallback(lambda ignored:
1852             corrupt(None, self._storage, "enc_privkey", [1]))
1853         d.addCallback(lambda ignored:
1854             self._fn.get_readonly())
1855         d.addCallback(lambda fn:
1856             fn.check(Monitor(), verify=True))
1857         d.addCallback(self.check_good,
1858                       "test_verify_mdmf_bad_encprivkey_uncheckable")
1859         return d
1860
1861
1862 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1863
1864     def get_shares(self, s):
1865         all_shares = {} # maps (peerid, shnum) to share data
1866         for peerid in s._peers:
1867             shares = s._peers[peerid]
1868             for shnum in shares:
1869                 data = shares[shnum]
1870                 all_shares[ (peerid, shnum) ] = data
1871         return all_shares
1872
1873     def copy_shares(self, ignored=None):
1874         self.old_shares.append(self.get_shares(self._storage))
1875
1876     def test_repair_nop(self):
1877         self.old_shares = []
1878         d = self.publish_one()
1879         d.addCallback(self.copy_shares)
1880         d.addCallback(lambda res: self._fn.check(Monitor()))
1881         d.addCallback(lambda check_results: self._fn.repair(check_results))
1882         def _check_results(rres):
1883             self.failUnless(IRepairResults.providedBy(rres))
1884             self.failUnless(rres.get_successful())
1885             # TODO: examine results
1886
1887             self.copy_shares()
1888
1889             initial_shares = self.old_shares[0]
1890             new_shares = self.old_shares[1]
1891             # TODO: this really shouldn't change anything. When we implement
1892             # a "minimal-bandwidth" repairer, change this test to assert:
1893             #self.failUnlessEqual(new_shares, initial_shares)
1894
1895             # all shares should be in the same place as before
1896             self.failUnlessEqual(set(initial_shares.keys()),
1897                                  set(new_shares.keys()))
1898             # but they should all be at a newer seqnum. The IV will be
1899             # different, so the roothash will be too.
1900             for key in initial_shares:
1901                 (version0,
1902                  seqnum0,
1903                  root_hash0,
1904                  IV0,
1905                  k0, N0, segsize0, datalen0,
1906                  o0) = unpack_header(initial_shares[key])
1907                 (version1,
1908                  seqnum1,
1909                  root_hash1,
1910                  IV1,
1911                  k1, N1, segsize1, datalen1,
1912                  o1) = unpack_header(new_shares[key])
1913                 self.failUnlessEqual(version0, version1)
1914                 self.failUnlessEqual(seqnum0+1, seqnum1)
1915                 self.failUnlessEqual(k0, k1)
1916                 self.failUnlessEqual(N0, N1)
1917                 self.failUnlessEqual(segsize0, segsize1)
1918                 self.failUnlessEqual(datalen0, datalen1)
1919         d.addCallback(_check_results)
1920         return d
1921
1922     def failIfSharesChanged(self, ignored=None):
1923         old_shares = self.old_shares[-2]
1924         current_shares = self.old_shares[-1]
1925         self.failUnlessEqual(old_shares, current_shares)
1926
1927
1928     def test_unrepairable_0shares(self):
1929         d = self.publish_one()
1930         def _delete_all_shares(ign):
1931             shares = self._storage._peers
1932             for peerid in shares:
1933                 shares[peerid] = {}
1934         d.addCallback(_delete_all_shares)
1935         d.addCallback(lambda ign: self._fn.check(Monitor()))
1936         d.addCallback(lambda check_results: self._fn.repair(check_results))
1937         def _check(crr):
1938             self.failUnlessEqual(crr.get_successful(), False)
1939         d.addCallback(_check)
1940         return d
1941
1942     def test_mdmf_unrepairable_0shares(self):
1943         d = self.publish_mdmf()
1944         def _delete_all_shares(ign):
1945             shares = self._storage._peers
1946             for peerid in shares:
1947                 shares[peerid] = {}
1948         d.addCallback(_delete_all_shares)
1949         d.addCallback(lambda ign: self._fn.check(Monitor()))
1950         d.addCallback(lambda check_results: self._fn.repair(check_results))
1951         d.addCallback(lambda crr: self.failIf(crr.get_successful()))
1952         return d
1953
1954
1955     def test_unrepairable_1share(self):
1956         d = self.publish_one()
1957         def _delete_all_but_one_share(ign):
1958             shares = self._storage._peers
1959             for peerid in shares:
1960                 for shnum in list(shares[peerid]):
1961                     if shnum > 0:
1962                         del shares[peerid][shnum]
1963         d.addCallback(_delete_all_but_one_share)
1964         d.addCallback(lambda ign: self._fn.check(Monitor()))
1965         d.addCallback(lambda check_results: self._fn.repair(check_results))
1966         def _check(crr):
1967             self.failUnlessEqual(crr.get_successful(), False)
1968         d.addCallback(_check)
1969         return d
1970
1971     def test_mdmf_unrepairable_1share(self):
1972         d = self.publish_mdmf()
1973         def _delete_all_but_one_share(ign):
1974             shares = self._storage._peers
1975             for peerid in shares:
1976                 for shnum in list(shares[peerid]):
1977                     if shnum > 0:
1978                         del shares[peerid][shnum]
1979         d.addCallback(_delete_all_but_one_share)
1980         d.addCallback(lambda ign: self._fn.check(Monitor()))
1981         d.addCallback(lambda check_results: self._fn.repair(check_results))
1982         def _check(crr):
1983             self.failUnlessEqual(crr.get_successful(), False)
1984         d.addCallback(_check)
1985         return d
1986
1987     def test_repairable_5shares(self):
1988         d = self.publish_sdmf()
1989         def _delete_some_shares(ign):
1990             shares = self._storage._peers
1991             for peerid in shares:
1992                 for shnum in list(shares[peerid]):
1993                     if shnum > 4:
1994                         del shares[peerid][shnum]
1995         d.addCallback(_delete_some_shares)
1996         d.addCallback(lambda ign: self._fn.check(Monitor()))
1997         d.addCallback(lambda check_results: self._fn.repair(check_results))
1998         def _check(crr):
1999             self.failUnlessEqual(crr.get_successful(), True)
2000         d.addCallback(_check)
2001         return d
2002
2003     def test_mdmf_repairable_5shares(self):
2004         d = self.publish_mdmf()
2005         def _delete_some_shares(ign):
2006             shares = self._storage._peers
2007             for peerid in shares:
2008                 for shnum in list(shares[peerid]):
2009                     if shnum > 5:
2010                         del shares[peerid][shnum]
2011         d.addCallback(_delete_some_shares)
2012         d.addCallback(lambda ign: self._fn.check(Monitor()))
2013         def _check(cr):
2014             self.failIf(cr.is_healthy())
2015             self.failUnless(cr.is_recoverable())
2016             return cr
2017         d.addCallback(_check)
2018         d.addCallback(lambda check_results: self._fn.repair(check_results))
2019         def _check1(crr):
2020             self.failUnlessEqual(crr.get_successful(), True)
2021         d.addCallback(_check1)
2022         return d
2023
2024
2025     def test_merge(self):
2026         self.old_shares = []
2027         d = self.publish_multiple()
2028         # repair will refuse to merge multiple highest seqnums unless you
2029         # pass force=True
2030         d.addCallback(lambda res:
2031                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2032                                           1:4,3:4,5:4,7:4,9:4}))
2033         d.addCallback(self.copy_shares)
2034         d.addCallback(lambda res: self._fn.check(Monitor()))
2035         def _try_repair(check_results):
2036             ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2037             d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2038                                  self._fn.repair, check_results)
2039             d2.addCallback(self.copy_shares)
2040             d2.addCallback(self.failIfSharesChanged)
2041             d2.addCallback(lambda res: check_results)
2042             return d2
2043         d.addCallback(_try_repair)
2044         d.addCallback(lambda check_results:
2045                       self._fn.repair(check_results, force=True))
2046         # this should give us 10 shares of the highest roothash
2047         def _check_repair_results(rres):
2048             self.failUnless(rres.get_successful())
2049             pass # TODO
2050         d.addCallback(_check_repair_results)
2051         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2052         def _check_smap(smap):
2053             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2054             self.failIf(smap.unrecoverable_versions())
2055             # now, which should have won?
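                 # (the tie between the two seqnum-4 versions is expected
                 # to be broken in favor of the larger root hash, hence the
                 # comparison below)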
2056             roothash_s4a = self.get_roothash_for(3)
2057             roothash_s4b = self.get_roothash_for(4)
2058             if roothash_s4b > roothash_s4a:
2059                 expected_contents = self.CONTENTS[4]
2060             else:
2061                 expected_contents = self.CONTENTS[3]
2062             new_versionid = smap.best_recoverable_version()
2063             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2064             d2 = self._fn.download_version(smap, new_versionid)
2065             d2.addCallback(self.failUnlessEqual, expected_contents)
2066             return d2
2067         d.addCallback(_check_smap)
2068         return d
2069
2070     def test_non_merge(self):
2071         self.old_shares = []
2072         d = self.publish_multiple()
2073         # repair should not refuse a repair that doesn't need to merge. In
2074         # this case, we combine v2 with v3. The repair should ignore v2 and
2075         # copy v3 into a new v5.
2076         d.addCallback(lambda res:
2077                       self._set_versions({0:2,2:2,4:2,6:2,8:2,
2078                                           1:3,3:3,5:3,7:3,9:3}))
2079         d.addCallback(lambda res: self._fn.check(Monitor()))
2080         d.addCallback(lambda check_results: self._fn.repair(check_results))
2081         # this should give us 10 shares of v3
2082         def _check_repair_results(rres):
2083             self.failUnless(rres.get_successful())
2084             pass # TODO
2085         d.addCallback(_check_repair_results)
2086         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2087         def _check_smap(smap):
2088             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2089             self.failIf(smap.unrecoverable_versions())
2090             # now, which should have won?
2091             expected_contents = self.CONTENTS[3]
2092             new_versionid = smap.best_recoverable_version()
2093             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2094             d2 = self._fn.download_version(smap, new_versionid)
2095             d2.addCallback(self.failUnlessEqual, expected_contents)
2096             return d2
2097         d.addCallback(_check_smap)
2098         return d
2099
2100     def get_roothash_for(self, index):
2101         # return the roothash for the first share we see in the saved set
2102         shares = self._copied_shares[index]
2103         for peerid in shares:
2104             for shnum in shares[peerid]:
2105                 share = shares[peerid][shnum]
2106                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2107                           unpack_header(share)
2108                 return root_hash
2109
2110     def test_check_and_repair_readcap(self):
2111         # we can't currently repair from a mutable readcap: #625
2112         self.old_shares = []
2113         d = self.publish_one()
2114         d.addCallback(self.copy_shares)
2115         def _get_readcap(res):
2116             self._fn3 = self._fn.get_readonly()
2117             # also delete some shares
2118             for peerid,shares in self._storage._peers.items():
2119                 shares.pop(0, None)
2120         d.addCallback(_get_readcap)
2121         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2122         def _check_results(crr):
2123             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2124             # we should detect that the file is unhealthy, but skip over mutable-readcap
2125             # repairs until #625 is fixed
2126             self.failIf(crr.get_pre_repair_results().is_healthy())
2127             self.failIf(crr.get_repair_attempted())
2128             self.failIf(crr.get_post_repair_results().is_healthy())
2129         d.addCallback(_check_results)
2130         return d
2131
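     # DevNullDictionary silently discards every write; it is used below to
     # disable the NodeMaker's node cache so that create_from_cap() always
     # builds a fresh node. A tiny illustration (hypothetical, not part of
     # the suite):
     #
     #   d = DevNullDictionary()
     #   d["key"] = "value"
     #   assert "key" not in d   # the write was dropped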
2132 class DevNullDictionary(dict):
2133     def __setitem__(self, key, value):
2134         return
2135
2136 class MultipleEncodings(unittest.TestCase):
2137     def setUp(self):
2138         self.CONTENTS = "New contents go here"
2139         self.uploadable = MutableData(self.CONTENTS)
2140         self._storage = FakeStorage()
2141         self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2142         self._storage_broker = self._nodemaker.storage_broker
2143         d = self._nodemaker.create_mutable_file(self.uploadable)
2144         def _created(node):
2145             self._fn = node
2146         d.addCallback(_created)
2147         return d
2148
2149     def _encode(self, k, n, data, version=SDMF_VERSION):
2150         # encode 'data' into a peerid->shares dict.
2151
2152         fn = self._fn
2153         # disable the nodecache, since for these tests we explicitly need
2154         # multiple nodes pointing at the same file
2155         self._nodemaker._node_cache = DevNullDictionary()
2156         fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2157         # then we copy over other fields that are normally fetched from the
2158         # existing shares
2159         fn2._pubkey = fn._pubkey
2160         fn2._privkey = fn._privkey
2161         fn2._encprivkey = fn._encprivkey
2162         # and set the encoding parameters to something completely different
2163         fn2._required_shares = k
2164         fn2._total_shares = n
2165
2166         s = self._storage
2167         s._peers = {} # clear existing storage
2168         p2 = Publish(fn2, self._storage_broker, None)
2169         uploadable = MutableData(data)
2170         d = p2.publish(uploadable)
2171         def _published(res):
2172             shares = s._peers
2173             s._peers = {}
2174             return shares
2175         d.addCallback(_published)
2176         return d
2177
2178     def make_servermap(self, mode=MODE_READ, oldmap=None):
2179         if oldmap is None:
2180             oldmap = ServerMap()
2181         smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
2182                                oldmap, mode)
2183         d = smu.update()
2184         return d
2185
2186     def test_multiple_encodings(self):
2187         # we encode the same file in three different ways (3-of-10, 4-of-9, and
2188         # 4-of-7), then mix up the shares, to make sure that download survives
2189         # seeing a variety of encodings. This is actually kind of tricky to set up.
2190
2191         contents1 = "Contents for encoding 1 (3-of-10) go here"
2192         contents2 = "Contents for encoding 2 (4-of-9) go here"
2193         contents3 = "Contents for encoding 3 (4-of-7) go here"
2194
2195         # we make a retrieval object that doesn't know what encoding
2196         # parameters to use
2197         fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2198
2199         # now we upload a file through fn1, and grab its shares
2200         d = self._encode(3, 10, contents1)
2201         def _encoded_1(shares):
2202             self._shares1 = shares
2203         d.addCallback(_encoded_1)
2204         d.addCallback(lambda res: self._encode(4, 9, contents2))
2205         def _encoded_2(shares):
2206             self._shares2 = shares
2207         d.addCallback(_encoded_2)
2208         d.addCallback(lambda res: self._encode(4, 7, contents3))
2209         def _encoded_3(shares):
2210             self._shares3 = shares
2211         d.addCallback(_encoded_3)
2212
2213         def _merge(res):
2214             log.msg("merging sharelists")
2215             # we merge the shares from the two sets, leaving each shnum in
2216             # its original location, but using a share from set1 or set2
2217             # according to the following sequence:
2218             #
2219             #  4-of-9  a  s2
2220             #  4-of-9  b  s2
2221             #  4-of-7  c   s3
2222             #  4-of-9  d  s2
2223             #  3-of-10 e s1
2224             #  3-of-10 f s1
2225             #  3-of-10 g s1
2226             #  4-of-9  h  s2
2227             #
2228             # so that neither form can be recovered until fetch [f], at which
2229             # point version-s1 (the 3-of-10 form) should be recoverable. If
2230             # the implementation latches on to the first version it sees,
2231             # then s2 will be recoverable at fetch [g].
2232
2233             # Later, when we implement code that handles multiple versions,
2234             # we can use this framework to assert that all recoverable
2235             # versions are retrieved, and test that 'epsilon' does its job
2236
2237             places = [2, 2, 3, 2, 1, 1, 1, 2] # which share set supplies shnums 0..7
2238
2239             sharemap = {}
2240             sb = self._storage_broker
2241
2242             for peerid in sorted(sb.get_all_serverids()):
2243                 for shnum in self._shares1.get(peerid, {}):
2244                     if shnum < len(places):
2245                         which = places[shnum]
2246                     else:
2247                         which = "x"
2248                     self._storage._peers[peerid] = peers = {}
2249                     in_1 = shnum in self._shares1[peerid]
2250                     in_2 = shnum in self._shares2.get(peerid, {})
2251                     in_3 = shnum in self._shares3.get(peerid, {})
2252                     if which == 1:
2253                         if in_1:
2254                             peers[shnum] = self._shares1[peerid][shnum]
2255                             sharemap[shnum] = peerid
2256                     elif which == 2:
2257                         if in_2:
2258                             peers[shnum] = self._shares2[peerid][shnum]
2259                             sharemap[shnum] = peerid
2260                     elif which == 3:
2261                         if in_3:
2262                             peers[shnum] = self._shares3[peerid][shnum]
2263                             sharemap[shnum] = peerid
2264
2265             # we don't bother placing any other shares
2266             # now sort the sequence so that share 0 is returned first
2267             new_sequence = [sharemap[shnum]
2268                             for shnum in sorted(sharemap.keys())]
2269             self._storage._sequence = new_sequence
2270             log.msg("merge done")
2271         d.addCallback(_merge)
2272         d.addCallback(lambda res: fn3.download_best_version())
2273         def _retrieved(new_contents):
2274             # the current specified behavior is "first version recoverable"
2275             self.failUnlessEqual(new_contents, contents1)
2276         d.addCallback(_retrieved)
2277         return d
2278
2279
2280 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
2281
2282     def setUp(self):
2283         return self.publish_multiple()
2284
2285     def test_multiple_versions(self):
2286         # if we see a mix of versions in the grid, download_best_version
2287         # should get the latest one
2288         self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2289         d = self._fn.download_best_version()
2290         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2291         # and the checker should report problems
2292         d.addCallback(lambda res: self._fn.check(Monitor()))
2293         d.addCallback(self.check_bad, "test_multiple_versions")
2294
2295         # but if everything is at version 2, that's what we should download
2296         d.addCallback(lambda res:
2297                       self._set_versions(dict([(i,2) for i in range(10)])))
2298         d.addCallback(lambda res: self._fn.download_best_version())
2299         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2300         # if exactly one share is at version 3, we should still get v2
2301         d.addCallback(lambda res:
2302                       self._set_versions({0:3}))
2303         d.addCallback(lambda res: self._fn.download_best_version())
2304         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2305         # but the servermap should see the unrecoverable version. This
2306         # depends upon the single newer share being queried early.
2307         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2308         def _check_smap(smap):
2309             self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2310             newer = smap.unrecoverable_newer_versions()
2311             self.failUnlessEqual(len(newer), 1)
2312             verinfo, health = newer.items()[0]
2313             self.failUnlessEqual(verinfo[0], 4)
2314             self.failUnlessEqual(health, (1,3)) # one share seen, k=3
2315             self.failIf(smap.needs_merge())
2316         d.addCallback(_check_smap)
2317         # if we have a mix of two parallel versions (s4a and s4b), we could
2318         # recover either
2319         d.addCallback(lambda res:
2320                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2321                                           1:4,3:4,5:4,7:4,9:4}))
2322         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2323         def _check_smap_mixed(smap):
2324             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2325             newer = smap.unrecoverable_newer_versions()
2326             self.failUnlessEqual(len(newer), 0)
2327             self.failUnless(smap.needs_merge())
2328         d.addCallback(_check_smap_mixed)
2329         d.addCallback(lambda res: self._fn.download_best_version())
2330         d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2331                                                   res == self.CONTENTS[4]))
2332         return d
2333
2334     def test_replace(self):
2335         # if we see a mix of versions in the grid, we should be able to
2336         # replace them all with a newer version
2337
2338         # if exactly one share is at version 3, we should download (and
2339         # replace) v2, and the result should be v4. Note that the index we
2340         # give to _set_versions is different from the sequence number.
2341         target = dict([(i,2) for i in range(10)]) # seqnum3
2342         target[0] = 3 # seqnum4
2343         self._set_versions(target)
2344
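             # (modify() hands the modifier function the current contents,
             # the servermap it used, and a first_time flag, then publishes
             # whatever string the modifier returns)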
2345         def _modify(oldversion, servermap, first_time):
2346             return oldversion + " modified"
2347         d = self._fn.modify(_modify)
2348         d.addCallback(lambda res: self._fn.download_best_version())
2349         expected = self.CONTENTS[2] + " modified"
2350         d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2351         # and the servermap should indicate that the outlier was replaced too
2352         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2353         def _check_smap(smap):
2354             self.failUnlessEqual(smap.highest_seqnum(), 5)
2355             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2356             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2357         d.addCallback(_check_smap)
2358         return d
2359
2360
2361 class Utils(unittest.TestCase):
2362     def test_cache(self):
2363         c = ResponseCache()
2364         # xdata = base62.b2a(os.urandom(100))[:100]
2365         xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2366         ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
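             # cache two disjoint 100-byte spans, at offsets 0 and 2000,
             # under (verinfo="v1", shnum=1). The assertions below expect a
             # read to be satisfied only when the requested range lies
             # entirely within cached data; any miss (wrong verinfo, wrong
             # shnum, or a range poking past a cached span) returns None.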
2367         c.add("v1", 1, 0, xdata)
2368         c.add("v1", 1, 2000, ydata)
2369         self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2370         self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2371         self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2372         self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2373         self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2374         self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2375         self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2376         self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2377         self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
2378         self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2379         self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2380         self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2381         self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2382         self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2383         self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2384         self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2385         self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2386         self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
2387
2388         # test joining fragments
2389         c = ResponseCache()
2390         c.add("v1", 1, 0, xdata[:10])
2391         c.add("v1", 1, 10, xdata[10:20])
2392         self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2393
2394 class Exceptions(unittest.TestCase):
2395     def test_repr(self):
2396         nmde = NeedMoreDataError(100, 50, 100)
2397         self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde))
2398         ucwe = UncoordinatedWriteError()
2399         self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
2400
2401 class SameKeyGenerator:
2402     def __init__(self, pubkey, privkey):
2403         self.pubkey = pubkey
2404         self.privkey = privkey
2405     def generate(self, keysize=None):
2406         return defer.succeed( (self.pubkey, self.privkey) )
2407
2408 class FirstServerGetsKilled:
2409     done = False
2410     def notify(self, retval, wrapper, methname):
2411         if not self.done:
2412             wrapper.broken = True
2413             self.done = True
2414         return retval
2415
2416 class FirstServerGetsDeleted:
2417     def __init__(self):
2418         self.done = False
2419         self.silenced = None
2420     def notify(self, retval, wrapper, methname):
2421         if not self.done:
2422             # this query will work, but later queries should think the share
2423             # has been deleted
2424             self.done = True
2425             self.silenced = wrapper
2426             return retval
2427         if wrapper == self.silenced:
2428             assert methname == "slot_testv_and_readv_and_writev"
2429             return (True, {})
2430         return retval
2431
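# A minimal sketch of the hook the two helpers above plug into. This
# models (as an assumption about the no_network test wrapper, not a
# public Tahoe API) a wrapper whose post_call_notifier sees every remote
# call's result and may replace it, and whose 'broken' flag fails any
# later calls.
class _ToyWrapper:
    broken = False
    post_call_notifier = None
    def call(self, methname, result):
        if self.broken:
            raise RuntimeError("server marked broken")
        if self.post_call_notifier:
            result = self.post_call_notifier(result, self, methname)
        return result
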
2432 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2433     def do_publish_surprise(self, version):
2434         self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
2435         self.set_up_grid()
2436         nm = self.g.clients[0].nodemaker
2437         d = nm.create_mutable_file(MutableData("contents 1"),
2438                                     version=version)
2439         def _created(n):
2440             d = defer.succeed(None)
2441             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2442             def _got_smap1(smap):
2443                 # stash the old state of the file
2444                 self.old_map = smap
2445             d.addCallback(_got_smap1)
2446             # then modify the file, leaving the old map untouched
2447             d.addCallback(lambda res: log.msg("starting winning write"))
2448             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2449             # now attempt to modify the file with the old servermap. This
2450             # will look just like an uncoordinated write, in which every
2451             # single share got updated between our mapupdate and our publish
2452             d.addCallback(lambda res: log.msg("starting doomed write"))
2453             d.addCallback(lambda res:
2454                           self.shouldFail(UncoordinatedWriteError,
2455                                           "test_publish_surprise", None,
2456                                           n.upload,
2457                                           MutableData("contents 2a"), self.old_map))
2458             return d
2459         d.addCallback(_created)
2460         return d
2461
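    # A toy illustration (not the real Publish code path) of why the
    # doomed write above fails: at publish time each server reports the
    # seqnum it already holds, and any share newer than what the stale
    # servermap promised is treated as an uncoordinated write.
    def _toy_check_for_surprise(self, mapped_seqnum, reported_seqnums):
        for seqnum in reported_seqnums:
            if seqnum > mapped_seqnum:
                raise UncoordinatedWriteError()
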
2462     def test_publish_surprise_sdmf(self):
2463         return self.do_publish_surprise(SDMF_VERSION)
2464
2465     def test_publish_surprise_mdmf(self):
2466         return self.do_publish_surprise(MDMF_VERSION)
2467
2468     def test_retrieve_surprise(self):
2469         self.basedir = "mutable/Problems/test_retrieve_surprise"
2470         self.set_up_grid()
2471         nm = self.g.clients[0].nodemaker
2472         d = nm.create_mutable_file(MutableData("contents 1"))
2473         def _created(n):
2474             d = defer.succeed(None)
2475             d.addCallback(lambda res: n.get_servermap(MODE_READ))
2476             def _got_smap1(smap):
2477                 # stash the old state of the file
2478                 self.old_map = smap
2479             d.addCallback(_got_smap1)
2480             # then modify the file, leaving the old map untouched
2481             d.addCallback(lambda res: log.msg("starting winning write"))
2482             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2483             # now attempt to retrieve the old version with the old servermap.
2484             # This will look like someone has changed the file since we
2485             # updated the servermap.
2486             d.addCallback(lambda res: n._cache._clear())
2487             d.addCallback(lambda res: log.msg("starting doomed read"))
2488             d.addCallback(lambda res:
2489                           self.shouldFail(NotEnoughSharesError,
2490                                           "test_retrieve_surprise",
2491                                           "ran out of peers: have 0 of 1",
2492                                           n.download_version,
2493                                           self.old_map,
2494                                           self.old_map.best_recoverable_version(),
2495                                           ))
2496             return d
2497         d.addCallback(_created)
2498         return d
2499
2500
2501     def test_unexpected_shares(self):
2502         # upload the file, take a servermap, shut down one of the servers,
2503         # upload it again (causing shares to appear on a new server), then
2504         # upload using the old servermap. The last upload should fail with an
2505         # UncoordinatedWriteError, because of the shares that didn't appear
2506         # in the servermap.
2507         self.basedir = "mutable/Problems/test_unexpected_shares"
2508         self.set_up_grid()
2509         nm = self.g.clients[0].nodemaker
2510         d = nm.create_mutable_file(MutableData("contents 1"))
2511         def _created(n):
2512             d = defer.succeed(None)
2513             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2514             def _got_smap1(smap):
2515                 # stash the old state of the file
2516                 self.old_map = smap
2517                 # now shut down one of the servers
2518                 peer0 = list(smap.make_sharemap()[0])[0]
2519                 self.g.remove_server(peer0)
2520                 # then modify the file, leaving the old map untouched
2521                 log.msg("starting winning write")
2522                 return n.overwrite(MutableData("contents 2"))
2523             d.addCallback(_got_smap1)
2524             # now attempt to modify the file with the old servermap. This
2525             # will look just like an uncoordinated write, in which every
2526             # single share got updated between our mapupdate and our publish
2527             d.addCallback(lambda res: log.msg("starting doomed write"))
2528             d.addCallback(lambda res:
2529                           self.shouldFail(UncoordinatedWriteError,
2530                                           "test_surprise", None,
2531                                           n.upload,
2532                                           MutableData("contents 2a"), self.old_map))
2533             return d
2534         d.addCallback(_created)
2535         return d
2536
2537     def test_bad_server(self):
2538         # Break one server, then create the file: the initial publish should
2539         # complete with an alternate server. Breaking a second server should
2540         # not prevent an update from succeeding either.
2541         self.basedir = "mutable/Problems/test_bad_server"
2542         self.set_up_grid()
2543         nm = self.g.clients[0].nodemaker
2544
2545         # to make sure that one of the initial peers is broken, we have to
2546         # get creative. We create an RSA key and compute its storage-index.
2547         # Then we make a KeyGenerator that always returns that one key, and
2548         # use it to create the mutable file. This will get easier when we can
2549         # use #467 static-server-selection to disable permutation and force
2550         # the choice of server for share[0].
2551
2552         d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2553         def _got_key( (pubkey, privkey) ):
2554             nm.key_generator = SameKeyGenerator(pubkey, privkey)
2555             pubkey_s = pubkey.serialize()
2556             privkey_s = privkey.serialize()
2557             u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2558                                         ssk_pubkey_fingerprint_hash(pubkey_s))
2559             self._storage_index = u.get_storage_index()
2560         d.addCallback(_got_key)
2561         def _break_peer0(res):
2562             si = self._storage_index
2563             servers = nm.storage_broker.get_servers_for_psi(si)
2564             self.g.break_server(servers[0].get_serverid())
2565             self.server1 = servers[1]
2566         d.addCallback(_break_peer0)
2567         # now "create" the file, using the pre-established key, and let the
2568         # initial publish finally happen
2569         d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2570         # that ought to work
2571         def _got_node(n):
2572             d = n.download_best_version()
2573             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2574             # now break the second peer
2575             def _break_peer1(res):
2576                 self.g.break_server(self.server1.get_serverid())
2577             d.addCallback(_break_peer1)
2578             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2579             # that ought to work too
2580             d.addCallback(lambda res: n.download_best_version())
2581             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2582             def _explain_error(f):
2583                 print f
2584                 if f.check(NotEnoughServersError):
2585                     print "first_error:", f.value.first_error
2586                 return f
2587             d.addErrback(_explain_error)
2588             return d
2589         d.addCallback(_got_node)
2590         return d
2591
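    # A sketch of the permuted selection behind get_servers_for_psi:
    # servers are ranked by a hash tying each serverid to the storage
    # index, so once the key (and thus the SI) is pinned, share 0's
    # "first" server is predictable. The sha1 below is illustrative,
    # not Tahoe's actual permutation hash.
    @staticmethod
    def _toy_permuted_order(serverids, storage_index):
        import hashlib
        return sorted(serverids,
                      key=lambda sid: hashlib.sha1(sid + storage_index).digest())
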
2592     def test_bad_server_overlap(self):
2593         # like test_bad_server, but with no extra unused servers to fall back
2594         # upon. This means that we must re-use a server which we've already
2595         # used. If we don't remember the fact that we sent them one share
2596         # already, we'll mistakenly think we're experiencing an
2597         # UncoordinatedWriteError.
2598
2599         # Break one server, then create the file: the initial publish should
2600         # complete with an alternate server. Breaking a second server should
2601         # not prevent an update from succeeding either.
2602         self.basedir = "mutable/Problems/test_bad_server_overlap"
2603         self.set_up_grid()
2604         nm = self.g.clients[0].nodemaker
2605         sb = nm.storage_broker
2606
2607         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2608         self.g.break_server(peerids[0])
2609
2610         d = nm.create_mutable_file(MutableData("contents 1"))
2611         def _created(n):
2612             d = n.download_best_version()
2613             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2614             # now break one of the remaining servers
2615             def _break_second_server(res):
2616                 self.g.break_server(peerids[1])
2617             d.addCallback(_break_second_server)
2618             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2619             # that ought to work too
2620             d.addCallback(lambda res: n.download_best_version())
2621             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2622             return d
2623         d.addCallback(_created)
2624         return d
2625
2626     def test_publish_all_servers_bad(self):
2627         # Break all servers: the publish should fail
2628         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2629         self.set_up_grid()
2630         nm = self.g.clients[0].nodemaker
2631         for s in nm.storage_broker.get_connected_servers():
2632             s.get_rref().broken = True
2633
2634         d = self.shouldFail(NotEnoughServersError,
2635                             "test_publish_all_servers_bad",
2636                             "ran out of good servers",
2637                             nm.create_mutable_file, MutableData("contents"))
2638         return d
2639
2640     def test_publish_no_servers(self):
2641         # no servers at all: the publish should fail
2642         self.basedir = "mutable/Problems/test_publish_no_servers"
2643         self.set_up_grid(num_servers=0)
2644         nm = self.g.clients[0].nodemaker
2645
2646         d = self.shouldFail(NotEnoughServersError,
2647                             "test_publish_no_servers",
2648                             "Ran out of non-bad servers",
2649                             nm.create_mutable_file, MutableData("contents"))
2650         return d
2651
2652
2653     def test_privkey_query_error(self):
2654         # when a servermap is updated with MODE_WRITE, it tries to get the
2655         # privkey. Something might go wrong during this query attempt.
2656         # Exercise the code in _privkey_query_failed which tries to handle
2657         # such an error.
2658         self.basedir = "mutable/Problems/test_privkey_query_error"
2659         self.set_up_grid(num_servers=20)
2660         nm = self.g.clients[0].nodemaker
2661         nm._node_cache = DevNullDictionary() # disable the nodecache
2662
2663         # we need some contents that are large enough to push the privkey out
2664         # of the early part of the file
2665         LARGE = "These are Larger contents" * 2000 # about 50KB
2666         LARGE_uploadable = MutableData(LARGE)
2667         d = nm.create_mutable_file(LARGE_uploadable)
2668         def _created(n):
2669             self.uri = n.get_uri()
2670             self.n2 = nm.create_from_cap(self.uri)
2671
2672             # When a mapupdate is performed on a node that doesn't yet know
2673             # the privkey, a short read is sent to a batch of servers, to get
2674             # the verinfo and (hopefully, if the file is short enough) the
2675             # encprivkey. Our file is too large to let this first read
2676             # contain the encprivkey. Each non-encprivkey-bearing response
2677             # that arrives (until the node gets the encprivkey) will trigger
2678             # a second read to specifically read the encprivkey.
2679             #
2680             # So, to exercise this case:
2681             #  1. notice which server gets a read() call first
2682             #  2. tell that server to start throwing errors
2683             killer = FirstServerGetsKilled()
2684             for s in nm.storage_broker.get_connected_servers():
2685                 s.get_rref().post_call_notifier = killer.notify
2686         d.addCallback(_created)
2687
2688         # now we update a servermap from a new node (which doesn't have the
2689         # privkey yet, forcing it to use a separate privkey query). Note that
2690         # the map-update will succeed, since we'll just get a copy from one
2691         # of the other shares.
2692         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2693
2694         return d
2695
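    # A toy model of the two-step fetch described above: the initial
    # short read returns only the start of the share, and if the
    # encprivkey lies beyond it, a second, targeted read is issued --
    # the read that FirstServerGetsKilled/FirstServerGetsDeleted
    # sabotage. Here 'read(offset, length)' is a stand-in for a remote
    # share read, not a real API.
    def _toy_fetch_encprivkey(self, read, offset, length,
                              first_read_size=1000):
        data = read(0, first_read_size)
        if offset + length <= len(data):
            return data[offset:offset+length]
        return read(offset, length)
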
2696     def test_privkey_query_missing(self):
2697         # like test_privkey_query_error, but the shares are deleted by the
2698         # second query, instead of raising an exception.
2699         self.basedir = "mutable/Problems/test_privkey_query_missing"
2700         self.set_up_grid(num_servers=20)
2701         nm = self.g.clients[0].nodemaker
2702         LARGE = "These are Larger contents" * 2000 # about 50KB
2703         LARGE_uploadable = MutableData(LARGE)
2704         nm._node_cache = DevNullDictionary() # disable the nodecache
2705
2706         d = nm.create_mutable_file(LARGE_uploadable)
2707         def _created(n):
2708             self.uri = n.get_uri()
2709             self.n2 = nm.create_from_cap(self.uri)
2710             deleter = FirstServerGetsDeleted()
2711             for s in nm.storage_broker.get_connected_servers():
2712                 s.get_rref().post_call_notifier = deleter.notify
2713         d.addCallback(_created)
2714         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2715         return d
2716
2717
2718     def test_block_and_hash_query_error(self):
2719         # This tests for what happens when a query to a remote server
2720         # fails in either the hash validation step or the block getting
2721         # step (because of batching, this is the same actual query).
2722         # We need to have the storage server persist up until the point
2723         # that its prefix is validated, then suddenly die. This
2724         # exercises some exception handling code in Retrieve.
2725         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2726         self.set_up_grid(num_servers=20)
2727         nm = self.g.clients[0].nodemaker
2728         CONTENTS = "contents" * 2000
2729         CONTENTS_uploadable = MutableData(CONTENTS)
2730         d = nm.create_mutable_file(CONTENTS_uploadable)
2731         def _created(node):
2732             self._node = node
2733         d.addCallback(_created)
2734         d.addCallback(lambda ignored:
2735             self._node.get_servermap(MODE_READ))
2736         def _then(servermap):
2737             # we have our servermap. Now we set up the servers like the
2738             # tests above -- the first one that gets a read call should
2739             # start throwing errors, but only after returning its prefix
2740             # for validation. Since we'll download without fetching the
2741             # private key, the next query to the remote server will be
2742             # for either a block and salt or for hashes, either of which
2743             # will exercise the error handling code.
2744             killer = FirstServerGetsKilled()
2745             for s in nm.storage_broker.get_connected_servers():
2746                 s.get_rref().post_call_notifier = killer.notify
2747             ver = servermap.best_recoverable_version()
2748             assert ver
2749             return self._node.download_version(servermap, ver)
2750         d.addCallback(_then)
2751         d.addCallback(lambda data:
2752             self.failUnlessEqual(data, CONTENTS))
2753         return d
2754
2755
2756 class FileHandle(unittest.TestCase):
2757     def setUp(self):
2758         self.test_data = "Test Data" * 50000
2759         self.sio = StringIO(self.test_data)
2760         self.uploadable = MutableFileHandle(self.sio)
2761
2762
2763     def test_filehandle_read(self):
2764         self.basedir = "mutable/FileHandle/test_filehandle_read"
2765         chunk_size = 10
2766         for i in xrange(0, len(self.test_data), chunk_size):
2767             data = self.uploadable.read(chunk_size)
2768             data = "".join(data)
2769             start = i
2770             end = i + chunk_size
2771             self.failUnlessEqual(data, self.test_data[start:end])
2772
2773
2774     def test_filehandle_get_size(self):
2775         self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2776         actual_size = len(self.test_data)
2777         size = self.uploadable.get_size()
2778         self.failUnlessEqual(size, actual_size)
2779
2780
2781     def test_filehandle_get_size_out_of_order(self):
2782         # We should be able to call get_size whenever we want without
2783         # disturbing the location of the seek pointer.
2784         chunk_size = 100
2785         data = self.uploadable.read(chunk_size)
2786         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2787
2788         # Now get the size.
2789         size = self.uploadable.get_size()
2790         self.failUnlessEqual(size, len(self.test_data))
2791
2792         # Now get more data. We should be right where we left off.
2793         more_data = self.uploadable.read(chunk_size)
2794         start = chunk_size
2795         end = chunk_size * 2
2796         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2797
2798
2799     def test_filehandle_file(self):
2800         # Make sure that the MutableFileHandle works on a file as well
2801         # as a StringIO object, since in some cases it will be asked to
2802         # deal with files.
2803         self.basedir = self.mktemp()
2804         # mktemp() returns a pathname but does not create the directory
2805         os.mkdir(self.basedir)
2806         f_path = os.path.join(self.basedir, "test_file")
2807         f = open(f_path, "w")
2808         f.write(self.test_data)
2809         f.close()
2810         f = open(f_path, "r")
2811
2812         uploadable = MutableFileHandle(f)
2813
2814         data = uploadable.read(len(self.test_data))
2815         self.failUnlessEqual("".join(data), self.test_data)
2816         size = uploadable.get_size()
2817         self.failUnlessEqual(size, len(self.test_data))
2818
2819
2820     def test_close(self):
2821         # Make sure that the MutableFileHandle closes its handle when
2822         # told to do so.
2823         self.uploadable.close()
2824         self.failUnless(self.sio.closed)
2825
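# Both FileHandle's and DataHandle's uploadables implement the contract
# exercised in these tests: read(n) returns a *list* of byte-chunks
# (callers join them), and get_size() must not disturb the read
# position. A minimal sketch of that contract, not the real
# MutableFileHandle/MutableData implementation:
class _ToyUploadable:
    def __init__(self, data):
        self._data = data
        self._pos = 0
    def read(self, length):
        chunk = self._data[self._pos:self._pos+length]
        self._pos += len(chunk)
        return [chunk]
    def get_size(self):
        return len(self._data)   # independent of self._pos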
2826
2827 class DataHandle(unittest.TestCase):
2828     def setUp(self):
2829         self.test_data = "Test Data" * 50000
2830         self.uploadable = MutableData(self.test_data)
2831
2832
2833     def test_datahandle_read(self):
2834         chunk_size = 10
2835         for i in xrange(0, len(self.test_data), chunk_size):
2836             data = self.uploadable.read(chunk_size)
2837             data = "".join(data)
2838             start = i
2839             end = i + chunk_size
2840             self.failUnlessEqual(data, self.test_data[start:end])
2841
2842
2843     def test_datahandle_get_size(self):
2844         actual_size = len(self.test_data)
2845         size = self.uploadable.get_size()
2846         self.failUnlessEqual(size, actual_size)
2847
2848
2849     def test_datahandle_get_size_out_of_order(self):
2850         # We should be able to call get_size whenever we want without
2851         # disturbing the location of the seek pointer.
2852         chunk_size = 100
2853         data = self.uploadable.read(chunk_size)
2854         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2855
2856         # Now get the size.
2857         size = self.uploadable.get_size()
2858         self.failUnlessEqual(size, len(self.test_data))
2859
2860         # Now get more data. We should be right where we left off.
2861         more_data = self.uploadable.read(chunk_size)
2862         start = chunk_size
2863         end = chunk_size * 2
2864         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2865
2866
2867 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin,
2868               PublishMixin):
2869     def setUp(self):
2870         GridTestMixin.setUp(self)
2871         self.basedir = self.mktemp()
2872         self.set_up_grid()
2873         self.c = self.g.clients[0]
2874         self.nm = self.c.nodemaker
2875         self.data = "test data" * 100000 # about 900 KiB; MDMF
2876         self.small_data = "test data" * 10 # about 90 B; SDMF
2877
2878
2879     def do_upload_mdmf(self):
2880         d = self.nm.create_mutable_file(MutableData(self.data),
2881                                         version=MDMF_VERSION)
2882         def _then(n):
2883             assert isinstance(n, MutableFileNode)
2884             assert n._protocol_version == MDMF_VERSION
2885             self.mdmf_node = n
2886             return n
2887         d.addCallback(_then)
2888         return d
2889
2890     def do_upload_sdmf(self):
2891         d = self.nm.create_mutable_file(MutableData(self.small_data))
2892         def _then(n):
2893             assert isinstance(n, MutableFileNode)
2894             assert n._protocol_version == SDMF_VERSION
2895             self.sdmf_node = n
2896             return n
2897         d.addCallback(_then)
2898         return d
2899
2900     def do_upload_empty_sdmf(self):
2901         d = self.nm.create_mutable_file(MutableData(""))
2902         def _then(n):
2903             assert isinstance(n, MutableFileNode)
2904             self.sdmf_zero_length_node = n
2905             assert n._protocol_version == SDMF_VERSION
2906             return n
2907         d.addCallback(_then)
2908         return d
2909
2910     def do_upload(self):
2911         d = self.do_upload_mdmf()
2912         d.addCallback(lambda ign: self.do_upload_sdmf())
2913         return d
2914
2915     def test_debug(self):
2916         d = self.do_upload_mdmf()
2917         def _debug(n):
2918             fso = debug.FindSharesOptions()
2919             storage_index = base32.b2a(n.get_storage_index())
2920             fso.si_s = storage_index
2921             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
2922                             for (i,ss,storedir)
2923                             in self.iterate_servers()]
2924             fso.stdout = StringIO()
2925             fso.stderr = StringIO()
2926             debug.find_shares(fso)
2927             sharefiles = fso.stdout.getvalue().splitlines()
2928             expected = self.nm.default_encoding_parameters["n"]
2929             self.failUnlessEqual(len(sharefiles), expected)
2930
2931             do = debug.DumpOptions()
2932             do["filename"] = sharefiles[0]
2933             do.stdout = StringIO()
2934             debug.dump_share(do)
2935             output = do.stdout.getvalue()
2936             lines = set(output.splitlines())
2937             self.failUnless("Mutable slot found:" in lines, output)
2938             self.failUnless(" share_type: MDMF" in lines, output)
2939             self.failUnless(" num_extra_leases: 0" in lines, output)
2940             self.failUnless(" MDMF contents:" in lines, output)
2941             self.failUnless("  seqnum: 1" in lines, output)
2942             self.failUnless("  required_shares: 3" in lines, output)
2943             self.failUnless("  total_shares: 10" in lines, output)
2944             self.failUnless("  segsize: 131073" in lines, output)
2945             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
2946             vcap = n.get_verify_cap().to_string()
2947             self.failUnless("  verify-cap: %s" % vcap in lines, output)
2948
2949             cso = debug.CatalogSharesOptions()
2950             cso.nodedirs = fso.nodedirs
2951             cso.stdout = StringIO()
2952             cso.stderr = StringIO()
2953             debug.catalog_shares(cso)
2954             shares = cso.stdout.getvalue().splitlines()
2955             oneshare = shares[0] # all shares should be MDMF
2956             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
2957             self.failUnless(oneshare.startswith("MDMF"), oneshare)
2958             fields = oneshare.split()
2959             self.failUnlessEqual(fields[0], "MDMF")
2960             self.failUnlessEqual(fields[1], storage_index)
2961             self.failUnlessEqual(fields[2], "3/10")
2962             self.failUnlessEqual(fields[3], "%d" % len(self.data))
2963             self.failUnless(fields[4].startswith("#1:"), fields[4])
2964             # the rest of fields[4] is the roothash, which depends upon
2965             # encryption salts and is not constant. fields[5] is the
2966             # remaining time on the longest lease, which is timing dependent.
2967             # The rest of the line is the quoted pathname to the share.
2968         d.addCallback(_debug)
2969         return d
2970
2971     def test_get_sequence_number(self):
2972         d = self.do_upload()
2973         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
2974         d.addCallback(lambda bv:
2975             self.failUnlessEqual(bv.get_sequence_number(), 1))
2976         d.addCallback(lambda ignored:
2977             self.sdmf_node.get_best_readable_version())
2978         d.addCallback(lambda bv:
2979             self.failUnlessEqual(bv.get_sequence_number(), 1))
2980         # Now update. After the update, the sequence number in both
2981         # cases should be 2.
2982         def _do_update(ignored):
2983             new_data = MutableData("foo bar baz" * 100000)
2984             new_small_data = MutableData("foo bar baz" * 10)
2985             d1 = self.mdmf_node.overwrite(new_data)
2986             d2 = self.sdmf_node.overwrite(new_small_data)
2987             dl = gatherResults([d1, d2])
2988             return dl
2989         d.addCallback(_do_update)
2990         d.addCallback(lambda ignored:
2991             self.mdmf_node.get_best_readable_version())
2992         d.addCallback(lambda bv:
2993             self.failUnlessEqual(bv.get_sequence_number(), 2))
2994         d.addCallback(lambda ignored:
2995             self.sdmf_node.get_best_readable_version())
2996         d.addCallback(lambda bv:
2997             self.failUnlessEqual(bv.get_sequence_number(), 2))
2998         return d
2999
3000
3001     def test_cap_after_upload(self):
3002         # If we create a new mutable file as MDMF and upload data to it,
3003         # we should get an MDMF cap back from that file, and that cap
3004         # should be usable. That's essentially what an MDMF node is, so
3005         # just check the cap types.
3006         d = self.do_upload_mdmf()
3007         def _then(ign):
3008             mdmf_uri = self.mdmf_node.get_uri()
3009             cap = uri.from_string(mdmf_uri)
3010             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3011             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3012             cap = uri.from_string(readonly_mdmf_uri)
3013             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3014         d.addCallback(_then)
3015         return d
3016
3017     def test_mutable_version(self):
3018         # assert that getting parameters from the IMutableVersion object
3019         # gives us the same data as getting them from the filenode itself
3020         d = self.do_upload()
3021         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3022         def _check_mdmf(bv):
3023             n = self.mdmf_node
3024             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3025             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3026             self.failIf(bv.is_readonly())
3027         d.addCallback(_check_mdmf)
3028         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3029         def _check_sdmf(bv):
3030             n = self.sdmf_node
3031             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3032             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3033             self.failIf(bv.is_readonly())
3034         d.addCallback(_check_sdmf)
3035         return d
3036
3037
3038     def test_get_readonly_version(self):
3039         d = self.do_upload()
3040         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3041         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3042
3043         # Attempting to get a mutable version of a mutable file from a
3044         # filenode initialized with a readcap should return a readonly
3045         # version of that same node.
3046         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3047         d.addCallback(lambda ro: ro.get_best_mutable_version())
3048         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3049
3050         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3051         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3052
3053         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3054         d.addCallback(lambda ro: ro.get_best_mutable_version())
3055         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3056         return d
3057
3058
3059     def test_toplevel_overwrite(self):
3060         new_data = MutableData("foo bar baz" * 100000)
3061         new_small_data = MutableData("foo bar baz" * 10)
3062         d = self.do_upload()
3063         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3064         d.addCallback(lambda ignored:
3065             self.mdmf_node.download_best_version())
3066         d.addCallback(lambda data:
3067             self.failUnlessEqual(data, "foo bar baz" * 100000))
3068         d.addCallback(lambda ignored:
3069             self.sdmf_node.overwrite(new_small_data))
3070         d.addCallback(lambda ignored:
3071             self.sdmf_node.download_best_version())
3072         d.addCallback(lambda data:
3073             self.failUnlessEqual(data, "foo bar baz" * 10))
3074         return d
3075
3076
3077     def test_toplevel_modify(self):
3078         d = self.do_upload()
3079         def modifier(old_contents, servermap, first_time):
3080             return old_contents + "modified"
3081         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3082         d.addCallback(lambda ignored:
3083             self.mdmf_node.download_best_version())
3084         d.addCallback(lambda data:
3085             self.failUnlessIn("modified", data))
3086         d.addCallback(lambda ignored:
3087             self.sdmf_node.modify(modifier))
3088         d.addCallback(lambda ignored:
3089             self.sdmf_node.download_best_version())
3090         d.addCallback(lambda data:
3091             self.failUnlessIn("modified", data))
3092         return d
3093
3094
3095     def test_version_modify(self):
3096         # TODO: When we can publish multiple versions, alter this test
3097         # to modify a version other than the best usable version, then
3098         # verify that the modified version becomes the best recoverable one.
3099         d = self.do_upload()
3100         def modifier(old_contents, servermap, first_time):
3101             return old_contents + "modified"
3102         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3103         d.addCallback(lambda ignored:
3104             self.mdmf_node.download_best_version())
3105         d.addCallback(lambda data:
3106             self.failUnlessIn("modified", data))
3107         d.addCallback(lambda ignored:
3108             self.sdmf_node.modify(modifier))
3109         d.addCallback(lambda ignored:
3110             self.sdmf_node.download_best_version())
3111         d.addCallback(lambda data:
3112             self.failUnlessIn("modified", data))
3113         return d
3114
3115
3116     def test_download_version(self):
3117         d = self.publish_multiple()
3118         # We want to have two recoverable versions on the grid.
3119         d.addCallback(lambda res:
3120                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3121                                           1:1,3:1,5:1,7:1,9:1}))
3122         # Now try to download each version. We should get the plaintext
3123         # associated with that version.
3124         d.addCallback(lambda ignored:
3125             self._fn.get_servermap(mode=MODE_READ))
3126         def _got_servermap(smap):
3127             versions = smap.recoverable_versions()
3128             assert len(versions) == 2
3129
3130             self.servermap = smap
3131             self.version1, self.version2 = versions
3132             assert self.version1 != self.version2
3133
3134             self.version1_seqnum = self.version1[0]
3135             self.version2_seqnum = self.version2[0]
3136             self.version1_index = self.version1_seqnum - 1
3137             self.version2_index = self.version2_seqnum - 1
3138
3139         d.addCallback(_got_servermap)
3140         d.addCallback(lambda ignored:
3141             self._fn.download_version(self.servermap, self.version1))
3142         d.addCallback(lambda results:
3143             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3144                                  results))
3145         d.addCallback(lambda ignored:
3146             self._fn.download_version(self.servermap, self.version2))
3147         d.addCallback(lambda results:
3148             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3149                                  results))
3150         return d
3151
3152
3153     def test_download_nonexistent_version(self):
3154         d = self.do_upload_mdmf()
3155         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3156         def _set_servermap(servermap):
3157             self.servermap = servermap
3158         d.addCallback(_set_servermap)
3159         d.addCallback(lambda ignored:
3160            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3161                            None,
3162                            self.mdmf_node.download_version, self.servermap,
3163                            "not a version"))
3164         return d
3165
3166
3167     def test_partial_read(self):
3168         d = self.do_upload_mdmf()
3169         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3170         modes = [("start_on_segment_boundary",
3171                   mathutil.next_multiple(128 * 1024, 3), 50),
3172                  ("ending_one_byte_after_segment_boundary",
3173                   mathutil.next_multiple(128 * 1024, 3)-50, 51),
3174                  ("zero_length_at_start", 0, 0),
3175                  ("zero_length_in_middle", 50, 0),
3176                  ("zero_length_at_segment_boundary",
3177                   mathutil.next_multiple(128 * 1024, 3), 0),
3178                  ]
3179         for (name, offset, length) in modes:
3180             d.addCallback(self._do_partial_read, name, offset, length)
3181         # then read only a few bytes at a time, and see that the results are
3182         # what we expect.
3183         def _read_data(version):
3184             c = consumer.MemoryConsumer()
3185             d2 = defer.succeed(None)
3186             for i in xrange(0, len(self.data), 10000):
3187                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3188             d2.addCallback(lambda ignored:
3189                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3190             return d2
3191         d.addCallback(_read_data)
3192         return d
3193     def _do_partial_read(self, version, name, offset, length):
3194         c = consumer.MemoryConsumer()
3195         d = version.read(c, offset, length)
3196         expected = self.data[offset:offset+length]
3197         d.addCallback(lambda ignored: "".join(c.chunks))
3198         def _check(results):
3199             if results != expected:
3200                 print
3201                 print "got: %s ... %s" % (results[:20], results[-20:])
3202                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3203                 self.fail("results[%s] != expected" % name)
3204             return version # daisy-chained to next call
3205         d.addCallback(_check)
3206         return d
3207
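    # The boundary offsets above come from mathutil.next_multiple(n, k),
    # the smallest multiple of k that is >= n; with k=3 required shares,
    # next_multiple(128*1024, 3) == 131073, which matches the segsize
    # seen in test_debug. A sketch of the arithmetic:
    @staticmethod
    def _toy_next_multiple(n, k):
        return ((n + k - 1) // k) * k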
3208
3209     def _test_read_and_download(self, node, expected):
3210         d = node.get_best_readable_version()
3211         def _read_data(version):
3212             c = consumer.MemoryConsumer()
3213             d2 = defer.succeed(None)
3214             d2.addCallback(lambda ignored: version.read(c))
3215             d2.addCallback(lambda ignored:
3216                 self.failUnlessEqual(expected, "".join(c.chunks)))
3217             return d2
3218         d.addCallback(_read_data)
3219         d.addCallback(lambda ignored: node.download_best_version())
3220         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3221         return d
3222
3223     def test_read_and_download_mdmf(self):
3224         d = self.do_upload_mdmf()
3225         d.addCallback(self._test_read_and_download, self.data)
3226         return d
3227
3228     def test_read_and_download_sdmf(self):
3229         d = self.do_upload_sdmf()
3230         d.addCallback(self._test_read_and_download, self.small_data)
3231         return d
3232
3233     def test_read_and_download_sdmf_zero_length(self):
3234         d = self.do_upload_empty_sdmf()
3235         d.addCallback(self._test_read_and_download, "")
3236         return d
3237
3238
3239 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3240     timeout = 400 # these tests are too big, 120s is not enough on slow
3241                   # platforms
3242     def setUp(self):
3243         GridTestMixin.setUp(self)
3244         self.basedir = self.mktemp()
3245         self.set_up_grid()
3246         self.c = self.g.clients[0]
3247         self.nm = self.c.nodemaker
3248         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3249         self.small_data = "test data" * 10 # about 90 B; SDMF
3250
3251
3252     def do_upload_sdmf(self):
3253         d = self.nm.create_mutable_file(MutableData(self.small_data))
3254         def _then(n):
3255             assert isinstance(n, MutableFileNode)
3256             self.sdmf_node = n
3257             # Make SDMF node that has 255 shares.
3258             self.nm.default_encoding_parameters['n'] = 255
3259             self.nm.default_encoding_parameters['k'] = 127
3260             return self.nm.create_mutable_file(MutableData(self.small_data))
3261         d.addCallback(_then)
3262         def _then2(n):
3263             assert isinstance(n, MutableFileNode)
3264             self.sdmf_max_shares_node = n
3265         d.addCallback(_then2)
3266         return d
3267
3268     def do_upload_mdmf(self):
3269         d = self.nm.create_mutable_file(MutableData(self.data),
3270                                         version=MDMF_VERSION)
3271         def _then(n):
3272             assert isinstance(n, MutableFileNode)
3273             self.mdmf_node = n
3274             # Make MDMF node that has 255 shares.
3275             self.nm.default_encoding_parameters['n'] = 255
3276             self.nm.default_encoding_parameters['k'] = 127
3277             return self.nm.create_mutable_file(MutableData(self.data),
3278                                                version=MDMF_VERSION)
3279         d.addCallback(_then)
3280         def _then2(n):
3281             assert isinstance(n, MutableFileNode)
3282             self.mdmf_max_shares_node = n
3283         d.addCallback(_then2)
3284         return d
3285
3286     def _test_replace(self, offset, new_data):
3287         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3288         d0 = self.do_upload_mdmf()
3289         def _run(ign):
3290             d = defer.succeed(None)
3291             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3292                 d.addCallback(lambda ign: node.get_best_mutable_version())
3293                 d.addCallback(lambda mv:
3294                     mv.update(MutableData(new_data), offset))
3295                 # close over 'node'.
3296                 d.addCallback(lambda ignored, node=node:
3297                     node.download_best_version())
3298                 def _check(results):
3299                     if results != expected:
3300                         print
3301                         print "got: %s ... %s" % (results[:20], results[-20:])
3302                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3303                         self.fail("results != expected")
3304                 d.addCallback(_check)
3305             return d
3306         d0.addCallback(_run)
3307         return d0
3308
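    # update(MutableData(new_data), offset) is expected to behave like an
    # in-place splice that may extend the file; a pure-Python model of
    # the 'expected' computation in _test_replace (when offset == len(old)
    # this reduces to an append, as test_append below relies on):
    @staticmethod
    def _toy_splice(old, offset, new):
        return old[:offset] + new + old[offset+len(new):]
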
3309     def test_append(self):
3310         # We should be able to append data to a mutable file and get
3311         # what we expect.
3312         return self._test_replace(len(self.data), "appended")
3313
3314     def test_replace_middle(self):
3315         # We should be able to replace data in the middle of a mutable
3316         # file and get what we expect back.
3317         return self._test_replace(100, "replaced")
3318
3319     def test_replace_beginning(self):
3320         # We should be able to replace data at the beginning of the file
3321         # without truncating the file
3322         return self._test_replace(0, "beginning")
3323
3324     def test_replace_segstart1(self):
3325         return self._test_replace(128*1024+1, "NNNN")
3326
3327     def test_replace_zero_length_beginning(self):
3328         return self._test_replace(0, "")
3329
3330     def test_replace_zero_length_middle(self):
3331         return self._test_replace(50, "")
3332
3333     def test_replace_zero_length_segstart1(self):
3334         return self._test_replace(128*1024+1, "")
3335
3336     def test_replace_and_extend(self):
3337         # We should be able to replace data in the middle of a mutable
3338         # file and extend that mutable file and get what we expect.
3339         return self._test_replace(100, "modified " * 100000)
3340
3341
3342     def _check_differences(self, got, expected):
3343         # displaying arbitrary file corruption is tricky for a
3344         # 1MB file of repeating data, so look for likely places
3345         # with problems and display them separately
3346         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3347         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3348         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3349                     for (start,end) in gotmods]
3350         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3351                     for (start,end) in expmods]
3352         #print "expecting: %s" % expspans
3353
3354         SEGSIZE = 128*1024
3355         if got != expected:
3356             print "differences:"
3357             for segnum in range(len(expected)//SEGSIZE):
3358                 start = segnum * SEGSIZE
3359                 end = (segnum+1) * SEGSIZE
3360                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3361                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3362                 if got_ends != exp_ends:
3363                     print "expected[%d]: %s" % (start, exp_ends)
3364                     print "got     [%d]: %s" % (start, got_ends)
3365             if expspans != gotspans:
3366                 print "expected: %s" % expspans
3367                 print "got     : %s" % gotspans
3368             open("EXPECTED","wb").write(expected)
3369             open("GOT","wb").write(got)
3370             print "wrote data to EXPECTED and GOT"
3371             self.fail("didn't get expected data")
3372
3373
3374     def test_replace_locations(self):
3375         # exercise fencepost conditions
3376         SEGSIZE = 128*1024
3377         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3378         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3379         d0 = self.do_upload_mdmf()
3380         def _run(ign):
3381             expected = self.data
3382             d = defer.succeed(None)
3383             for offset in suspects:
3384                 new_data = letters.next()*2 # "AA", then "BB", etc
3385                 expected = expected[:offset]+new_data+expected[offset+2:]
3386                 d.addCallback(lambda ign:
3387                               self.mdmf_node.get_best_mutable_version())
3388                 def _modify(mv, offset=offset, new_data=new_data):
3389                     # close over 'offset','new_data'
3390                     md = MutableData(new_data)
3391                     return mv.update(md, offset)
3392                 d.addCallback(_modify)
3393                 d.addCallback(lambda ignored:
3394                               self.mdmf_node.download_best_version())
3395                 d.addCallback(self._check_differences, expected)
3396             return d
3397         d0.addCallback(_run)
3398         return d0
3399
3400     def test_replace_locations_max_shares(self):
3401         # exercise fencepost conditions
3402         SEGSIZE = 128*1024
3403         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3404         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3405         d0 = self.do_upload_mdmf()
3406         def _run(ign):
3407             expected = self.data
3408             d = defer.succeed(None)
3409             for offset in suspects:
3410                 new_data = letters.next()*2 # "AA", then "BB", etc
3411                 expected = expected[:offset]+new_data+expected[offset+2:]
3412                 d.addCallback(lambda ign:
3413                               self.mdmf_max_shares_node.get_best_mutable_version())
3414                 def _modify(mv, offset=offset, new_data=new_data):
3415                     # close over 'offset','new_data'
3416                     md = MutableData(new_data)
3417                     return mv.update(md, offset)
3418                 d.addCallback(_modify)
3419                 d.addCallback(lambda ignored:
3420                               self.mdmf_max_shares_node.download_best_version())
3421                 d.addCallback(self._check_differences, expected)
3422             return d
3423         d0.addCallback(_run)
3424         return d0
3425
3426
3427     def test_append_power_of_two(self):
3428         # If we attempt to extend a mutable file so that its segment
3429         # count crosses a power-of-two boundary, the update operation
3430         # should know how to reencode the file.
3431
3432         # Note that the data populating self.mdmf_node is about 900 KiB
3433         # long -- this is 7 segments in the default segment size. So we
3434         # need to add 2 segments worth of data to push it over a
3435         # power-of-two boundary.
3436         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3437         new_data = self.data + (segment * 2)
3438         d0 = self.do_upload_mdmf()
3439         def _run(ign):
3440             d = defer.succeed(None)
3441             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3442                 d.addCallback(lambda ign: node.get_best_mutable_version())
3443                 d.addCallback(lambda mv:
3444                     mv.update(MutableData(segment * 2), len(self.data)))
3445                 d.addCallback(lambda ignored, node=node:
3446                     node.download_best_version())
3447                 d.addCallback(lambda results:
3448                     self.failUnlessEqual(results, new_data))
3449             return d
3450         d0.addCallback(_run)
3451         return d0
3452
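    # The arithmetic behind the comment above, assuming the default
    # 128 KiB maximum segment size: ~900 KiB of data is 7 segments, and
    # two more segments push the count past the 8-segment power-of-two
    # boundary, forcing a re-encode.
    @staticmethod
    def _toy_segment_count(datalen, segsize=DEFAULT_MAX_SEGMENT_SIZE):
        return (datalen + segsize - 1) // segsize
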
3453     def test_update_sdmf(self):
3454         # Running update on a single-segment file should still work.
3455         new_data = self.small_data + "appended"
3456         d0 = self.do_upload_sdmf()
3457         def _run(ign):
3458             d = defer.succeed(None)
3459             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3460                 d.addCallback(lambda ign: node.get_best_mutable_version())
3461                 d.addCallback(lambda mv:
3462                     mv.update(MutableData("appended"), len(self.small_data)))
3463                 d.addCallback(lambda ignored, node=node:
3464                     node.download_best_version())
3465                 d.addCallback(lambda results:
3466                     self.failUnlessEqual(results, new_data))
3467             return d
3468         d0.addCallback(_run)
3469         return d0
3470
3471     def test_replace_in_last_segment(self):
3472         # The wrapper should know how to handle the tail segment
3473         # appropriately.
3474         replace_offset = len(self.data) - 100
3475         new_data = self.data[:replace_offset] + "replaced"
3476         rest_offset = replace_offset + len("replaced")
3477         new_data += self.data[rest_offset:]
3478         d0 = self.do_upload_mdmf()
3479         def _run(ign):
3480             d = defer.succeed(None)
3481             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3482                 d.addCallback(lambda ign: node.get_best_mutable_version())
3483                 d.addCallback(lambda mv:
3484                     mv.update(MutableData("replaced"), replace_offset))
3485                 d.addCallback(lambda ignored, node=node:
3486                     node.download_best_version())
3487                 d.addCallback(lambda results:
3488                     self.failUnlessEqual(results, new_data))
3489             return d
3490         d0.addCallback(_run)
3491         return d0
3492
3493     def test_multiple_segment_replace(self):
3494         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3495         new_data = self.data[:replace_offset]
3496         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3497         new_data += 2 * new_segment
3498         new_data += "replaced"
3499         rest_offset = len(new_data)
3500         new_data += self.data[rest_offset:]
3501         d0 = self.do_upload_mdmf()
3502         def _run(ign):
3503             d = defer.succeed(None)
3504             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3505                 d.addCallback(lambda ign: node.get_best_mutable_version())
3506                 d.addCallback(lambda mv:
3507                     mv.update(MutableData((2 * new_segment) + "replaced"),
3508                               replace_offset))
3509                 d.addCallback(lambda ignored, node=node:
3510                     node.download_best_version())
3511                 d.addCallback(lambda results:
3512                     self.failUnlessEqual(results, new_data))
3513             return d
3514         d0.addCallback(_run)
3515         return d0
3516
3517 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3518     sdmf_old_shares = {}
3519     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3520     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3521     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3522     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3523     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3524     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3525     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3526     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3527     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3528     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3529     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3530     sdmf_old_contents = "This is a test file.\n"
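
    # A hedged sanity check (an addition, not part of the original file):
    # each canned share above should base64-decode to an SDMF v1 container,
    # whose serialization starts with the magic string checked below. The
    # method name is hypothetical.
    def test_old_shares_are_v1_containers(self):
        for (shnum, encoded) in self.sdmf_old_shares.items():
            data = base64.b64decode(encoded)
            self.failUnless(data.startswith("Tahoe mutable container v1\n"),
                            "share %d lacks the SDMF v1 magic" % shnum)
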
3531     def copy_sdmf_shares(self):
3532         # Short-circuit the upload process: write the canned shares directly.
3533         servernums = self.g.servers_by_number.keys()
3534         assert len(servernums) == 10
3535
3536         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3537         # Get the storage index.
3538         cap = uri.from_string(self.sdmf_old_cap)
3539         si = cap.get_storage_index()
3540
3541         # Now execute each assignment by writing the share data to disk.
3542         for (share, servernum) in assignments:
3543             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3544             storedir = self.get_serverdir(servernum)
3545             storage_path = os.path.join(storedir, "shares",
3546                                         storage_index_to_dir(si))
3547             fileutil.make_dirs(storage_path)
3548             fileutil.write(os.path.join(storage_path, "%d" % share),
3549                            sharedata)
3550         # ...and verify that the shares are there.
3551         shares = self.find_uri_shares(self.sdmf_old_cap)
3552         assert len(shares) == 10
3553
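    # A hedged illustration (an addition, not part of the original file):
    # copy_sdmf_shares assumes the on-disk layout
    # <serverdir>/shares/<storage_index_to_dir(si)>/<shnum>, and every one
    # of the 10 servers receives exactly one share. This sketch documents
    # that assumption; the method name is hypothetical.
    def test_share_layout_assumption(self):
        self.basedir = "mutable/Interoperability/share_layout_assumption"
        self.set_up_grid()
        self.copy_sdmf_shares()
        si = uri.from_string(self.sdmf_old_cap).get_storage_index()
        sharedir = os.path.join(self.get_serverdir(0), "shares",
                                storage_index_to_dir(si))
        self.failUnless(os.path.exists(sharedir), sharedir)
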
3554     def test_new_downloader_can_read_old_shares(self):
3555         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3556         self.set_up_grid()
3557         self.copy_sdmf_shares()
3558         nm = self.g.clients[0].nodemaker
3559         n = nm.create_from_cap(self.sdmf_old_cap)
3560         d = n.download_best_version()
3561         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3562         return d
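
    # A hedged companion check (an addition, not part of the original
    # file): the canned cap should parse as a writeable SDMF (SSK) cap
    # with a 16-byte storage index, so the nodemaker above exercises the
    # SDMF code path. The method name is hypothetical.
    def test_old_cap_parses_as_sdmf(self):
        cap = uri.from_string(self.sdmf_old_cap)
        self.failUnless(isinstance(cap, uri.WriteableSSKFileURI), cap)
        self.failUnlessEqual(len(cap.get_storage_index()), 16)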
3563
3564 class DifferentEncoding(unittest.TestCase):
3565     def setUp(self):
3566         self._storage = s = FakeStorage()
3567         self.nodemaker = make_nodemaker(s)
3568
3569     def test_filenode(self):
3570         # create a file with 3-of-20 encoding, then modify it with a client
3571         # configured for 3-of-10; ticket #1510 tracks a failure here
3572         self.nodemaker.default_encoding_parameters["n"] = 20
3573         d = self.nodemaker.create_mutable_file("old contents")
3574         def _created(n):
3575             filecap = n.get_cap().to_string()
3576             del n # we want a new object, not the cached one
3577             self.nodemaker.default_encoding_parameters["n"] = 10
3578             n2 = self.nodemaker.create_from_cap(filecap)
3579             return n2
3580         d.addCallback(_created)
3581         def modifier(old_contents, servermap, first_time):
3582             return "new contents"
3583         d.addCallback(lambda n: n.modify(modifier))
3584         return d
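
    # A hedged extension of test_filenode above (an addition, not part of
    # the original file): after the 3-of-10 modify() of a file created at
    # 3-of-20, a fresh node made from the same cap should download the new
    # contents. The method name is hypothetical; the callback sequence
    # mirrors the original test.
    def test_filenode_readback(self):
        self.nodemaker.default_encoding_parameters["n"] = 20
        d = self.nodemaker.create_mutable_file("old contents")
        def _created(n):
            self._filecap = n.get_cap().to_string()
            del n # we want a new object, not the cached one
            self.nodemaker.default_encoding_parameters["n"] = 10
            return self.nodemaker.create_from_cap(self._filecap)
        d.addCallback(_created)
        def modifier(old_contents, servermap, first_time):
            return "new contents"
        d.addCallback(lambda n2: n2.modify(modifier))
        d.addCallback(lambda ign:
                      self.nodemaker.create_from_cap(self._filecap))
        d.addCallback(lambda n3: n3.download_best_version())
        d.addCallback(self.failUnlessEqual, "new contents")
        return d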