]> git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/blob - src/allmydata/test/test_mutable.py
5a1534d64a45bb05bdd0efdceef7ae51a971ee42
[tahoe-lafs/tahoe-lafs.git] / src / allmydata / test / test_mutable.py
1
2 import os, re, base64
3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from allmydata import uri, client
7 from allmydata.nodemaker import NodeMaker
8 from allmydata.util import base32, consumer, fileutil, mathutil
9 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
10      ssk_pubkey_fingerprint_hash
11 from allmydata.util.consumer import MemoryConsumer
12 from allmydata.util.deferredutil import gatherResults
13 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
14      NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
15 from allmydata.monitor import Monitor
16 from allmydata.test.common import ShouldFailMixin
17 from allmydata.test.no_network import GridTestMixin
18 from foolscap.api import eventually, fireEventually
19 from foolscap.logging import log
20 from allmydata.storage_client import StorageFarmBroker
21 from allmydata.storage.common import storage_index_to_dir
22 from allmydata.scripts import debug
23
24 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
25 from allmydata.mutable.common import ResponseCache, \
26      MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
27      NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
28      NotEnoughServersError, CorruptShareError
29 from allmydata.mutable.retrieve import Retrieve
30 from allmydata.mutable.publish import Publish, MutableFileHandle, \
31                                       MutableData, \
32                                       DEFAULT_MAX_SEGMENT_SIZE
33 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
34 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
35 from allmydata.mutable.repairer import MustForceRepairError
36
37 import allmydata.test.common_util as testutil
38 from allmydata.test.common import TEST_RSA_KEY_SIZE
39 from allmydata.test.test_download import PausingConsumer, \
40      PausingAndStoppingConsumer, StoppingConsumer, \
41      ImmediatelyStoppingConsumer
42
43
44 # this "FakeStorage" exists to put the share data in RAM and avoid using real
45 # network connections, both to speed up the tests and to reduce the amount of
46 # non-mutable.py code being exercised.
47
class FakeStorage:
    # Replacement for the collection of storage servers: shares live in RAM
    # so tests can inspect and tamper with published data directly. It can
    # also delay read responses and release them in a caller-chosen order,
    # which exercises more of the error-handling code in Retrieve.
    #
    # The storage index is ignored, so one instance serves exactly one
    # mutable file.

    def __init__(self):
        self._peers = {}
        # When _sequence is set (to a list of peerids), read() defers its
        # answers instead of responding immediately. We don't know how many
        # queries will arrive, so exactly one second after the first one we
        # release them all: first the peers named in _sequence (in that
        # order), then everyone else.
        self._sequence = None
        self._pending = {}
        self._pending_timer = None

    def read(self, peerid, storage_index):
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return defer.succeed(shares)
        # delayed mode: queue the answer, starting the release timer on the
        # first pending query
        d = defer.Deferred()
        if not self._pending:
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        self._pending.setdefault(peerid, []).append((d, shares))
        return d

    def _fire_readers(self):
        self._pending_timer = None
        pending, self._pending = self._pending, {}
        # answer the explicitly-sequenced peers first, in sequence order
        for peerid in self._sequence:
            if peerid in pending:
                for (d, shares) in pending.pop(peerid):
                    eventually(d.callback, shares)
        # then every peer that was not mentioned in the sequence
        for queries in pending.values():
            for (d, shares) in queries:
                eventually(d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
        shares = self._peers.setdefault(peerid, {})
        # emulate an in-place file write: start from the existing share
        # bytes, seek to the offset, and overwrite
        f = StringIO()
        f.write(shares.get(shnum, ""))
        f.seek(offset)
        f.write(data)
        shares[shnum] = f.getvalue()
103
104
class FakeStorageServer:
    # Stand-in for a remote storage server reference (rref), backed by a
    # shared FakeStorage instance. Method calls are dispatched to local
    # methods but made asynchronous via fireEventually, like real remote
    # calls would be.

    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        # number of remote invocations; examined by write-count tests
        self.queries = 0

    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        # defer the dispatch one reactor turn so callers cannot rely on
        # synchronous answers
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        # Fire-and-forget variant: the result (and any error) is
        # deliberately discarded and the caller gets None, matching
        # foolscap's callRemoteOnly semantics. Note the call is counted
        # twice in self.queries: once here and once inside callRemote.
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)
        return None

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        # corruption advisories are simply ignored by the fake server
        pass

    def slot_readv(self, storage_index, shnums, readv):
        # Read the requested byte ranges from each requested share; an
        # empty/falsy 'shnums' means "all shares we hold".
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)
161
162
def flip_bit(original, byte_offset):
    # Return a copy of 'original' with the low bit of the byte at
    # 'byte_offset' inverted; the string length is unchanged.
    corrupted = chr(ord(original[byte_offset]) ^ 0x01)
    return original[:byte_offset] + corrupted + original[byte_offset+1:]
167
def add_two(original, byte_offset):
    # Corrupt the byte at 'byte_offset' by toggling its 0x02 bit (an XOR,
    # despite the name). Flipping the low bit isn't enough for the version
    # byte, because 1 is also a valid version number; XORing with 2 always
    # produces a different, invalid value.
    corrupted = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + corrupted + original[byte_offset+1:]
174
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # Corrupt shares held in FakeStorage 's', then fire with 'res' so this
    # can sit in the middle of a Deferred chain. 'offset' names the byte to
    # corrupt: a key into the share's offset table, the string "pubkey", a
    # raw integer offset, or a (field, delta) tuple; 'offset_offset' shifts
    # the final position. if shnums_to_corrupt is None, corrupt all shares.
    # Otherwise it is a list of shnums to corrupt.
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            # 'data' and 'shnum' are passed as explicit callback args so
            # each deferred corrupts the share it was created for, not the
            # loop variables' final values.
            def _do_corruption(verinfo, data, shnum):
                # unpack the version info; 'o' is the offset table dict
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                # shares with an IV keep the pubkey at the fixed byte 107
                # rather than in the offset table — presumably the SDMF
                # layout; TODO confirm against mutable/layout.py
                if offset1 == "pubkey" and IV:
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                # the version byte gets add_two (XOR 0x02): a single
                # low-bit flip could turn version 0 into the equally-valid
                # version 1
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum)
            ds.append(d)
    # wait for every per-share corruption, then pass 'res' through
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl
223
def make_storagebroker(s=None, num_peers=10):
    # Build a StorageFarmBroker populated with FakeStorageServers that all
    # share one FakeStorage backend, so tests can inspect published shares.
    if not s:
        s = FakeStorage()
    storage_broker = StorageFarmBroker(None, True)
    for i in range(num_peers):
        # deterministic 20-byte peerids, one per fake server
        peerid = tagged_hash("peerid", "%d" % i)[:20]
        storage_broker.test_add_rref(peerid, FakeStorageServer(peerid, s))
    return storage_broker
234
def make_nodemaker(s=None, num_peers=10):
    # A NodeMaker wired to fake storage, with 3-of-10 default encoding and
    # the small test RSA key size for speed.
    storage_broker = make_storagebroker(s, num_peers)
    secret_holder = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    return NodeMaker(storage_broker, secret_holder, None,
                     None, None,
                     {"k": 3, "n": 10}, keygen)
244
245 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
246     # this used to be in Publish, but we removed the limit. Some of
247     # these tests test whether the new code correctly allows files
248     # larger than the limit.
249     OLD_MAX_SEGMENT_SIZE = 3500000
250     def setUp(self):
251         self._storage = s = FakeStorage()
252         self.nodemaker = make_nodemaker(s)
253
254     def test_create(self):
255         d = self.nodemaker.create_mutable_file()
256         def _created(n):
257             self.failUnless(isinstance(n, MutableFileNode))
258             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
259             sb = self.nodemaker.storage_broker
260             peer0 = sorted(sb.get_all_serverids())[0]
261             shnums = self._storage._peers[peer0].keys()
262             self.failUnlessEqual(len(shnums), 1)
263         d.addCallback(_created)
264         return d
265
266
267     def test_create_mdmf(self):
268         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
269         def _created(n):
270             self.failUnless(isinstance(n, MutableFileNode))
271             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
272             sb = self.nodemaker.storage_broker
273             peer0 = sorted(sb.get_all_serverids())[0]
274             shnums = self._storage._peers[peer0].keys()
275             self.failUnlessEqual(len(shnums), 1)
276         d.addCallback(_created)
277         return d
278
279     def test_single_share(self):
280         # Make sure that we tolerate publishing a single share.
281         self.nodemaker.default_encoding_parameters['k'] = 1
282         self.nodemaker.default_encoding_parameters['happy'] = 1
283         self.nodemaker.default_encoding_parameters['n'] = 1
284         d = defer.succeed(None)
285         for v in (SDMF_VERSION, MDMF_VERSION):
286             d.addCallback(lambda ignored:
287                 self.nodemaker.create_mutable_file(version=v))
288             def _created(n):
289                 self.failUnless(isinstance(n, MutableFileNode))
290                 self._node = n
291                 return n
292             d.addCallback(_created)
293             d.addCallback(lambda n:
294                 n.overwrite(MutableData("Contents" * 50000)))
295             d.addCallback(lambda ignored:
296                 self._node.download_best_version())
297             d.addCallback(lambda contents:
298                 self.failUnlessEqual(contents, "Contents" * 50000))
299         return d
300
301     def test_max_shares(self):
302         self.nodemaker.default_encoding_parameters['n'] = 255
303         d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
304         def _created(n):
305             self.failUnless(isinstance(n, MutableFileNode))
306             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
307             sb = self.nodemaker.storage_broker
308             num_shares = sum([len(self._storage._peers[x].keys()) for x \
309                               in sb.get_all_serverids()])
310             self.failUnlessEqual(num_shares, 255)
311             self._node = n
312             return n
313         d.addCallback(_created)
314         # Now we upload some contents
315         d.addCallback(lambda n:
316             n.overwrite(MutableData("contents" * 50000)))
317         # ...then download contents
318         d.addCallback(lambda ignored:
319             self._node.download_best_version())
320         # ...and check to make sure everything went okay.
321         d.addCallback(lambda contents:
322             self.failUnlessEqual("contents" * 50000, contents))
323         return d
324
325     def test_max_shares_mdmf(self):
326         # Test how files behave when there are 255 shares.
327         self.nodemaker.default_encoding_parameters['n'] = 255
328         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
329         def _created(n):
330             self.failUnless(isinstance(n, MutableFileNode))
331             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
332             sb = self.nodemaker.storage_broker
333             num_shares = sum([len(self._storage._peers[x].keys()) for x \
334                               in sb.get_all_serverids()])
335             self.failUnlessEqual(num_shares, 255)
336             self._node = n
337             return n
338         d.addCallback(_created)
339         d.addCallback(lambda n:
340             n.overwrite(MutableData("contents" * 50000)))
341         d.addCallback(lambda ignored:
342             self._node.download_best_version())
343         d.addCallback(lambda contents:
344             self.failUnlessEqual(contents, "contents" * 50000))
345         return d
346
347     def test_mdmf_filenode_cap(self):
348         # Test that an MDMF filenode, once created, returns an MDMF URI.
349         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
350         def _created(n):
351             self.failUnless(isinstance(n, MutableFileNode))
352             cap = n.get_cap()
353             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
354             rcap = n.get_readcap()
355             self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
356             vcap = n.get_verify_cap()
357             self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
358         d.addCallback(_created)
359         return d
360
361
362     def test_create_from_mdmf_writecap(self):
363         # Test that the nodemaker is capable of creating an MDMF
364         # filenode given an MDMF cap.
365         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
366         def _created(n):
367             self.failUnless(isinstance(n, MutableFileNode))
368             s = n.get_uri()
369             self.failUnless(s.startswith("URI:MDMF"))
370             n2 = self.nodemaker.create_from_cap(s)
371             self.failUnless(isinstance(n2, MutableFileNode))
372             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
373             self.failUnlessEqual(n.get_uri(), n2.get_uri())
374         d.addCallback(_created)
375         return d
376
377
378     def test_create_from_mdmf_writecap_with_extensions(self):
379         # Test that the nodemaker is capable of creating an MDMF
380         # filenode when given a writecap with extension parameters in
381         # them.
382         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
383         def _created(n):
384             self.failUnless(isinstance(n, MutableFileNode))
385             s = n.get_uri()
386             # We need to cheat a little and delete the nodemaker's
387             # cache, otherwise we'll get the same node instance back.
388             self.failUnlessIn(":3:131073", s)
389             n2 = self.nodemaker.create_from_cap(s)
390
391             self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
392             self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
393             hints = n2._downloader_hints
394             self.failUnlessEqual(hints['k'], 3)
395             self.failUnlessEqual(hints['segsize'], 131073)
396         d.addCallback(_created)
397         return d
398
399
400     def test_create_from_mdmf_readcap(self):
401         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
402         def _created(n):
403             self.failUnless(isinstance(n, MutableFileNode))
404             s = n.get_readonly_uri()
405             n2 = self.nodemaker.create_from_cap(s)
406             self.failUnless(isinstance(n2, MutableFileNode))
407
408             # Check that it's a readonly node
409             self.failUnless(n2.is_readonly())
410         d.addCallback(_created)
411         return d
412
413
414     def test_create_from_mdmf_readcap_with_extensions(self):
415         # We should be able to create an MDMF filenode with the
416         # extension parameters without it breaking.
417         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
418         def _created(n):
419             self.failUnless(isinstance(n, MutableFileNode))
420             s = n.get_readonly_uri()
421             self.failUnlessIn(":3:131073", s)
422
423             n2 = self.nodemaker.create_from_cap(s)
424             self.failUnless(isinstance(n2, MutableFileNode))
425             self.failUnless(n2.is_readonly())
426             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
427             hints = n2._downloader_hints
428             self.failUnlessEqual(hints["k"], 3)
429             self.failUnlessEqual(hints["segsize"], 131073)
430         d.addCallback(_created)
431         return d
432
433
434     def test_internal_version_from_cap(self):
435         # MutableFileNodes and MutableFileVersions have an internal
436         # switch that tells them whether they're dealing with an SDMF or
437         # MDMF mutable file when they start doing stuff. We want to make
438         # sure that this is set appropriately given an MDMF cap.
439         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
440         def _created(n):
441             self.uri = n.get_uri()
442             self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
443
444             n2 = self.nodemaker.create_from_cap(self.uri)
445             self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
446         d.addCallback(_created)
447         return d
448
449
450     def test_serialize(self):
451         n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
452         calls = []
453         def _callback(*args, **kwargs):
454             self.failUnlessEqual(args, (4,) )
455             self.failUnlessEqual(kwargs, {"foo": 5})
456             calls.append(1)
457             return 6
458         d = n._do_serialized(_callback, 4, foo=5)
459         def _check_callback(res):
460             self.failUnlessEqual(res, 6)
461             self.failUnlessEqual(calls, [1])
462         d.addCallback(_check_callback)
463
464         def _errback():
465             raise ValueError("heya")
466         d.addCallback(lambda res:
467                       self.shouldFail(ValueError, "_check_errback", "heya",
468                                       n._do_serialized, _errback))
469         return d
470
    def test_upload_and_download(self):
        # End-to-end SDMF exercise: servermap inspection, overwrite,
        # download, size query, the lower-level upload-with-servermap API,
        # version-specific download, and finally a file big enough to
        # defeat the mapupdate-to-retrieve read cache.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            # the servermap dump should mention the 3-of-10 encoding
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            # overwrite() should fire with None
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            # upload() against a MODE_WRITE servermap is the lower-level API
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # download a specific recoverable version via its servermap
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d
509
510
    def test_upload_and_download_mdmf(self):
        # MDMF round trip with contents large enough to force multiple
        # segments on both the upload and download paths.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            # the servermap dump should mention the 3-of-10 encoding
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d
547
548
549     def test_retrieve_producer_mdmf(self):
550         # We should make sure that the retriever is able to pause and stop
551         # correctly.
552         data = "contents1" * 100000
553         d = self.nodemaker.create_mutable_file(MutableData(data),
554                                                version=MDMF_VERSION)
555         d.addCallback(lambda node: node.get_best_mutable_version())
556         d.addCallback(self._test_retrieve_producer, "MDMF", data)
557         return d
558
559     # note: SDMF has only one big segment, so we can't use the usual
560     # after-the-first-write() trick to pause or stop the download.
561     # Disabled until we find a better approach.
562     def OFF_test_retrieve_producer_sdmf(self):
563         data = "contents1" * 100000
564         d = self.nodemaker.create_mutable_file(MutableData(data),
565                                                version=SDMF_VERSION)
566         d.addCallback(lambda node: node.get_best_mutable_version())
567         d.addCallback(self._test_retrieve_producer, "SDMF", data)
568         return d
569
    def _test_retrieve_producer(self, version, kind, data):
        # Shared body for the producer tests. 'version' is the mutable file
        # version delivered by the preceding callback, 'kind' labels the
        # shouldFail call sites, and 'data' is the expected plaintext.
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        d = version.read(c)
        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))

        # a consumer that pauses and then stops should abort the download
        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_pause_stop",
                                      "our Consumer called stopProducing()",
                                      version.read, c2))

        # a consumer that stops without pausing first
        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop",
                                      "our Consumer called stopProducing()",
                                      version.read, c3))

        # a consumer that stops immediately (before receiving data —
        # see ImmediatelyStoppingConsumer in test_download)
        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop_imm",
                                      "our Consumer called stopProducing()",
                                      version.read, c4))

        # finally, stop synchronously right after read() returns, before
        # the deferred has a chance to fire
        def _then(ign):
            c5 = MemoryConsumer()
            d1 = version.read(c5)
            c5.producer.stopProducing()
            return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                                   "our Consumer called stopProducing()",
                                   lambda: d1)
        d.addCallback(_then)
        return d
603
604     def test_download_from_mdmf_cap(self):
605         # We should be able to download an MDMF file given its cap
606         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
607         def _created(node):
608             self.uri = node.get_uri()
609
610             return node.overwrite(MutableData("contents1" * 100000))
611         def _then(ignored):
612             node = self.nodemaker.create_from_cap(self.uri)
613             return node.download_best_version()
614         def _downloaded(data):
615             self.failUnlessEqual(data, "contents1" * 100000)
616         d.addCallback(_created)
617         d.addCallback(_then)
618         d.addCallback(_downloaded)
619         return d
620
621
622     def test_create_and_download_from_bare_mdmf_cap(self):
623         # MDMF caps have extension parameters on them by default. We
624         # need to make sure that they work without extension parameters.
625         contents = MutableData("contents" * 100000)
626         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
627                                                contents=contents)
628         def _created(node):
629             uri = node.get_uri()
630             self._created = node
631             self.failUnlessIn(":3:131073", uri)
632             # Now strip that off the end of the uri, then try creating
633             # and downloading the node again.
634             bare_uri = uri.replace(":3:131073", "")
635             assert ":3:131073" not in bare_uri
636
637             return self.nodemaker.create_from_cap(bare_uri)
638         d.addCallback(_created)
639         def _created_bare(node):
640             self.failUnlessEqual(node.get_writekey(),
641                                  self._created.get_writekey())
642             self.failUnlessEqual(node.get_readkey(),
643                                  self._created.get_readkey())
644             self.failUnlessEqual(node.get_storage_index(),
645                                  self._created.get_storage_index())
646             return node.download_best_version()
647         d.addCallback(_created_bare)
648         d.addCallback(lambda data:
649             self.failUnlessEqual(data, "contents" * 100000))
650         return d
651
652
653     def test_mdmf_write_count(self):
654         # Publishing an MDMF file should only cause one write for each
655         # share that is to be published. Otherwise, we introduce
656         # undesirable semantics that are a regression from SDMF
657         upload = MutableData("MDMF" * 100000) # about 400 KiB
658         d = self.nodemaker.create_mutable_file(upload,
659                                                version=MDMF_VERSION)
660         def _check_server_write_counts(ignored):
661             sb = self.nodemaker.storage_broker
662             for server in sb.servers.itervalues():
663                 self.failUnlessEqual(server.get_rref().queries, 1)
664         d.addCallback(_check_server_write_counts)
665         return d
666
667
668     def test_create_with_initial_contents(self):
669         upload1 = MutableData("contents 1")
670         d = self.nodemaker.create_mutable_file(upload1)
671         def _created(n):
672             d = n.download_best_version()
673             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
674             upload2 = MutableData("contents 2")
675             d.addCallback(lambda res: n.overwrite(upload2))
676             d.addCallback(lambda res: n.download_best_version())
677             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
678             return d
679         d.addCallback(_created)
680         return d
681
682
683     def test_create_mdmf_with_initial_contents(self):
684         initial_contents = "foobarbaz" * 131072 # 900KiB
685         initial_contents_uploadable = MutableData(initial_contents)
686         d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
687                                                version=MDMF_VERSION)
688         def _created(n):
689             d = n.download_best_version()
690             d.addCallback(lambda data:
691                 self.failUnlessEqual(data, initial_contents))
692             uploadable2 = MutableData(initial_contents + "foobarbaz")
693             d.addCallback(lambda ignored:
694                 n.overwrite(uploadable2))
695             d.addCallback(lambda ignored:
696                 n.download_best_version())
697             d.addCallback(lambda data:
698                 self.failUnlessEqual(data, initial_contents +
699                                            "foobarbaz"))
700             return d
701         d.addCallback(_created)
702         return d
703
704
    def test_response_cache_memory_leak(self):
        """Downloading the same file twice must not grow the node's
        ResponseCache: the cache contents (compared via repr) must be
        identical after the second download."""
        d = self.nodemaker.create_mutable_file("contents")
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            # sanity-check that the node really uses a ResponseCache
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
                return d2
            # snapshot the cache after the first download, compare after
            # the second; repr() is evaluated lazily inside the callback,
            # i.e. only after the first download completed
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
            return d
        d.addCallback(_created)
        return d
722
723     def test_create_with_initial_contents_function(self):
724         data = "initial contents"
725         def _make_contents(n):
726             self.failUnless(isinstance(n, MutableFileNode))
727             key = n.get_writekey()
728             self.failUnless(isinstance(key, str), key)
729             self.failUnlessEqual(len(key), 16) # AES key size
730             return MutableData(data)
731         d = self.nodemaker.create_mutable_file(_make_contents)
732         def _created(n):
733             return n.download_best_version()
734         d.addCallback(_created)
735         d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
736         return d
737
738
739     def test_create_mdmf_with_initial_contents_function(self):
740         data = "initial contents" * 100000
741         def _make_contents(n):
742             self.failUnless(isinstance(n, MutableFileNode))
743             key = n.get_writekey()
744             self.failUnless(isinstance(key, str), key)
745             self.failUnlessEqual(len(key), 16)
746             return MutableData(data)
747         d = self.nodemaker.create_mutable_file(_make_contents,
748                                                version=MDMF_VERSION)
749         d.addCallback(lambda n:
750             n.download_best_version())
751         d.addCallback(lambda data2:
752             self.failUnlessEqual(data2, data))
753         return d
754
755
756     def test_create_with_too_large_contents(self):
757         BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
758         BIG_uploadable = MutableData(BIG)
759         d = self.nodemaker.create_mutable_file(BIG_uploadable)
760         def _created(n):
761             other_BIG_uploadable = MutableData(BIG)
762             d = n.overwrite(other_BIG_uploadable)
763             return d
764         d.addCallback(_created)
765         return d
766
767     def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
768         d = n.get_servermap(MODE_READ)
769         d.addCallback(lambda servermap: servermap.best_recoverable_version())
770         d.addCallback(lambda verinfo:
771                       self.failUnlessEqual(verinfo[0], expected_seqnum, which))
772         return d
773
    def test_modify(self):
        """Exercise MutableFileNode.modify() with a range of modifier
        callables: a normal modifier, no-op modifiers (same contents, and
        None meaning "no change"), a modifier that raises, modifiers that
        simulate a single UncoordinatedWriteError, and one producing
        oversized contents. The sequence number must only advance when
        the contents actually change."""
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            # returning the old contents unchanged should not publish
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            # returning None means "make no change"
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            # one byte past the old SDMF segment-size limit
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # identical contents: no publish, seqnum stays at 2
            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            # None from the modifier: also no publish
            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            # an exception from the modifier must propagate and leave the
            # file contents and seqnum untouched
            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))

            # NOTE(review): this "big" check has no preceding
            # _toobig_modifier call; the oversized modify only happens at
            # the very end of this test (and without a shouldFail wrapper,
            # so it is apparently expected to succeed) — presumably a
            # leftover from when oversized SDMF writes were rejected;
            # confirm against project history.
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            # one simulated UCWE, then success: two modifier invocations,
            # one publish, seqnum advances to 3
            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d
861
862
    def test_modify_backoffer(self):
        """Exercise the backoffer hook of MutableFileNode.modify(): a
        backoffer that re-raises stops retries immediately, one that
        pauses lets a retry succeed, and a BackoffAgent with a small
        retry budget gives up on a persistently-failing modifier."""
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            # returning the failure aborts the retry loop immediately
            return f
        def _backoff_pauser(node, f):
            # wait half a second, then let modify() retry
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # with the stopper, the simulated UCWE propagates out of
            # modify() and nothing is published: contents and seqnum
            # remain unchanged
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            # with the pauser, modify() retries after the delay and the
            # second modifier invocation succeeds, publishing "line3"
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            # the giveuper exhausts its retries against a modifier that
            # always fails; modify() errbacks and nothing new is published
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d
927
    def test_upload_and_download_full_size_keys(self):
        """Round-trip overwrite/upload/download using a real (full-size)
        RSA key generator instead of the fast test keys."""
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            # the servermap dump should mention the default 3-of-10 encoding
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            # overwrite() fires with None on success
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            # upload() with an explicitly-provided servermap
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # download a specific version chosen from a fresh servermap
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d
956
957
958     def test_size_after_servermap_update(self):
959         # a mutable file node should have something to say about how big
960         # it is after a servermap update is performed, since this tells
961         # us how large the best version of that mutable file is.
962         d = self.nodemaker.create_mutable_file()
963         def _created(n):
964             self.n = n
965             return n.get_servermap(MODE_READ)
966         d.addCallback(_created)
967         d.addCallback(lambda ignored:
968             self.failUnlessEqual(self.n.get_size(), 0))
969         d.addCallback(lambda ignored:
970             self.n.overwrite(MutableData("foobarbaz")))
971         d.addCallback(lambda ignored:
972             self.failUnlessEqual(self.n.get_size(), 9))
973         d.addCallback(lambda ignored:
974             self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
975         d.addCallback(_created)
976         d.addCallback(lambda ignored:
977             self.failUnlessEqual(self.n.get_size(), 9))
978         return d
979
980
class PublishMixin:
    """Test mixin that publishes mutable files into a FakeStorage-backed
    grid and stashes handles (self._fn, self._fn2, self._storage, ...)
    that tests can use to inspect or corrupt the resulting shares."""

    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
            # a second filenode for the same cap, so tests can exercise a
            # node that hasn't fetched anything yet (e.g. the privkey)
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS should have more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_multiple(self, version=0):
        """Publish five successive versions of one file, snapshotting the
        share tables after each publish into self._copied_shares so that
        tests can later mix shares from different versions with
        _set_versions(). Storage is left at the final state (version 4)."""
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d


    def _copy_shares(self, ignored, index):
        """Snapshot the current share tables into self._copied_shares[index]."""
        shares = self._storage._peers
        # we need a deep copy
        # (only the two dict levels are copied; the leaf share data is
        # shared between snapshots — presumably safe because publishes
        # replace share values wholesale rather than mutating them in
        # place; confirm against FakeStorage)
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]
1086
class Servermap(unittest.TestCase, PublishMixin):
    """Tests for ServermapUpdater: building and refreshing ServerMaps in
    the various MODE_* modes, and how the map reacts to missing or
    bad-marked shares. setUp publishes one 10-share (k=3, N=10) file."""
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        """Build a fresh ServerMap for *fn* (default self._fn) in *mode*;
        returns a Deferred that fires with the populated map."""
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        """Refresh an existing ServerMap for self._fn in *mode*."""
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        """Assert that *sm* shows exactly one recoverable version backed
        by *num_shares* shares (with k=3, N=10); returns *sm* so it can
        be passed along a callback chain."""
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        # an unknown shnum must not be reported as present
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        return sm

    def test_basic(self):
        """Each MODE_* should leave the expected number of shares in the
        map, both for a fresh map and when updating an existing one."""
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        """MODE_WRITE must fetch the privkey, both for a small file
        (where it arrives with the first query) and for a larger one
        where it sits beyond the initial read."""
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        """Shares marked bad must disappear from the map, and a MODE_WRITE
        update should then locate replacement shares."""
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares it in any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, peerid, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (peerid, shnum) )
                    sm.mark_bad_share(peerid, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (peerid, shnum) in self._corrupted:
                peer_shares = sm.shares_on_peer(peerid)
                self.failIf(shnum in peer_shares,
                            "%d was in %s" % (shnum, peer_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

    def failUnlessNoneRecoverable(self, sm):
        """Assert that *sm* sees no versions at all, recoverable or not."""
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        """With every share deleted, all modes must report nothing
        recoverable."""
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)
#
        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

    def failUnlessNotQuiteEnough(self, sm):
        """Assert that *sm* sees exactly one version, but unrecoverable:
        only 2 of the required k=3 shares remain; returns *sm*."""
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        return sm

    def test_not_quite_enough_shares(self):
        """With only 2 of 10 shares left (k=3), every mode must report the
        version as present but unrecoverable."""
        s = self._storage
        ms = self.make_servermap
        num_shares = len(s._peers)
        # empty out peers until only two still hold a share
        for peerid in s._peers:
            s._peers[peerid] = {}
            num_shares -= 1
            if num_shares == 2:
                break
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))

        return d


    def test_servermapupdater_finds_mdmf_files(self):
        # setUp already published an MDMF file for us. We just need to
        # make sure that when we run the ServermapUpdater, the file is
        # reported to have one recoverable version.
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        # Calling make_servermap also updates the servermap in the mode
        # that we specify, so we just need to see what it says.
        def _check_servermap(sm):
            self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        d.addCallback(_check_servermap)
        return d


    def test_fetch_update(self):
        """A MODE_WRITE map built with an update_range should collect
        update data for every share of the single published version."""
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
        def _check_servermap(sm):
            # 10 shares
            self.failUnlessEqual(len(sm.update_data), 10)
            # one version
            for data in sm.update_data.itervalues():
                self.failUnlessEqual(len(data), 1)
        d.addCallback(_check_servermap)
        return d


    def test_servermapupdater_finds_sdmf_files(self):
        """An SDMF file should likewise show one recoverable version."""
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_sdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        d.addCallback(lambda servermap:
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
        return d
1313
1314
class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
    """Publish a mutable file, then exercise the full mapupdate + retrieve
    round trip, including servermap reuse, vanished servers, and share
    corruption at specific field offsets (both SDMF and MDMF)."""
    def setUp(self):
        # publish one SDMF file; tests operate on self._fn / self._storage
        return self.publish_one()

    def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
        # Build (or refresh) a ServerMap for self._fn. `oldmap` lets a test
        # re-use a previously built map; `sb` substitutes an alternate
        # storage broker. Returns a Deferred firing with the updated map.
        if oldmap is None:
            oldmap = ServerMap()
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
        d = smu.update()
        return d

    def abbrev_verinfo(self, verinfo):
        # Render a verinfo tuple as a short "seqnum-roothashprefix" string
        # for debug output; returns None when there is no version.
        if verinfo is None:
            return None
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = verinfo
        return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])

    def abbrev_verinfo_dict(self, verinfo_d):
        # Like abbrev_verinfo, but abbreviates every key of a dict keyed by
        # verinfo tuples; the values pass through unchanged.
        output = {}
        for verinfo,value in verinfo_d.items():
            (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
             offsets_tuple) = verinfo
            output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
        return output

    def dump_servermap(self, servermap):
        # debug helper: print a human-readable summary of the servermap
        print "SERVERMAP", servermap
        print "RECOVERABLE", [self.abbrev_verinfo(v)
                              for v in servermap.recoverable_versions()]
        print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
        print "available", self.abbrev_verinfo_dict(servermap.shares_available())

    def do_download(self, servermap, version=None):
        # Retrieve `version` (default: best recoverable) of self._fn using
        # `servermap`; fires with the plaintext joined into a single string.
        if version is None:
            version = servermap.best_recoverable_version()
        r = Retrieve(self._fn, servermap, version)
        c = consumer.MemoryConsumer()
        d = r.download(consumer=c)
        d.addCallback(lambda mc: "".join(mc.chunks))
        return d


    def test_basic(self):
        """A download returns the published contents, and the same servermap
        is reusable across downloads, with and without refreshing it."""
        d = self.make_servermap()
        def _do_retrieve(servermap):
            self._smap = servermap
            #self.dump_servermap(servermap)
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
            return self.do_download(servermap)
        d.addCallback(_do_retrieve)
        def _retrieved(new_contents):
            self.failUnlessEqual(new_contents, self.CONTENTS)
        d.addCallback(_retrieved)
        # we should be able to re-use the same servermap, both with and
        # without updating it.
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        # clobbering the pubkey should make the servermap updater re-fetch it
        def _clobber_pubkey(res):
            self._fn._pubkey = None
        d.addCallback(_clobber_pubkey)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        return d

    def test_all_shares_vanished(self):
        """If every share disappears between mapupdate and retrieve, the
        download must fail with NotEnoughSharesError."""
        d = self.make_servermap()
        def _remove_shares(servermap):
            for shares in self._storage._peers.values():
                shares.clear()
            d1 = self.shouldFail(NotEnoughSharesError,
                                 "test_all_shares_vanished",
                                 "ran out of peers",
                                 self.do_download, servermap)
            return d1
        d.addCallback(_remove_shares)
        return d

    def test_no_servers(self):
        """With zero connected servers, a MODE_READ mapupdate yields an
        empty servermap rather than an error."""
        sb2 = make_storagebroker(num_peers=0)
        # if there are no servers, then a MODE_READ servermap should come
        # back empty
        d = self.make_servermap(sb=sb2)
        def _check_servermap(servermap):
            self.failUnlessEqual(servermap.best_recoverable_version(), None)
            self.failIf(servermap.recoverable_versions())
            self.failIf(servermap.unrecoverable_versions())
            self.failIf(servermap.all_peers())
        d.addCallback(_check_servermap)
        return d

    def test_no_servers_download(self):
        """download_best_version with no servers raises
        UnrecoverableFileError, and a later download (with servers restored)
        still works."""
        sb2 = make_storagebroker(num_peers=0)
        self._fn._storage_broker = sb2
        d = self.shouldFail(UnrecoverableFileError,
                            "test_no_servers_download",
                            "no recoverable versions",
                            self._fn.download_best_version)
        def _restore(res):
            # a failed download that occurs while we aren't connected to
            # anybody should not prevent a subsequent download from working.
            # This isn't quite the webapi-driven test that #463 wants, but it
            # should be close enough.
            self._fn._storage_broker = self._storage_broker
            return self._fn.download_best_version()
        def _retrieved(new_contents):
            self.failUnlessEqual(new_contents, self.CONTENTS)
        d.addCallback(_restore)
        d.addCallback(_retrieved)
        return d


    def _test_corrupt_all(self, offset, substring,
                          should_succeed=False,
                          corrupt_early=True,
                          failure_checker=None,
                          fetch_privkey=False):
        # Corrupt every share at `offset` (before the mapupdate when
        # corrupt_early, otherwise after it), then attempt a download.
        # On an expected failure, `substring` must appear either in the
        # servermap's problems or in the download failure message;
        # `failure_checker` may further inspect the failure result.
        # Fires with the servermap so callers can make additional checks.
        d = defer.succeed(None)
        if corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        d.addCallback(lambda res: self.make_servermap())
        if not corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        def _do_retrieve(servermap):
            ver = servermap.best_recoverable_version()
            if ver is None and not should_succeed:
                # no recoverable versions == not succeeding. The problem
                # should be noted in the servermap's list of problems.
                if substring:
                    allproblems = [str(f) for f in servermap.problems]
                    self.failUnlessIn(substring, "".join(allproblems))
                return servermap
            if should_succeed:
                d1 = self._fn.download_version(servermap, ver,
                                               fetch_privkey)
                d1.addCallback(lambda new_contents:
                               self.failUnlessEqual(new_contents, self.CONTENTS))
            else:
                d1 = self.shouldFail(NotEnoughSharesError,
                                     "_corrupt_all(offset=%s)" % (offset,),
                                     substring,
                                     self._fn.download_version, servermap,
                                                                ver,
                                                                fetch_privkey)
            if failure_checker:
                d1.addCallback(failure_checker)
            d1.addCallback(lambda res: servermap)
            return d1
        d.addCallback(_do_retrieve)
        return d

    def test_corrupt_all_verbyte(self):
        # when the version byte is not 0 or 1, we hit an UnknownVersionError
        # error in unpack_share().
        d = self._test_corrupt_all(0, "UnknownVersionError")
        def _check_servermap(servermap):
            # and the dump should mention the problems
            s = StringIO()
            dump = servermap.dump(s).getvalue()
            self.failUnless("30 PROBLEMS" in dump, dump)
        d.addCallback(_check_servermap)
        return d

    def test_corrupt_all_seqnum(self):
        # a corrupt sequence number will trigger a bad signature
        return self._test_corrupt_all(1, "signature is invalid")

    def test_corrupt_all_R(self):
        # a corrupt root hash will trigger a bad signature
        return self._test_corrupt_all(9, "signature is invalid")

    def test_corrupt_all_IV(self):
        # a corrupt salt/IV will trigger a bad signature
        return self._test_corrupt_all(41, "signature is invalid")

    def test_corrupt_all_k(self):
        # a corrupt 'k' will trigger a bad signature
        return self._test_corrupt_all(57, "signature is invalid")

    def test_corrupt_all_N(self):
        # a corrupt 'N' will trigger a bad signature
        return self._test_corrupt_all(58, "signature is invalid")

    def test_corrupt_all_segsize(self):
        # a corrupt segsize will trigger a bad signature
        return self._test_corrupt_all(59, "signature is invalid")

    def test_corrupt_all_datalen(self):
        # a corrupt data length will trigger a bad signature
        return self._test_corrupt_all(67, "signature is invalid")

    def test_corrupt_all_pubkey(self):
        # a corrupt pubkey won't match the URI's fingerprint. We need to
        # remove the pubkey from the filenode, or else it won't bother trying
        # to update it.
        self._fn._pubkey = None
        return self._test_corrupt_all("pubkey",
                                      "pubkey doesn't match fingerprint")

    def test_corrupt_all_sig(self):
        # a corrupt signature is a bad one
        # the signature runs from about [543:799], depending upon the length
        # of the pubkey
        return self._test_corrupt_all("signature", "signature is invalid")

    def test_corrupt_all_share_hash_chain_number(self):
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle the first byte, that will look like a bad hash number,
        # causing an IndexError
        return self._test_corrupt_all("share_hash_chain", "corrupt hashes")

    def test_corrupt_all_share_hash_chain_hash(self):
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle a few bytes in, that will look like a bad hash.
        return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")

    def test_corrupt_all_block_hash_tree(self):
        # corrupting the block hash tree is detected during retrieve
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure")

    def test_corrupt_all_block(self):
        # corrupting block data fails validation against the block hash tree
        return self._test_corrupt_all("share_data", "block hash tree failure")

    def test_corrupt_all_encprivkey(self):
        # a corrupted privkey won't even be noticed by the reader, only by a
        # writer.
        return self._test_corrupt_all("enc_privkey", None, should_succeed=True)


    def test_corrupt_all_encprivkey_late(self):
        # this should work for the same reason as above, but we corrupt
        # after the servermap update to exercise the error handling
        # code.
        # We need to remove the privkey from the node, or the retrieve
        # process won't know to update it.
        self._fn._privkey = None
        return self._test_corrupt_all("enc_privkey",
                                      None, # this shouldn't fail
                                      should_succeed=True,
                                      corrupt_early=False,
                                      fetch_privkey=True)


    # disabled until retrieve tests checkstring on each blockfetch. I didn't
    # just use a .todo because the failing-but-ignored test emits about 30kB
    # of noise.
    def OFF_test_corrupt_all_seqnum_late(self):
        # corrupting the seqnum between mapupdate and retrieve should result
        # in NotEnoughSharesError, since each share will look invalid
        def _check(res):
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
            self.failUnless("uncoordinated write" in str(f))
        return self._test_corrupt_all(1, "ran out of peers",
                                      corrupt_early=False,
                                      failure_checker=_check)

    def test_corrupt_all_block_hash_tree_late(self):
        # same as test_corrupt_all_block_hash_tree, but corrupting after the
        # mapupdate so the retrieve-side error path runs
        def _check(res):
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure",
                                      corrupt_early=False,
                                      failure_checker=_check)


    def test_corrupt_all_block_late(self):
        # same as test_corrupt_all_block, but corrupting after the mapupdate
        def _check(res):
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
        return self._test_corrupt_all("share_data", "block hash tree failure",
                                      corrupt_early=False,
                                      failure_checker=_check)


    def test_basic_pubkey_at_end(self):
        # we corrupt the pubkey in all but the last 'k' shares, allowing the
        # download to succeed but forcing a bunch of retries first. Note that
        # this is rather pessimistic: our Retrieve process will throw away
        # the whole share if the pubkey is bad, even though the rest of the
        # share might be good.

        self._fn._pubkey = None
        k = self._fn.get_required_shares()
        N = self._fn.get_total_shares()
        d = defer.succeed(None)
        d.addCallback(corrupt, self._storage, "pubkey",
                      shnums_to_corrupt=range(0, N-k))
        d.addCallback(lambda res: self.make_servermap())
        def _do_retrieve(servermap):
            self.failUnless(servermap.problems)
            self.failUnless("pubkey doesn't match fingerprint"
                            in str(servermap.problems[0]))
            ver = servermap.best_recoverable_version()
            r = Retrieve(self._fn, servermap, ver)
            c = consumer.MemoryConsumer()
            return r.download(c)
        d.addCallback(_do_retrieve)
        d.addCallback(lambda mc: "".join(mc.chunks))
        d.addCallback(lambda new_contents:
                      self.failUnlessEqual(new_contents, self.CONTENTS))
        return d


    def _test_corrupt_some(self, offset, mdmf=False):
        # Corrupt `offset` in only the first five shares, then confirm a
        # full download still returns the original contents (the retrieve
        # falls back to the remaining good shares).
        if mdmf:
            d = self.publish_mdmf()
        else:
            d = defer.succeed(None)
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, offset, range(5)))
        d.addCallback(lambda ignored:
            self.make_servermap())
        def _do_retrieve(servermap):
            ver = servermap.best_recoverable_version()
            self.failUnless(ver)
            return self._fn.download_best_version()
        d.addCallback(_do_retrieve)
        d.addCallback(lambda new_contents:
            self.failUnlessEqual(new_contents, self.CONTENTS))
        return d


    def test_corrupt_some(self):
        # corrupt the data of first five shares (so the servermap thinks
        # they're good but retrieve marks them as bad), so that the
        # MODE_READ set of 6 will be insufficient, forcing node.download to
        # retry with more servers.
        return self._test_corrupt_some("share_data")


    def test_download_fails(self):
        # with every signature corrupted, no version is recoverable and
        # download_best_version must fail cleanly
        d = corrupt(None, self._storage, "signature")
        d.addCallback(lambda ignored:
            self.shouldFail(UnrecoverableFileError, "test_download_anyway",
                            "no recoverable versions",
                            self._fn.download_best_version))
        return d



    def test_corrupt_mdmf_block_hash_tree(self):
        # MDMF variant: corrupt an entry deep in the block hash tree
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._test_corrupt_all(("block_hash_tree", 12 * 32),
                                   "block hash tree failure",
                                   corrupt_early=False,
                                   should_succeed=False))
        return d


    def test_corrupt_mdmf_block_hash_tree_late(self):
        # as above, but corrupting before the mapupdate
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._test_corrupt_all(("block_hash_tree", 12 * 32),
                                   "block hash tree failure",
                                   corrupt_early=True,
                                   should_succeed=False))
        return d


    def test_corrupt_mdmf_share_data(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            # TODO: Find out what the block size is and corrupt a
            # specific block, rather than just guessing.
            self._test_corrupt_all(("share_data", 12 * 40),
                                    "block hash tree failure",
                                    corrupt_early=True,
                                    should_succeed=False))
        return d


    def test_corrupt_some_mdmf(self):
        # MDMF variant of test_corrupt_some
        return self._test_corrupt_some(("share_data", 12 * 40),
                                       mdmf=True)
1699
1700
class CheckerMixin:
    """Assertion helpers shared by the checker/verifier test cases."""

    def check_good(self, r, where):
        # the check results must report a healthy file; pass the results
        # through so the Deferred chain can keep using them
        healthy = r.is_healthy()
        self.failUnless(healthy, where)
        return r

    def check_bad(self, r, where):
        # inverse of check_good: the results must report an unhealthy file
        healthy = r.is_healthy()
        self.failIf(healthy, where)
        return r

    def check_expected_failure(self, r, expected_exception, substring, where):
        # scan the reported problems for the first failure of the expected
        # class, and insist its message mentions `substring`
        for problem in r.problems:
            f = problem[3]
            if not f.check(expected_exception):
                continue
            message = str(f)
            self.failUnless(substring in message,
                            "%s: substring '%s' not in '%s'" %
                            (where, substring, message))
            return
        self.fail("%s: didn't see expected exception %s in problems %s" %
                  (where, expected_exception, r.problems))
1719
1720
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    """Exercise the mutable-file checker (and verifier, via verify=True)
    against healthy, share-less, under-replicated, and deliberately
    corrupted files, in both SDMF and MDMF variants."""
    def setUp(self):
        # start every test with one freshly published SDMF file
        return self.publish_one()


    def test_check_good(self):
        """A freshly published file checks out as healthy."""
        d = self._fn.check(Monitor())
        d.addCallback(self.check_good, "test_check_good")
        return d

    def test_check_mdmf_good(self):
        """A freshly published MDMF file checks out as healthy."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_good")
        return d

    def test_check_no_shares(self):
        """With every share deleted, the checker reports unhealthy."""
        for shares in self._storage._peers.values():
            shares.clear()
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_no_shares")
        return d

    def test_check_mdmf_no_shares(self):
        """MDMF variant of test_check_no_shares."""
        d = self.publish_mdmf()
        def _then(ignored):
            for share in self._storage._peers.values():
                share.clear()
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        return d

    def test_check_not_enough_shares(self):
        """With only share 0 remaining (fewer than k), the checker reports
        unhealthy."""
        for shares in self._storage._peers.values():
            # Python 2 dict.keys() returns a list copy, so deleting while
            # iterating is safe here
            for shnum in shares.keys():
                if shnum > 0:
                    del shares[shnum]
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_not_enough_shares")
        return d

    def test_check_mdmf_not_enough_shares(self):
        """MDMF variant of test_check_not_enough_shares."""
        d = self.publish_mdmf()
        def _then(ignored):
            for shares in self._storage._peers.values():
                for shnum in shares.keys():
                    if shnum > 0:
                        del shares[shnum]
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        # fixed label typo: was "test_check_mdmf_not_enougH_shares"
        d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
        return d


    def test_check_all_bad_sig(self):
        """Corrupting every signature makes the file unhealthy."""
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_all_bad_sig")
        return d

    def test_check_mdmf_all_bad_sig(self):
        """MDMF variant of test_check_all_bad_sig."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
        return d

    def test_check_all_bad_blocks(self):
        """A plain check does not read share data, so corrupt blocks go
        unnoticed and the file still looks healthy."""
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Checker won't notice this.. it doesn't look at actual data
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_all_bad_blocks")
        return d


    def test_check_mdmf_all_bad_blocks(self):
        """MDMF variant of test_check_all_bad_blocks."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data"))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
        return d

    def test_verify_good(self):
        """A full verify on a healthy file reports healthy."""
        d = self._fn.check(Monitor(), verify=True)
        d.addCallback(self.check_good, "test_verify_good")
        return d

    def test_verify_all_bad_sig(self):
        """Verify notices corruption of every signature."""
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_all_bad_sig")
        return d

    def test_verify_one_bad_sig(self):
        """Verify notices a single corrupted signature."""
        d = corrupt(None, self._storage, 1, [9]) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sig")
        return d

    def test_verify_one_bad_block(self):
        """Verify examines share data, so a single bad block is noticed and
        reported as a block-hash-tree failure."""
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Verifier *will* notice this, since it examines every byte
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_one_bad_block")
        return d

    def test_verify_one_bad_sharehash(self):
        """Verify notices a corrupted share hash chain entry."""
        d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "corrupt hashes",
                      "test_verify_one_bad_sharehash")
        return d

    def test_verify_one_bad_encprivkey(self):
        """Verify notices a corrupted encrypted private key."""
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "invalid privkey",
                      "test_verify_one_bad_encprivkey")
        return d

    def test_verify_one_bad_encprivkey_uncheckable(self):
        """A read-only node cannot validate the privkey, so a corrupted one
        goes unnoticed and the file still verifies as healthy."""
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        readonly_fn = self._fn.get_readonly()
        # a read-only node has no way to validate the privkey
        d.addCallback(lambda ignored:
            readonly_fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_one_bad_encprivkey_uncheckable")
        return d


    def test_verify_mdmf_good(self):
        """MDMF variant of test_verify_good."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf_good")
        return d


    def test_verify_mdmf_one_bad_block(self):
        """MDMF variant of test_verify_one_bad_block."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data", [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        # We should find one bad block here
        d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_mdmf_one_bad_block")
        return d


    def test_verify_mdmf_bad_encprivkey(self):
        """MDMF variant of test_verify_one_bad_encprivkey."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [0]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "privkey",
                      "test_verify_mdmf_bad_encprivkey")
        return d


    def test_verify_mdmf_bad_sig(self):
        """MDMF variant of test_verify_one_bad_sig."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1, [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
        return d


    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        """MDMF variant of test_verify_one_bad_encprivkey_uncheckable."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [1]))
        d.addCallback(lambda ignored:
            self._fn.get_readonly())
        d.addCallback(lambda fn:
            fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
        return d
1930
1931
1932 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1933
1934     def get_shares(self, s):
1935         all_shares = {} # maps (peerid, shnum) to share data
1936         for peerid in s._peers:
1937             shares = s._peers[peerid]
1938             for shnum in shares:
1939                 data = shares[shnum]
1940                 all_shares[ (peerid, shnum) ] = data
1941         return all_shares
1942
    def copy_shares(self, ignored=None):
        # Snapshot the current share data so later callbacks can compare
        # pre/post-repair state; `ignored` lets this sit in a Deferred chain.
        self.old_shares.append(self.get_shares(self._storage))
1945
    def test_repair_nop(self):
        """Repairing an already-healthy file succeeds; shares stay on the
        same servers but are republished at a higher sequence number."""
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results

            # snapshot the post-repair shares for comparison below
            self.copy_shares()

            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)

            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                (version0,
                 seqnum0,
                 root_hash0,
                 IV0,
                 k0, N0, segsize0, datalen0,
                 o0) = unpack_header(initial_shares[key])
                (version1,
                 seqnum1,
                 root_hash1,
                 IV1,
                 k1, N1, segsize1, datalen1,
                 o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)
        return d
1991
1992     def failIfSharesChanged(self, ignored=None):
1993         old_shares = self.old_shares[-2]
1994         current_shares = self.old_shares[-1]
1995         self.failUnlessEqual(old_shares, current_shares)
1996
1997
1998     def test_unrepairable_0shares(self):
1999         d = self.publish_one()
2000         def _delete_all_shares(ign):
2001             shares = self._storage._peers
2002             for peerid in shares:
2003                 shares[peerid] = {}
2004         d.addCallback(_delete_all_shares)
2005         d.addCallback(lambda ign: self._fn.check(Monitor()))
2006         d.addCallback(lambda check_results: self._fn.repair(check_results))
2007         def _check(crr):
2008             self.failUnlessEqual(crr.get_successful(), False)
2009         d.addCallback(_check)
2010         return d
2011
2012     def test_mdmf_unrepairable_0shares(self):
2013         d = self.publish_mdmf()
2014         def _delete_all_shares(ign):
2015             shares = self._storage._peers
2016             for peerid in shares:
2017                 shares[peerid] = {}
2018         d.addCallback(_delete_all_shares)
2019         d.addCallback(lambda ign: self._fn.check(Monitor()))
2020         d.addCallback(lambda check_results: self._fn.repair(check_results))
2021         d.addCallback(lambda crr: self.failIf(crr.get_successful()))
2022         return d
2023
2024
2025     def test_unrepairable_1share(self):
2026         d = self.publish_one()
2027         def _delete_all_shares(ign):
2028             shares = self._storage._peers
2029             for peerid in shares:
2030                 for shnum in list(shares[peerid]):
2031                     if shnum > 0:
2032                         del shares[peerid][shnum]
2033         d.addCallback(_delete_all_shares)
2034         d.addCallback(lambda ign: self._fn.check(Monitor()))
2035         d.addCallback(lambda check_results: self._fn.repair(check_results))
2036         def _check(crr):
2037             self.failUnlessEqual(crr.get_successful(), False)
2038         d.addCallback(_check)
2039         return d
2040
2041     def test_mdmf_unrepairable_1share(self):
2042         d = self.publish_mdmf()
2043         def _delete_all_shares(ign):
2044             shares = self._storage._peers
2045             for peerid in shares:
2046                 for shnum in list(shares[peerid]):
2047                     if shnum > 0:
2048                         del shares[peerid][shnum]
2049         d.addCallback(_delete_all_shares)
2050         d.addCallback(lambda ign: self._fn.check(Monitor()))
2051         d.addCallback(lambda check_results: self._fn.repair(check_results))
2052         def _check(crr):
2053             self.failUnlessEqual(crr.get_successful(), False)
2054         d.addCallback(_check)
2055         return d
2056
2057     def test_repairable_5shares(self):
2058         d = self.publish_mdmf()
2059         def _delete_all_shares(ign):
2060             shares = self._storage._peers
2061             for peerid in shares:
2062                 for shnum in list(shares[peerid]):
2063                     if shnum > 4:
2064                         del shares[peerid][shnum]
2065         d.addCallback(_delete_all_shares)
2066         d.addCallback(lambda ign: self._fn.check(Monitor()))
2067         d.addCallback(lambda check_results: self._fn.repair(check_results))
2068         def _check(crr):
2069             self.failUnlessEqual(crr.get_successful(), True)
2070         d.addCallback(_check)
2071         return d
2072
2073     def test_mdmf_repairable_5shares(self):
2074         d = self.publish_mdmf()
2075         def _delete_some_shares(ign):
2076             shares = self._storage._peers
2077             for peerid in shares:
2078                 for shnum in list(shares[peerid]):
2079                     if shnum > 5:
2080                         del shares[peerid][shnum]
2081         d.addCallback(_delete_some_shares)
2082         d.addCallback(lambda ign: self._fn.check(Monitor()))
2083         def _check(cr):
2084             self.failIf(cr.is_healthy())
2085             self.failUnless(cr.is_recoverable())
2086             return cr
2087         d.addCallback(_check)
2088         d.addCallback(lambda check_results: self._fn.repair(check_results))
2089         def _check1(crr):
2090             self.failUnlessEqual(crr.get_successful(), True)
2091         d.addCallback(_check1)
2092         return d
2093
2094
    def test_merge(self):
        """Two recoverable versions share the highest seqnum: repair()
        must refuse to merge them unless force=True is passed, and a
        forced repair must leave exactly one recoverable version."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # pass force=True
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            # an unforced repair must fail, and must not touch any shares
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            return d2
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            # the tie between the two seqnum-4 versions is broken by
            # roothash: the larger roothash wins the merge
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            else:
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d
2139
    def test_non_merge(self):
        """A repair that does not need a merge (only one version holds
        the highest seqnum) must succeed without force=True."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res:
                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                          1:3,3:3,5:3,7:3,9:3}))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            # the repaired file has exactly one recoverable version and
            # no unrecoverable leftovers
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d
2169
2170     def get_roothash_for(self, index):
2171         # return the roothash for the first share we see in the saved set
2172         shares = self._copied_shares[index]
2173         for peerid in shares:
2174             for shnum in shares[peerid]:
2175                 share = shares[peerid][shnum]
2176                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2177                           unpack_header(share)
2178                 return root_hash
2179
2180     def test_check_and_repair_readcap(self):
2181         # we can't currently repair from a mutable readcap: #625
2182         self.old_shares = []
2183         d = self.publish_one()
2184         d.addCallback(self.copy_shares)
2185         def _get_readcap(res):
2186             self._fn3 = self._fn.get_readonly()
2187             # also delete some shares
2188             for peerid,shares in self._storage._peers.items():
2189                 shares.pop(0, None)
2190         d.addCallback(_get_readcap)
2191         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2192         def _check_results(crr):
2193             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2194             # we should detect the unhealthy, but skip over mutable-readcap
2195             # repairs until #625 is fixed
2196             self.failIf(crr.get_pre_repair_results().is_healthy())
2197             self.failIf(crr.get_repair_attempted())
2198             self.failIf(crr.get_post_repair_results().is_healthy())
2199         d.addCallback(_check_results)
2200         return d
2201
class DevNullDictionary(dict):
    """A dict that silently discards every store.

    Used to disable caches in tests: reads behave like an empty dict.
    """
    def __setitem__(self, key, value):
        # swallow the write
        pass
2205
class MultipleEncodings(unittest.TestCase):
    """Publish the same mutable file under several encodings and mix the
    resulting shares, to check that download copes with a grid holding a
    variety of encodings of one file."""
    def setUp(self):
        # publish one mutable file against a fresh 20-server fake grid
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
        d.addCallback(_created)
        return d

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.
        # NOTE(review): the 'version' parameter is accepted but never used
        # in this body -- confirm whether it was meant to be passed on.

        fn = self._fn
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        # existing shares
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n

        s = self._storage
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # grab the freshly written shares and reset storage so the
            # next _encode() starts from an empty grid
            shares = s._peers
            s._peers = {}
            return shares
        d.addCallback(_published)
        return d

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        # build (or refresh) a servermap for self._fn in the given mode
        if oldmap is None:
            oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.

        contents1 = "Contents for encoding 1 (3-of-10) go here"
        contents2 = "Contents for encoding 2 (4-of-9) go here"
        contents3 = "Contents for encoding 3 (4-of-7) go here"

        # we make a retrieval object that doesn't know what encoding
        # parameters to use
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())

        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)

        def _merge(res):
            log.msg("merging sharelists")
            # we merge the shares from the two sets, leaving each shnum in
            # its original location, but using a share from set1 or set2
            # according to the following sequence:
            #
            #  4-of-9  a  s2
            #  4-of-9  b  s2
            #  4-of-7  c   s3
            #  4-of-9  d  s2
            #  3-of-9  e s1
            #  3-of-9  f s1
            #  3-of-9  g s1
            #  4-of-9  h  s2
            #
            # so that neither form can be recovered until fetch [f], at which
            # point version-s1 (the 3-of-10 form) should be recoverable. If
            # the implementation latches on to the first version it sees,
            # then s2 will be recoverable at fetch [g].

            # Later, when we implement code that handles multiple versions,
            # we can use this framework to assert that all recoverable
            # versions are retrieved, and test that 'epsilon' does its job

            places = [2, 2, 3, 2, 1, 1, 1, 2]

            sharemap = {}
            sb = self._storage_broker

            for peerid in sorted(sb.get_all_serverids()):
                for shnum in self._shares1.get(peerid, {}):
                    if shnum < len(places):
                        which = places[shnum]
                    else:
                        which = "x"
                    # NOTE(review): this resets the peer's share dict on
                    # every shnum iteration, discarding any share already
                    # placed for an earlier shnum of the same peer. With
                    # 20 peers this is presumably harmless because each
                    # peer holds at most one share -- confirm.
                    self._storage._peers[peerid] = peers = {}
                    in_1 = shnum in self._shares1[peerid]
                    in_2 = shnum in self._shares2.get(peerid, {})
                    in_3 = shnum in self._shares3.get(peerid, {})
                    if which == 1:
                        if in_1:
                            peers[shnum] = self._shares1[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 2:
                        if in_2:
                            peers[shnum] = self._shares2[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 3:
                        if in_3:
                            peers[shnum] = self._shares3[peerid][shnum]
                            sharemap[shnum] = peerid

            # we don't bother placing any other shares
            # now sort the sequence so that share 0 is returned first
            new_sequence = [sharemap[shnum]
                            for shnum in sorted(sharemap.keys())]
            self._storage._sequence = new_sequence
            log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
        return d
2348
2349
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    """Download/checker/modify behavior when the grid holds a mixture of
    share versions of one mutable file."""

    def setUp(self):
        # PublishMixin helper: publish several versions of one file
        return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")

        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            # exactly one unrecoverable, newer version should be visible
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        # recover either
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            # both versions are recoverable, so a merge is called for
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        return d

    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version

        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)

        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            # the modify leaves a single recoverable version at seqnum 5
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
        return d
2429
2430
class Utils(unittest.TestCase):
    def test_cache(self):
        """Exercise ResponseCache reads against two cached spans."""
        c = ResponseCache()
        # two 100-byte blobs, cached at offsets 0 and 2000 of verinfo "v1"
        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
        c.add("v1", 1, 0, xdata)
        c.add("v1", 1, 2000, ydata)
        # wrong verinfo or wrong shnum: always a miss
        self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
        self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
        # reads fully inside a cached span succeed
        self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
        self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
        self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
        # reads that run past the end of a span miss
        self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
        self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
        self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
        # reads starting in the gap between the two spans always miss,
        # whatever their length
        for length in (9, 10, 11, 15, 19, 20, 21, 25):
            self.failUnlessEqual(c.read("v1", 1, 1990, length), None)
        self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)

        # adjacent fragments must be joined on read
        c = ResponseCache()
        c.add("v1", 1, 0, xdata[:10])
        c.add("v1", 1, 10, xdata[10:20])
        self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2463
class Exceptions(unittest.TestCase):
    def test_repr(self):
        # repr() of the mutable-file exceptions should name the class
        for exc, name in [(NeedMoreDataError(100, 50, 100),
                           "NeedMoreDataError"),
                          (UncoordinatedWriteError(),
                           "UncoordinatedWriteError")]:
            self.failUnless(name in repr(exc), repr(exc))
2470
class SameKeyGenerator:
    """A key 'generator' that always hands back the same keypair."""
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey
    def generate(self, keysize=None):
        # keysize is accepted (and ignored) to match the real generator's
        # signature; always succeed with the canned pair
        return defer.succeed((self.pubkey, self.privkey))
2477
class FirstServerGetsKilled:
    """Break the first server wrapper it sees, then leave the rest alone."""
    done = False # becomes True (on the instance) after the first notify
    def notify(self, retval, wrapper, methname):
        # only the very first wrapper we are notified about gets broken
        if self.done:
            return retval
        wrapper.broken = True
        self.done = True
        return retval
2485
class FirstServerGetsDeleted:
    """Let the first query through, then make that same server claim its
    share has been deleted on all later writev calls."""
    def __init__(self):
        self.done = False     # True once the first query has passed
        self.silenced = None  # the wrapper whose later writes are faked
    def notify(self, retval, wrapper, methname):
        if self.done and wrapper == self.silenced:
            # later writes to the remembered server pretend the share
            # set is empty
            assert methname == "slot_testv_and_readv_and_writev"
            return (True, {})
        if not self.done:
            # the first query works; remember which server answered it
            self.done = True
            self.silenced = wrapper
        return retval
2501
2502 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    def do_publish_surprise(self, version):
        """Create a mutable file, stash its servermap, overwrite it, then
        upload again using the stale map: the second upload must fail
        with UncoordinatedWriteError, since every share changed between
        our mapupdate and our publish."""
        self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"),
                                    version=version)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_publish_surprise", None,
                                          n.upload,
                                          MutableData("contents 2a"), self.old_map))
            return d
        d.addCallback(_created)
        return d
2531
    def test_publish_surprise(self):
        # run the stale-servermap surprise-write scenario against SDMF
        return self.do_publish_surprise(SDMF_VERSION)
2534
    def test_retrieve_surprise(self):
        """Retrieving with a stale servermap after the file has been
        overwritten must fail with NotEnoughSharesError rather than
        silently returning stale data."""
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # now attempt to retrieve the old version with the old servermap.
            # This will look like someone has changed the file since we
            # updated the servermap.
            # clear the response cache so the read really hits the servers
            d.addCallback(lambda res: n._cache._clear())
            d.addCallback(lambda res: log.msg("starting doomed read"))
            d.addCallback(lambda res:
                          self.shouldFail(NotEnoughSharesError,
                                          "test_retrieve_surprise",
                                          "ran out of peers: have 0 of 1",
                                          n.download_version,
                                          self.old_map,
                                          self.old_map.best_recoverable_version(),
                                          ))
            return d
        d.addCallback(_created)
        return d
2566
2567
    def test_unexpected_shares(self):
        # upload the file, take a servermap, shut down one of the servers,
        # upload it again (causing shares to appear on a new server), then
        # upload using the old servermap. The last upload should fail with an
        # UncoordinatedWriteError, because of the shares that didn't appear
        # in the servermap.
        self.basedir = "mutable/Problems/test_unexpected_shares"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
                # now shut down one of the servers: pick one that holds
                # share 0 according to the servermap's sharemap
                peer0 = list(smap.make_sharemap()[0])[0]
                self.g.remove_server(peer0)
                # then modify the file, leaving the old map untouched
                log.msg("starting winning write")
                return n.overwrite(MutableData("contents 2"))
            d.addCallback(_got_smap1)
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_surprise", None,
                                          n.upload,
                                          MutableData("contents 2a"), self.old_map))
            return d
        d.addCallback(_created)
        return d
2603
    def test_bad_server(self):
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker

        # to make sure that one of the initial peers is broken, we have to
        # get creative. We create an RSA key and compute its storage-index.
        # Then we make a KeyGenerator that always returns that one key, and
        # use it to create the mutable file. This will get easier when we can
        # use #467 static-server-selection to disable permutation and force
        # the choice of server for share[0].

        d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
        def _got_key( (pubkey, privkey) ):
            nm.key_generator = SameKeyGenerator(pubkey, privkey)
            pubkey_s = pubkey.serialize()
            privkey_s = privkey.serialize()
            # derive the writecap (and thus the storage index) the file
            # will eventually use
            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                        ssk_pubkey_fingerprint_hash(pubkey_s))
            self._storage_index = u.get_storage_index()
        d.addCallback(_got_key)
        def _break_peer0(res):
            # break the first server in the permuted list for this storage
            # index, and remember the second server for later breakage
            si = self._storage_index
            servers = nm.storage_broker.get_servers_for_psi(si)
            self.g.break_server(servers[0].get_serverid())
            self.server1 = servers[1]
        d.addCallback(_break_peer0)
        # now "create" the file, using the pre-established key, and let the
        # initial publish finally happen
        d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
        # that ought to work
        def _got_node(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            # now break the second peer
            def _break_peer1(res):
                self.g.break_server(self.server1.get_serverid())
            d.addCallback(_break_peer1)
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # that ought to work too
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            def _explain_error(f):
                # debugging aid: surface the underlying first_error before
                # re-raising the failure
                print f
                if f.check(NotEnoughServersError):
                    print "first_error:", f.value.first_error
                return f
            d.addErrback(_explain_error)
            return d
        d.addCallback(_got_node)
        return d
2658
2659     def test_bad_server_overlap(self):
2660         # like test_bad_server, but with no extra unused servers to fall back
2661         # upon. This means that we must re-use a server which we've already
2662         # used. If we don't remember the fact that we sent them one share
2663         # already, we'll mistakenly think we're experiencing an
2664         # UncoordinatedWriteError.
2665
2666         # Break one server, then create the file: the initial publish should
2667         # complete with an alternate server. Breaking a second server should
2668         # not prevent an update from succeeding either.
2669         self.basedir = "mutable/Problems/test_bad_server_overlap"
2670         self.set_up_grid()
2671         nm = self.g.clients[0].nodemaker
2672         sb = nm.storage_broker
2673
2674         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2675         self.g.break_server(peerids[0])
2676
2677         d = nm.create_mutable_file(MutableData("contents 1"))
2678         def _created(n):
2679             d = n.download_best_version()
2680             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2681             # now break one of the remaining servers
2682             def _break_second_server(res):
2683                 self.g.break_server(peerids[1])
2684             d.addCallback(_break_second_server)
2685             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2686             # that ought to work too
2687             d.addCallback(lambda res: n.download_best_version())
2688             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2689             return d
2690         d.addCallback(_created)
2691         return d
2692
2693     def test_publish_all_servers_bad(self):
2694         # Break all servers: the publish should fail
2695         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2696         self.set_up_grid()
2697         nm = self.g.clients[0].nodemaker
2698         for s in nm.storage_broker.get_connected_servers():
2699             s.get_rref().broken = True
2700
2701         d = self.shouldFail(NotEnoughServersError,
2702                             "test_publish_all_servers_bad",
2703                             "ran out of good servers",
2704                             nm.create_mutable_file, MutableData("contents"))
2705         return d
2706
2707     def test_publish_no_servers(self):
2708         # no servers at all: the publish should fail
2709         self.basedir = "mutable/Problems/test_publish_no_servers"
2710         self.set_up_grid(num_servers=0)
2711         nm = self.g.clients[0].nodemaker
2712
2713         d = self.shouldFail(NotEnoughServersError,
2714                             "test_publish_no_servers",
2715                             "Ran out of non-bad servers",
2716                             nm.create_mutable_file, MutableData("contents"))
2717         return d
2718
2719
2720     def test_privkey_query_error(self):
2721         # when a servermap is updated with MODE_WRITE, it tries to get the
2722         # privkey. Something might go wrong during this query attempt.
2723         # Exercise the code in _privkey_query_failed which tries to handle
2724         # such an error.
2725         self.basedir = "mutable/Problems/test_privkey_query_error"
2726         self.set_up_grid(num_servers=20)
2727         nm = self.g.clients[0].nodemaker
2728         nm._node_cache = DevNullDictionary() # disable the nodecache
2729
2730         # we need some contents that are large enough to push the privkey out
2731         # of the early part of the file
2732         LARGE = "These are Larger contents" * 2000 # about 50KB
2733         LARGE_uploadable = MutableData(LARGE)
2734         d = nm.create_mutable_file(LARGE_uploadable)
2735         def _created(n):
2736             self.uri = n.get_uri()
2737             self.n2 = nm.create_from_cap(self.uri)
2738
2739             # When a mapupdate is performed on a node that doesn't yet know
2740             # the privkey, a short read is sent to a batch of servers, to get
2741             # the verinfo and (hopefully, if the file is short enough) the
2742             # encprivkey. Our file is too large to let this first read
2743             # contain the encprivkey. Each non-encprivkey-bearing response
2744             # that arrives (until the node gets the encprivkey) will trigger
2745             # a second read to specifically read the encprivkey.
2746             #
2747             # So, to exercise this case:
2748             #  1. notice which server gets a read() call first
2749             #  2. tell that server to start throwing errors
2750             killer = FirstServerGetsKilled()
2751             for s in nm.storage_broker.get_connected_servers():
2752                 s.get_rref().post_call_notifier = killer.notify
2753         d.addCallback(_created)
2754
2755         # now we update a servermap from a new node (which doesn't have the
2756         # privkey yet, forcing it to use a separate privkey query). Note that
2757         # the map-update will succeed, since we'll just get a copy from one
2758         # of the other shares.
2759         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2760
2761         return d
2762
2763     def test_privkey_query_missing(self):
2764         # like test_privkey_query_error, but the shares are deleted by the
2765         # second query, instead of raising an exception.
2766         self.basedir = "mutable/Problems/test_privkey_query_missing"
2767         self.set_up_grid(num_servers=20)
2768         nm = self.g.clients[0].nodemaker
2769         LARGE = "These are Larger contents" * 2000 # about 50KiB
2770         LARGE_uploadable = MutableData(LARGE)
2771         nm._node_cache = DevNullDictionary() # disable the nodecache
2772
2773         d = nm.create_mutable_file(LARGE_uploadable)
2774         def _created(n):
2775             self.uri = n.get_uri()
2776             self.n2 = nm.create_from_cap(self.uri)
2777             deleter = FirstServerGetsDeleted()
2778             for s in nm.storage_broker.get_connected_servers():
2779                 s.get_rref().post_call_notifier = deleter.notify
2780         d.addCallback(_created)
2781         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2782         return d
2783
2784
2785     def test_block_and_hash_query_error(self):
2786         # This tests for what happens when a query to a remote server
2787         # fails in either the hash validation step or the block getting
2788         # step (because of batching, this is the same actual query).
2789         # We need to have the storage server persist up until the point
2790         # that its prefix is validated, then suddenly die. This
2791         # exercises some exception handling code in Retrieve.
2792         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2793         self.set_up_grid(num_servers=20)
2794         nm = self.g.clients[0].nodemaker
2795         CONTENTS = "contents" * 2000
2796         CONTENTS_uploadable = MutableData(CONTENTS)
2797         d = nm.create_mutable_file(CONTENTS_uploadable)
2798         def _created(node):
2799             self._node = node
2800         d.addCallback(_created)
2801         d.addCallback(lambda ignored:
2802             self._node.get_servermap(MODE_READ))
2803         def _then(servermap):
2804             # we have our servermap. Now we set up the servers like the
2805             # tests above -- the first one that gets a read call should
2806             # start throwing errors, but only after returning its prefix
2807             # for validation. Since we'll download without fetching the
2808             # private key, the next query to the remote server will be
2809             # for either a block and salt or for hashes, either of which
2810             # will exercise the error handling code.
2811             killer = FirstServerGetsKilled()
2812             for s in nm.storage_broker.get_connected_servers():
2813                 s.get_rref().post_call_notifier = killer.notify
2814             ver = servermap.best_recoverable_version()
2815             assert ver
2816             return self._node.download_version(servermap, ver)
2817         d.addCallback(_then)
2818         d.addCallback(lambda data:
2819             self.failUnlessEqual(data, CONTENTS))
2820         return d
2821
2822
class FileHandle(unittest.TestCase):
    """Tests for MutableFileHandle, the IMutableUploadable wrapper around
    seekable file-like objects."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)


    def test_filehandle_read(self):
        # sequential chunked reads walk through the data in order
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        chunk_size = 10
        for offset in xrange(0, len(self.test_data), chunk_size):
            chunk = "".join(self.uploadable.read(chunk_size))
            self.failUnlessEqual(chunk,
                                 self.test_data[offset:offset+chunk_size])


    def test_filehandle_get_size(self):
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))


    def test_filehandle_get_size_out_of_order(self):
        # get_size may be called at any time without disturbing the
        # location of the seek pointer.
        chunk_size = 100
        first = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(first, self.test_data[:chunk_size])

        # query the size mid-read
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))

        # the next read picks up exactly where we left off
        second = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(second,
                             self.test_data[chunk_size:chunk_size*2])


    def test_filehandle_file(self):
        # MutableFileHandle must work with real files as well as
        # StringIO objects, since in some cases it will be handed files.
        self.basedir = self.mktemp()
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        f.close()
        f = open(f_path, "r")

        uploadable = MutableFileHandle(f)

        self.failUnlessEqual("".join(uploadable.read(len(self.test_data))),
                             self.test_data)
        self.failUnlessEqual(uploadable.get_size(), len(self.test_data))


    def test_close(self):
        # close() must close the underlying handle
        self.uploadable.close()
        self.failUnless(self.sio.closed)
2892
2893
class DataHandle(unittest.TestCase):
    """Tests for MutableData, the IMutableUploadable wrapper around
    in-memory strings."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)


    def test_datahandle_read(self):
        # sequential chunked reads walk through the data in order
        chunk_size = 10
        for offset in xrange(0, len(self.test_data), chunk_size):
            chunk = "".join(self.uploadable.read(chunk_size))
            self.failUnlessEqual(chunk,
                                 self.test_data[offset:offset+chunk_size])


    def test_datahandle_get_size(self):
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))


    def test_datahandle_get_size_out_of_order(self):
        # get_size may be called at any time without disturbing the
        # location of the read pointer.
        chunk_size = 100
        first = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(first, self.test_data[:chunk_size])

        # query the size mid-read
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))

        # the next read picks up exactly where we left off
        second = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(second,
                             self.test_data[chunk_size:chunk_size*2])
2932
2933
2934 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
2935               PublishMixin):
2936     def setUp(self):
2937         GridTestMixin.setUp(self)
2938         self.basedir = self.mktemp()
2939         self.set_up_grid()
2940         self.c = self.g.clients[0]
2941         self.nm = self.c.nodemaker
2942         self.data = "test data" * 100000 # about 900 KiB; MDMF
2943         self.small_data = "test data" * 10 # about 90 B; SDMF
2944
2945
2946     def do_upload_mdmf(self):
2947         d = self.nm.create_mutable_file(MutableData(self.data),
2948                                         version=MDMF_VERSION)
2949         def _then(n):
2950             assert isinstance(n, MutableFileNode)
2951             assert n._protocol_version == MDMF_VERSION
2952             self.mdmf_node = n
2953             return n
2954         d.addCallback(_then)
2955         return d
2956
2957     def do_upload_sdmf(self):
2958         d = self.nm.create_mutable_file(MutableData(self.small_data))
2959         def _then(n):
2960             assert isinstance(n, MutableFileNode)
2961             assert n._protocol_version == SDMF_VERSION
2962             self.sdmf_node = n
2963             return n
2964         d.addCallback(_then)
2965         return d
2966
2967     def do_upload_empty_sdmf(self):
2968         d = self.nm.create_mutable_file(MutableData(""))
2969         def _then(n):
2970             assert isinstance(n, MutableFileNode)
2971             self.sdmf_zero_length_node = n
2972             assert n._protocol_version == SDMF_VERSION
2973             return n
2974         d.addCallback(_then)
2975         return d
2976
2977     def do_upload(self):
2978         d = self.do_upload_mdmf()
2979         d.addCallback(lambda ign: self.do_upload_sdmf())
2980         return d
2981
2982     def test_debug(self):
2983         d = self.do_upload_mdmf()
2984         def _debug(n):
2985             fso = debug.FindSharesOptions()
2986             storage_index = base32.b2a(n.get_storage_index())
2987             fso.si_s = storage_index
2988             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
2989                             for (i,ss,storedir)
2990                             in self.iterate_servers()]
2991             fso.stdout = StringIO()
2992             fso.stderr = StringIO()
2993             debug.find_shares(fso)
2994             sharefiles = fso.stdout.getvalue().splitlines()
2995             expected = self.nm.default_encoding_parameters["n"]
2996             self.failUnlessEqual(len(sharefiles), expected)
2997
2998             do = debug.DumpOptions()
2999             do["filename"] = sharefiles[0]
3000             do.stdout = StringIO()
3001             debug.dump_share(do)
3002             output = do.stdout.getvalue()
3003             lines = set(output.splitlines())
3004             self.failUnless("Mutable slot found:" in lines, output)
3005             self.failUnless(" share_type: MDMF" in lines, output)
3006             self.failUnless(" num_extra_leases: 0" in lines, output)
3007             self.failUnless(" MDMF contents:" in lines, output)
3008             self.failUnless("  seqnum: 1" in lines, output)
3009             self.failUnless("  required_shares: 3" in lines, output)
3010             self.failUnless("  total_shares: 10" in lines, output)
3011             self.failUnless("  segsize: 131073" in lines, output)
3012             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
3013             vcap = n.get_verify_cap().to_string()
3014             self.failUnless("  verify-cap: %s" % vcap in lines, output)
3015
3016             cso = debug.CatalogSharesOptions()
3017             cso.nodedirs = fso.nodedirs
3018             cso.stdout = StringIO()
3019             cso.stderr = StringIO()
3020             debug.catalog_shares(cso)
3021             shares = cso.stdout.getvalue().splitlines()
3022             oneshare = shares[0] # all shares should be MDMF
3023             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3024             self.failUnless(oneshare.startswith("MDMF"), oneshare)
3025             fields = oneshare.split()
3026             self.failUnlessEqual(fields[0], "MDMF")
3027             self.failUnlessEqual(fields[1], storage_index)
3028             self.failUnlessEqual(fields[2], "3/10")
3029             self.failUnlessEqual(fields[3], "%d" % len(self.data))
3030             self.failUnless(fields[4].startswith("#1:"), fields[3])
3031             # the rest of fields[4] is the roothash, which depends upon
3032             # encryption salts and is not constant. fields[5] is the
3033             # remaining time on the longest lease, which is timing dependent.
3034             # The rest of the line is the quoted pathname to the share.
3035         d.addCallback(_debug)
3036         return d
3037
3038     def test_get_sequence_number(self):
3039         d = self.do_upload()
3040         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3041         d.addCallback(lambda bv:
3042             self.failUnlessEqual(bv.get_sequence_number(), 1))
3043         d.addCallback(lambda ignored:
3044             self.sdmf_node.get_best_readable_version())
3045         d.addCallback(lambda bv:
3046             self.failUnlessEqual(bv.get_sequence_number(), 1))
3047         # Now update. The sequence number in both cases should be 1 in
3048         # both cases.
3049         def _do_update(ignored):
3050             new_data = MutableData("foo bar baz" * 100000)
3051             new_small_data = MutableData("foo bar baz" * 10)
3052             d1 = self.mdmf_node.overwrite(new_data)
3053             d2 = self.sdmf_node.overwrite(new_small_data)
3054             dl = gatherResults([d1, d2])
3055             return dl
3056         d.addCallback(_do_update)
3057         d.addCallback(lambda ignored:
3058             self.mdmf_node.get_best_readable_version())
3059         d.addCallback(lambda bv:
3060             self.failUnlessEqual(bv.get_sequence_number(), 2))
3061         d.addCallback(lambda ignored:
3062             self.sdmf_node.get_best_readable_version())
3063         d.addCallback(lambda bv:
3064             self.failUnlessEqual(bv.get_sequence_number(), 2))
3065         return d
3066
3067
3068     def test_version_extension_api(self):
3069         # We need to define an API by which an uploader can set the
3070         # extension parameters, and by which a downloader can retrieve
3071         # extensions.
3072         d = self.do_upload_mdmf()
3073         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3074         def _got_version(version):
3075             hints = version.get_downloader_hints()
3076             # Should be empty at this point.
3077             self.failUnlessIn("k", hints)
3078             self.failUnlessEqual(hints['k'], 3)
3079             self.failUnlessIn('segsize', hints)
3080             self.failUnlessEqual(hints['segsize'], 131073)
3081         d.addCallback(_got_version)
3082         return d
3083
3084
3085     def test_extensions_from_cap(self):
3086         # If we initialize a mutable file with a cap that has extension
3087         # parameters in it and then grab the extension parameters using
3088         # our API, we should see that they're set correctly.
3089         d = self.do_upload_mdmf()
3090         def _then(ign):
3091             mdmf_uri = self.mdmf_node.get_uri()
3092             new_node = self.nm.create_from_cap(mdmf_uri)
3093             return new_node.get_best_mutable_version()
3094         d.addCallback(_then)
3095         def _got_version(version):
3096             hints = version.get_downloader_hints()
3097             self.failUnlessIn("k", hints)
3098             self.failUnlessEqual(hints["k"], 3)
3099             self.failUnlessIn("segsize", hints)
3100             self.failUnlessEqual(hints["segsize"], 131073)
3101         d.addCallback(_got_version)
3102         return d
3103
3104
3105     def test_extensions_from_upload(self):
3106         # If we create a new mutable file with some contents, we should
3107         # get back an MDMF cap with the right hints in place.
3108         contents = "foo bar baz" * 100000
3109         d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3110         def _got_mutable_file(n):
3111             rw_uri = n.get_uri()
3112             expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3113             self.failUnlessIn(expected_k, rw_uri)
3114             # XXX: Get this more intelligently.
3115             self.failUnlessIn("131073", rw_uri)
3116
3117             ro_uri = n.get_readonly_uri()
3118             self.failUnlessIn(expected_k, ro_uri)
3119             self.failUnlessIn("131073", ro_uri)
3120         d.addCallback(_got_mutable_file)
3121         return d
3122
3123
3124     def test_cap_after_upload(self):
3125         # If we create a new mutable file and upload things to it, and
3126         # it's an MDMF file, we should get an MDMF cap back from that
3127         # file and should be able to use that.
3128         # That's essentially what MDMF node is, so just check that.
3129         d = self.do_upload_mdmf()
3130         def _then(ign):
3131             mdmf_uri = self.mdmf_node.get_uri()
3132             cap = uri.from_string(mdmf_uri)
3133             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3134             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3135             cap = uri.from_string(readonly_mdmf_uri)
3136             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3137         d.addCallback(_then)
3138         return d
3139
3140     def test_mutable_version(self):
3141         # assert that getting parameters from the IMutableVersion object
3142         # gives us the same data as getting them from the filenode itself
3143         d = self.do_upload()
3144         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3145         def _check_mdmf(bv):
3146             n = self.mdmf_node
3147             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3148             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3149             self.failIf(bv.is_readonly())
3150         d.addCallback(_check_mdmf)
3151         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3152         def _check_sdmf(bv):
3153             n = self.sdmf_node
3154             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3155             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3156             self.failIf(bv.is_readonly())
3157         d.addCallback(_check_sdmf)
3158         return d
3159
3160
3161     def test_get_readonly_version(self):
3162         d = self.do_upload()
3163         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3164         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3165
3166         # Attempting to get a mutable version of a mutable file from a
3167         # filenode initialized with a readcap should return a readonly
3168         # version of that same node.
3169         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3170         d.addCallback(lambda ro: ro.get_best_mutable_version())
3171         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3172
3173         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3174         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3175
3176         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3177         d.addCallback(lambda ro: ro.get_best_mutable_version())
3178         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3179         return d
3180
3181
3182     def test_toplevel_overwrite(self):
3183         new_data = MutableData("foo bar baz" * 100000)
3184         new_small_data = MutableData("foo bar baz" * 10)
3185         d = self.do_upload()
3186         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3187         d.addCallback(lambda ignored:
3188             self.mdmf_node.download_best_version())
3189         d.addCallback(lambda data:
3190             self.failUnlessEqual(data, "foo bar baz" * 100000))
3191         d.addCallback(lambda ignored:
3192             self.sdmf_node.overwrite(new_small_data))
3193         d.addCallback(lambda ignored:
3194             self.sdmf_node.download_best_version())
3195         d.addCallback(lambda data:
3196             self.failUnlessEqual(data, "foo bar baz" * 10))
3197         return d
3198
3199
3200     def test_toplevel_modify(self):
3201         d = self.do_upload()
3202         def modifier(old_contents, servermap, first_time):
3203             return old_contents + "modified"
3204         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3205         d.addCallback(lambda ignored:
3206             self.mdmf_node.download_best_version())
3207         d.addCallback(lambda data:
3208             self.failUnlessIn("modified", data))
3209         d.addCallback(lambda ignored:
3210             self.sdmf_node.modify(modifier))
3211         d.addCallback(lambda ignored:
3212             self.sdmf_node.download_best_version())
3213         d.addCallback(lambda data:
3214             self.failUnlessIn("modified", data))
3215         return d
3216
3217
3218     def test_version_modify(self):
3219         # TODO: When we can publish multiple versions, alter this test
3220         # to modify a version other than the best usable version, then
3221         # test to see that the best recoverable version is that.
3222         d = self.do_upload()
3223         def modifier(old_contents, servermap, first_time):
3224             return old_contents + "modified"
3225         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3226         d.addCallback(lambda ignored:
3227             self.mdmf_node.download_best_version())
3228         d.addCallback(lambda data:
3229             self.failUnlessIn("modified", data))
3230         d.addCallback(lambda ignored:
3231             self.sdmf_node.modify(modifier))
3232         d.addCallback(lambda ignored:
3233             self.sdmf_node.download_best_version())
3234         d.addCallback(lambda data:
3235             self.failUnlessIn("modified", data))
3236         return d
3237
3238
3239     def test_download_version(self):
3240         d = self.publish_multiple()
3241         # We want to have two recoverable versions on the grid.
3242         d.addCallback(lambda res:
3243                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3244                                           1:1,3:1,5:1,7:1,9:1}))
3245         # Now try to download each version. We should get the plaintext
3246         # associated with that version.
3247         d.addCallback(lambda ignored:
3248             self._fn.get_servermap(mode=MODE_READ))
3249         def _got_servermap(smap):
3250             versions = smap.recoverable_versions()
3251             assert len(versions) == 2
3252
3253             self.servermap = smap
3254             self.version1, self.version2 = versions
3255             assert self.version1 != self.version2
3256
3257             self.version1_seqnum = self.version1[0]
3258             self.version2_seqnum = self.version2[0]
3259             self.version1_index = self.version1_seqnum - 1
3260             self.version2_index = self.version2_seqnum - 1
3261
3262         d.addCallback(_got_servermap)
3263         d.addCallback(lambda ignored:
3264             self._fn.download_version(self.servermap, self.version1))
3265         d.addCallback(lambda results:
3266             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3267                                  results))
3268         d.addCallback(lambda ignored:
3269             self._fn.download_version(self.servermap, self.version2))
3270         d.addCallback(lambda results:
3271             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3272                                  results))
3273         return d
3274
3275
3276     def test_download_nonexistent_version(self):
3277         d = self.do_upload_mdmf()
3278         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3279         def _set_servermap(servermap):
3280             self.servermap = servermap
3281         d.addCallback(_set_servermap)
3282         d.addCallback(lambda ignored:
3283            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3284                            None,
3285                            self.mdmf_node.download_version, self.servermap,
3286                            "not a version"))
3287         return d
3288
3289
3290     def test_partial_read(self):
3291         d = self.do_upload_mdmf()
3292         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3293         modes = [("start_on_segment_boundary",
3294                   mathutil.next_multiple(128 * 1024, 3), 50),
3295                  ("ending_one_byte_after_segment_boundary",
3296                   mathutil.next_multiple(128 * 1024, 3)-50, 51),
3297                  ("zero_length_at_start", 0, 0),
3298                  ("zero_length_in_middle", 50, 0),
3299                  ("zero_length_at_segment_boundary",
3300                   mathutil.next_multiple(128 * 1024, 3), 0),
3301                  ]
3302         for (name, offset, length) in modes:
3303             d.addCallback(self._do_partial_read, name, offset, length)
3304         # then read only a few bytes at a time, and see that the results are
3305         # what we expect.
3306         def _read_data(version):
3307             c = consumer.MemoryConsumer()
3308             d2 = defer.succeed(None)
3309             for i in xrange(0, len(self.data), 10000):
3310                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3311             d2.addCallback(lambda ignored:
3312                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3313             return d2
3314         d.addCallback(_read_data)
3315         return d
3316     def _do_partial_read(self, version, name, offset, length):
3317         c = consumer.MemoryConsumer()
3318         d = version.read(c, offset, length)
3319         expected = self.data[offset:offset+length]
3320         d.addCallback(lambda ignored: "".join(c.chunks))
3321         def _check(results):
3322             if results != expected:
3323                 print
3324                 print "got: %s ... %s" % (results[:20], results[-20:])
3325                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3326                 self.fail("results[%s] != expected" % name)
3327             return version # daisy-chained to next call
3328         d.addCallback(_check)
3329         return d
3330
3331
3332     def _test_read_and_download(self, node, expected):
3333         d = node.get_best_readable_version()
3334         def _read_data(version):
3335             c = consumer.MemoryConsumer()
3336             d2 = defer.succeed(None)
3337             d2.addCallback(lambda ignored: version.read(c))
3338             d2.addCallback(lambda ignored:
3339                 self.failUnlessEqual(expected, "".join(c.chunks)))
3340             return d2
3341         d.addCallback(_read_data)
3342         d.addCallback(lambda ignored: node.download_best_version())
3343         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3344         return d
3345
3346     def test_read_and_download_mdmf(self):
3347         d = self.do_upload_mdmf()
3348         d.addCallback(self._test_read_and_download, self.data)
3349         return d
3350
3351     def test_read_and_download_sdmf(self):
3352         d = self.do_upload_sdmf()
3353         d.addCallback(self._test_read_and_download, self.small_data)
3354         return d
3355
3356     def test_read_and_download_sdmf_zero_length(self):
3357         d = self.do_upload_empty_sdmf()
3358         d.addCallback(self._test_read_and_download, "")
3359         return d
3360
3361
3362 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3363     timeout = 400 # these tests are too big, 120s is not enough on slow
3364                   # platforms
3365     def setUp(self):
3366         GridTestMixin.setUp(self)
3367         self.basedir = self.mktemp()
3368         self.set_up_grid()
3369         self.c = self.g.clients[0]
3370         self.nm = self.c.nodemaker
3371         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3372         self.small_data = "test data" * 10 # about 90 B; SDMF
3373
3374
3375     def do_upload_sdmf(self):
3376         d = self.nm.create_mutable_file(MutableData(self.small_data))
3377         def _then(n):
3378             assert isinstance(n, MutableFileNode)
3379             self.sdmf_node = n
3380             # Make SDMF node that has 255 shares.
3381             self.nm.default_encoding_parameters['n'] = 255
3382             self.nm.default_encoding_parameters['k'] = 127
3383             return self.nm.create_mutable_file(MutableData(self.small_data))
3384         d.addCallback(_then)
3385         def _then2(n):
3386             assert isinstance(n, MutableFileNode)
3387             self.sdmf_max_shares_node = n
3388         d.addCallback(_then2)
3389         return d
3390
3391     def do_upload_mdmf(self):
3392         d = self.nm.create_mutable_file(MutableData(self.data),
3393                                         version=MDMF_VERSION)
3394         def _then(n):
3395             assert isinstance(n, MutableFileNode)
3396             self.mdmf_node = n
3397             # Make MDMF node that has 255 shares.
3398             self.nm.default_encoding_parameters['n'] = 255
3399             self.nm.default_encoding_parameters['k'] = 127
3400             return self.nm.create_mutable_file(MutableData(self.data),
3401                                                version=MDMF_VERSION)
3402         d.addCallback(_then)
3403         def _then2(n):
3404             assert isinstance(n, MutableFileNode)
3405             self.mdmf_max_shares_node = n
3406         d.addCallback(_then2)
3407         return d
3408
3409     def _test_replace(self, offset, new_data):
3410         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3411         d0 = self.do_upload_mdmf()
3412         def _run(ign):
3413             d = defer.succeed(None)
3414             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3415                 d.addCallback(lambda ign: node.get_best_mutable_version())
3416                 d.addCallback(lambda mv:
3417                     mv.update(MutableData(new_data), offset))
3418                 # close around node.
3419                 d.addCallback(lambda ignored, node=node:
3420                     node.download_best_version())
3421                 def _check(results):
3422                     if results != expected:
3423                         print
3424                         print "got: %s ... %s" % (results[:20], results[-20:])
3425                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3426                         self.fail("results != expected")
3427                 d.addCallback(_check)
3428             return d
3429         d0.addCallback(_run)
3430         return d0
3431
3432     def test_append(self):
3433         # We should be able to append data to a mutable file and get
3434         # what we expect.
3435         return self._test_replace(len(self.data), "appended")
3436
3437     def test_replace_middle(self):
3438         # We should be able to replace data in the middle of a mutable
3439         # file and get what we expect back.
3440         return self._test_replace(100, "replaced")
3441
3442     def test_replace_beginning(self):
3443         # We should be able to replace data at the beginning of the file
3444         # without truncating the file
3445         return self._test_replace(0, "beginning")
3446
3447     def test_replace_segstart1(self):
3448         return self._test_replace(128*1024+1, "NNNN")
3449
3450     def test_replace_zero_length_beginning(self):
3451         return self._test_replace(0, "")
3452
3453     def test_replace_zero_length_middle(self):
3454         return self._test_replace(50, "")
3455
3456     def test_replace_zero_length_segstart1(self):
3457         return self._test_replace(128*1024+1, "")
3458
3459     def test_replace_and_extend(self):
3460         # We should be able to replace data in the middle of a mutable
3461         # file and extend that mutable file and get what we expect.
3462         return self._test_replace(100, "modified " * 100000)
3463
3464
3465     def _check_differences(self, got, expected):
3466         # displaying arbitrary file corruption is tricky for a
3467         # 1MB file of repeating data,, so look for likely places
3468         # with problems and display them separately
3469         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3470         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3471         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3472                     for (start,end) in gotmods]
3473         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3474                     for (start,end) in expmods]
3475         #print "expecting: %s" % expspans
3476
3477         SEGSIZE = 128*1024
3478         if got != expected:
3479             print "differences:"
3480             for segnum in range(len(expected)//SEGSIZE):
3481                 start = segnum * SEGSIZE
3482                 end = (segnum+1) * SEGSIZE
3483                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3484                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3485                 if got_ends != exp_ends:
3486                     print "expected[%d]: %s" % (start, exp_ends)
3487                     print "got     [%d]: %s" % (start, got_ends)
3488             if expspans != gotspans:
3489                 print "expected: %s" % expspans
3490                 print "got     : %s" % gotspans
3491             open("EXPECTED","wb").write(expected)
3492             open("GOT","wb").write(got)
3493             print "wrote data to EXPECTED and GOT"
3494             self.fail("didn't get expected data")
3495
3496
3497     def test_replace_locations(self):
3498         # exercise fencepost conditions
3499         SEGSIZE = 128*1024
3500         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3501         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3502         d0 = self.do_upload_mdmf()
3503         def _run(ign):
3504             expected = self.data
3505             d = defer.succeed(None)
3506             for offset in suspects:
3507                 new_data = letters.next()*2 # "AA", then "BB", etc
3508                 expected = expected[:offset]+new_data+expected[offset+2:]
3509                 d.addCallback(lambda ign:
3510                               self.mdmf_node.get_best_mutable_version())
3511                 def _modify(mv, offset=offset, new_data=new_data):
3512                     # close over 'offset','new_data'
3513                     md = MutableData(new_data)
3514                     return mv.update(md, offset)
3515                 d.addCallback(_modify)
3516                 d.addCallback(lambda ignored:
3517                               self.mdmf_node.download_best_version())
3518                 d.addCallback(self._check_differences, expected)
3519             return d
3520         d0.addCallback(_run)
3521         return d0
3522
3523     def test_replace_locations_max_shares(self):
3524         # exercise fencepost conditions
3525         SEGSIZE = 128*1024
3526         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3527         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3528         d0 = self.do_upload_mdmf()
3529         def _run(ign):
3530             expected = self.data
3531             d = defer.succeed(None)
3532             for offset in suspects:
3533                 new_data = letters.next()*2 # "AA", then "BB", etc
3534                 expected = expected[:offset]+new_data+expected[offset+2:]
3535                 d.addCallback(lambda ign:
3536                               self.mdmf_max_shares_node.get_best_mutable_version())
3537                 def _modify(mv, offset=offset, new_data=new_data):
3538                     # close over 'offset','new_data'
3539                     md = MutableData(new_data)
3540                     return mv.update(md, offset)
3541                 d.addCallback(_modify)
3542                 d.addCallback(lambda ignored:
3543                               self.mdmf_max_shares_node.download_best_version())
3544                 d.addCallback(self._check_differences, expected)
3545             return d
3546         d0.addCallback(_run)
3547         return d0
3548
3549
3550     def test_append_power_of_two(self):
3551         # If we attempt to extend a mutable file so that its segment
3552         # count crosses a power-of-two boundary, the update operation
3553         # should know how to reencode the file.
3554
3555         # Note that the data populating self.mdmf_node is about 900 KiB
3556         # long -- this is 7 segments in the default segment size. So we
3557         # need to add 2 segments worth of data to push it over a
3558         # power-of-two boundary.
3559         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3560         new_data = self.data + (segment * 2)
3561         d0 = self.do_upload_mdmf()
3562         def _run(ign):
3563             d = defer.succeed(None)
3564             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3565                 d.addCallback(lambda ign: node.get_best_mutable_version())
3566                 d.addCallback(lambda mv:
3567                     mv.update(MutableData(segment * 2), len(self.data)))
3568                 d.addCallback(lambda ignored, node=node:
3569                     node.download_best_version())
3570                 d.addCallback(lambda results:
3571                     self.failUnlessEqual(results, new_data))
3572             return d
3573         d0.addCallback(_run)
3574         return d0
3575
3576     def test_update_sdmf(self):
3577         # Running update on a single-segment file should still work.
3578         new_data = self.small_data + "appended"
3579         d0 = self.do_upload_sdmf()
3580         def _run(ign):
3581             d = defer.succeed(None)
3582             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3583                 d.addCallback(lambda ign: node.get_best_mutable_version())
3584                 d.addCallback(lambda mv:
3585                     mv.update(MutableData("appended"), len(self.small_data)))
3586                 d.addCallback(lambda ignored, node=node:
3587                     node.download_best_version())
3588                 d.addCallback(lambda results:
3589                     self.failUnlessEqual(results, new_data))
3590             return d
3591         d0.addCallback(_run)
3592         return d0
3593
3594     def test_replace_in_last_segment(self):
3595         # The wrapper should know how to handle the tail segment
3596         # appropriately.
3597         replace_offset = len(self.data) - 100
3598         new_data = self.data[:replace_offset] + "replaced"
3599         rest_offset = replace_offset + len("replaced")
3600         new_data += self.data[rest_offset:]
3601         d0 = self.do_upload_mdmf()
3602         def _run(ign):
3603             d = defer.succeed(None)
3604             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3605                 d.addCallback(lambda ign: node.get_best_mutable_version())
3606                 d.addCallback(lambda mv:
3607                     mv.update(MutableData("replaced"), replace_offset))
3608                 d.addCallback(lambda ignored, node=node:
3609                     node.download_best_version())
3610                 d.addCallback(lambda results:
3611                     self.failUnlessEqual(results, new_data))
3612             return d
3613         d0.addCallback(_run)
3614         return d0
3615
3616     def test_multiple_segment_replace(self):
3617         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3618         new_data = self.data[:replace_offset]
3619         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3620         new_data += 2 * new_segment
3621         new_data += "replaced"
3622         rest_offset = len(new_data)
3623         new_data += self.data[rest_offset:]
3624         d0 = self.do_upload_mdmf()
3625         def _run(ign):
3626             d = defer.succeed(None)
3627             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3628                 d.addCallback(lambda ign: node.get_best_mutable_version())
3629                 d.addCallback(lambda mv:
3630                     mv.update(MutableData((2 * new_segment) + "replaced"),
3631                               replace_offset))
3632                 d.addCallback(lambda ignored, node=node:
3633                     node.download_best_version())
3634                 d.addCallback(lambda results:
3635                     self.failUnlessEqual(results, new_data))
3636             return d
3637         d0.addCallback(_run)
3638         return d0
3639
3640 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3641     sdmf_old_shares = {}
3642     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3643     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3644     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3645     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3646     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
    # Pre-generated SDMF (v1 mutable container) shares, base64-encoded,
    # captured from a file written by an older version of the code.
    # Indices 5-9 appear here; the earlier indices are presumably defined
    # above (copy_sdmf_shares() asserts there are 10 in total — TODO confirm).
    # copy_sdmf_shares() decodes these and writes them straight into the
    # storage servers so the current downloader can be exercised against
    # old on-disk data.
    sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
    sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
    sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
    sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
    sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
    # Writecap of the mutable file the shares above encode, and the exact
    # plaintext that downloading it should yield.
    sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
    sdmf_old_contents = "This is a test file.\n"
3654     def copy_sdmf_shares(self):
3655         # We'll basically be short-circuiting the upload process.
3656         servernums = self.g.servers_by_number.keys()
3657         assert len(servernums) == 10
3658
3659         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3660         # Get the storage index.
3661         cap = uri.from_string(self.sdmf_old_cap)
3662         si = cap.get_storage_index()
3663
3664         # Now execute each assignment by writing the storage.
3665         for (share, servernum) in assignments:
3666             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3667             storedir = self.get_serverdir(servernum)
3668             storage_path = os.path.join(storedir, "shares",
3669                                         storage_index_to_dir(si))
3670             fileutil.make_dirs(storage_path)
3671             fileutil.write(os.path.join(storage_path, "%d" % share),
3672                            sharedata)
3673         # ...and verify that the shares are there.
3674         shares = self.find_uri_shares(self.sdmf_old_cap)
3675         assert len(shares) == 10
3676
3677     def test_new_downloader_can_read_old_shares(self):
3678         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3679         self.set_up_grid()
3680         self.copy_sdmf_shares()
3681         nm = self.g.clients[0].nodemaker
3682         n = nm.create_from_cap(self.sdmf_old_cap)
3683         d = n.download_best_version()
3684         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3685         return d
3686
class DifferentEncoding(unittest.TestCase):
    def setUp(self):
        # Keep a handle on the fake backend and build a nodemaker on it.
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_filenode(self):
        # create a file with 3-of-20, then modify it with a client configured
        # to do 3-of-10. #1510 tracks a failure here
        self.nodemaker.default_encoding_parameters["n"] = 20
        d = self.nodemaker.create_mutable_file("old contents")

        def _reopen_with_new_params(node):
            filecap = node.get_cap().to_string()
            del node # we want a new object, not the cached one
            self.nodemaker.default_encoding_parameters["n"] = 10
            return self.nodemaker.create_from_cap(filecap)
        d.addCallback(_reopen_with_new_params)

        def _modifier(old_contents, servermap, first_time):
            return "new contents"
        d.addCallback(lambda node: node.modify(_modifier))
        return d