# tahoe-lafs source: src/allmydata/test/test_mutable.py
# (gitweb export, commit 1dc87b984a1bafd5c20b40131599986fbcd779f0)
2 import os, re, base64
3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from allmydata import uri, client
7 from allmydata.nodemaker import NodeMaker
8 from allmydata.util import base32, consumer, fileutil, mathutil
9 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
10      ssk_pubkey_fingerprint_hash
11 from allmydata.util.consumer import MemoryConsumer
12 from allmydata.util.deferredutil import gatherResults
13 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
14      NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
15 from allmydata.monitor import Monitor
16 from allmydata.test.common import ShouldFailMixin
17 from allmydata.test.no_network import GridTestMixin
18 from foolscap.api import eventually, fireEventually
19 from foolscap.logging import log
20 from allmydata.storage_client import StorageFarmBroker
21 from allmydata.storage.common import storage_index_to_dir
22 from allmydata.scripts import debug
23
24 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
25 from allmydata.mutable.common import ResponseCache, \
26      MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
27      NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
28      NotEnoughServersError, CorruptShareError
29 from allmydata.mutable.retrieve import Retrieve
30 from allmydata.mutable.publish import Publish, MutableFileHandle, \
31                                       MutableData, \
32                                       DEFAULT_MAX_SEGMENT_SIZE
33 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
34 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
35 from allmydata.mutable.repairer import MustForceRepairError
36
37 import allmydata.test.common_util as testutil
38 from allmydata.test.common import TEST_RSA_KEY_SIZE
39 from allmydata.test.test_download import PausingConsumer, \
40      PausingAndStoppingConsumer, StoppingConsumer, \
41      ImmediatelyStoppingConsumer
42
43
44 # this "FakeStorage" exists to put the share data in RAM and avoid using real
45 # network connections, both to speed up the tests and to reduce the amount of
46 # non-mutable.py code being exercised.
47
class FakeStorage:
    """In-RAM stand-in for the collection of storage servers.

    Lets the tests examine and manipulate published shares, and (via
    _sequence) control the order in which read queries are answered, to
    exercise more of the error-handling code in Retrieve.

    The storage index is ignored: one FakeStorage instance serves
    exactly one storage index.
    """

    def __init__(self):
        self._peers = {}
        # When _sequence is a list of peerids, read() defers every
        # response and accumulates it in _pending. We don't know how
        # many queries will arrive, so exactly one second after the
        # first one we release them all: sequenced peers first (in
        # _sequence order), then everyone else.
        self._sequence = None
        self._pending = {}
        self._pending_timer = None

    def read(self, peerid, storage_index):
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return defer.succeed(shares)
        # sequenced mode: park the answer until _fire_readers runs
        if not self._pending:
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        d = defer.Deferred()
        self._pending.setdefault(peerid, []).append((d, shares))
        return d

    def _fire_readers(self):
        self._pending_timer = None
        waiting, self._pending = self._pending, {}
        # answer the explicitly-sequenced peers first, in order...
        for peerid in self._sequence:
            if peerid in waiting:
                for (d, shares) in waiting.pop(peerid):
                    eventually(d.callback, shares)
        # ...then everybody else, in arbitrary order
        for replies in waiting.values():
            for (d, shares) in replies:
                eventually(d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
        shares = self._peers.setdefault(peerid, {})
        # splice 'data' into the existing share contents at 'offset'
        f = StringIO()
        f.write(shares.get(shnum, ""))
        f.seek(offset)
        f.write(data)
        shares[shnum] = f.getvalue()
103
104
class FakeStorageServer:
    """Loopback stand-in for a remote storage server.

    Wraps a FakeStorage instance and exposes the subset of the remote
    storage API that the mutable-file code uses. 'queries' counts every
    remote invocation, so tests can assert on read/write counts.
    """
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0  # number of callRemote/callRemoteOnly invocations

    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        # fireEventually defers the dispatch to a later reactor turn,
        # approximating a real network round-trip
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        # fire-and-forget variant: swallow the result (and any error)
        # and return None, like foolscap's callRemoteOnly.
        # (removed a dead trailing 'pass' statement)
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        # corruption reports are accepted and ignored
        pass

    def slot_readv(self, storage_index, shnums, readv):
        # Read the requested (offset, length) spans from each share;
        # an empty 'shnums' means "all shares".
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)
161
162
def flip_bit(original, byte_offset):
    """Return a copy of 'original' with the low bit of the byte at
    'byte_offset' inverted."""
    before = original[:byte_offset]
    after = original[byte_offset+1:]
    flipped = chr(ord(original[byte_offset]) ^ 0x01)
    return before + flipped + after
167
def add_two(original, byte_offset):
    """Corrupt the version byte at 'byte_offset'.

    Flipping the low bit is not enough for a version number, because 1
    is a valid version. XORing with 0x02 maps both valid versions
    (0 and 1) to invalid ones (2 and 3).
    """
    corrupted = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + corrupted + original[byte_offset+1:]
174
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    """Corrupt one byte of selected shares held by FakeStorage 's'.

    'offset' names the spot to damage: a key of the share's offset
    table, "pubkey", 0 for the version byte, a raw integer offset, or a
    (field, delta) tuple. If shnums_to_corrupt is None, every share is
    corrupted; otherwise it is a list of shnums to corrupt. Returns a
    Deferred that fires with 'res' so it can sit in a callback chain.
    """
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum, shares):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    # SDMF (has an IV) keeps the pubkey at a fixed spot
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            # Pass 'shares' explicitly: it is the loop variable, and a
            # plain closure would be late-bound to the LAST peer's dict
            # if get_verinfo fires after the loop completes, corrupting
            # the wrong peer's shares.
            d.addCallback(_do_corruption, data, shnum, shares)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl
223
def make_storagebroker(s=None, num_peers=10):
    """Build a StorageFarmBroker whose num_peers fake servers all share
    one FakeStorage backend (a fresh one if 's' is not given)."""
    if not s:
        s = FakeStorage()
    storage_broker = StorageFarmBroker(None, True)
    for i in range(num_peers):
        peerid = tagged_hash("peerid", "%d" % i)[:20]
        storage_broker.test_add_rref(peerid, FakeStorageServer(peerid, s))
    return storage_broker
234
def make_nodemaker(s=None, num_peers=10):
    """Build a NodeMaker (3-of-10 encoding, test-sized RSA keys) backed
    by make_storagebroker()."""
    secret_holder = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    broker = make_storagebroker(s, num_peers)
    return NodeMaker(broker, secret_holder, None,
                     None, None,
                     {"k": 3, "n": 10}, keygen)
244
245 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
246     # this used to be in Publish, but we removed the limit. Some of
247     # these tests test whether the new code correctly allows files
248     # larger than the limit.
249     OLD_MAX_SEGMENT_SIZE = 3500000
250     def setUp(self):
251         self._storage = s = FakeStorage()
252         self.nodemaker = make_nodemaker(s)
253
254     def test_create(self):
255         d = self.nodemaker.create_mutable_file()
256         def _created(n):
257             self.failUnless(isinstance(n, MutableFileNode))
258             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
259             sb = self.nodemaker.storage_broker
260             peer0 = sorted(sb.get_all_serverids())[0]
261             shnums = self._storage._peers[peer0].keys()
262             self.failUnlessEqual(len(shnums), 1)
263         d.addCallback(_created)
264         return d
265
266
267     def test_create_mdmf(self):
268         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
269         def _created(n):
270             self.failUnless(isinstance(n, MutableFileNode))
271             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
272             sb = self.nodemaker.storage_broker
273             peer0 = sorted(sb.get_all_serverids())[0]
274             shnums = self._storage._peers[peer0].keys()
275             self.failUnlessEqual(len(shnums), 1)
276         d.addCallback(_created)
277         return d
278
279     def test_single_share(self):
280         # Make sure that we tolerate publishing a single share.
281         self.nodemaker.default_encoding_parameters['k'] = 1
282         self.nodemaker.default_encoding_parameters['happy'] = 1
283         self.nodemaker.default_encoding_parameters['n'] = 1
284         d = defer.succeed(None)
285         for v in (SDMF_VERSION, MDMF_VERSION):
286             d.addCallback(lambda ignored:
287                 self.nodemaker.create_mutable_file(version=v))
288             def _created(n):
289                 self.failUnless(isinstance(n, MutableFileNode))
290                 self._node = n
291                 return n
292             d.addCallback(_created)
293             d.addCallback(lambda n:
294                 n.overwrite(MutableData("Contents" * 50000)))
295             d.addCallback(lambda ignored:
296                 self._node.download_best_version())
297             d.addCallback(lambda contents:
298                 self.failUnlessEqual(contents, "Contents" * 50000))
299         return d
300
301     def test_max_shares(self):
302         self.nodemaker.default_encoding_parameters['n'] = 255
303         d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
304         def _created(n):
305             self.failUnless(isinstance(n, MutableFileNode))
306             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
307             sb = self.nodemaker.storage_broker
308             num_shares = sum([len(self._storage._peers[x].keys()) for x \
309                               in sb.get_all_serverids()])
310             self.failUnlessEqual(num_shares, 255)
311             self._node = n
312             return n
313         d.addCallback(_created)
314         # Now we upload some contents
315         d.addCallback(lambda n:
316             n.overwrite(MutableData("contents" * 50000)))
317         # ...then download contents
318         d.addCallback(lambda ignored:
319             self._node.download_best_version())
320         # ...and check to make sure everything went okay.
321         d.addCallback(lambda contents:
322             self.failUnlessEqual("contents" * 50000, contents))
323         return d
324
325     def test_max_shares_mdmf(self):
326         # Test how files behave when there are 255 shares.
327         self.nodemaker.default_encoding_parameters['n'] = 255
328         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
329         def _created(n):
330             self.failUnless(isinstance(n, MutableFileNode))
331             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
332             sb = self.nodemaker.storage_broker
333             num_shares = sum([len(self._storage._peers[x].keys()) for x \
334                               in sb.get_all_serverids()])
335             self.failUnlessEqual(num_shares, 255)
336             self._node = n
337             return n
338         d.addCallback(_created)
339         d.addCallback(lambda n:
340             n.overwrite(MutableData("contents" * 50000)))
341         d.addCallback(lambda ignored:
342             self._node.download_best_version())
343         d.addCallback(lambda contents:
344             self.failUnlessEqual(contents, "contents" * 50000))
345         return d
346
347     def test_mdmf_filenode_cap(self):
348         # Test that an MDMF filenode, once created, returns an MDMF URI.
349         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
350         def _created(n):
351             self.failUnless(isinstance(n, MutableFileNode))
352             cap = n.get_cap()
353             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
354             rcap = n.get_readcap()
355             self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
356             vcap = n.get_verify_cap()
357             self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
358         d.addCallback(_created)
359         return d
360
361
362     def test_create_from_mdmf_writecap(self):
363         # Test that the nodemaker is capable of creating an MDMF
364         # filenode given an MDMF cap.
365         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
366         def _created(n):
367             self.failUnless(isinstance(n, MutableFileNode))
368             s = n.get_uri()
369             self.failUnless(s.startswith("URI:MDMF"))
370             n2 = self.nodemaker.create_from_cap(s)
371             self.failUnless(isinstance(n2, MutableFileNode))
372             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
373             self.failUnlessEqual(n.get_uri(), n2.get_uri())
374         d.addCallback(_created)
375         return d
376
377
378     def test_create_from_mdmf_writecap_with_extensions(self):
379         # Test that the nodemaker is capable of creating an MDMF
380         # filenode when given a writecap with extension parameters in
381         # them.
382         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
383         def _created(n):
384             self.failUnless(isinstance(n, MutableFileNode))
385             s = n.get_uri()
386             # We need to cheat a little and delete the nodemaker's
387             # cache, otherwise we'll get the same node instance back.
388             self.failUnlessIn(":3:131073", s)
389             n2 = self.nodemaker.create_from_cap(s)
390
391             self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
392             self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
393             hints = n2._downloader_hints
394             self.failUnlessEqual(hints['k'], 3)
395             self.failUnlessEqual(hints['segsize'], 131073)
396         d.addCallback(_created)
397         return d
398
399
400     def test_create_from_mdmf_readcap(self):
401         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
402         def _created(n):
403             self.failUnless(isinstance(n, MutableFileNode))
404             s = n.get_readonly_uri()
405             n2 = self.nodemaker.create_from_cap(s)
406             self.failUnless(isinstance(n2, MutableFileNode))
407
408             # Check that it's a readonly node
409             self.failUnless(n2.is_readonly())
410         d.addCallback(_created)
411         return d
412
413
414     def test_create_from_mdmf_readcap_with_extensions(self):
415         # We should be able to create an MDMF filenode with the
416         # extension parameters without it breaking.
417         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
418         def _created(n):
419             self.failUnless(isinstance(n, MutableFileNode))
420             s = n.get_readonly_uri()
421             self.failUnlessIn(":3:131073", s)
422
423             n2 = self.nodemaker.create_from_cap(s)
424             self.failUnless(isinstance(n2, MutableFileNode))
425             self.failUnless(n2.is_readonly())
426             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
427             hints = n2._downloader_hints
428             self.failUnlessEqual(hints["k"], 3)
429             self.failUnlessEqual(hints["segsize"], 131073)
430         d.addCallback(_created)
431         return d
432
433
434     def test_internal_version_from_cap(self):
435         # MutableFileNodes and MutableFileVersions have an internal
436         # switch that tells them whether they're dealing with an SDMF or
437         # MDMF mutable file when they start doing stuff. We want to make
438         # sure that this is set appropriately given an MDMF cap.
439         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
440         def _created(n):
441             self.uri = n.get_uri()
442             self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
443
444             n2 = self.nodemaker.create_from_cap(self.uri)
445             self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
446         d.addCallback(_created)
447         return d
448
449
450     def test_serialize(self):
451         n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
452         calls = []
453         def _callback(*args, **kwargs):
454             self.failUnlessEqual(args, (4,) )
455             self.failUnlessEqual(kwargs, {"foo": 5})
456             calls.append(1)
457             return 6
458         d = n._do_serialized(_callback, 4, foo=5)
459         def _check_callback(res):
460             self.failUnlessEqual(res, 6)
461             self.failUnlessEqual(calls, [1])
462         d.addCallback(_check_callback)
463
464         def _errback():
465             raise ValueError("heya")
466         d.addCallback(lambda res:
467                       self.shouldFail(ValueError, "_check_errback", "heya",
468                                       n._do_serialized, _errback))
469         return d
470
    def test_upload_and_download(self):
        # Exercise the full SDMF write/read cycle: servermap queries,
        # overwrite(), download_best_version(), upload() with an
        # explicit servermap, and download_version() of a chosen version.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            # the servermap dump should mention the 3-of-10 encoding
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            # overwrite() fires with None on success
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            # a second overwrite replaces the first contents
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            # upload() with an explicit servermap, then read back
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # download a specific (the best recoverable) version
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d
509
510
511     def test_upload_and_download_mdmf(self):
512         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
513         def _created(n):
514             d = defer.succeed(None)
515             d.addCallback(lambda ignored:
516                 n.get_servermap(MODE_READ))
517             def _then(servermap):
518                 dumped = servermap.dump(StringIO())
519                 self.failUnlessIn("3-of-10", dumped.getvalue())
520             d.addCallback(_then)
521             # Now overwrite the contents with some new contents. We want 
522             # to make them big enough to force the file to be uploaded
523             # in more than one segment.
524             big_contents = "contents1" * 100000 # about 900 KiB
525             big_contents_uploadable = MutableData(big_contents)
526             d.addCallback(lambda ignored:
527                 n.overwrite(big_contents_uploadable))
528             d.addCallback(lambda ignored:
529                 n.download_best_version())
530             d.addCallback(lambda data:
531                 self.failUnlessEqual(data, big_contents))
532             # Overwrite the contents again with some new contents. As
533             # before, they need to be big enough to force multiple
534             # segments, so that we make the downloader deal with
535             # multiple segments.
536             bigger_contents = "contents2" * 1000000 # about 9MiB 
537             bigger_contents_uploadable = MutableData(bigger_contents)
538             d.addCallback(lambda ignored:
539                 n.overwrite(bigger_contents_uploadable))
540             d.addCallback(lambda ignored:
541                 n.download_best_version())
542             d.addCallback(lambda data:
543                 self.failUnlessEqual(data, bigger_contents))
544             return d
545         d.addCallback(_created)
546         return d
547
548
549     def test_retrieve_producer_mdmf(self):
550         # We should make sure that the retriever is able to pause and stop
551         # correctly.
552         data = "contents1" * 100000
553         d = self.nodemaker.create_mutable_file(MutableData(data),
554                                                version=MDMF_VERSION)
555         d.addCallback(lambda node: node.get_best_mutable_version())
556         d.addCallback(self._test_retrieve_producer, "MDMF", data)
557         return d
558
559     # note: SDMF has only one big segment, so we can't use the usual
560     # after-the-first-write() trick to pause or stop the download.
561     # Disabled until we find a better approach.
562     def OFF_test_retrieve_producer_sdmf(self):
563         data = "contents1" * 100000
564         d = self.nodemaker.create_mutable_file(MutableData(data),
565                                                version=SDMF_VERSION)
566         d.addCallback(lambda node: node.get_best_mutable_version())
567         d.addCallback(self._test_retrieve_producer, "SDMF", data)
568         return d
569
570     def _test_retrieve_producer(self, version, kind, data):
571         # Now we'll retrieve it into a pausing consumer.
572         c = PausingConsumer()
573         d = version.read(c)
574         d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
575
576         c2 = PausingAndStoppingConsumer()
577         d.addCallback(lambda ign:
578                       self.shouldFail(DownloadStopped, kind+"_pause_stop",
579                                       "our Consumer called stopProducing()",
580                                       version.read, c2))
581
582         c3 = StoppingConsumer()
583         d.addCallback(lambda ign:
584                       self.shouldFail(DownloadStopped, kind+"_stop",
585                                       "our Consumer called stopProducing()",
586                                       version.read, c3))
587
588         c4 = ImmediatelyStoppingConsumer()
589         d.addCallback(lambda ign:
590                       self.shouldFail(DownloadStopped, kind+"_stop_imm",
591                                       "our Consumer called stopProducing()",
592                                       version.read, c4))
593
594         def _then(ign):
595             c5 = MemoryConsumer()
596             d1 = version.read(c5)
597             c5.producer.stopProducing()
598             return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
599                                    "our Consumer called stopProducing()",
600                                    lambda: d1)
601         d.addCallback(_then)
602         return d
603
604     def test_download_from_mdmf_cap(self):
605         # We should be able to download an MDMF file given its cap
606         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
607         def _created(node):
608             self.uri = node.get_uri()
609
610             return node.overwrite(MutableData("contents1" * 100000))
611         def _then(ignored):
612             node = self.nodemaker.create_from_cap(self.uri)
613             return node.download_best_version()
614         def _downloaded(data):
615             self.failUnlessEqual(data, "contents1" * 100000)
616         d.addCallback(_created)
617         d.addCallback(_then)
618         d.addCallback(_downloaded)
619         return d
620
621
622     def test_create_and_download_from_bare_mdmf_cap(self):
623         # MDMF caps have extension parameters on them by default. We
624         # need to make sure that they work without extension parameters.
625         contents = MutableData("contents" * 100000)
626         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
627                                                contents=contents)
628         def _created(node):
629             uri = node.get_uri()
630             self._created = node
631             self.failUnlessIn(":3:131073", uri)
632             # Now strip that off the end of the uri, then try creating
633             # and downloading the node again.
634             bare_uri = uri.replace(":3:131073", "")
635             assert ":3:131073" not in bare_uri
636
637             return self.nodemaker.create_from_cap(bare_uri)
638         d.addCallback(_created)
639         def _created_bare(node):
640             self.failUnlessEqual(node.get_writekey(),
641                                  self._created.get_writekey())
642             self.failUnlessEqual(node.get_readkey(),
643                                  self._created.get_readkey())
644             self.failUnlessEqual(node.get_storage_index(),
645                                  self._created.get_storage_index())
646             return node.download_best_version()
647         d.addCallback(_created_bare)
648         d.addCallback(lambda data:
649             self.failUnlessEqual(data, "contents" * 100000))
650         return d
651
652
653     def test_mdmf_write_count(self):
654         # Publishing an MDMF file should only cause one write for each
655         # share that is to be published. Otherwise, we introduce
656         # undesirable semantics that are a regression from SDMF
657         upload = MutableData("MDMF" * 100000) # about 400 KiB
658         d = self.nodemaker.create_mutable_file(upload,
659                                                version=MDMF_VERSION)
660         def _check_server_write_counts(ignored):
661             sb = self.nodemaker.storage_broker
662             for server in sb.servers.itervalues():
663                 self.failUnlessEqual(server.get_rref().queries, 1)
664         d.addCallback(_check_server_write_counts)
665         return d
666
667
668     def test_create_with_initial_contents(self):
669         upload1 = MutableData("contents 1")
670         d = self.nodemaker.create_mutable_file(upload1)
671         def _created(n):
672             d = n.download_best_version()
673             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
674             upload2 = MutableData("contents 2")
675             d.addCallback(lambda res: n.overwrite(upload2))
676             d.addCallback(lambda res: n.download_best_version())
677             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
678             return d
679         d.addCallback(_created)
680         return d
681
682
683     def test_create_mdmf_with_initial_contents(self):
684         initial_contents = "foobarbaz" * 131072 # 900KiB
685         initial_contents_uploadable = MutableData(initial_contents)
686         d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
687                                                version=MDMF_VERSION)
688         def _created(n):
689             d = n.download_best_version()
690             d.addCallback(lambda data:
691                 self.failUnlessEqual(data, initial_contents))
692             uploadable2 = MutableData(initial_contents + "foobarbaz")
693             d.addCallback(lambda ignored:
694                 n.overwrite(uploadable2))
695             d.addCallback(lambda ignored:
696                 n.download_best_version())
697             d.addCallback(lambda data:
698                 self.failUnlessEqual(data, initial_contents +
699                                            "foobarbaz"))
700             return d
701         d.addCallback(_created)
702         return d
703
704
705     def test_response_cache_memory_leak(self):
706         d = self.nodemaker.create_mutable_file("contents")
707         def _created(n):
708             d = n.download_best_version()
709             d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
710             d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
711
712             def _check_cache(expected):
713                 # The total size of cache entries should not increase on the second download;
714                 # in fact the cache contents should be identical.
715                 d2 = n.download_best_version()
716                 d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
717                 return d2
718             d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
719             return d
720         d.addCallback(_created)
721         return d
722
723     def test_create_with_initial_contents_function(self):
724         data = "initial contents"
725         def _make_contents(n):
726             self.failUnless(isinstance(n, MutableFileNode))
727             key = n.get_writekey()
728             self.failUnless(isinstance(key, str), key)
729             self.failUnlessEqual(len(key), 16) # AES key size
730             return MutableData(data)
731         d = self.nodemaker.create_mutable_file(_make_contents)
732         def _created(n):
733             return n.download_best_version()
734         d.addCallback(_created)
735         d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
736         return d
737
738
739     def test_create_mdmf_with_initial_contents_function(self):
740         data = "initial contents" * 100000
741         def _make_contents(n):
742             self.failUnless(isinstance(n, MutableFileNode))
743             key = n.get_writekey()
744             self.failUnless(isinstance(key, str), key)
745             self.failUnlessEqual(len(key), 16)
746             return MutableData(data)
747         d = self.nodemaker.create_mutable_file(_make_contents,
748                                                version=MDMF_VERSION)
749         d.addCallback(lambda n:
750             n.download_best_version())
751         d.addCallback(lambda data2:
752             self.failUnlessEqual(data2, data))
753         return d
754
755
756     def test_create_with_too_large_contents(self):
757         BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
758         BIG_uploadable = MutableData(BIG)
759         d = self.nodemaker.create_mutable_file(BIG_uploadable)
760         def _created(n):
761             other_BIG_uploadable = MutableData(BIG)
762             d = n.overwrite(other_BIG_uploadable)
763             return d
764         d.addCallback(_created)
765         return d
766
767     def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
768         d = n.get_servermap(MODE_READ)
769         d.addCallback(lambda servermap: servermap.best_recoverable_version())
770         d.addCallback(lambda verinfo:
771                       self.failUnlessEqual(verinfo[0], expected_seqnum, which))
772         return d
773
    def test_modify(self):
        # Exercise MutableFileNode.modify() with a spectrum of modifier
        # callbacks, checking contents and sequence number after each:
        # append, no-op, None (no change), raising, simulated
        # UncoordinatedWriteError, and oversized contents.
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            # returns the contents unchanged
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            # returning None leaves the file unchanged (checked below)
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            # produce contents one byte over the old SDMF segment limit
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            # plain modifier: contents grow, seqnum bumps to 2
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # a modifier that returns the old contents verbatim must not
            # publish a new version
            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            # returning None must likewise leave contents and seqnum alone
            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            # an exception from the modifier propagates to the caller,
            # and nothing is published
            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))

            # NOTE(review): these "big" checks are not preceded by a
            # _toobig_modifier call; that modifier is only applied at the
            # very end of the test, without a shouldFail wrapper —
            # presumably oversized writes are now accepted. Confirm intent.
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            # a single simulated UCWE is retried: the modifier runs twice
            # (len(calls) == 2) and the second attempt lands
            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d
861
862
    def test_modify_backoffer(self):
        # Exercise the backoffer argument to modify(): a stopper that
        # re-raises immediately, a pauser that waits before retrying, and
        # a BackoffAgent configured to give up quickly.
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            # returning the failure aborts the retry loop immediately
            return f
        def _backoff_pauser(node, f):
            # wait half a second, then let the retry proceed
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            # baseline: a normal modify works and bumps the seqnum
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # with the stopper, the simulated UCWE surfaces to the caller
            # and nothing is published
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            # with the pauser, the retry goes through and the write lands
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            # a modifier that always raises UCWE exhausts the giveuper's
            # retries, so the error surfaces and the file is untouched
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d
927
928     def test_upload_and_download_full_size_keys(self):
929         self.nodemaker.key_generator = client.KeyGenerator()
930         d = self.nodemaker.create_mutable_file()
931         def _created(n):
932             d = defer.succeed(None)
933             d.addCallback(lambda res: n.get_servermap(MODE_READ))
934             d.addCallback(lambda smap: smap.dump(StringIO()))
935             d.addCallback(lambda sio:
936                           self.failUnless("3-of-10" in sio.getvalue()))
937             d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
938             d.addCallback(lambda res: self.failUnlessIdentical(res, None))
939             d.addCallback(lambda res: n.download_best_version())
940             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
941             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
942             d.addCallback(lambda res: n.download_best_version())
943             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
944             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
945             d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
946             d.addCallback(lambda res: n.download_best_version())
947             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
948             d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
949             d.addCallback(lambda smap:
950                           n.download_version(smap,
951                                              smap.best_recoverable_version()))
952             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
953             return d
954         d.addCallback(_created)
955         return d
956
957
958     def test_size_after_servermap_update(self):
959         # a mutable file node should have something to say about how big
960         # it is after a servermap update is performed, since this tells
961         # us how large the best version of that mutable file is.
962         d = self.nodemaker.create_mutable_file()
963         def _created(n):
964             self.n = n
965             return n.get_servermap(MODE_READ)
966         d.addCallback(_created)
967         d.addCallback(lambda ignored:
968             self.failUnlessEqual(self.n.get_size(), 0))
969         d.addCallback(lambda ignored:
970             self.n.overwrite(MutableData("foobarbaz")))
971         d.addCallback(lambda ignored:
972             self.failUnlessEqual(self.n.get_size(), 9))
973         d.addCallback(lambda ignored:
974             self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
975         d.addCallback(_created)
976         d.addCallback(lambda ignored:
977             self.failUnlessEqual(self.n.get_size(), 9))
978         return d
979
980
class PublishMixin:
    """Mixin that publishes mutable files into a FakeStorage-backed grid.

    Each publish_* method (re)initializes self._storage, self._nodemaker,
    self.CONTENTS and self.uploadable, and leaves the new filenode in
    self._fn (with an independent sibling node in self._fn2) so that the
    shares can be inspected or corrupted by later test code.
    """

    def _publish(self, contents, version=None):
        # Shared machinery for publish_one/publish_mdmf/publish_sdmf:
        # build a fresh fake grid, create a mutable file holding
        # 'contents' (of the given version, when one is specified), and
        # stash the node plus a second node made from the same cap.
        self.CONTENTS = contents
        self.uploadable = MutableData(contents)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        if version is None:
            # preserve the original behavior of not passing the keyword
            d = self._nodemaker.create_mutable_file(self.uploadable)
        else:
            d = self._nodemaker.create_mutable_file(self.uploadable,
                                                    version=version)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        return self._publish("New contents go here" * 1000)

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file. self.CONTENTS has more than one segment.
        return self._publish("This is an MDMF file" * 100000,
                             version=MDMF_VERSION)

    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file.
        return self._publish("This is an SDMF file" * 1000,
                             version=SDMF_VERSION)

    def publish_multiple(self, version=0):
        # Publish five successive versions of the same file, snapshotting
        # all shares after each update (via _copy_shares) so tests can
        # mix and match share versions later with _set_versions().
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d

    def _copy_shares(self, ignored, index):
        # Snapshot the current share data into self._copied_shares[index].
        # The copy is two levels deep (fresh per-peer dicts) so later
        # mutations of self._storage._peers don't affect the snapshot.
        shares = self._storage._peers
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]
1086
class Servermap(unittest.TestCase, PublishMixin):
    """Tests for ServermapUpdater: building and refreshing ServerMaps
    over the file published in setUp (plus MDMF/SDMF variants)."""

    def setUp(self):
        # PublishMixin.publish_one leaves a published file in self._fn
        # and its shares in self._storage.
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        # Build a fresh ServerMap for fn (default: the setUp file) and
        # update it in the given mode; returns a Deferred firing with
        # the updated map.
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        # Re-run the updater starting from an existing map instead of an
        # empty one.
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        # Assert that the map records exactly one version, recoverable,
        # with num_shares shares located (of a 3-of-10 encoding), and
        # return the map so callers can keep chaining.
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        # a bogus share number must not map to any version
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        return sm

    def test_basic(self):
        # Each mode should find enough shares to declare the single
        # version recoverable, stopping after a mode-dependent number of
        # shares; then verify the same map can be re-used and updated.
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            # again use a sibling node, so no state is cached from creation
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        # Marking shares bad should drop them from the map, and a
        # follow-up update should locate replacement shares.
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares it in any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, peerid, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (peerid, shnum) )
                    sm.mark_bad_share(peerid, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (peerid, shnum) in self._corrupted:
                peer_shares = sm.shares_on_peer(peerid)
                self.failIf(shnum in peer_shares,
                            "%d was in %s" % (shnum, peer_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

    def failUnlessNoneRecoverable(self, sm):
        # Assert the map found no versions at all, neither recoverable
        # nor unrecoverable.
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        # with every share deleted, all modes should report nothing
        # recoverable.
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

    def failUnlessNotQuiteEnough(self, sm):
        # Assert the map shows exactly one version which is unrecoverable
        # because only 2 of the required 3 shares remain; return the map.
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        return sm

    def test_not_quite_enough_shares(self):
        # delete shares until only two remain (k=3 are needed), then
        # verify every mode reports the version as unrecoverable.
        s = self._storage
        ms = self.make_servermap
        num_shares = len(s._peers)
        for peerid in s._peers:
            s._peers[peerid] = {}
            num_shares -= 1
            if num_shares == 2:
                break
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))

        return d


    def test_servermapupdater_finds_mdmf_files(self):
        # publish an MDMF file (replacing the SDMF file from setUp), then
        # make sure that when we run the ServermapUpdater, the file is
        # reported to have one recoverable version.
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        # Calling make_servermap also updates the servermap in the mode
        # that we specify, so we just need to see what it says.
        def _check_servermap(sm):
            self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        d.addCallback(_check_servermap)
        return d


    def test_fetch_update(self):
        # updating an MDMF file's map with an update_range should record
        # update_data for all 10 shares, one version each.
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
        def _check_servermap(sm):
            # 10 shares
            self.failUnlessEqual(len(sm.update_data), 10)
            # one version
            for data in sm.update_data.itervalues():
                self.failUnlessEqual(len(data), 1)
        d.addCallback(_check_servermap)
        return d


    def test_servermapupdater_finds_sdmf_files(self):
        # same as the MDMF variant above, but for an explicitly SDMF file.
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_sdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        d.addCallback(lambda servermap:
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
        return d
1313
1314
1315 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
    def setUp(self):
        # publish a single file (PublishMixin.publish_one) whose shares
        # the roundtrip tests will read back.
        return self.publish_one()
1318
1319     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1320         if oldmap is None:
1321             oldmap = ServerMap()
1322         if sb is None:
1323             sb = self._storage_broker
1324         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1325         d = smu.update()
1326         return d
1327
1328     def abbrev_verinfo(self, verinfo):
1329         if verinfo is None:
1330             return None
1331         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1332          offsets_tuple) = verinfo
1333         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1334
1335     def abbrev_verinfo_dict(self, verinfo_d):
1336         output = {}
1337         for verinfo,value in verinfo_d.items():
1338             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1339              offsets_tuple) = verinfo
1340             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1341         return output
1342
1343     def dump_servermap(self, servermap):
1344         print "SERVERMAP", servermap
1345         print "RECOVERABLE", [self.abbrev_verinfo(v)
1346                               for v in servermap.recoverable_versions()]
1347         print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1348         print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1349
1350     def do_download(self, servermap, version=None):
1351         if version is None:
1352             version = servermap.best_recoverable_version()
1353         r = Retrieve(self._fn, servermap, version)
1354         c = consumer.MemoryConsumer()
1355         d = r.download(consumer=c)
1356         d.addCallback(lambda mc: "".join(mc.chunks))
1357         return d
1358
1359
1360     def test_basic(self):
1361         d = self.make_servermap()
1362         def _do_retrieve(servermap):
1363             self._smap = servermap
1364             #self.dump_servermap(servermap)
1365             self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1366             return self.do_download(servermap)
1367         d.addCallback(_do_retrieve)
1368         def _retrieved(new_contents):
1369             self.failUnlessEqual(new_contents, self.CONTENTS)
1370         d.addCallback(_retrieved)
1371         # we should be able to re-use the same servermap, both with and
1372         # without updating it.
1373         d.addCallback(lambda res: self.do_download(self._smap))
1374         d.addCallback(_retrieved)
1375         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1376         d.addCallback(lambda res: self.do_download(self._smap))
1377         d.addCallback(_retrieved)
1378         # clobbering the pubkey should make the servermap updater re-fetch it
1379         def _clobber_pubkey(res):
1380             self._fn._pubkey = None
1381         d.addCallback(_clobber_pubkey)
1382         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1383         d.addCallback(lambda res: self.do_download(self._smap))
1384         d.addCallback(_retrieved)
1385         return d
1386
1387     def test_all_shares_vanished(self):
1388         d = self.make_servermap()
1389         def _remove_shares(servermap):
1390             for shares in self._storage._peers.values():
1391                 shares.clear()
1392             d1 = self.shouldFail(NotEnoughSharesError,
1393                                  "test_all_shares_vanished",
1394                                  "ran out of peers",
1395                                  self.do_download, servermap)
1396             return d1
1397         d.addCallback(_remove_shares)
1398         return d
1399
1400     def test_no_servers(self):
1401         sb2 = make_storagebroker(num_peers=0)
1402         # if there are no servers, then a MODE_READ servermap should come
1403         # back empty
1404         d = self.make_servermap(sb=sb2)
1405         def _check_servermap(servermap):
1406             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1407             self.failIf(servermap.recoverable_versions())
1408             self.failIf(servermap.unrecoverable_versions())
1409             self.failIf(servermap.all_peers())
1410         d.addCallback(_check_servermap)
1411         return d
1412
1413     def test_no_servers_download(self):
1414         sb2 = make_storagebroker(num_peers=0)
1415         self._fn._storage_broker = sb2
1416         d = self.shouldFail(UnrecoverableFileError,
1417                             "test_no_servers_download",
1418                             "no recoverable versions",
1419                             self._fn.download_best_version)
1420         def _restore(res):
1421             # a failed download that occurs while we aren't connected to
1422             # anybody should not prevent a subsequent download from working.
1423             # This isn't quite the webapi-driven test that #463 wants, but it
1424             # should be close enough.
1425             self._fn._storage_broker = self._storage_broker
1426             return self._fn.download_best_version()
1427         def _retrieved(new_contents):
1428             self.failUnlessEqual(new_contents, self.CONTENTS)
1429         d.addCallback(_restore)
1430         d.addCallback(_retrieved)
1431         return d
1432
1433
1434     def _test_corrupt_all(self, offset, substring,
1435                           should_succeed=False,
1436                           corrupt_early=True,
1437                           failure_checker=None,
1438                           fetch_privkey=False):
1439         d = defer.succeed(None)
1440         if corrupt_early:
1441             d.addCallback(corrupt, self._storage, offset)
1442         d.addCallback(lambda res: self.make_servermap())
1443         if not corrupt_early:
1444             d.addCallback(corrupt, self._storage, offset)
1445         def _do_retrieve(servermap):
1446             ver = servermap.best_recoverable_version()
1447             if ver is None and not should_succeed:
1448                 # no recoverable versions == not succeeding. The problem
1449                 # should be noted in the servermap's list of problems.
1450                 if substring:
1451                     allproblems = [str(f) for f in servermap.problems]
1452                     self.failUnlessIn(substring, "".join(allproblems))
1453                 return servermap
1454             if should_succeed:
1455                 d1 = self._fn.download_version(servermap, ver,
1456                                                fetch_privkey)
1457                 d1.addCallback(lambda new_contents:
1458                                self.failUnlessEqual(new_contents, self.CONTENTS))
1459             else:
1460                 d1 = self.shouldFail(NotEnoughSharesError,
1461                                      "_corrupt_all(offset=%s)" % (offset,),
1462                                      substring,
1463                                      self._fn.download_version, servermap,
1464                                                                 ver,
1465                                                                 fetch_privkey)
1466             if failure_checker:
1467                 d1.addCallback(failure_checker)
1468             d1.addCallback(lambda res: servermap)
1469             return d1
1470         d.addCallback(_do_retrieve)
1471         return d
1472
1473     def test_corrupt_all_verbyte(self):
1474         # when the version byte is not 0 or 1, we hit an UnknownVersionError
1475         # error in unpack_share().
1476         d = self._test_corrupt_all(0, "UnknownVersionError")
1477         def _check_servermap(servermap):
1478             # and the dump should mention the problems
1479             s = StringIO()
1480             dump = servermap.dump(s).getvalue()
1481             self.failUnless("30 PROBLEMS" in dump, dump)
1482         d.addCallback(_check_servermap)
1483         return d
1484
1485     def test_corrupt_all_seqnum(self):
1486         # a corrupt sequence number will trigger a bad signature
1487         return self._test_corrupt_all(1, "signature is invalid")
1488
1489     def test_corrupt_all_R(self):
1490         # a corrupt root hash will trigger a bad signature
1491         return self._test_corrupt_all(9, "signature is invalid")
1492
1493     def test_corrupt_all_IV(self):
1494         # a corrupt salt/IV will trigger a bad signature
1495         return self._test_corrupt_all(41, "signature is invalid")
1496
1497     def test_corrupt_all_k(self):
1498         # a corrupt 'k' will trigger a bad signature
1499         return self._test_corrupt_all(57, "signature is invalid")
1500
1501     def test_corrupt_all_N(self):
1502         # a corrupt 'N' will trigger a bad signature
1503         return self._test_corrupt_all(58, "signature is invalid")
1504
1505     def test_corrupt_all_segsize(self):
1506         # a corrupt segsize will trigger a bad signature
1507         return self._test_corrupt_all(59, "signature is invalid")
1508
1509     def test_corrupt_all_datalen(self):
1510         # a corrupt data length will trigger a bad signature
1511         return self._test_corrupt_all(67, "signature is invalid")
1512
1513     def test_corrupt_all_pubkey(self):
1514         # a corrupt pubkey won't match the URI's fingerprint. We need to
1515         # remove the pubkey from the filenode, or else it won't bother trying
1516         # to update it.
1517         self._fn._pubkey = None
1518         return self._test_corrupt_all("pubkey",
1519                                       "pubkey doesn't match fingerprint")
1520
1521     def test_corrupt_all_sig(self):
1522         # a corrupt signature is a bad one
1523         # the signature runs from about [543:799], depending upon the length
1524         # of the pubkey
1525         return self._test_corrupt_all("signature", "signature is invalid")
1526
1527     def test_corrupt_all_share_hash_chain_number(self):
1528         # a corrupt share hash chain entry will show up as a bad hash. If we
1529         # mangle the first byte, that will look like a bad hash number,
1530         # causing an IndexError
1531         return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1532
1533     def test_corrupt_all_share_hash_chain_hash(self):
1534         # a corrupt share hash chain entry will show up as a bad hash. If we
1535         # mangle a few bytes in, that will look like a bad hash.
1536         return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1537
1538     def test_corrupt_all_block_hash_tree(self):
1539         return self._test_corrupt_all("block_hash_tree",
1540                                       "block hash tree failure")
1541
1542     def test_corrupt_all_block(self):
1543         return self._test_corrupt_all("share_data", "block hash tree failure")
1544
1545     def test_corrupt_all_encprivkey(self):
1546         # a corrupted privkey won't even be noticed by the reader, only by a
1547         # writer.
1548         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1549
1550
1551     def test_corrupt_all_encprivkey_late(self):
1552         # this should work for the same reason as above, but we corrupt 
1553         # after the servermap update to exercise the error handling
1554         # code.
1555         # We need to remove the privkey from the node, or the retrieve
1556         # process won't know to update it.
1557         self._fn._privkey = None
1558         return self._test_corrupt_all("enc_privkey",
1559                                       None, # this shouldn't fail
1560                                       should_succeed=True,
1561                                       corrupt_early=False,
1562                                       fetch_privkey=True)
1563
1564
1565     # disabled until retrieve tests checkstring on each blockfetch. I didn't
1566     # just use a .todo because the failing-but-ignored test emits about 30kB
1567     # of noise.
1568     def OFF_test_corrupt_all_seqnum_late(self):
1569         # corrupting the seqnum between mapupdate and retrieve should result
1570         # in NotEnoughSharesError, since each share will look invalid
1571         def _check(res):
1572             f = res[0]
1573             self.failUnless(f.check(NotEnoughSharesError))
1574             self.failUnless("uncoordinated write" in str(f))
1575         return self._test_corrupt_all(1, "ran out of peers",
1576                                       corrupt_early=False,
1577                                       failure_checker=_check)
1578
1579     def test_corrupt_all_block_hash_tree_late(self):
1580         def _check(res):
1581             f = res[0]
1582             self.failUnless(f.check(NotEnoughSharesError))
1583         return self._test_corrupt_all("block_hash_tree",
1584                                       "block hash tree failure",
1585                                       corrupt_early=False,
1586                                       failure_checker=_check)
1587
1588
1589     def test_corrupt_all_block_late(self):
1590         def _check(res):
1591             f = res[0]
1592             self.failUnless(f.check(NotEnoughSharesError))
1593         return self._test_corrupt_all("share_data", "block hash tree failure",
1594                                       corrupt_early=False,
1595                                       failure_checker=_check)
1596
1597
1598     def test_basic_pubkey_at_end(self):
1599         # we corrupt the pubkey in all but the last 'k' shares, allowing the
1600         # download to succeed but forcing a bunch of retries first. Note that
1601         # this is rather pessimistic: our Retrieve process will throw away
1602         # the whole share if the pubkey is bad, even though the rest of the
1603         # share might be good.
1604
1605         self._fn._pubkey = None
1606         k = self._fn.get_required_shares()
1607         N = self._fn.get_total_shares()
1608         d = defer.succeed(None)
1609         d.addCallback(corrupt, self._storage, "pubkey",
1610                       shnums_to_corrupt=range(0, N-k))
1611         d.addCallback(lambda res: self.make_servermap())
1612         def _do_retrieve(servermap):
1613             self.failUnless(servermap.problems)
1614             self.failUnless("pubkey doesn't match fingerprint"
1615                             in str(servermap.problems[0]))
1616             ver = servermap.best_recoverable_version()
1617             r = Retrieve(self._fn, servermap, ver)
1618             c = consumer.MemoryConsumer()
1619             return r.download(c)
1620         d.addCallback(_do_retrieve)
1621         d.addCallback(lambda mc: "".join(mc.chunks))
1622         d.addCallback(lambda new_contents:
1623                       self.failUnlessEqual(new_contents, self.CONTENTS))
1624         return d
1625
1626
1627     def _test_corrupt_some(self, offset, mdmf=False):
1628         if mdmf:
1629             d = self.publish_mdmf()
1630         else:
1631             d = defer.succeed(None)
1632         d.addCallback(lambda ignored:
1633             corrupt(None, self._storage, offset, range(5)))
1634         d.addCallback(lambda ignored:
1635             self.make_servermap())
1636         def _do_retrieve(servermap):
1637             ver = servermap.best_recoverable_version()
1638             self.failUnless(ver)
1639             return self._fn.download_best_version()
1640         d.addCallback(_do_retrieve)
1641         d.addCallback(lambda new_contents:
1642             self.failUnlessEqual(new_contents, self.CONTENTS))
1643         return d
1644
1645
1646     def test_corrupt_some(self):
1647         # corrupt the data of first five shares (so the servermap thinks
1648         # they're good but retrieve marks them as bad), so that the
1649         # MODE_READ set of 6 will be insufficient, forcing node.download to
1650         # retry with more servers.
1651         return self._test_corrupt_some("share_data")
1652
1653
1654     def test_download_fails(self):
1655         d = corrupt(None, self._storage, "signature")
1656         d.addCallback(lambda ignored:
1657             self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1658                             "no recoverable versions",
1659                             self._fn.download_best_version))
1660         return d
1661
1662
1663
1664     def test_corrupt_mdmf_block_hash_tree(self):
1665         d = self.publish_mdmf()
1666         d.addCallback(lambda ignored:
1667             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1668                                    "block hash tree failure",
1669                                    corrupt_early=False,
1670                                    should_succeed=False))
1671         return d
1672
1673
1674     def test_corrupt_mdmf_block_hash_tree_late(self):
1675         d = self.publish_mdmf()
1676         d.addCallback(lambda ignored:
1677             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1678                                    "block hash tree failure",
1679                                    corrupt_early=True,
1680                                    should_succeed=False))
1681         return d
1682
1683
1684     def test_corrupt_mdmf_share_data(self):
1685         d = self.publish_mdmf()
1686         d.addCallback(lambda ignored:
1687             # TODO: Find out what the block size is and corrupt a
1688             # specific block, rather than just guessing.
1689             self._test_corrupt_all(("share_data", 12 * 40),
1690                                     "block hash tree failure",
1691                                     corrupt_early=True,
1692                                     should_succeed=False))
1693         return d
1694
1695
1696     def test_corrupt_some_mdmf(self):
1697         return self._test_corrupt_some(("share_data", 12 * 40),
1698                                        mdmf=True)
1699
1700
class CheckerMixin:
    """Assertion helpers shared by the checker/verifier test cases."""

    def check_good(self, r, where):
        # the check results must report a healthy file
        self.failUnless(r.is_healthy(), where)
        return r

    def check_bad(self, r, where):
        # the check results must report an unhealthy file
        self.failIf(r.is_healthy(), where)
        return r

    def check_expected_failure(self, r, expected_exception, substring, where):
        # scan r.problems for the first failure wrapping
        # expected_exception, and insist that its text mentions substring
        for problem in r.problems:
            (peerid, storage_index, shnum, f) = problem
            if not f.check(expected_exception):
                continue
            self.failUnless(substring in str(f),
                            "%s: substring '%s' not in '%s'" %
                            (where, substring, str(f)))
            return
        self.fail("%s: didn't see expected exception %s in problems %s" %
                  (where, expected_exception, r.problems))
1719
1720
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    """Exercise filenode.check() and check(verify=True) against intact,
    missing, and corrupted share sets, for both SDMF and MDMF files.

    Fixes relative to the previous revision: the diagnostic label for
    test_check_mdmf_not_enough_shares had a typo ('enougH'); share
    deletion now iterates over a snapshot (list(shares.keys())) instead
    of a live .keys() view, matching the idiom used by the Repair tests;
    and local variable naming is consistent ('shares', not 'share').
    """
    def setUp(self):
        # publish an SDMF file; MDMF variants republish via publish_mdmf()
        return self.publish_one()


    def test_check_good(self):
        d = self._fn.check(Monitor())
        d.addCallback(self.check_good, "test_check_good")
        return d

    def test_check_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_good")
        return d

    def test_check_no_shares(self):
        # deleting every share makes the file unhealthy
        for shares in self._storage._peers.values():
            shares.clear()
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_no_shares")
        return d

    def test_check_mdmf_no_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            for shares in self._storage._peers.values():
                shares.clear()
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        return d

    def test_check_not_enough_shares(self):
        # leave only share 0 on each server: fewer than 'k' shares total
        for shares in self._storage._peers.values():
            for shnum in list(shares.keys()):
                if shnum > 0:
                    del shares[shnum]
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_not_enough_shares")
        return d

    def test_check_mdmf_not_enough_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            for shares in self._storage._peers.values():
                for shnum in list(shares.keys()):
                    if shnum > 0:
                        del shares[shnum]
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
        return d


    def test_check_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_all_bad_sig")
        return d

    def test_check_mdmf_all_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
        return d

    def test_check_all_bad_blocks(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Checker won't notice this.. it doesn't look at actual data
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_all_bad_blocks")
        return d


    def test_check_mdmf_all_bad_blocks(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data"))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        # as above, a plain check does not read share data
        d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
        return d

    def test_verify_good(self):
        d = self._fn.check(Monitor(), verify=True)
        d.addCallback(self.check_good, "test_verify_good")
        return d

    def test_verify_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_all_bad_sig")
        return d

    def test_verify_one_bad_sig(self):
        d = corrupt(None, self._storage, 1, [9]) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sig")
        return d

    def test_verify_one_bad_block(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Verifier *will* notice this, since it examines every byte
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_one_bad_block")
        return d

    def test_verify_one_bad_sharehash(self):
        d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "corrupt hashes",
                      "test_verify_one_bad_sharehash")
        return d

    def test_verify_one_bad_encprivkey(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "invalid privkey",
                      "test_verify_one_bad_encprivkey")
        return d

    def test_verify_one_bad_encprivkey_uncheckable(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        readonly_fn = self._fn.get_readonly()
        # a read-only node has no way to validate the privkey
        d.addCallback(lambda ignored:
            readonly_fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_one_bad_encprivkey_uncheckable")
        return d


    def test_verify_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf_good")
        return d


    def test_verify_mdmf_one_bad_block(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data", [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        # We should find one bad block here
        d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_mdmf_one_bad_block")
        return d


    def test_verify_mdmf_bad_encprivkey(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [0]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "privkey",
                      "test_verify_mdmf_bad_encprivkey")
        return d


    def test_verify_mdmf_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1, [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
        return d


    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [1]))
        d.addCallback(lambda ignored:
            self._fn.get_readonly())
        d.addCallback(lambda fn:
            fn.check(Monitor(), verify=True))
        # read-only nodes cannot validate the privkey, so no complaint
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
        return d
1930
1931
1932 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1933
1934     def get_shares(self, s):
1935         all_shares = {} # maps (peerid, shnum) to share data
1936         for peerid in s._peers:
1937             shares = s._peers[peerid]
1938             for shnum in shares:
1939                 data = shares[shnum]
1940                 all_shares[ (peerid, shnum) ] = data
1941         return all_shares
1942
1943     def copy_shares(self, ignored=None):
1944         self.old_shares.append(self.get_shares(self._storage))
1945
    def test_repair_nop(self):
        # repairing an already-healthy file should succeed, keep every
        # share in the same place, and bump each share's sequence number
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results

            self.copy_shares()

            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)

            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                (version0,
                 seqnum0,
                 root_hash0,
                 IV0,
                 k0, N0, segsize0, datalen0,
                 o0) = unpack_header(initial_shares[key])
                (version1,
                 seqnum1,
                 root_hash1,
                 IV1,
                 k1, N1, segsize1, datalen1,
                 o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)
        return d
1991
1992     def failIfSharesChanged(self, ignored=None):
1993         old_shares = self.old_shares[-2]
1994         current_shares = self.old_shares[-1]
1995         self.failUnlessEqual(old_shares, current_shares)
1996
1997
1998     def test_unrepairable_0shares(self):
1999         d = self.publish_one()
2000         def _delete_all_shares(ign):
2001             shares = self._storage._peers
2002             for peerid in shares:
2003                 shares[peerid] = {}
2004         d.addCallback(_delete_all_shares)
2005         d.addCallback(lambda ign: self._fn.check(Monitor()))
2006         d.addCallback(lambda check_results: self._fn.repair(check_results))
2007         def _check(crr):
2008             self.failUnlessEqual(crr.get_successful(), False)
2009         d.addCallback(_check)
2010         return d
2011
2012     def test_mdmf_unrepairable_0shares(self):
2013         d = self.publish_mdmf()
2014         def _delete_all_shares(ign):
2015             shares = self._storage._peers
2016             for peerid in shares:
2017                 shares[peerid] = {}
2018         d.addCallback(_delete_all_shares)
2019         d.addCallback(lambda ign: self._fn.check(Monitor()))
2020         d.addCallback(lambda check_results: self._fn.repair(check_results))
2021         d.addCallback(lambda crr: self.failIf(crr.get_successful()))
2022         return d
2023
2024
2025     def test_unrepairable_1share(self):
2026         d = self.publish_one()
2027         def _delete_all_shares(ign):
2028             shares = self._storage._peers
2029             for peerid in shares:
2030                 for shnum in list(shares[peerid]):
2031                     if shnum > 0:
2032                         del shares[peerid][shnum]
2033         d.addCallback(_delete_all_shares)
2034         d.addCallback(lambda ign: self._fn.check(Monitor()))
2035         d.addCallback(lambda check_results: self._fn.repair(check_results))
2036         def _check(crr):
2037             self.failUnlessEqual(crr.get_successful(), False)
2038         d.addCallback(_check)
2039         return d
2040
2041     def test_mdmf_unrepairable_1share(self):
2042         d = self.publish_mdmf()
2043         def _delete_all_shares(ign):
2044             shares = self._storage._peers
2045             for peerid in shares:
2046                 for shnum in list(shares[peerid]):
2047                     if shnum > 0:
2048                         del shares[peerid][shnum]
2049         d.addCallback(_delete_all_shares)
2050         d.addCallback(lambda ign: self._fn.check(Monitor()))
2051         d.addCallback(lambda check_results: self._fn.repair(check_results))
2052         def _check(crr):
2053             self.failUnlessEqual(crr.get_successful(), False)
2054         d.addCallback(_check)
2055         return d
2056
2057     def test_repairable_5shares(self):
2058         d = self.publish_mdmf()
2059         def _delete_all_shares(ign):
2060             shares = self._storage._peers
2061             for peerid in shares:
2062                 for shnum in list(shares[peerid]):
2063                     if shnum > 4:
2064                         del shares[peerid][shnum]
2065         d.addCallback(_delete_all_shares)
2066         d.addCallback(lambda ign: self._fn.check(Monitor()))
2067         d.addCallback(lambda check_results: self._fn.repair(check_results))
2068         def _check(crr):
2069             self.failUnlessEqual(crr.get_successful(), True)
2070         d.addCallback(_check)
2071         return d
2072
2073     def test_mdmf_repairable_5shares(self):
2074         d = self.publish_mdmf()
2075         def _delete_some_shares(ign):
2076             shares = self._storage._peers
2077             for peerid in shares:
2078                 for shnum in list(shares[peerid]):
2079                     if shnum > 5:
2080                         del shares[peerid][shnum]
2081         d.addCallback(_delete_some_shares)
2082         d.addCallback(lambda ign: self._fn.check(Monitor()))
2083         def _check(cr):
2084             self.failIf(cr.is_healthy())
2085             self.failUnless(cr.is_recoverable())
2086             return cr
2087         d.addCallback(_check)
2088         d.addCallback(lambda check_results: self._fn.repair(check_results))
2089         def _check1(crr):
2090             self.failUnlessEqual(crr.get_successful(), True)
2091         d.addCallback(_check1)
2092         return d
2093
2094
    def test_merge(self):
        """repair() must refuse to merge diverged same-seqnum versions
        unless force=True is passed; with force=True it should converge
        on a single winner chosen by highest roothash."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # pass force=True
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            # without force=True, repair must fail and leave the shares
            # on the grid untouched
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            return d2
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            # after the forced repair exactly one recoverable version remains
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            else:
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d
2139
2140     def test_non_merge(self):
2141         self.old_shares = []
2142         d = self.publish_multiple()
2143         # repair should not refuse a repair that doesn't need to merge. In
2144         # this case, we combine v2 with v3. The repair should ignore v2 and
2145         # copy v3 into a new v5.
2146         d.addCallback(lambda res:
2147                       self._set_versions({0:2,2:2,4:2,6:2,8:2,
2148                                           1:3,3:3,5:3,7:3,9:3}))
2149         d.addCallback(lambda res: self._fn.check(Monitor()))
2150         d.addCallback(lambda check_results: self._fn.repair(check_results))
2151         # this should give us 10 shares of v3
2152         def _check_repair_results(rres):
2153             self.failUnless(rres.get_successful())
2154             pass # TODO
2155         d.addCallback(_check_repair_results)
2156         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2157         def _check_smap(smap):
2158             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2159             self.failIf(smap.unrecoverable_versions())
2160             # now, which should have won?
2161             expected_contents = self.CONTENTS[3]
2162             new_versionid = smap.best_recoverable_version()
2163             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2164             d2 = self._fn.download_version(smap, new_versionid)
2165             d2.addCallback(self.failUnlessEqual, expected_contents)
2166             return d2
2167         d.addCallback(_check_smap)
2168         return d
2169
2170     def get_roothash_for(self, index):
2171         # return the roothash for the first share we see in the saved set
2172         shares = self._copied_shares[index]
2173         for peerid in shares:
2174             for shnum in shares[peerid]:
2175                 share = shares[peerid][shnum]
2176                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2177                           unpack_header(share)
2178                 return root_hash
2179
2180     def test_check_and_repair_readcap(self):
2181         # we can't currently repair from a mutable readcap: #625
2182         self.old_shares = []
2183         d = self.publish_one()
2184         d.addCallback(self.copy_shares)
2185         def _get_readcap(res):
2186             self._fn3 = self._fn.get_readonly()
2187             # also delete some shares
2188             for peerid,shares in self._storage._peers.items():
2189                 shares.pop(0, None)
2190         d.addCallback(_get_readcap)
2191         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2192         def _check_results(crr):
2193             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2194             # we should detect the unhealthy, but skip over mutable-readcap
2195             # repairs until #625 is fixed
2196             self.failIf(crr.get_pre_repair_results().is_healthy())
2197             self.failIf(crr.get_repair_attempted())
2198             self.failIf(crr.get_post_repair_results().is_healthy())
2199         d.addCallback(_check_results)
2200         return d
2201
class DevNullDictionary(dict):
    """A dict that silently discards every assignment made through
    __setitem__, so it always stays empty: used to disable caches."""
    def __setitem__(self, key, value):
        # deliberately drop the item on the floor
        pass
2205
class MultipleEncodings(unittest.TestCase):
    """Exercise download when shares from several different encodings
    (different k-of-N parameters) of the same mutable file are mixed
    together on the grid."""
    def setUp(self):
        # publish one copy of CONTENTS with the default (3-of-10) encoding
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
        d.addCallback(_created)
        return d

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.

        fn = self._fn
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        # existing shares
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n

        s = self._storage
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # capture the shares written by this publish, and reset the
            # storage so a later _encode() starts from a clean slate
            shares = s._peers
            s._peers = {}
            return shares
        d.addCallback(_published)
        return d

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        # build (or refresh) a servermap for self._fn in the given mode
        if oldmap is None:
            oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.

        contents1 = "Contents for encoding 1 (3-of-10) go here"
        contents2 = "Contents for encoding 2 (4-of-9) go here"
        contents3 = "Contents for encoding 3 (4-of-7) go here"

        # we make a retrieval object that doesn't know what encoding
        # parameters to use
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())

        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)

        def _merge(res):
            log.msg("merging sharelists")
            # we merge the shares from the two sets, leaving each shnum in
            # its original location, but using a share from set1 or set2
            # according to the following sequence:
            #
            #  4-of-9  a  s2
            #  4-of-9  b  s2
            #  4-of-7  c   s3
            #  4-of-9  d  s2
            #  3-of-10 e s1
            #  3-of-10 f s1
            #  3-of-10 g s1
            #  4-of-9  h  s2
            #
            # so that neither form can be recovered until fetch [f], at which
            # point version-s1 (the 3-of-10 form) should be recoverable. If
            # the implementation latches on to the first version it sees,
            # then s2 will be recoverable at fetch [g].

            # Later, when we implement code that handles multiple versions,
            # we can use this framework to assert that all recoverable
            # versions are retrieved, and test that 'epsilon' does its job

            places = [2, 2, 3, 2, 1, 1, 1, 2]

            sharemap = {}
            sb = self._storage_broker

            for peerid in sorted(sb.get_all_serverids()):
                for shnum in self._shares1.get(peerid, {}):
                    if shnum < len(places):
                        which = places[shnum]
                    else:
                        which = "x"
                    self._storage._peers[peerid] = peers = {}
                    in_1 = shnum in self._shares1[peerid]
                    in_2 = shnum in self._shares2.get(peerid, {})
                    in_3 = shnum in self._shares3.get(peerid, {})
                    if which == 1:
                        if in_1:
                            peers[shnum] = self._shares1[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 2:
                        if in_2:
                            peers[shnum] = self._shares2[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 3:
                        if in_3:
                            peers[shnum] = self._shares3[peerid][shnum]
                            sharemap[shnum] = peerid

            # we don't bother placing any other shares
            # now sort the sequence so that share 0 is returned first
            new_sequence = [sharemap[shnum]
                            for shnum in sorted(sharemap.keys())]
            self._storage._sequence = new_sequence
            log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
        return d
2348
2349
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    """Exercise download/check/modify behavior when the grid holds a mix
    of share versions (different seqnums) of the same mutable file."""

    def setUp(self):
        # publish_multiple() stashes the version bodies in self.CONTENTS
        # and records the shares so _set_versions() can rearrange them
        return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")

        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            # exactly one unrecoverable, strictly-newer version is visible
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        # recover either
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            # both parallel versions are recoverable, so a merge is needed
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        return d

    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version

        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)

        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            # modify() converged everything on a single new recoverable version
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
        return d
2429
2430
class Utils(unittest.TestCase):
    def test_cache(self):
        """ResponseCache returns exactly the byte ranges it was given, and
        None for unknown verinfo/shnum or any not-fully-covered span."""
        c = ResponseCache()
        # two 100-byte strings standing in for cached share data
        # (equivalent to base62.b2a(os.urandom(100))[:100])
        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
        c.add("v1", 1, 0, xdata)
        c.add("v1", 1, 2000, ydata)
        cases = [
            # (verinfo, shnum, offset, length, expected)
            ("v2", 1, 10, 11, None),          # unknown verinfo
            ("v1", 2, 10, 11, None),          # unknown shnum
            ("v1", 1, 0, 10, xdata[:10]),
            ("v1", 1, 90, 10, xdata[90:]),
            ("v1", 1, 300, 10, None),         # inside the gap
            ("v1", 1, 2050, 5, ydata[50:55]),
            ("v1", 1, 0, 101, None),          # runs off the end of xdata
            ("v1", 1, 99, 1, xdata[99:100]),
            ("v1", 1, 100, 1, None),
            ("v1", 1, 1990, 9, None),         # these all touch the gap
            ("v1", 1, 1990, 10, None),        # before ydata's entry
            ("v1", 1, 1990, 11, None),
            ("v1", 1, 1990, 15, None),
            ("v1", 1, 1990, 19, None),
            ("v1", 1, 1990, 20, None),
            ("v1", 1, 1990, 21, None),
            ("v1", 1, 1990, 25, None),
            ("v1", 1, 1999, 25, None),
            ]
        for (verinfo, shnum, offset, length, expected) in cases:
            self.failUnlessEqual(c.read(verinfo, shnum, offset, length),
                                 expected)

        # adjacent fragments should be joined on read
        c = ResponseCache()
        c.add("v1", 1, 0, xdata[:10])
        c.add("v1", 1, 10, xdata[10:20])
        self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2463
class Exceptions(unittest.TestCase):
    def test_repr(self):
        """The mutable-file exceptions should name themselves in repr()."""
        for (klass, args) in [(NeedMoreDataError, (100, 50, 100)),
                              (UncoordinatedWriteError, ())]:
            e = klass(*args)
            self.failUnless(klass.__name__ in repr(e), repr(e))
2470
class SameKeyGenerator:
    """A key generator that always hands back the same pre-made RSA
    keypair, regardless of the requested key size."""
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey
    def generate(self, keysize=None):
        # keysize is accepted for interface compatibility but ignored
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
2477
class FirstServerGetsKilled:
    """A notify-hook that marks the first wrapper it sees as broken, so the
    first server to answer dies after exactly one successful query."""
    done = False
    def notify(self, retval, wrapper, methname):
        if self.done:
            return retval
        # break the very first server that answers, then stand down
        wrapper.broken = True
        self.done = True
        return retval
2485
class FirstServerGetsDeleted:
    """A notify-hook that lets the first server answer one query normally,
    then makes that same server pretend its share has been deleted:
    subsequent test-and-set writes report success with no shares."""
    def __init__(self):
        self.done = False
        self.silenced = None
    def notify(self, retval, wrapper, methname):
        if not self.done:
            # let the very first query through, but remember which server
            # answered it so we can silence it from now on
            self.done = True
            self.silenced = wrapper
            return retval
        if wrapper != self.silenced:
            return retval
        # the remembered server now claims the share is gone
        assert methname == "slot_testv_and_readv_and_writev"
        return (True, {})
2501
2502 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2503     def do_publish_surprise(self, version):
2504         self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
2505         self.set_up_grid()
2506         nm = self.g.clients[0].nodemaker
2507         d = nm.create_mutable_file(MutableData("contents 1"),
2508                                     version=version)
2509         def _created(n):
2510             d = defer.succeed(None)
2511             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2512             def _got_smap1(smap):
2513                 # stash the old state of the file
2514                 self.old_map = smap
2515             d.addCallback(_got_smap1)
2516             # then modify the file, leaving the old map untouched
2517             d.addCallback(lambda res: log.msg("starting winning write"))
2518             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2519             # now attempt to modify the file with the old servermap. This
2520             # will look just like an uncoordinated write, in which every
2521             # single share got updated between our mapupdate and our publish
2522             d.addCallback(lambda res: log.msg("starting doomed write"))
2523             d.addCallback(lambda res:
2524                           self.shouldFail(UncoordinatedWriteError,
2525                                           "test_publish_surprise", None,
2526                                           n.upload,
2527                                           MutableData("contents 2a"), self.old_map))
2528             return d
2529         d.addCallback(_created)
2530         return d
2531
2532     def test_publish_surprise_sdmf(self):
2533         return self.do_publish_surprise(SDMF_VERSION)
2534
2535     def test_publish_surprise_mdmf(self):
2536         raise unittest.SkipTest("this currently triggers a decoding error in unpack_checkstring (see #1534)")
2537         return self.do_publish_surprise(MDMF_VERSION)
2538
    def test_retrieve_surprise(self):
        """Retrieving with a stale servermap after the file has been
        overwritten (and the read cache cleared) must fail with
        NotEnoughSharesError rather than return stale data."""
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # now attempt to retrieve the old version with the old servermap.
            # This will look like someone has changed the file since we
            # updated the servermap.
            d.addCallback(lambda res: n._cache._clear())
            d.addCallback(lambda res: log.msg("starting doomed read"))
            d.addCallback(lambda res:
                          self.shouldFail(NotEnoughSharesError,
                                          "test_retrieve_surprise",
                                          "ran out of peers: have 0 of 1",
                                          n.download_version,
                                          self.old_map,
                                          self.old_map.best_recoverable_version(),
                                          ))
            return d
        d.addCallback(_created)
        return d
2570
2571
    def test_unexpected_shares(self):
        """Shares that appear on servers missing from a stale servermap
        must make an upload fail with UncoordinatedWriteError."""
        # upload the file, take a servermap, shut down one of the servers,
        # upload it again (causing shares to appear on a new server), then
        # upload using the old servermap. The last upload should fail with an
        # UncoordinatedWriteError, because of the shares that didn't appear
        # in the servermap.
        self.basedir = "mutable/Problems/test_unexpected_shares"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
                # now shut down one of the servers
                peer0 = list(smap.make_sharemap()[0])[0]
                self.g.remove_server(peer0)
                # then modify the file, leaving the old map untouched
                log.msg("starting winning write")
                return n.overwrite(MutableData("contents 2"))
            d.addCallback(_got_smap1)
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_surprise", None,
                                          n.upload,
                                          MutableData("contents 2a"), self.old_map))
            return d
        d.addCallback(_created)
        return d
2607
    def test_bad_server(self):
        """Publish and update must both tolerate a broken server."""
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker

        # to make sure that one of the initial peers is broken, we have to
        # get creative. We create an RSA key and compute its storage-index.
        # Then we make a KeyGenerator that always returns that one key, and
        # use it to create the mutable file. This will get easier when we can
        # use #467 static-server-selection to disable permutation and force
        # the choice of server for share[0].

        d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
        def _got_key( (pubkey, privkey) ):
            # (Python 2 tuple-parameter unpacking)
            nm.key_generator = SameKeyGenerator(pubkey, privkey)
            pubkey_s = pubkey.serialize()
            privkey_s = privkey.serialize()
            # derive the storage index the same way a real publish would
            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                        ssk_pubkey_fingerprint_hash(pubkey_s))
            self._storage_index = u.get_storage_index()
        d.addCallback(_got_key)
        def _break_peer0(res):
            # break the server that the permuted ring puts first for this SI
            si = self._storage_index
            servers = nm.storage_broker.get_servers_for_psi(si)
            self.g.break_server(servers[0].get_serverid())
            self.server1 = servers[1]
        d.addCallback(_break_peer0)
        # now "create" the file, using the pre-established key, and let the
        # initial publish finally happen
        d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
        # that ought to work
        def _got_node(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            # now break the second peer
            def _break_peer1(res):
                self.g.break_server(self.server1.get_serverid())
            d.addCallback(_break_peer1)
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # that ought to work too
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            def _explain_error(f):
                # debugging aid: dump the failure if the update breaks
                print f
                if f.check(NotEnoughServersError):
                    print "first_error:", f.value.first_error
                return f
            d.addErrback(_explain_error)
            return d
        d.addCallback(_got_node)
        return d
2662
2663     def test_bad_server_overlap(self):
2664         # like test_bad_server, but with no extra unused servers to fall back
2665         # upon. This means that we must re-use a server which we've already
2666         # used. If we don't remember the fact that we sent them one share
2667         # already, we'll mistakenly think we're experiencing an
2668         # UncoordinatedWriteError.
2669
2670         # Break one server, then create the file: the initial publish should
2671         # complete with an alternate server. Breaking a second server should
2672         # not prevent an update from succeeding either.
2673         self.basedir = "mutable/Problems/test_bad_server_overlap"
2674         self.set_up_grid()
2675         nm = self.g.clients[0].nodemaker
2676         sb = nm.storage_broker
2677
2678         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2679         self.g.break_server(peerids[0])
2680
2681         d = nm.create_mutable_file(MutableData("contents 1"))
2682         def _created(n):
2683             d = n.download_best_version()
2684             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2685             # now break one of the remaining servers
2686             def _break_second_server(res):
2687                 self.g.break_server(peerids[1])
2688             d.addCallback(_break_second_server)
2689             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2690             # that ought to work too
2691             d.addCallback(lambda res: n.download_best_version())
2692             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2693             return d
2694         d.addCallback(_created)
2695         return d
2696
2697     def test_publish_all_servers_bad(self):
2698         # Break all servers: the publish should fail
2699         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2700         self.set_up_grid()
2701         nm = self.g.clients[0].nodemaker
2702         for s in nm.storage_broker.get_connected_servers():
2703             s.get_rref().broken = True
2704
2705         d = self.shouldFail(NotEnoughServersError,
2706                             "test_publish_all_servers_bad",
2707                             "ran out of good servers",
2708                             nm.create_mutable_file, MutableData("contents"))
2709         return d
2710
2711     def test_publish_no_servers(self):
2712         # no servers at all: the publish should fail
2713         self.basedir = "mutable/Problems/test_publish_no_servers"
2714         self.set_up_grid(num_servers=0)
2715         nm = self.g.clients[0].nodemaker
2716
2717         d = self.shouldFail(NotEnoughServersError,
2718                             "test_publish_no_servers",
2719                             "Ran out of non-bad servers",
2720                             nm.create_mutable_file, MutableData("contents"))
2721         return d
2722
2723
2724     def test_privkey_query_error(self):
2725         # when a servermap is updated with MODE_WRITE, it tries to get the
2726         # privkey. Something might go wrong during this query attempt.
2727         # Exercise the code in _privkey_query_failed which tries to handle
2728         # such an error.
2729         self.basedir = "mutable/Problems/test_privkey_query_error"
2730         self.set_up_grid(num_servers=20)
2731         nm = self.g.clients[0].nodemaker
2732         nm._node_cache = DevNullDictionary() # disable the nodecache
2733
2734         # we need some contents that are large enough to push the privkey out
2735         # of the early part of the file
2736         LARGE = "These are Larger contents" * 2000 # about 50KB
2737         LARGE_uploadable = MutableData(LARGE)
2738         d = nm.create_mutable_file(LARGE_uploadable)
2739         def _created(n):
2740             self.uri = n.get_uri()
2741             self.n2 = nm.create_from_cap(self.uri)
2742
2743             # When a mapupdate is performed on a node that doesn't yet know
2744             # the privkey, a short read is sent to a batch of servers, to get
2745             # the verinfo and (hopefully, if the file is short enough) the
2746             # encprivkey. Our file is too large to let this first read
2747             # contain the encprivkey. Each non-encprivkey-bearing response
2748             # that arrives (until the node gets the encprivkey) will trigger
2749             # a second read to specifically read the encprivkey.
2750             #
2751             # So, to exercise this case:
2752             #  1. notice which server gets a read() call first
2753             #  2. tell that server to start throwing errors
2754             killer = FirstServerGetsKilled()
2755             for s in nm.storage_broker.get_connected_servers():
2756                 s.get_rref().post_call_notifier = killer.notify
2757         d.addCallback(_created)
2758
2759         # now we update a servermap from a new node (which doesn't have the
2760         # privkey yet, forcing it to use a separate privkey query). Note that
2761         # the map-update will succeed, since we'll just get a copy from one
2762         # of the other shares.
2763         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2764
2765         return d
2766
2767     def test_privkey_query_missing(self):
2768         # like test_privkey_query_error, but the shares are deleted by the
2769         # second query, instead of raising an exception.
2770         self.basedir = "mutable/Problems/test_privkey_query_missing"
2771         self.set_up_grid(num_servers=20)
2772         nm = self.g.clients[0].nodemaker
2773         LARGE = "These are Larger contents" * 2000 # about 50KiB
2774         LARGE_uploadable = MutableData(LARGE)
2775         nm._node_cache = DevNullDictionary() # disable the nodecache
2776
2777         d = nm.create_mutable_file(LARGE_uploadable)
2778         def _created(n):
2779             self.uri = n.get_uri()
2780             self.n2 = nm.create_from_cap(self.uri)
2781             deleter = FirstServerGetsDeleted()
2782             for s in nm.storage_broker.get_connected_servers():
2783                 s.get_rref().post_call_notifier = deleter.notify
2784         d.addCallback(_created)
2785         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2786         return d
2787
2788
2789     def test_block_and_hash_query_error(self):
2790         # This tests for what happens when a query to a remote server
2791         # fails in either the hash validation step or the block getting
2792         # step (because of batching, this is the same actual query).
2793         # We need to have the storage server persist up until the point
2794         # that its prefix is validated, then suddenly die. This
2795         # exercises some exception handling code in Retrieve.
2796         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2797         self.set_up_grid(num_servers=20)
2798         nm = self.g.clients[0].nodemaker
2799         CONTENTS = "contents" * 2000
2800         CONTENTS_uploadable = MutableData(CONTENTS)
2801         d = nm.create_mutable_file(CONTENTS_uploadable)
2802         def _created(node):
2803             self._node = node
2804         d.addCallback(_created)
2805         d.addCallback(lambda ignored:
2806             self._node.get_servermap(MODE_READ))
2807         def _then(servermap):
2808             # we have our servermap. Now we set up the servers like the
2809             # tests above -- the first one that gets a read call should
2810             # start throwing errors, but only after returning its prefix
2811             # for validation. Since we'll download without fetching the
2812             # private key, the next query to the remote server will be
2813             # for either a block and salt or for hashes, either of which
2814             # will exercise the error handling code.
2815             killer = FirstServerGetsKilled()
2816             for s in nm.storage_broker.get_connected_servers():
2817                 s.get_rref().post_call_notifier = killer.notify
2818             ver = servermap.best_recoverable_version()
2819             assert ver
2820             return self._node.download_version(servermap, ver)
2821         d.addCallback(_then)
2822         d.addCallback(lambda data:
2823             self.failUnlessEqual(data, CONTENTS))
2824         return d
2825
2826
class FileHandle(unittest.TestCase):
    """Tests for MutableFileHandle, the uploadable wrapper around a
    file-like object."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)


    def test_filehandle_read(self):
        # read() in fixed-size chunks; each chunk must match the
        # corresponding slice of the original data
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])


    def test_filehandle_get_size(self):
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)


    def test_filehandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])


    def test_filehandle_file(self):
        # Make sure that the MutableFileHandle works on a real file as well
        # as a StringIO object, since in some cases it will be asked to
        # deal with files.
        self.basedir = self.mktemp()
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        # close the handles in finally blocks so they are not leaked even
        # if an assertion fails (the read handle was previously never
        # closed at all)
        f = open(f_path, "w")
        try:
            f.write(self.test_data)
        finally:
            f.close()
        f = open(f_path, "r")
        try:
            uploadable = MutableFileHandle(f)

            data = uploadable.read(len(self.test_data))
            self.failUnlessEqual("".join(data), self.test_data)
            size = uploadable.get_size()
            self.failUnlessEqual(size, len(self.test_data))
        finally:
            f.close()


    def test_close(self):
        # Make sure that the MutableFileHandle closes its handle when
        # told to do so.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
2897
class DataHandle(unittest.TestCase):
    """Tests for MutableData, the uploadable wrapper around an in-memory
    string."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)


    def test_datahandle_read(self):
        # consume the data in fixed-size chunks; each chunk must match the
        # corresponding slice of the source string
        chunk_size = 10
        for offset in xrange(0, len(self.test_data), chunk_size):
            chunk = "".join(self.uploadable.read(chunk_size))
            expected = self.test_data[offset:offset + chunk_size]
            self.failUnlessEqual(chunk, expected)


    def test_datahandle_get_size(self):
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))


    def test_datahandle_get_size_out_of_order(self):
        # Calling get_size must not disturb the location of the read
        # pointer.
        chunk_size = 100
        first = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(first, self.test_data[:chunk_size])

        # ask for the size in the middle of reading...
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))

        # ...and the next read must resume right where we left off
        second = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(second,
                             self.test_data[chunk_size:chunk_size * 2])
2937
2938 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
2939               PublishMixin):
2940     def setUp(self):
2941         GridTestMixin.setUp(self)
2942         self.basedir = self.mktemp()
2943         self.set_up_grid()
2944         self.c = self.g.clients[0]
2945         self.nm = self.c.nodemaker
2946         self.data = "test data" * 100000 # about 900 KiB; MDMF
2947         self.small_data = "test data" * 10 # about 90 B; SDMF
2948
2949
2950     def do_upload_mdmf(self):
2951         d = self.nm.create_mutable_file(MutableData(self.data),
2952                                         version=MDMF_VERSION)
2953         def _then(n):
2954             assert isinstance(n, MutableFileNode)
2955             assert n._protocol_version == MDMF_VERSION
2956             self.mdmf_node = n
2957             return n
2958         d.addCallback(_then)
2959         return d
2960
2961     def do_upload_sdmf(self):
2962         d = self.nm.create_mutable_file(MutableData(self.small_data))
2963         def _then(n):
2964             assert isinstance(n, MutableFileNode)
2965             assert n._protocol_version == SDMF_VERSION
2966             self.sdmf_node = n
2967             return n
2968         d.addCallback(_then)
2969         return d
2970
2971     def do_upload_empty_sdmf(self):
2972         d = self.nm.create_mutable_file(MutableData(""))
2973         def _then(n):
2974             assert isinstance(n, MutableFileNode)
2975             self.sdmf_zero_length_node = n
2976             assert n._protocol_version == SDMF_VERSION
2977             return n
2978         d.addCallback(_then)
2979         return d
2980
2981     def do_upload(self):
2982         d = self.do_upload_mdmf()
2983         d.addCallback(lambda ign: self.do_upload_sdmf())
2984         return d
2985
2986     def test_debug(self):
2987         d = self.do_upload_mdmf()
2988         def _debug(n):
2989             fso = debug.FindSharesOptions()
2990             storage_index = base32.b2a(n.get_storage_index())
2991             fso.si_s = storage_index
2992             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
2993                             for (i,ss,storedir)
2994                             in self.iterate_servers()]
2995             fso.stdout = StringIO()
2996             fso.stderr = StringIO()
2997             debug.find_shares(fso)
2998             sharefiles = fso.stdout.getvalue().splitlines()
2999             expected = self.nm.default_encoding_parameters["n"]
3000             self.failUnlessEqual(len(sharefiles), expected)
3001
3002             do = debug.DumpOptions()
3003             do["filename"] = sharefiles[0]
3004             do.stdout = StringIO()
3005             debug.dump_share(do)
3006             output = do.stdout.getvalue()
3007             lines = set(output.splitlines())
3008             self.failUnless("Mutable slot found:" in lines, output)
3009             self.failUnless(" share_type: MDMF" in lines, output)
3010             self.failUnless(" num_extra_leases: 0" in lines, output)
3011             self.failUnless(" MDMF contents:" in lines, output)
3012             self.failUnless("  seqnum: 1" in lines, output)
3013             self.failUnless("  required_shares: 3" in lines, output)
3014             self.failUnless("  total_shares: 10" in lines, output)
3015             self.failUnless("  segsize: 131073" in lines, output)
3016             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
3017             vcap = n.get_verify_cap().to_string()
3018             self.failUnless("  verify-cap: %s" % vcap in lines, output)
3019
3020             cso = debug.CatalogSharesOptions()
3021             cso.nodedirs = fso.nodedirs
3022             cso.stdout = StringIO()
3023             cso.stderr = StringIO()
3024             debug.catalog_shares(cso)
3025             shares = cso.stdout.getvalue().splitlines()
3026             oneshare = shares[0] # all shares should be MDMF
3027             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3028             self.failUnless(oneshare.startswith("MDMF"), oneshare)
3029             fields = oneshare.split()
3030             self.failUnlessEqual(fields[0], "MDMF")
3031             self.failUnlessEqual(fields[1], storage_index)
3032             self.failUnlessEqual(fields[2], "3/10")
3033             self.failUnlessEqual(fields[3], "%d" % len(self.data))
3034             self.failUnless(fields[4].startswith("#1:"), fields[3])
3035             # the rest of fields[4] is the roothash, which depends upon
3036             # encryption salts and is not constant. fields[5] is the
3037             # remaining time on the longest lease, which is timing dependent.
3038             # The rest of the line is the quoted pathname to the share.
3039         d.addCallback(_debug)
3040         return d
3041
3042     def test_get_sequence_number(self):
3043         d = self.do_upload()
3044         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3045         d.addCallback(lambda bv:
3046             self.failUnlessEqual(bv.get_sequence_number(), 1))
3047         d.addCallback(lambda ignored:
3048             self.sdmf_node.get_best_readable_version())
3049         d.addCallback(lambda bv:
3050             self.failUnlessEqual(bv.get_sequence_number(), 1))
3051         # Now update. The sequence number in both cases should be 1 in
3052         # both cases.
3053         def _do_update(ignored):
3054             new_data = MutableData("foo bar baz" * 100000)
3055             new_small_data = MutableData("foo bar baz" * 10)
3056             d1 = self.mdmf_node.overwrite(new_data)
3057             d2 = self.sdmf_node.overwrite(new_small_data)
3058             dl = gatherResults([d1, d2])
3059             return dl
3060         d.addCallback(_do_update)
3061         d.addCallback(lambda ignored:
3062             self.mdmf_node.get_best_readable_version())
3063         d.addCallback(lambda bv:
3064             self.failUnlessEqual(bv.get_sequence_number(), 2))
3065         d.addCallback(lambda ignored:
3066             self.sdmf_node.get_best_readable_version())
3067         d.addCallback(lambda bv:
3068             self.failUnlessEqual(bv.get_sequence_number(), 2))
3069         return d
3070
3071
3072     def test_version_extension_api(self):
3073         # We need to define an API by which an uploader can set the
3074         # extension parameters, and by which a downloader can retrieve
3075         # extensions.
3076         d = self.do_upload_mdmf()
3077         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3078         def _got_version(version):
3079             hints = version.get_downloader_hints()
3080             # Should be empty at this point.
3081             self.failUnlessIn("k", hints)
3082             self.failUnlessEqual(hints['k'], 3)
3083             self.failUnlessIn('segsize', hints)
3084             self.failUnlessEqual(hints['segsize'], 131073)
3085         d.addCallback(_got_version)
3086         return d
3087
3088
3089     def test_extensions_from_cap(self):
3090         # If we initialize a mutable file with a cap that has extension
3091         # parameters in it and then grab the extension parameters using
3092         # our API, we should see that they're set correctly.
3093         d = self.do_upload_mdmf()
3094         def _then(ign):
3095             mdmf_uri = self.mdmf_node.get_uri()
3096             new_node = self.nm.create_from_cap(mdmf_uri)
3097             return new_node.get_best_mutable_version()
3098         d.addCallback(_then)
3099         def _got_version(version):
3100             hints = version.get_downloader_hints()
3101             self.failUnlessIn("k", hints)
3102             self.failUnlessEqual(hints["k"], 3)
3103             self.failUnlessIn("segsize", hints)
3104             self.failUnlessEqual(hints["segsize"], 131073)
3105         d.addCallback(_got_version)
3106         return d
3107
3108
3109     def test_extensions_from_upload(self):
3110         # If we create a new mutable file with some contents, we should
3111         # get back an MDMF cap with the right hints in place.
3112         contents = "foo bar baz" * 100000
3113         d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3114         def _got_mutable_file(n):
3115             rw_uri = n.get_uri()
3116             expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3117             self.failUnlessIn(expected_k, rw_uri)
3118             # XXX: Get this more intelligently.
3119             self.failUnlessIn("131073", rw_uri)
3120
3121             ro_uri = n.get_readonly_uri()
3122             self.failUnlessIn(expected_k, ro_uri)
3123             self.failUnlessIn("131073", ro_uri)
3124         d.addCallback(_got_mutable_file)
3125         return d
3126
3127
3128     def test_cap_after_upload(self):
3129         # If we create a new mutable file and upload things to it, and
3130         # it's an MDMF file, we should get an MDMF cap back from that
3131         # file and should be able to use that.
3132         # That's essentially what MDMF node is, so just check that.
3133         d = self.do_upload_mdmf()
3134         def _then(ign):
3135             mdmf_uri = self.mdmf_node.get_uri()
3136             cap = uri.from_string(mdmf_uri)
3137             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3138             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3139             cap = uri.from_string(readonly_mdmf_uri)
3140             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3141         d.addCallback(_then)
3142         return d
3143
3144     def test_mutable_version(self):
3145         # assert that getting parameters from the IMutableVersion object
3146         # gives us the same data as getting them from the filenode itself
3147         d = self.do_upload()
3148         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3149         def _check_mdmf(bv):
3150             n = self.mdmf_node
3151             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3152             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3153             self.failIf(bv.is_readonly())
3154         d.addCallback(_check_mdmf)
3155         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3156         def _check_sdmf(bv):
3157             n = self.sdmf_node
3158             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3159             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3160             self.failIf(bv.is_readonly())
3161         d.addCallback(_check_sdmf)
3162         return d
3163
3164
3165     def test_get_readonly_version(self):
3166         d = self.do_upload()
3167         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3168         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3169
3170         # Attempting to get a mutable version of a mutable file from a
3171         # filenode initialized with a readcap should return a readonly
3172         # version of that same node.
3173         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3174         d.addCallback(lambda ro: ro.get_best_mutable_version())
3175         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3176
3177         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3178         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3179
3180         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3181         d.addCallback(lambda ro: ro.get_best_mutable_version())
3182         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3183         return d
3184
3185
3186     def test_toplevel_overwrite(self):
3187         new_data = MutableData("foo bar baz" * 100000)
3188         new_small_data = MutableData("foo bar baz" * 10)
3189         d = self.do_upload()
3190         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3191         d.addCallback(lambda ignored:
3192             self.mdmf_node.download_best_version())
3193         d.addCallback(lambda data:
3194             self.failUnlessEqual(data, "foo bar baz" * 100000))
3195         d.addCallback(lambda ignored:
3196             self.sdmf_node.overwrite(new_small_data))
3197         d.addCallback(lambda ignored:
3198             self.sdmf_node.download_best_version())
3199         d.addCallback(lambda data:
3200             self.failUnlessEqual(data, "foo bar baz" * 10))
3201         return d
3202
3203
3204     def test_toplevel_modify(self):
3205         d = self.do_upload()
3206         def modifier(old_contents, servermap, first_time):
3207             return old_contents + "modified"
3208         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3209         d.addCallback(lambda ignored:
3210             self.mdmf_node.download_best_version())
3211         d.addCallback(lambda data:
3212             self.failUnlessIn("modified", data))
3213         d.addCallback(lambda ignored:
3214             self.sdmf_node.modify(modifier))
3215         d.addCallback(lambda ignored:
3216             self.sdmf_node.download_best_version())
3217         d.addCallback(lambda data:
3218             self.failUnlessIn("modified", data))
3219         return d
3220
3221
3222     def test_version_modify(self):
3223         # TODO: When we can publish multiple versions, alter this test
3224         # to modify a version other than the best usable version, then
3225         # test to see that the best recoverable version is that.
3226         d = self.do_upload()
3227         def modifier(old_contents, servermap, first_time):
3228             return old_contents + "modified"
3229         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3230         d.addCallback(lambda ignored:
3231             self.mdmf_node.download_best_version())
3232         d.addCallback(lambda data:
3233             self.failUnlessIn("modified", data))
3234         d.addCallback(lambda ignored:
3235             self.sdmf_node.modify(modifier))
3236         d.addCallback(lambda ignored:
3237             self.sdmf_node.download_best_version())
3238         d.addCallback(lambda data:
3239             self.failUnlessIn("modified", data))
3240         return d
3241
3242
3243     def test_download_version(self):
3244         d = self.publish_multiple()
3245         # We want to have two recoverable versions on the grid.
3246         d.addCallback(lambda res:
3247                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3248                                           1:1,3:1,5:1,7:1,9:1}))
3249         # Now try to download each version. We should get the plaintext
3250         # associated with that version.
3251         d.addCallback(lambda ignored:
3252             self._fn.get_servermap(mode=MODE_READ))
3253         def _got_servermap(smap):
3254             versions = smap.recoverable_versions()
3255             assert len(versions) == 2
3256
3257             self.servermap = smap
3258             self.version1, self.version2 = versions
3259             assert self.version1 != self.version2
3260
3261             self.version1_seqnum = self.version1[0]
3262             self.version2_seqnum = self.version2[0]
3263             self.version1_index = self.version1_seqnum - 1
3264             self.version2_index = self.version2_seqnum - 1
3265
3266         d.addCallback(_got_servermap)
3267         d.addCallback(lambda ignored:
3268             self._fn.download_version(self.servermap, self.version1))
3269         d.addCallback(lambda results:
3270             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3271                                  results))
3272         d.addCallback(lambda ignored:
3273             self._fn.download_version(self.servermap, self.version2))
3274         d.addCallback(lambda results:
3275             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3276                                  results))
3277         return d
3278
3279
3280     def test_download_nonexistent_version(self):
3281         d = self.do_upload_mdmf()
3282         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3283         def _set_servermap(servermap):
3284             self.servermap = servermap
3285         d.addCallback(_set_servermap)
3286         d.addCallback(lambda ignored:
3287            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3288                            None,
3289                            self.mdmf_node.download_version, self.servermap,
3290                            "not a version"))
3291         return d
3292
3293
3294     def test_partial_read(self):
3295         d = self.do_upload_mdmf()
3296         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3297         modes = [("start_on_segment_boundary",
3298                   mathutil.next_multiple(128 * 1024, 3), 50),
3299                  ("ending_one_byte_after_segment_boundary",
3300                   mathutil.next_multiple(128 * 1024, 3)-50, 51),
3301                  ("zero_length_at_start", 0, 0),
3302                  ("zero_length_in_middle", 50, 0),
3303                  ("zero_length_at_segment_boundary",
3304                   mathutil.next_multiple(128 * 1024, 3), 0),
3305                  ]
3306         for (name, offset, length) in modes:
3307             d.addCallback(self._do_partial_read, name, offset, length)
3308         # then read only a few bytes at a time, and see that the results are
3309         # what we expect.
3310         def _read_data(version):
3311             c = consumer.MemoryConsumer()
3312             d2 = defer.succeed(None)
3313             for i in xrange(0, len(self.data), 10000):
3314                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3315             d2.addCallback(lambda ignored:
3316                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3317             return d2
3318         d.addCallback(_read_data)
3319         return d
3320     def _do_partial_read(self, version, name, offset, length):
3321         c = consumer.MemoryConsumer()
3322         d = version.read(c, offset, length)
3323         expected = self.data[offset:offset+length]
3324         d.addCallback(lambda ignored: "".join(c.chunks))
3325         def _check(results):
3326             if results != expected:
3327                 print
3328                 print "got: %s ... %s" % (results[:20], results[-20:])
3329                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3330                 self.fail("results[%s] != expected" % name)
3331             return version # daisy-chained to next call
3332         d.addCallback(_check)
3333         return d
3334
3335
3336     def _test_read_and_download(self, node, expected):
3337         d = node.get_best_readable_version()
3338         def _read_data(version):
3339             c = consumer.MemoryConsumer()
3340             d2 = defer.succeed(None)
3341             d2.addCallback(lambda ignored: version.read(c))
3342             d2.addCallback(lambda ignored:
3343                 self.failUnlessEqual(expected, "".join(c.chunks)))
3344             return d2
3345         d.addCallback(_read_data)
3346         d.addCallback(lambda ignored: node.download_best_version())
3347         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3348         return d
3349
3350     def test_read_and_download_mdmf(self):
3351         d = self.do_upload_mdmf()
3352         d.addCallback(self._test_read_and_download, self.data)
3353         return d
3354
3355     def test_read_and_download_sdmf(self):
3356         d = self.do_upload_sdmf()
3357         d.addCallback(self._test_read_and_download, self.small_data)
3358         return d
3359
3360     def test_read_and_download_sdmf_zero_length(self):
3361         d = self.do_upload_empty_sdmf()
3362         d.addCallback(self._test_read_and_download, "")
3363         return d
3364
3365
3366 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3367     timeout = 400 # these tests are too big, 120s is not enough on slow
3368                   # platforms
3369     def setUp(self):
3370         GridTestMixin.setUp(self)
3371         self.basedir = self.mktemp()
3372         self.set_up_grid()
3373         self.c = self.g.clients[0]
3374         self.nm = self.c.nodemaker
3375         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3376         self.small_data = "test data" * 10 # about 90 B; SDMF
3377
3378
3379     def do_upload_sdmf(self):
3380         d = self.nm.create_mutable_file(MutableData(self.small_data))
3381         def _then(n):
3382             assert isinstance(n, MutableFileNode)
3383             self.sdmf_node = n
3384             # Make SDMF node that has 255 shares.
3385             self.nm.default_encoding_parameters['n'] = 255
3386             self.nm.default_encoding_parameters['k'] = 127
3387             return self.nm.create_mutable_file(MutableData(self.small_data))
3388         d.addCallback(_then)
3389         def _then2(n):
3390             assert isinstance(n, MutableFileNode)
3391             self.sdmf_max_shares_node = n
3392         d.addCallback(_then2)
3393         return d
3394
3395     def do_upload_mdmf(self):
3396         d = self.nm.create_mutable_file(MutableData(self.data),
3397                                         version=MDMF_VERSION)
3398         def _then(n):
3399             assert isinstance(n, MutableFileNode)
3400             self.mdmf_node = n
3401             # Make MDMF node that has 255 shares.
3402             self.nm.default_encoding_parameters['n'] = 255
3403             self.nm.default_encoding_parameters['k'] = 127
3404             return self.nm.create_mutable_file(MutableData(self.data),
3405                                                version=MDMF_VERSION)
3406         d.addCallback(_then)
3407         def _then2(n):
3408             assert isinstance(n, MutableFileNode)
3409             self.mdmf_max_shares_node = n
3410         d.addCallback(_then2)
3411         return d
3412
3413     def _test_replace(self, offset, new_data):
3414         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3415         d0 = self.do_upload_mdmf()
3416         def _run(ign):
3417             d = defer.succeed(None)
3418             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3419                 d.addCallback(lambda ign: node.get_best_mutable_version())
3420                 d.addCallback(lambda mv:
3421                     mv.update(MutableData(new_data), offset))
3422                 # close around node.
3423                 d.addCallback(lambda ignored, node=node:
3424                     node.download_best_version())
3425                 def _check(results):
3426                     if results != expected:
3427                         print
3428                         print "got: %s ... %s" % (results[:20], results[-20:])
3429                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3430                         self.fail("results != expected")
3431                 d.addCallback(_check)
3432             return d
3433         d0.addCallback(_run)
3434         return d0
3435
3436     def test_append(self):
3437         # We should be able to append data to a mutable file and get
3438         # what we expect.
3439         return self._test_replace(len(self.data), "appended")
3440
3441     def test_replace_middle(self):
3442         # We should be able to replace data in the middle of a mutable
3443         # file and get what we expect back.
3444         return self._test_replace(100, "replaced")
3445
3446     def test_replace_beginning(self):
3447         # We should be able to replace data at the beginning of the file
3448         # without truncating the file
3449         return self._test_replace(0, "beginning")
3450
3451     def test_replace_segstart1(self):
3452         return self._test_replace(128*1024+1, "NNNN")
3453
3454     def test_replace_zero_length_beginning(self):
3455         return self._test_replace(0, "")
3456
3457     def test_replace_zero_length_middle(self):
3458         return self._test_replace(50, "")
3459
3460     def test_replace_zero_length_segstart1(self):
3461         return self._test_replace(128*1024+1, "")
3462
3463     def test_replace_and_extend(self):
3464         # We should be able to replace data in the middle of a mutable
3465         # file and extend that mutable file and get what we expect.
3466         return self._test_replace(100, "modified " * 100000)
3467
3468
3469     def _check_differences(self, got, expected):
3470         # displaying arbitrary file corruption is tricky for a
3471         # 1MB file of repeating data,, so look for likely places
3472         # with problems and display them separately
3473         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3474         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3475         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3476                     for (start,end) in gotmods]
3477         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3478                     for (start,end) in expmods]
3479         #print "expecting: %s" % expspans
3480
3481         SEGSIZE = 128*1024
3482         if got != expected:
3483             print "differences:"
3484             for segnum in range(len(expected)//SEGSIZE):
3485                 start = segnum * SEGSIZE
3486                 end = (segnum+1) * SEGSIZE
3487                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3488                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3489                 if got_ends != exp_ends:
3490                     print "expected[%d]: %s" % (start, exp_ends)
3491                     print "got     [%d]: %s" % (start, got_ends)
3492             if expspans != gotspans:
3493                 print "expected: %s" % expspans
3494                 print "got     : %s" % gotspans
3495             open("EXPECTED","wb").write(expected)
3496             open("GOT","wb").write(got)
3497             print "wrote data to EXPECTED and GOT"
3498             self.fail("didn't get expected data")
3499
3500
3501     def test_replace_locations(self):
3502         # exercise fencepost conditions
3503         SEGSIZE = 128*1024
3504         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3505         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3506         d0 = self.do_upload_mdmf()
3507         def _run(ign):
3508             expected = self.data
3509             d = defer.succeed(None)
3510             for offset in suspects:
3511                 new_data = letters.next()*2 # "AA", then "BB", etc
3512                 expected = expected[:offset]+new_data+expected[offset+2:]
3513                 d.addCallback(lambda ign:
3514                               self.mdmf_node.get_best_mutable_version())
3515                 def _modify(mv, offset=offset, new_data=new_data):
3516                     # close over 'offset','new_data'
3517                     md = MutableData(new_data)
3518                     return mv.update(md, offset)
3519                 d.addCallback(_modify)
3520                 d.addCallback(lambda ignored:
3521                               self.mdmf_node.download_best_version())
3522                 d.addCallback(self._check_differences, expected)
3523             return d
3524         d0.addCallback(_run)
3525         return d0
3526
3527     def test_replace_locations_max_shares(self):
3528         # exercise fencepost conditions
3529         SEGSIZE = 128*1024
3530         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3531         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3532         d0 = self.do_upload_mdmf()
3533         def _run(ign):
3534             expected = self.data
3535             d = defer.succeed(None)
3536             for offset in suspects:
3537                 new_data = letters.next()*2 # "AA", then "BB", etc
3538                 expected = expected[:offset]+new_data+expected[offset+2:]
3539                 d.addCallback(lambda ign:
3540                               self.mdmf_max_shares_node.get_best_mutable_version())
3541                 def _modify(mv, offset=offset, new_data=new_data):
3542                     # close over 'offset','new_data'
3543                     md = MutableData(new_data)
3544                     return mv.update(md, offset)
3545                 d.addCallback(_modify)
3546                 d.addCallback(lambda ignored:
3547                               self.mdmf_max_shares_node.download_best_version())
3548                 d.addCallback(self._check_differences, expected)
3549             return d
3550         d0.addCallback(_run)
3551         return d0
3552
3553
3554     def test_append_power_of_two(self):
3555         # If we attempt to extend a mutable file so that its segment
3556         # count crosses a power-of-two boundary, the update operation
3557         # should know how to reencode the file.
3558
3559         # Note that the data populating self.mdmf_node is about 900 KiB
3560         # long -- this is 7 segments in the default segment size. So we
3561         # need to add 2 segments worth of data to push it over a
3562         # power-of-two boundary.
3563         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3564         new_data = self.data + (segment * 2)
3565         d0 = self.do_upload_mdmf()
3566         def _run(ign):
3567             d = defer.succeed(None)
3568             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3569                 d.addCallback(lambda ign: node.get_best_mutable_version())
3570                 d.addCallback(lambda mv:
3571                     mv.update(MutableData(segment * 2), len(self.data)))
3572                 d.addCallback(lambda ignored, node=node:
3573                     node.download_best_version())
3574                 d.addCallback(lambda results:
3575                     self.failUnlessEqual(results, new_data))
3576             return d
3577         d0.addCallback(_run)
3578         return d0
3579
3580     def test_update_sdmf(self):
3581         # Running update on a single-segment file should still work.
3582         new_data = self.small_data + "appended"
3583         d0 = self.do_upload_sdmf()
3584         def _run(ign):
3585             d = defer.succeed(None)
3586             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3587                 d.addCallback(lambda ign: node.get_best_mutable_version())
3588                 d.addCallback(lambda mv:
3589                     mv.update(MutableData("appended"), len(self.small_data)))
3590                 d.addCallback(lambda ignored, node=node:
3591                     node.download_best_version())
3592                 d.addCallback(lambda results:
3593                     self.failUnlessEqual(results, new_data))
3594             return d
3595         d0.addCallback(_run)
3596         return d0
3597
3598     def test_replace_in_last_segment(self):
3599         # The wrapper should know how to handle the tail segment
3600         # appropriately.
3601         replace_offset = len(self.data) - 100
3602         new_data = self.data[:replace_offset] + "replaced"
3603         rest_offset = replace_offset + len("replaced")
3604         new_data += self.data[rest_offset:]
3605         d0 = self.do_upload_mdmf()
3606         def _run(ign):
3607             d = defer.succeed(None)
3608             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3609                 d.addCallback(lambda ign: node.get_best_mutable_version())
3610                 d.addCallback(lambda mv:
3611                     mv.update(MutableData("replaced"), replace_offset))
3612                 d.addCallback(lambda ignored, node=node:
3613                     node.download_best_version())
3614                 d.addCallback(lambda results:
3615                     self.failUnlessEqual(results, new_data))
3616             return d
3617         d0.addCallback(_run)
3618         return d0
3619
3620     def test_multiple_segment_replace(self):
3621         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3622         new_data = self.data[:replace_offset]
3623         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3624         new_data += 2 * new_segment
3625         new_data += "replaced"
3626         rest_offset = len(new_data)
3627         new_data += self.data[rest_offset:]
3628         d0 = self.do_upload_mdmf()
3629         def _run(ign):
3630             d = defer.succeed(None)
3631             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3632                 d.addCallback(lambda ign: node.get_best_mutable_version())
3633                 d.addCallback(lambda mv:
3634                     mv.update(MutableData((2 * new_segment) + "replaced"),
3635                               replace_offset))
3636                 d.addCallback(lambda ignored, node=node:
3637                     node.download_best_version())
3638                 d.addCallback(lambda results:
3639                     self.failUnlessEqual(results, new_data))
3640             return d
3641         d0.addCallback(_run)
3642         return d0
3643
3644 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3645     sdmf_old_shares = {}
3646     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3647     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3648     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3649     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3650     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3651     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3652     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3653     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3654     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3655     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3656     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3657     sdmf_old_contents = "This is a test file.\n"
3658     def copy_sdmf_shares(self):
3659         # We'll basically be short-circuiting the upload process.
3660         servernums = self.g.servers_by_number.keys()
3661         assert len(servernums) == 10
3662
3663         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3664         # Get the storage index.
3665         cap = uri.from_string(self.sdmf_old_cap)
3666         si = cap.get_storage_index()
3667
3668         # Now execute each assignment by writing the storage.
3669         for (share, servernum) in assignments:
3670             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3671             storedir = self.get_serverdir(servernum)
3672             storage_path = os.path.join(storedir, "shares",
3673                                         storage_index_to_dir(si))
3674             fileutil.make_dirs(storage_path)
3675             fileutil.write(os.path.join(storage_path, "%d" % share),
3676                            sharedata)
3677         # ...and verify that the shares are there.
3678         shares = self.find_uri_shares(self.sdmf_old_cap)
3679         assert len(shares) == 10
3680
3681     def test_new_downloader_can_read_old_shares(self):
3682         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3683         self.set_up_grid()
3684         self.copy_sdmf_shares()
3685         nm = self.g.clients[0].nodemaker
3686         n = nm.create_from_cap(self.sdmf_old_cap)
3687         d = n.download_best_version()
3688         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3689         return d
3690
class DifferentEncoding(unittest.TestCase):
    """Exercise modifying a mutable file through a client whose default
    encoding parameters differ from those used at creation time."""

    def setUp(self):
        storage = FakeStorage()
        self._storage = storage
        self.nodemaker = make_nodemaker(storage)

    def test_filenode(self):
        # Create a file with 3-of-20 encoding, then modify it with a client
        # configured for 3-of-10. Ticket #1510 tracks a failure here.
        self.nodemaker.default_encoding_parameters["n"] = 20
        d = self.nodemaker.create_mutable_file("old contents")
        def _switch_client(node):
            filecap = node.get_cap().to_string()
            del node # force a fresh node object, not the cached one
            self.nodemaker.default_encoding_parameters["n"] = 10
            return self.nodemaker.create_from_cap(filecap)
        d.addCallback(_switch_client)
        def _modifier(old_contents, servermap, first_time):
            return "new contents"
        d.addCallback(lambda node: node.modify(_modifier))
        return d