# src/allmydata/test/test_mutable.py: tests for mutable files, including MDMF

import os, re, base64
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.internet.interfaces import IConsumer
from zope.interface import implements
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32, consumer, fileutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
     ssk_pubkey_fingerprint_hash
from allmydata.util.deferredutil import gatherResults
from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
     NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
from allmydata.monitor import Monitor
from allmydata.test.common import ShouldFailMixin
from allmydata.test.no_network import GridTestMixin
from foolscap.api import eventually, fireEventually
from foolscap.logging import log
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.common import storage_index_to_dir

from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
from allmydata.mutable.common import ResponseCache, \
     MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
     NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
     NotEnoughServersError, CorruptShareError
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.publish import Publish, MutableFileHandle, \
                                      MutableData, \
                                      DEFAULT_MAX_SEGMENT_SIZE
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
from allmydata.mutable.repairer import MustForceRepairError

import allmydata.test.common_util as testutil

# this "FakeStorage" exists to put the share data in RAM and avoid using real
# network connections, both to speed up the tests and to reduce the amount of
# non-mutable.py code being exercised.

class FakeStorage:
    # this class replaces the collection of storage servers, allowing the
    # tests to examine and manipulate the published shares. It also lets us
    # control the order in which read queries are answered, to exercise more
    # of the error-handling code in Retrieve.
    #
    # Note that we ignore the storage index: this FakeStorage instance can
    # only be used for a single storage index.


    def __init__(self):
        self._peers = {}
        # _sequence is used to cause the responses to occur in a specific
        # order. If it is in use, then we will defer queries instead of
        # answering them right away, accumulating the Deferreds in a dict. We
        # don't know exactly how many queries we'll get, so exactly one
        # second after the first query arrives, we will release them all (in
        # order).
        self._sequence = None
        self._pending = {}
        self._pending_timer = None

    def read(self, peerid, storage_index):
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return defer.succeed(shares)
        d = defer.Deferred()
        if not self._pending:
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        self._pending[peerid] = (d, shares)
        return d

    def _fire_readers(self):
        self._pending_timer = None
        pending = self._pending
        self._pending = {}
        for peerid in self._sequence:
            if peerid in pending:
                d, shares = pending.pop(peerid)
                eventually(d.callback, shares)
        for (d, shares) in pending.values():
            eventually(d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        f = StringIO()
        f.write(shares.get(shnum, ""))
        f.seek(offset)
        f.write(data)
        shares[shnum] = f.getvalue()

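# Illustrative sketch, not from the original file: a test can exercise the
# ordering machinery above by assigning to _sequence before a download
# starts. Only names defined in this module are used here.
def _example_force_read_order(fake_storage, peerids_in_order):
    # After this call, read() defers each answer and _fire_readers releases
    # them in the given order, one second after the first query arrives;
    # peerids missing from the list are released last.
    fake_storage._sequence = list(peerids_in_order)

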
class FakeStorageServer:
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0
    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        # fire-and-forget, like foolscap's callRemoteOnly: swallow the
        # result and deliberately return None
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        pass

    def slot_readv(self, storage_index, shnums, readv):
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)
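
    # Semantics note (an assumption based on the real storage API, stated
    # here for readers of this fake): each testv entry (offset, length, op,
    # specimen) is supposed to compare share[offset:offset+length] against
    # specimen using op, applying the writes only if every test passes.
    # This fake skips the comparison and always applies the writes.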


def flip_bit(original, byte_offset):
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x01) +
            original[byte_offset+1:])

def add_two(original, byte_offset):
    # It isn't enough to simply flip a bit in the version number, because 1
    # is a valid version number; so we flip the 0x02 bit, which for version
    # bytes 0 and 1 amounts to adding two.
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x02) +
            original[byte_offset+1:])

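# Worked example: flip_bit("abc", 1) toggles the low bit of "b" (0x62 ^ 0x01
# == 0x63, which is "c"), giving "acc". add_two applied to a verbyte of 0
# yields chr(2), which is not a valid SDMF/MDMF version number.
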
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl

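# Usage sketch (hedged: the string key is assumed to appear in the share's
# offset table o):
#   corrupt(None, s, 0)                  # bump the version byte of every share
#   corrupt(None, s, "enc_privkey", [0]) # flip a bit in share 0's private key
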
def make_storagebroker(s=None, num_peers=10):
    if not s:
        s = FakeStorage()
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        storage_broker.test_add_rref(peerid, fss)
    return storage_broker

def make_nodemaker(s=None, num_peers=10):
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(522) # small test-only keysize, for speed
    nodemaker = NodeMaker(storage_broker, sh, None,
                          None, None,
                          {"k": 3, "n": 10}, keygen)
    return nodemaker
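
# With these defaults, every mutable file created below is encoded 3-of-10:
# shares land on ten FakeStorageServers and any three suffice for recovery.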

class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    # this used to be in Publish, but we removed the limit. Some of
    # these tests check whether the new code correctly allows files
    # larger than the limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
    def setUp(self):
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_create(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d
    test_create.timeout = 15


    def test_create_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d

    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            # bind v at definition time: relying on the loop variable inside
            # a bare closure would be fragile, since the loop finishes before
            # the deferred chain does
            d.addCallback(lambda ignored, v=v:
                self.nodemaker.create_mutable_file(version=v))
            def _created(n):
                self.failUnless(isinstance(n, MutableFileNode))
                self._node = n
                return n
            d.addCallback(_created)
            d.addCallback(lambda n:
                n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                self._node.download_best_version())
            d.addCallback(lambda contents:
                self.failUnlessEqual(contents, "Contents" * 50000))
        return d

    def test_max_shares(self):
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
            self.failUnlessEqual("contents" * 50000, contents))
        return d

    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        d.addCallback(lambda contents:
            self.failUnlessEqual(contents, "contents" * 50000))
        return d

    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            cap = n.get_cap()
            self.failUnless(isinstance(cap, uri.WritableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_writecap_with_extensions(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode when given a writecap with extension parameters in
        # it.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            self.failUnlessIn(":3:131073", s)
            n2 = self.nodemaker.create_from_cap(s)

            self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
            self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
            hints = n2._downloader_hints
            self.failUnlessEqual(hints['k'], 3)
            self.failUnlessEqual(hints['segsize'], 131073)
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))

            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap_with_extensions(self):
        # We should be able to create an MDMF filenode with the
        # extension parameters without it breaking.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            self.failUnlessIn(":3:131073", s)

            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnless(n2.is_readonly())
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            hints = n2._downloader_hints
            self.failUnlessEqual(hints["k"], 3)
            self.failUnlessEqual(hints["segsize"], 131073)
        d.addCallback(_created)
        return d


    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)

            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
        return d


    def test_serialize(self):
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,))
            self.failUnlessEqual(kwargs, {"foo": 5})
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        return d

    def test_upload_and_download(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d


    def test_upload_and_download_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9 MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d


    def test_retrieve_pause(self):
        # We should make sure that the retriever is able to pause
        # correctly.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.node = node

            return node.overwrite(MutableData("contents1" * 100000))
        d.addCallback(_created)
        # Now we'll retrieve it into a pausing consumer.
        d.addCallback(lambda ignored:
            self.node.get_best_mutable_version())
        def _got_version(version):
            self.c = PausingConsumer()
            return version.read(self.c)
        d.addCallback(_got_version)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.c.data, "contents1" * 100000))
        return d
    test_retrieve_pause.timeout = 25


    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.uri = node.get_uri()

            return node.overwrite(MutableData("contents1" * 100000))
        def _then(ignored):
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_then)
        d.addCallback(_downloaded)
        return d


    def test_create_and_download_from_bare_mdmf_cap(self):
        # MDMF caps have extension parameters on them by default. We
        # need to make sure that they work without extension parameters.
        contents = MutableData("contents" * 100000)
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
                                               contents=contents)
        def _created(node):
            node_uri = node.get_uri() # don't shadow the module-level 'uri'
            self._created = node
            self.failUnlessIn(":3:131073", node_uri)
            # Now strip that off the end of the uri, then try creating
            # and downloading the node again.
            bare_uri = node_uri.replace(":3:131073", "")
            assert ":3:131073" not in bare_uri

            return self.nodemaker.create_from_cap(bare_uri)
        d.addCallback(_created)
        def _created_bare(node):
            self.failUnlessEqual(node.get_writekey(),
                                 self._created.get_writekey())
            self.failUnlessEqual(node.get_readkey(),
                                 self._created.get_readkey())
            self.failUnlessEqual(node.get_storage_index(),
                                 self._created.get_storage_index())
            return node.download_best_version()
        d.addCallback(_created_bare)
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "contents" * 100000))
        return d


    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
        return d


    def test_create_with_initial_contents(self):
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            return d
        d.addCallback(_created)
        return d
    test_create_with_initial_contents.timeout = 15


    def test_create_mdmf_with_initial_contents(self):
        initial_contents = "foobarbaz" * 131072 # about 1.1 MiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents +
                                           "foobarbaz"))
            return d
        d.addCallback(_created)
        return d
    test_create_mdmf_with_initial_contents.timeout = 20


    def test_response_cache_memory_leak(self):
        d = self.nodemaker.create_mutable_file("contents")
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
                return d2
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
            return d
        d.addCallback(_created)
        return d

    def test_create_with_initial_contents_function(self):
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
        def _created(n):
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
        return d


    def test_create_mdmf_with_initial_contents_function(self):
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
            n.download_best_version())
        d.addCallback(lambda data2:
            self.failUnlessEqual(data2, data))
        return d


    def test_create_with_too_large_contents(self):
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
        def _created(n):
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
            return d
        d.addCallback(_created)
        return d

    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        # best_recoverable_version() returns a verinfo tuple whose first
        # element is the sequence number.
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        return d

    def test_modify(self):
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d
    test_modify.timeout = 15


    def test_modify_backoffer(self):
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            return f
        def _backoff_pauser(node, f):
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d

    def test_upload_and_download_full_size_keys(self):
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d


    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.n = n
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
            self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
            self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        return d


class PublishMixin:
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS is made large enough to span more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_multiple(self, version=0):
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d
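
    # Summary of the copies captured above (derived from the steps in this
    # method): copy i holds the shares for seqnum i+1, and copies 3 and 4
    # ("Contents 3a"/"Contents 3b") are divergent siblings at seqnum 4,
    # because the grid was rolled back to seqnum 3 before the last overwrite.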


    def _copy_shares(self, ignored, index):
        shares = self._storage._peers
        # we need a deep copy
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]

class PausingConsumer:
    implements(IConsumer)
    def __init__(self):
        self.data = ""
        self.already_paused = False

    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def _unpause(self, ignored):
        self.producer.resumeProducing()

    def write(self, data):
        self.data += data
        if not self.already_paused:
            self.producer.pauseProducing()
            self.already_paused = True
            reactor.callLater(15, self._unpause, None)


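# Design note: PausingConsumer pauses its producer exactly once, on the first
# write, and schedules a resume 15 seconds later; test_retrieve_pause's
# 25-second timeout leaves headroom for that pause.

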
class Servermap(unittest.TestCase, PublishMixin):
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        return sm

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares in it any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, peerid, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (peerid, shnum) )
                    sm.mark_bad_share(peerid, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (peerid, shnum) in self._corrupted:
                peer_shares = sm.shares_on_peer(peerid)
                self.failIf(shnum in peer_shares,
                            "%d was in %s" % (shnum, peer_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

1195     def failUnlessNoneRecoverable(self, sm):
1196         self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1197         self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1198         best = sm.best_recoverable_version()
1199         self.failUnlessEqual(best, None)
1200         self.failUnlessEqual(len(sm.shares_available()), 0)
1201
1202     def test_no_shares(self):
1203         self._storage._peers = {} # delete all shares
1204         ms = self.make_servermap
1205         d = defer.succeed(None)
1207         d.addCallback(lambda res: ms(mode=MODE_CHECK))
1208         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1209
1210         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1211         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1212
1213         d.addCallback(lambda res: ms(mode=MODE_WRITE))
1214         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1215
1216         d.addCallback(lambda res: ms(mode=MODE_READ))
1217         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1218
1219         return d
1220
1221     def failUnlessNotQuiteEnough(self, sm):
1222         self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1223         self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
1224         best = sm.best_recoverable_version()
1225         self.failUnlessEqual(best, None)
1226         self.failUnlessEqual(len(sm.shares_available()), 1)
1227         self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
1228         return sm
1229
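    # Illustrative sketch (an assumption read off the (2,3,10) tuple above,
    # not off the ServerMap code): shares_available() appears to map
    # verinfo to (shares-found, k, N). A hypothetical helper for friendlier
    # failure messages might look like:
    def describe_availability(self, sm):
        descriptions = []
        for verinfo, (found, k, N) in sm.shares_available().items():
            descriptions.append("seqnum %d: found %d of %d needed shares"
                                " (N=%d)" % (verinfo[0], found, k, N))
        return "; ".join(descriptions)
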
1230     def test_not_quite_enough_shares(self):
1231         s = self._storage
1232         ms = self.make_servermap
1233         num_shares = len(s._peers)
1234         for peerid in s._peers:
1235             s._peers[peerid] = {}
1236             num_shares -= 1
1237             if num_shares == 2:
1238                 break
1239         # now there ought to be only two shares left
1240         assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
1241
1242         d = defer.succeed(None)
1243
1244         d.addCallback(lambda res: ms(mode=MODE_CHECK))
1245         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1246         d.addCallback(lambda sm:
1247                       self.failUnlessEqual(len(sm.make_sharemap()), 2))
1248         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1249         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1250         d.addCallback(lambda res: ms(mode=MODE_WRITE))
1251         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1252         d.addCallback(lambda res: ms(mode=MODE_READ))
1253         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1254
1255         return d
1256
1257
1258     def test_servermapupdater_finds_mdmf_files(self):
1259         # publish an MDMF file, then make sure that when we run the
1260         # ServermapUpdater, the file is reported to have one
1261         # recoverable version.
1262         d = defer.succeed(None)
1263         d.addCallback(lambda ignored:
1264             self.publish_mdmf())
1265         d.addCallback(lambda ignored:
1266             self.make_servermap(mode=MODE_CHECK))
1267         # Calling make_servermap also updates the servermap in the mode
1268         # that we specify, so we just need to see what it says.
1269         def _check_servermap(sm):
1270             self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1271         d.addCallback(_check_servermap)
1272         return d
1273
1274
1275     def test_fetch_update(self):
1276         d = defer.succeed(None)
1277         d.addCallback(lambda ignored:
1278             self.publish_mdmf())
1279         d.addCallback(lambda ignored:
1280             self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1281         def _check_servermap(sm):
1282             # 10 shares
1283             self.failUnlessEqual(len(sm.update_data), 10)
1284             # one version
1285             for data in sm.update_data.itervalues():
1286                 self.failUnlessEqual(len(data), 1)
1287         d.addCallback(_check_servermap)
1288         return d
1289
1290
1291     def test_servermapupdater_finds_sdmf_files(self):
1292         d = defer.succeed(None)
1293         d.addCallback(lambda ignored:
1294             self.publish_sdmf())
1295         d.addCallback(lambda ignored:
1296             self.make_servermap(mode=MODE_CHECK))
1297         d.addCallback(lambda servermap:
1298             self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1299         return d
1300
1301
1302 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1303     def setUp(self):
1304         return self.publish_one()
1305
1306     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1307         if oldmap is None:
1308             oldmap = ServerMap()
1309         if sb is None:
1310             sb = self._storage_broker
1311         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1312         d = smu.update()
1313         return d
1314
1315     def abbrev_verinfo(self, verinfo):
1316         if verinfo is None:
1317             return None
1318         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1319          offsets_tuple) = verinfo
1320         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1321
1322     def abbrev_verinfo_dict(self, verinfo_d):
1323         output = {}
1324         for verinfo,value in verinfo_d.items():
1325             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1326              offsets_tuple) = verinfo
1327             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1328         return output
1329
1330     def dump_servermap(self, servermap):
1331         print "SERVERMAP", servermap
1332         print "RECOVERABLE", [self.abbrev_verinfo(v)
1333                               for v in servermap.recoverable_versions()]
1334         print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1335         print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1336
1337     def do_download(self, servermap, version=None):
1338         if version is None:
1339             version = servermap.best_recoverable_version()
1340         r = Retrieve(self._fn, servermap, version)
1341         c = consumer.MemoryConsumer()
1342         d = r.download(consumer=c)
1343         d.addCallback(lambda mc: "".join(mc.chunks))
1344         return d
1345
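    # Illustrative sketch (an assumption, not used by these tests): Retrieve
    # will feed any IConsumer, so the MemoryConsumer above could be swapped
    # for a minimal hand-rolled consumer; IConsumer and implements() are
    # already imported at the top of this module.
    class _ListConsumer:
        implements(IConsumer)
        def __init__(self):
            self.chunks = []
            self.producer = None
        def registerProducer(self, producer, streaming):
            # assumes a streaming (push) producer; a pull producer would
            # need resumeProducing() calls driven from here
            self.producer = producer
        def unregisterProducer(self):
            self.producer = None
        def write(self, data):
            self.chunks.append(data)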
1346
1347     def test_basic(self):
1348         d = self.make_servermap()
1349         def _do_retrieve(servermap):
1350             self._smap = servermap
1351             #self.dump_servermap(servermap)
1352             self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1353             return self.do_download(servermap)
1354         d.addCallback(_do_retrieve)
1355         def _retrieved(new_contents):
1356             self.failUnlessEqual(new_contents, self.CONTENTS)
1357         d.addCallback(_retrieved)
1358         # we should be able to re-use the same servermap, both with and
1359         # without updating it.
1360         d.addCallback(lambda res: self.do_download(self._smap))
1361         d.addCallback(_retrieved)
1362         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1363         d.addCallback(lambda res: self.do_download(self._smap))
1364         d.addCallback(_retrieved)
1365         # clobbering the pubkey should make the servermap updater re-fetch it
1366         def _clobber_pubkey(res):
1367             self._fn._pubkey = None
1368         d.addCallback(_clobber_pubkey)
1369         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1370         d.addCallback(lambda res: self.do_download(self._smap))
1371         d.addCallback(_retrieved)
1372         return d
1373
1374     def test_all_shares_vanished(self):
1375         d = self.make_servermap()
1376         def _remove_shares(servermap):
1377             for shares in self._storage._peers.values():
1378                 shares.clear()
1379             d1 = self.shouldFail(NotEnoughSharesError,
1380                                  "test_all_shares_vanished",
1381                                  "ran out of peers",
1382                                  self.do_download, servermap)
1383             return d1
1384         d.addCallback(_remove_shares)
1385         return d
1386
1387     def test_no_servers(self):
1388         sb2 = make_storagebroker(num_peers=0)
1389         # if there are no servers, then a MODE_READ servermap should come
1390         # back empty
1391         d = self.make_servermap(sb=sb2)
1392         def _check_servermap(servermap):
1393             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1394             self.failIf(servermap.recoverable_versions())
1395             self.failIf(servermap.unrecoverable_versions())
1396             self.failIf(servermap.all_peers())
1397         d.addCallback(_check_servermap)
1398         return d
1399     test_no_servers.timeout = 15
1400
1401     def test_no_servers_download(self):
1402         sb2 = make_storagebroker(num_peers=0)
1403         self._fn._storage_broker = sb2
1404         d = self.shouldFail(UnrecoverableFileError,
1405                             "test_no_servers_download",
1406                             "no recoverable versions",
1407                             self._fn.download_best_version)
1408         def _restore(res):
1409             # a failed download that occurs while we aren't connected to
1410             # anybody should not prevent a subsequent download from working.
1411             # This isn't quite the webapi-driven test that #463 wants, but it
1412             # should be close enough.
1413             self._fn._storage_broker = self._storage_broker
1414             return self._fn.download_best_version()
1415         def _retrieved(new_contents):
1416             self.failUnlessEqual(new_contents, self.CONTENTS)
1417         d.addCallback(_restore)
1418         d.addCallback(_retrieved)
1419         return d
1420     test_no_servers_download.timeout = 15
1421
1422
1423     def _test_corrupt_all(self, offset, substring,
1424                           should_succeed=False,
1425                           corrupt_early=True,
1426                           failure_checker=None,
1427                           fetch_privkey=False):
1428         d = defer.succeed(None)
1429         if corrupt_early:
1430             d.addCallback(corrupt, self._storage, offset)
1431         d.addCallback(lambda res: self.make_servermap())
1432         if not corrupt_early:
1433             d.addCallback(corrupt, self._storage, offset)
1434         def _do_retrieve(servermap):
1435             ver = servermap.best_recoverable_version()
1436             if ver is None and not should_succeed:
1437                 # no recoverable versions == not succeeding. The problem
1438                 # should be noted in the servermap's list of problems.
1439                 if substring:
1440                     allproblems = [str(f) for f in servermap.problems]
1441                     self.failUnlessIn(substring, "".join(allproblems))
1442                 return servermap
1443             if should_succeed:
1444                 d1 = self._fn.download_version(servermap, ver,
1445                                                fetch_privkey)
1446                 d1.addCallback(lambda new_contents:
1447                                self.failUnlessEqual(new_contents, self.CONTENTS))
1448             else:
1449                 d1 = self.shouldFail(NotEnoughSharesError,
1450                                      "_corrupt_all(offset=%s)" % (offset,),
1451                                      substring,
1452                                      self._fn.download_version, servermap,
1453                                                                 ver,
1454                                                                 fetch_privkey)
1455             if failure_checker:
1456                 d1.addCallback(failure_checker)
1457             d1.addCallback(lambda res: servermap)
1458             return d1
1459         d.addCallback(_do_retrieve)
1460         return d
1461
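    # Note on the helper above: corrupt_early=True mangles shares before the
    # servermap update (so mapupdate itself sees the damage), while
    # corrupt_early=False corrupts them between mapupdate and retrieve,
    # exercising the downloader's error handling instead.
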
1462     def test_corrupt_all_verbyte(self):
1463         # when the version byte is not 0 or 1, we hit an
1464         # UnknownVersionError in unpack_share().
1465         d = self._test_corrupt_all(0, "UnknownVersionError")
1466         def _check_servermap(servermap):
1467             # and the dump should mention the problems
1468             s = StringIO()
1469             dump = servermap.dump(s).getvalue()
1470             self.failUnless("30 PROBLEMS" in dump, dump)
1471         d.addCallback(_check_servermap)
1472         return d
1473
1474     def test_corrupt_all_seqnum(self):
1475         # a corrupt sequence number will trigger a bad signature
1476         return self._test_corrupt_all(1, "signature is invalid")
1477
1478     def test_corrupt_all_R(self):
1479         # a corrupt root hash will trigger a bad signature
1480         return self._test_corrupt_all(9, "signature is invalid")
1481
1482     def test_corrupt_all_IV(self):
1483         # a corrupt salt/IV will trigger a bad signature
1484         return self._test_corrupt_all(41, "signature is invalid")
1485
1486     def test_corrupt_all_k(self):
1487         # a corrupt 'k' will trigger a bad signature
1488         return self._test_corrupt_all(57, "signature is invalid")
1489
1490     def test_corrupt_all_N(self):
1491         # a corrupt 'N' will trigger a bad signature
1492         return self._test_corrupt_all(58, "signature is invalid")
1493
1494     def test_corrupt_all_segsize(self):
1495         # a corrupt segsize will trigger a bad signature
1496         return self._test_corrupt_all(59, "signature is invalid")
1497
1498     def test_corrupt_all_datalen(self):
1499         # a corrupt data length will trigger a bad signature
1500         return self._test_corrupt_all(67, "signature is invalid")
1501
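    def _sdmf_header_offsets(self):
        # Illustrative sketch (an assumption inferred from the offsets the
        # tests above corrupt, not taken from layout.py): those small
        # numeric offsets line up with a big-endian SDMF share header of
        # the form >BQ32s16sBBQQ.
        import struct
        fields = [("verbyte", "B"), ("seqnum", "Q"), ("root_hash", "32s"),
                  ("IV", "16s"), ("k", "B"), ("N", "B"),
                  ("segsize", "Q"), ("datalen", "Q")]
        offsets = {}
        position = 0
        for name, fmt in fields:
            offsets[name] = position
            position += struct.calcsize(">" + fmt)
        # yields verbyte=0, seqnum=1, root_hash=9, IV=41, k=57, N=58,
        # segsize=59, datalen=67 -- exactly the offsets used above
        return offsets
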
1502     def test_corrupt_all_pubkey(self):
1503         # a corrupt pubkey won't match the URI's fingerprint. We need to
1504         # remove the pubkey from the filenode, or else it won't bother trying
1505         # to update it.
1506         self._fn._pubkey = None
1507         return self._test_corrupt_all("pubkey",
1508                                       "pubkey doesn't match fingerprint")
1509
1510     def test_corrupt_all_sig(self):
1511         # a corrupt signature is a bad one
1512         # the signature runs from about [543:799], depending upon the length
1513         # of the pubkey
1514         return self._test_corrupt_all("signature", "signature is invalid")
1515
1516     def test_corrupt_all_share_hash_chain_number(self):
1517         # a corrupt share hash chain entry will show up as a bad hash. If we
1518         # mangle the first byte, that will look like a bad hash number,
1519         # causing an IndexError
1520         return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1521
1522     def test_corrupt_all_share_hash_chain_hash(self):
1523         # a corrupt share hash chain entry will show up as a bad hash. If we
1524         # mangle a few bytes in, that will look like a bad hash.
1525         return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1526
1527     def test_corrupt_all_block_hash_tree(self):
1528         return self._test_corrupt_all("block_hash_tree",
1529                                       "block hash tree failure")
1530
1531     def test_corrupt_all_block(self):
1532         return self._test_corrupt_all("share_data", "block hash tree failure")
1533
1534     def test_corrupt_all_encprivkey(self):
1535         # a corrupted privkey won't even be noticed by the reader, only by a
1536         # writer.
1537         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1538
1539
1540     def test_corrupt_all_encprivkey_late(self):
1541         # this should work for the same reason as above, but we corrupt
1542         # after the servermap update, to exercise the error-handling
1543         # code.
1544         # We need to remove the privkey from the node, or the retrieve
1545         # process won't know to update it.
1546         self._fn._privkey = None
1547         return self._test_corrupt_all("enc_privkey",
1548                                       None, # this shouldn't fail
1549                                       should_succeed=True,
1550                                       corrupt_early=False,
1551                                       fetch_privkey=True)
1552
1553
1554     def test_corrupt_all_seqnum_late(self):
1555         # corrupting the seqnum between mapupdate and retrieve should result
1556         # in NotEnoughSharesError, since each share will look invalid
1557         def _check(res):
1558             f = res[0]
1559             self.failUnless(f.check(NotEnoughSharesError))
1560             self.failUnless("uncoordinated write" in str(f))
1561         return self._test_corrupt_all(1, "ran out of peers",
1562                                       corrupt_early=False,
1563                                       failure_checker=_check)
1564
1565     def test_corrupt_all_block_hash_tree_late(self):
1566         def _check(res):
1567             f = res[0]
1568             self.failUnless(f.check(NotEnoughSharesError))
1569         return self._test_corrupt_all("block_hash_tree",
1570                                       "block hash tree failure",
1571                                       corrupt_early=False,
1572                                       failure_checker=_check)
1573
1574
1575     def test_corrupt_all_block_late(self):
1576         def _check(res):
1577             f = res[0]
1578             self.failUnless(f.check(NotEnoughSharesError))
1579         return self._test_corrupt_all("share_data", "block hash tree failure",
1580                                       corrupt_early=False,
1581                                       failure_checker=_check)
1582
1583
1584     def test_basic_pubkey_at_end(self):
1585         # we corrupt the pubkey in all but the last 'k' shares, allowing the
1586         # download to succeed but forcing a bunch of retries first. Note that
1587         # this is rather pessimistic: our Retrieve process will throw away
1588         # the whole share if the pubkey is bad, even though the rest of the
1589         # share might be good.
1590
1591         self._fn._pubkey = None
1592         k = self._fn.get_required_shares()
1593         N = self._fn.get_total_shares()
1594         d = defer.succeed(None)
1595         d.addCallback(corrupt, self._storage, "pubkey",
1596                       shnums_to_corrupt=range(0, N-k))
1597         d.addCallback(lambda res: self.make_servermap())
1598         def _do_retrieve(servermap):
1599             self.failUnless(servermap.problems)
1600             self.failUnless("pubkey doesn't match fingerprint"
1601                             in str(servermap.problems[0]))
1602             ver = servermap.best_recoverable_version()
1603             r = Retrieve(self._fn, servermap, ver)
1604             c = consumer.MemoryConsumer()
1605             return r.download(c)
1606         d.addCallback(_do_retrieve)
1607         d.addCallback(lambda mc: "".join(mc.chunks))
1608         d.addCallback(lambda new_contents:
1609                       self.failUnlessEqual(new_contents, self.CONTENTS))
1610         return d
1611
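    # Worked numbers for the test above (assuming this grid's defaults of
    # k=3, N=10, which the test reads via get_required_shares and
    # get_total_shares): range(0, N-k) corrupts the pubkey in shnums 0..6,
    # leaving exactly k=3 shares (7, 8, 9) intact -- just enough to decode
    # once the bad shares have been rejected.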
1612
1613     def _test_corrupt_some(self, offset, mdmf=False):
1614         if mdmf:
1615             d = self.publish_mdmf()
1616         else:
1617             d = defer.succeed(None)
1618         d.addCallback(lambda ignored:
1619             corrupt(None, self._storage, offset, range(5)))
1620         d.addCallback(lambda ignored:
1621             self.make_servermap())
1622         def _do_retrieve(servermap):
1623             ver = servermap.best_recoverable_version()
1624             self.failUnless(ver)
1625             return self._fn.download_best_version()
1626         d.addCallback(_do_retrieve)
1627         d.addCallback(lambda new_contents:
1628             self.failUnlessEqual(new_contents, self.CONTENTS))
1629         return d
1630
1631
1632     def test_corrupt_some(self):
1633         # corrupt the data of the first five shares (so the servermap
1634         # thinks they're good but retrieve marks them as bad), so that the
1635         # MODE_READ set of 6 will be insufficient, forcing node.download to
1636         # retry with more servers.
1637         return self._test_corrupt_some("share_data")
1638
1639
1640     def test_download_fails(self):
1641         d = corrupt(None, self._storage, "signature")
1642         d.addCallback(lambda ignored:
1643             self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1644                             "no recoverable versions",
1645                             self._fn.download_best_version))
1646         return d
1647
1648
1649
1650     def test_corrupt_mdmf_block_hash_tree(self):
1651         d = self.publish_mdmf()
1652         d.addCallback(lambda ignored:
1653             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1654                                    "block hash tree failure",
1655                                    corrupt_early=True,
1656                                    should_succeed=False))
1657         return d
1658
1659
1660     def test_corrupt_mdmf_block_hash_tree_late(self):
1661         d = self.publish_mdmf()
1662         d.addCallback(lambda ignored:
1663             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1664                                    "block hash tree failure",
1665                                    corrupt_early=False,
1666                                    should_succeed=False))
1667         return d
1668
1669
1670     def test_corrupt_mdmf_share_data(self):
1671         d = self.publish_mdmf()
1672         d.addCallback(lambda ignored:
1673             # TODO: Find out what the block size is and corrupt a
1674             # specific block, rather than just guessing.
1675             self._test_corrupt_all(("share_data", 12 * 40),
1676                                     "block hash tree failure",
1677                                     corrupt_early=True,
1678                                     should_succeed=False))
1679         return d
1680
1681
1682     def test_corrupt_some_mdmf(self):
1683         return self._test_corrupt_some(("share_data", 12 * 40),
1684                                        mdmf=True)
1685
1686
1687 class CheckerMixin:
1688     def check_good(self, r, where):
1689         self.failUnless(r.is_healthy(), where)
1690         return r
1691
1692     def check_bad(self, r, where):
1693         self.failIf(r.is_healthy(), where)
1694         return r
1695
1696     def check_expected_failure(self, r, expected_exception, substring, where):
1697         for (peerid, storage_index, shnum, f) in r.problems:
1698             if f.check(expected_exception):
1699                 self.failUnless(substring in str(f),
1700                                 "%s: substring '%s' not in '%s'" %
1701                                 (where, substring, str(f)))
1702                 return
1703         self.fail("%s: didn't see expected exception %s in problems %s" %
1704                   (where, expected_exception, r.problems))
1705
1706
1707 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1708     def setUp(self):
1709         return self.publish_one()
1710
1711
1712     def test_check_good(self):
1713         d = self._fn.check(Monitor())
1714         d.addCallback(self.check_good, "test_check_good")
1715         return d
1716
1717     def test_check_mdmf_good(self):
1718         d = self.publish_mdmf()
1719         d.addCallback(lambda ignored:
1720             self._fn.check(Monitor()))
1721         d.addCallback(self.check_good, "test_check_mdmf_good")
1722         return d
1723
1724     def test_check_no_shares(self):
1725         for shares in self._storage._peers.values():
1726             shares.clear()
1727         d = self._fn.check(Monitor())
1728         d.addCallback(self.check_bad, "test_check_no_shares")
1729         return d
1730
1731     def test_check_mdmf_no_shares(self):
1732         d = self.publish_mdmf()
1733         def _then(ignored):
1734             for shares in self._storage._peers.values():
1735                 shares.clear()
1736         d.addCallback(_then)
1737         d.addCallback(lambda ignored:
1738             self._fn.check(Monitor()))
1739         d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1740         return d
1741
1742     def test_check_not_enough_shares(self):
1743         for shares in self._storage._peers.values():
1744             for shnum in shares.keys():
1745                 if shnum > 0:
1746                     del shares[shnum]
1747         d = self._fn.check(Monitor())
1748         d.addCallback(self.check_bad, "test_check_not_enough_shares")
1749         return d
1750
1751     def test_check_mdmf_not_enough_shares(self):
1752         d = self.publish_mdmf()
1753         def _then(ignored):
1754             for shares in self._storage._peers.values():
1755                 for shnum in shares.keys():
1756                     if shnum > 0:
1757                         del shares[shnum]
1758         d.addCallback(_then)
1759         d.addCallback(lambda ignored:
1760             self._fn.check(Monitor()))
1761         d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
1762         return d
1763
1764
1765     def test_check_all_bad_sig(self):
1766         d = corrupt(None, self._storage, 1) # bad sig
1767         d.addCallback(lambda ignored:
1768             self._fn.check(Monitor()))
1769         d.addCallback(self.check_bad, "test_check_all_bad_sig")
1770         return d
1771
1772     def test_check_mdmf_all_bad_sig(self):
1773         d = self.publish_mdmf()
1774         d.addCallback(lambda ignored:
1775             corrupt(None, self._storage, 1))
1776         d.addCallback(lambda ignored:
1777             self._fn.check(Monitor()))
1778         d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1779         return d
1780
1781     def test_check_all_bad_blocks(self):
1782         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1783         # the Checker won't notice this... it doesn't look at actual data
1784         d.addCallback(lambda ignored:
1785             self._fn.check(Monitor()))
1786         d.addCallback(self.check_good, "test_check_all_bad_blocks")
1787         return d
1788
1789
1790     def test_check_mdmf_all_bad_blocks(self):
1791         d = self.publish_mdmf()
1792         d.addCallback(lambda ignored:
1793             corrupt(None, self._storage, "share_data"))
1794         d.addCallback(lambda ignored:
1795             self._fn.check(Monitor()))
1796         d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1797         return d
1798
1799     def test_verify_good(self):
1800         d = self._fn.check(Monitor(), verify=True)
1801         d.addCallback(self.check_good, "test_verify_good")
1802         return d
1803     test_verify_good.timeout = 15
1804
1805     def test_verify_all_bad_sig(self):
1806         d = corrupt(None, self._storage, 1) # bad sig
1807         d.addCallback(lambda ignored:
1808             self._fn.check(Monitor(), verify=True))
1809         d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1810         return d
1811
1812     def test_verify_one_bad_sig(self):
1813         d = corrupt(None, self._storage, 1, [9]) # bad sig
1814         d.addCallback(lambda ignored:
1815             self._fn.check(Monitor(), verify=True))
1816         d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1817         return d
1818
1819     def test_verify_one_bad_block(self):
1820         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1821         # the Verifier *will* notice this, since it examines every byte
1822         d.addCallback(lambda ignored:
1823             self._fn.check(Monitor(), verify=True))
1824         d.addCallback(self.check_bad, "test_verify_one_bad_block")
1825         d.addCallback(self.check_expected_failure,
1826                       CorruptShareError, "block hash tree failure",
1827                       "test_verify_one_bad_block")
1828         return d
1829
1830     def test_verify_one_bad_sharehash(self):
1831         d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1832         d.addCallback(lambda ignored:
1833             self._fn.check(Monitor(), verify=True))
1834         d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1835         d.addCallback(self.check_expected_failure,
1836                       CorruptShareError, "corrupt hashes",
1837                       "test_verify_one_bad_sharehash")
1838         return d
1839
1840     def test_verify_one_bad_encprivkey(self):
1841         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1842         d.addCallback(lambda ignored:
1843             self._fn.check(Monitor(), verify=True))
1844         d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1845         d.addCallback(self.check_expected_failure,
1846                       CorruptShareError, "invalid privkey",
1847                       "test_verify_one_bad_encprivkey")
1848         return d
1849
1850     def test_verify_one_bad_encprivkey_uncheckable(self):
1851         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1852         readonly_fn = self._fn.get_readonly()
1853         # a read-only node has no way to validate the privkey
1854         d.addCallback(lambda ignored:
1855             readonly_fn.check(Monitor(), verify=True))
1856         d.addCallback(self.check_good,
1857                       "test_verify_one_bad_encprivkey_uncheckable")
1858         return d
1859
1860
1861     def test_verify_mdmf_good(self):
1862         d = self.publish_mdmf()
1863         d.addCallback(lambda ignored:
1864             self._fn.check(Monitor(), verify=True))
1865         d.addCallback(self.check_good, "test_verify_mdmf_good")
1866         return d
1867
1868
1869     def test_verify_mdmf_one_bad_block(self):
1870         d = self.publish_mdmf()
1871         d.addCallback(lambda ignored:
1872             corrupt(None, self._storage, "share_data", [1]))
1873         d.addCallback(lambda ignored:
1874             self._fn.check(Monitor(), verify=True))
1875         # We should find one bad block here
1876         d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1877         d.addCallback(self.check_expected_failure,
1878                       CorruptShareError, "block hash tree failure",
1879                       "test_verify_mdmf_one_bad_block")
1880         return d
1881
1882
1883     def test_verify_mdmf_bad_encprivkey(self):
1884         d = self.publish_mdmf()
1885         d.addCallback(lambda ignored:
1886             corrupt(None, self._storage, "enc_privkey", [0]))
1887         d.addCallback(lambda ignored:
1888             self._fn.check(Monitor(), verify=True))
1889         d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1890         d.addCallback(self.check_expected_failure,
1891                       CorruptShareError, "privkey",
1892                       "test_verify_mdmf_bad_encprivkey")
1893         return d
1894
1895
1896     def test_verify_mdmf_bad_sig(self):
1897         d = self.publish_mdmf()
1898         d.addCallback(lambda ignored:
1899             corrupt(None, self._storage, 1, [1]))
1900         d.addCallback(lambda ignored:
1901             self._fn.check(Monitor(), verify=True))
1902         d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
1903         return d
1904
1905
1906     def test_verify_mdmf_bad_encprivkey_uncheckable(self):
1907         d = self.publish_mdmf()
1908         d.addCallback(lambda ignored:
1909             corrupt(None, self._storage, "enc_privkey", [1]))
1910         d.addCallback(lambda ignored:
1911             self._fn.get_readonly())
1912         d.addCallback(lambda fn:
1913             fn.check(Monitor(), verify=True))
1914         d.addCallback(self.check_good,
1915                       "test_verify_mdmf_bad_encprivkey_uncheckable")
1916         return d
1917
1918
1919 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1920
1921     def get_shares(self, s):
1922         all_shares = {} # maps (peerid, shnum) to share data
1923         for peerid in s._peers:
1924             shares = s._peers[peerid]
1925             for shnum in shares:
1926                 data = shares[shnum]
1927                 all_shares[ (peerid, shnum) ] = data
1928         return all_shares
1929
1930     def copy_shares(self, ignored=None):
1931         self.old_shares.append(self.get_shares(self._storage))
1932
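    def _changed_shares(self):
        # Illustrative sketch (a hypothetical helper, not used by these
        # tests): list the (peerid, shnum) slots whose bytes differ between
        # the two most recent snapshots recorded by copy_shares().
        old, new = self.old_shares[-2], self.old_shares[-1]
        return sorted([key for key in set(old) | set(new)
                       if old.get(key) != new.get(key)])
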
1933     def test_repair_nop(self):
1934         self.old_shares = []
1935         d = self.publish_one()
1936         d.addCallback(self.copy_shares)
1937         d.addCallback(lambda res: self._fn.check(Monitor()))
1938         d.addCallback(lambda check_results: self._fn.repair(check_results))
1939         def _check_results(rres):
1940             self.failUnless(IRepairResults.providedBy(rres))
1941             self.failUnless(rres.get_successful())
1942             # TODO: examine results
1943
1944             self.copy_shares()
1945
1946             initial_shares = self.old_shares[0]
1947             new_shares = self.old_shares[1]
1948             # TODO: this really shouldn't change anything. When we implement
1949             # a "minimal-bandwidth" repairer, change this test to assert:
1950             #self.failUnlessEqual(new_shares, initial_shares)
1951
1952             # all shares should be in the same place as before
1953             self.failUnlessEqual(set(initial_shares.keys()),
1954                                  set(new_shares.keys()))
1955             # but they should all be at a newer seqnum. The IV will be
1956             # different, so the roothash will be too.
1957             for key in initial_shares:
1958                 (version0,
1959                  seqnum0,
1960                  root_hash0,
1961                  IV0,
1962                  k0, N0, segsize0, datalen0,
1963                  o0) = unpack_header(initial_shares[key])
1964                 (version1,
1965                  seqnum1,
1966                  root_hash1,
1967                  IV1,
1968                  k1, N1, segsize1, datalen1,
1969                  o1) = unpack_header(new_shares[key])
1970                 self.failUnlessEqual(version0, version1)
1971                 self.failUnlessEqual(seqnum0+1, seqnum1)
1972                 self.failUnlessEqual(k0, k1)
1973                 self.failUnlessEqual(N0, N1)
1974                 self.failUnlessEqual(segsize0, segsize1)
1975                 self.failUnlessEqual(datalen0, datalen1)
1976         d.addCallback(_check_results)
1977         return d
1978
1979     def failIfSharesChanged(self, ignored=None):
1980         old_shares = self.old_shares[-2]
1981         current_shares = self.old_shares[-1]
1982         self.failUnlessEqual(old_shares, current_shares)
1983
1984
1985     def test_unrepairable_0shares(self):
1986         d = self.publish_one()
1987         def _delete_all_shares(ign):
1988             shares = self._storage._peers
1989             for peerid in shares:
1990                 shares[peerid] = {}
1991         d.addCallback(_delete_all_shares)
1992         d.addCallback(lambda ign: self._fn.check(Monitor()))
1993         d.addCallback(lambda check_results: self._fn.repair(check_results))
1994         def _check(crr):
1995             self.failUnlessEqual(crr.get_successful(), False)
1996         d.addCallback(_check)
1997         return d
1998
1999     def test_mdmf_unrepairable_0shares(self):
2000         d = self.publish_mdmf()
2001         def _delete_all_shares(ign):
2002             shares = self._storage._peers
2003             for peerid in shares:
2004                 shares[peerid] = {}
2005         d.addCallback(_delete_all_shares)
2006         d.addCallback(lambda ign: self._fn.check(Monitor()))
2007         d.addCallback(lambda check_results: self._fn.repair(check_results))
2008         d.addCallback(lambda crr: self.failIf(crr.get_successful()))
2009         return d
2010
2011
2012     def test_unrepairable_1share(self):
2013         d = self.publish_one()
2014         def _delete_all_but_one_share(ign):
2015             shares = self._storage._peers
2016             for peerid in shares:
2017                 for shnum in list(shares[peerid]):
2018                     if shnum > 0:
2019                         del shares[peerid][shnum]
2020         d.addCallback(_delete_all_but_one_share)
2021         d.addCallback(lambda ign: self._fn.check(Monitor()))
2022         d.addCallback(lambda check_results: self._fn.repair(check_results))
2023         def _check(crr):
2024             self.failUnlessEqual(crr.get_successful(), False)
2025         d.addCallback(_check)
2026         return d
2027
2028     def test_mdmf_unrepairable_1share(self):
2029         d = self.publish_mdmf()
2030         def _delete_all_but_one_share(ign):
2031             shares = self._storage._peers
2032             for peerid in shares:
2033                 for shnum in list(shares[peerid]):
2034                     if shnum > 0:
2035                         del shares[peerid][shnum]
2036         d.addCallback(_delete_all_but_one_share)
2037         d.addCallback(lambda ign: self._fn.check(Monitor()))
2038         d.addCallback(lambda check_results: self._fn.repair(check_results))
2039         def _check(crr):
2040             self.failUnlessEqual(crr.get_successful(), False)
2041         d.addCallback(_check)
2042         return d
2043
2044     def test_repairable_5shares(self):
2045         d = self.publish_one()
2046         def _delete_some_shares(ign):
2047             shares = self._storage._peers
2048             for peerid in shares:
2049                 for shnum in list(shares[peerid]):
2050                     if shnum > 4:
2051                         del shares[peerid][shnum]
2052         d.addCallback(_delete_some_shares)
2053         d.addCallback(lambda ign: self._fn.check(Monitor()))
2054         d.addCallback(lambda check_results: self._fn.repair(check_results))
2055         def _check(crr):
2056             self.failUnlessEqual(crr.get_successful(), True)
2057         d.addCallback(_check)
2058         return d
2059
2060     def test_mdmf_repairable_5shares(self):
2061         d = self.publish_mdmf()
2062         def _delete_some_shares(ign):
2063             shares = self._storage._peers
2064             for peerid in shares:
2065                 for shnum in list(shares[peerid]):
2066                     if shnum > 5:
2067                         del shares[peerid][shnum]
2068         d.addCallback(_delete_some_shares)
2069         d.addCallback(lambda ign: self._fn.check(Monitor()))
2070         def _check(cr):
2071             self.failIf(cr.is_healthy())
2072             self.failUnless(cr.is_recoverable())
2073             return cr
2074         d.addCallback(_check)
2075         d.addCallback(lambda check_results: self._fn.repair(check_results))
2076         def _check1(crr):
2077             self.failUnlessEqual(crr.get_successful(), True)
2078         d.addCallback(_check1)
2079         return d
2080
2081
2082     def test_merge(self):
2083         self.old_shares = []
2084         d = self.publish_multiple()
2085         # repair will refuse to merge multiple highest seqnums unless you
2086         # pass force=True
2087         d.addCallback(lambda res:
2088                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2089                                           1:4,3:4,5:4,7:4,9:4}))
2090         d.addCallback(self.copy_shares)
2091         d.addCallback(lambda res: self._fn.check(Monitor()))
2092         def _try_repair(check_results):
2093             ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2094             d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2095                                  self._fn.repair, check_results)
2096             d2.addCallback(self.copy_shares)
2097             d2.addCallback(self.failIfSharesChanged)
2098             d2.addCallback(lambda res: check_results)
2099             return d2
2100         d.addCallback(_try_repair)
2101         d.addCallback(lambda check_results:
2102                       self._fn.repair(check_results, force=True))
2103         # this should give us 10 shares of the highest roothash
2104         def _check_repair_results(rres):
2105             self.failUnless(rres.get_successful())
2106             # TODO: examine the repair results in more detail
2107         d.addCallback(_check_repair_results)
2108         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2109         def _check_smap(smap):
2110             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2111             self.failIf(smap.unrecoverable_versions())
2112             # now, which should have won?
2113             roothash_s4a = self.get_roothash_for(3)
2114             roothash_s4b = self.get_roothash_for(4)
2115             if roothash_s4b > roothash_s4a:
2116                 expected_contents = self.CONTENTS[4]
2117             else:
2118                 expected_contents = self.CONTENTS[3]
2119             new_versionid = smap.best_recoverable_version()
2120             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2121             d2 = self._fn.download_version(smap, new_versionid)
2122             d2.addCallback(self.failUnlessEqual, expected_contents)
2123             return d2
2124         d.addCallback(_check_smap)
2125         return d
2126
2127     def test_non_merge(self):
2128         self.old_shares = []
2129         d = self.publish_multiple()
2130         # repair should not refuse a repair that doesn't need to merge. In
2131         # this case, we combine v2 with v3. The repair should ignore v2 and
2132         # copy v3 into a new v5.
2133         d.addCallback(lambda res:
2134                       self._set_versions({0:2,2:2,4:2,6:2,8:2,
2135                                           1:3,3:3,5:3,7:3,9:3}))
2136         d.addCallback(lambda res: self._fn.check(Monitor()))
2137         d.addCallback(lambda check_results: self._fn.repair(check_results))
2138         # this should give us 10 shares of v3
2139         def _check_repair_results(rres):
2140             self.failUnless(rres.get_successful())
2141             # TODO: examine the repair results in more detail
2142         d.addCallback(_check_repair_results)
2143         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2144         def _check_smap(smap):
2145             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2146             self.failIf(smap.unrecoverable_versions())
2147             # now, which should have won?
2148             expected_contents = self.CONTENTS[3]
2149             new_versionid = smap.best_recoverable_version()
2150             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2151             d2 = self._fn.download_version(smap, new_versionid)
2152             d2.addCallback(self.failUnlessEqual, expected_contents)
2153             return d2
2154         d.addCallback(_check_smap)
2155         return d
2156
2157     def get_roothash_for(self, index):
2158         # return the roothash for the first share we see in the saved set
2159         shares = self._copied_shares[index]
2160         for peerid in shares:
2161             for shnum in shares[peerid]:
2162                 share = shares[peerid][shnum]
2163                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2164                           unpack_header(share)
2165                 return root_hash
2166
2167     def test_check_and_repair_readcap(self):
2168         # we can't currently repair from a mutable readcap: #625
2169         self.old_shares = []
2170         d = self.publish_one()
2171         d.addCallback(self.copy_shares)
2172         def _get_readcap(res):
2173             self._fn3 = self._fn.get_readonly()
2174             # also delete some shares
2175             for peerid,shares in self._storage._peers.items():
2176                 shares.pop(0, None)
2177         d.addCallback(_get_readcap)
2178         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2179         def _check_results(crr):
2180             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2181             # we should detect the unhealthy, but skip over mutable-readcap
2182             # repairs until #625 is fixed
2183             self.failIf(crr.get_pre_repair_results().is_healthy())
2184             self.failIf(crr.get_repair_attempted())
2185             self.failIf(crr.get_post_repair_results().is_healthy())
2186         d.addCallback(_check_results)
2187         return d
2188
2189 class DevNullDictionary(dict):
2190     def __setitem__(self, key, value):
2191         return
2192
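def _demo_devnull_cache():
    # Illustrative sketch (hypothetical, not called by the tests): writes
    # to a DevNullDictionary simply vanish, so substituting one for the
    # nodemaker's _node_cache (as _encode does below) guarantees that
    # create_from_cap() builds a fresh node instead of returning a cached
    # one.
    cache = DevNullDictionary()
    cache["writecap"] = "node"
    assert "writecap" not in cache
    assert cache.get("writecap") is None
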
2193 class MultipleEncodings(unittest.TestCase):
2194     def setUp(self):
2195         self.CONTENTS = "New contents go here"
2196         self.uploadable = MutableData(self.CONTENTS)
2197         self._storage = FakeStorage()
2198         self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2199         self._storage_broker = self._nodemaker.storage_broker
2200         d = self._nodemaker.create_mutable_file(self.uploadable)
2201         def _created(node):
2202             self._fn = node
2203         d.addCallback(_created)
2204         return d
2205
2206     def _encode(self, k, n, data, version=SDMF_VERSION):
2207         # encode 'data' into a peerid->shares dict.
2208
2209         fn = self._fn
2210         # disable the nodecache, since for these tests we explicitly need
2211         # multiple nodes pointing at the same file
2212         self._nodemaker._node_cache = DevNullDictionary()
2213         fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2214         # then we copy over other fields that are normally fetched from the
2215         # existing shares
2216         fn2._pubkey = fn._pubkey
2217         fn2._privkey = fn._privkey
2218         fn2._encprivkey = fn._encprivkey
2219         # and set the encoding parameters to something completely different
2220         fn2._required_shares = k
2221         fn2._total_shares = n
2222
2223         s = self._storage
2224         s._peers = {} # clear existing storage
2225         p2 = Publish(fn2, self._storage_broker, None)
2226         uploadable = MutableData(data)
2227         d = p2.publish(uploadable)
2228         def _published(res):
2229             shares = s._peers
2230             s._peers = {}
2231             return shares
2232         d.addCallback(_published)
2233         return d
2234
2235     def make_servermap(self, mode=MODE_READ, oldmap=None):
2236         if oldmap is None:
2237             oldmap = ServerMap()
2238         smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
2239                                oldmap, mode)
2240         d = smu.update()
2241         return d
2242
2243     def test_multiple_encodings(self):
2244         # we encode the same file in two different ways (3-of-10 and 4-of-9),
2245         # then mix up the shares, to make sure that download survives seeing
2246         # a variety of encodings. This is actually kind of tricky to set up.
2247
2248         contents1 = "Contents for encoding 1 (3-of-10) go here"
2249         contents2 = "Contents for encoding 2 (4-of-9) go here"
2250         contents3 = "Contents for encoding 3 (4-of-7) go here"
2251
2252         # we make a retrieval object that doesn't know what encoding
2253         # parameters to use
2254         fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2255
2256         # now we upload a file through fn1, and grab its shares
2257         d = self._encode(3, 10, contents1)
2258         def _encoded_1(shares):
2259             self._shares1 = shares
2260         d.addCallback(_encoded_1)
2261         d.addCallback(lambda res: self._encode(4, 9, contents2))
2262         def _encoded_2(shares):
2263             self._shares2 = shares
2264         d.addCallback(_encoded_2)
2265         d.addCallback(lambda res: self._encode(4, 7, contents3))
2266         def _encoded_3(shares):
2267             self._shares3 = shares
2268         d.addCallback(_encoded_3)
2269
2270         def _merge(res):
2271             log.msg("merging sharelists")
2272             # we merge the shares from the two sets, leaving each shnum in
2273             # its original location, but using a share from set1 or set2
2274             # according to the following sequence:
2275             #
2276             #  4-of-9  a  s2
2277             #  4-of-9  b  s2
2278             #  4-of-7  c   s3
2279             #  4-of-9  d  s2
2280             #  3-of-10 e s1
2281             #  3-of-10 f s1
2282             #  3-of-10 g s1
2283             #  4-of-9  h  s2
2284             #
2285             # so that neither form can be recovered until fetch [f], at which
2286             # point version-s1 (the 3-of-10 form) should be recoverable. If
2287             # the implementation latches on to the first version it sees,
2288             # then s2 will be recoverable at fetch [g].
2289
2290             # Later, when we implement code that handles multiple versions,
2291             # we can use this framework to assert that all recoverable
2292             # versions are retrieved, and test that 'epsilon' does its job
2293
2294             places = [2, 2, 3, 2, 1, 1, 1, 2]
2295
2296             sharemap = {}
2297             sb = self._storage_broker
2298
2299             for peerid in sorted(sb.get_all_serverids()):
2300                 for shnum in self._shares1.get(peerid, {}):
2301                     if shnum < len(places):
2302                         which = places[shnum]
2303                     else:
2304                         which = "x"
2305                     peers = self._storage._peers.setdefault(peerid, {}) # don't clobber earlier placements
2306                     in_1 = shnum in self._shares1[peerid]
2307                     in_2 = shnum in self._shares2.get(peerid, {})
2308                     in_3 = shnum in self._shares3.get(peerid, {})
2309                     if which == 1:
2310                         if in_1:
2311                             peers[shnum] = self._shares1[peerid][shnum]
2312                             sharemap[shnum] = peerid
2313                     elif which == 2:
2314                         if in_2:
2315                             peers[shnum] = self._shares2[peerid][shnum]
2316                             sharemap[shnum] = peerid
2317                     elif which == 3:
2318                         if in_3:
2319                             peers[shnum] = self._shares3[peerid][shnum]
2320                             sharemap[shnum] = peerid
2321
2322             # we don't bother placing any other shares
2323             # now sort the sequence so that share 0 is returned first
2324             new_sequence = [sharemap[shnum]
2325                             for shnum in sorted(sharemap.keys())]
2326             self._storage._sequence = new_sequence
2327             log.msg("merge done")
2328         d.addCallback(_merge)
2329         d.addCallback(lambda res: fn3.download_best_version())
2330         def _retrieved(new_contents):
2331             # the current specified behavior is "first version recoverable"
2332             self.failUnlessEqual(new_contents, contents1)
2333         d.addCallback(_retrieved)
2334         return d
2335
2336
2337 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
2338
2339     def setUp(self):
2340         return self.publish_multiple()
2341
2342     def test_multiple_versions(self):
2343         # if we see a mix of versions in the grid, download_best_version
2344         # should get the latest one
2345         self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2346         d = self._fn.download_best_version()
2347         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2348         # and the checker should report problems
2349         d.addCallback(lambda res: self._fn.check(Monitor()))
2350         d.addCallback(self.check_bad, "test_multiple_versions")
2351
2352         # but if everything is at version 2, that's what we should download
2353         d.addCallback(lambda res:
2354                       self._set_versions(dict([(i,2) for i in range(10)])))
2355         d.addCallback(lambda res: self._fn.download_best_version())
2356         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2357         # if exactly one share is at version 3, we should still get v2
2358         d.addCallback(lambda res:
2359                       self._set_versions({0:3}))
2360         d.addCallback(lambda res: self._fn.download_best_version())
2361         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2362         # but the servermap should see the unrecoverable version. This
2363         # depends upon the single newer share being queried early.
2364         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2365         def _check_smap(smap):
2366             self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2367             newer = smap.unrecoverable_newer_versions()
2368             self.failUnlessEqual(len(newer), 1)
2369             verinfo, health = newer.items()[0]
2370             self.failUnlessEqual(verinfo[0], 4)
2371             self.failUnlessEqual(health, (1,3))
2372             self.failIf(smap.needs_merge())
2373         d.addCallback(_check_smap)
2374         # if we have a mix of two parallel versions (s4a and s4b), we could
2375         # recover either
2376         d.addCallback(lambda res:
2377                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2378                                           1:4,3:4,5:4,7:4,9:4}))
2379         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2380         def _check_smap_mixed(smap):
2381             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2382             newer = smap.unrecoverable_newer_versions()
2383             self.failUnlessEqual(len(newer), 0)
2384             self.failUnless(smap.needs_merge())
2385         d.addCallback(_check_smap_mixed)
2386         d.addCallback(lambda res: self._fn.download_best_version())
2387         d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2388                                                   res == self.CONTENTS[4]))
2389         return d
2390
2391     def test_replace(self):
2392         # if we see a mix of versions in the grid, we should be able to
2393         # replace them all with a newer version
2394
2395         # if exactly one share is at version 3, we should download (and
2396         # replace) v2, and the result should be v4. Note that the index we
2397         # give to _set_versions is different from the sequence number.
2398         target = dict([(i,2) for i in range(10)]) # seqnum3
2399         target[0] = 3 # seqnum4
2400         self._set_versions(target)
2401
2402         def _modify(oldversion, servermap, first_time):
2403             return oldversion + " modified"
2404         d = self._fn.modify(_modify)
2405         d.addCallback(lambda res: self._fn.download_best_version())
2406         expected = self.CONTENTS[2] + " modified"
2407         d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2408         # and the servermap should indicate that the outlier was replaced too
2409         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2410         def _check_smap(smap):
2411             self.failUnlessEqual(smap.highest_seqnum(), 5)
2412             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2413             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2414         d.addCallback(_check_smap)
2415         return d
2416
2417
2418 class Utils(unittest.TestCase):
2419     def test_cache(self):
2420         c = ResponseCache()
2421         # xdata = base62.b2a(os.urandom(100))[:100]
2422         xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2423         ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
2424         c.add("v1", 1, 0, xdata)
2425         c.add("v1", 1, 2000, ydata)
2426         self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2427         self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2428         self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2429         self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2430         self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2431         self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2432         self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2433         self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2434         self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
2435         self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2436         self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2437         self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2438         self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2439         self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2440         self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2441         self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2442         self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2443         self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
2444
2445         # test joining fragments
2446         c = ResponseCache()
2447         c.add("v1", 1, 0, xdata[:10])
2448         c.add("v1", 1, 10, xdata[10:20])
2449         self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2450
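# A minimal reference sketch (an assumption about the contract, not the
# real ResponseCache) of the behavior the assertions above rely on: add()
# records byte spans per (verinfo, shnum) key, adjacent spans coalesce,
# and read() returns data only when the requested range lies entirely
# inside one cached span, returning None otherwise.
class SketchResponseCache:
    def __init__(self):
        self.spans = {} # (verinfo, shnum) -> sorted list of (start, data)

    def add(self, verinfo, shnum, offset, data):
        key = (verinfo, shnum)
        spans = sorted(self.spans.get(key, []) + [(offset, data)])
        merged = []
        for (start, piece) in spans:
            if merged and merged[-1][0] + len(merged[-1][1]) == start:
                # this span starts exactly where the previous one ends
                merged[-1] = (merged[-1][0], merged[-1][1] + piece)
            else:
                merged.append((start, piece))
        self.spans[key] = merged

    def read(self, verinfo, shnum, offset, length):
        for (start, piece) in self.spans.get((verinfo, shnum), []):
            if start <= offset and offset + length <= start + len(piece):
                return piece[offset - start : offset - start + length]
        return None
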
2451 class Exceptions(unittest.TestCase):
2452     def test_repr(self):
2453         nmde = NeedMoreDataError(100, 50, 100)
2454         self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde))
2455         ucwe = UncoordinatedWriteError()
2456         self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
2457
2458 class SameKeyGenerator:
2459     def __init__(self, pubkey, privkey):
2460         self.pubkey = pubkey
2461         self.privkey = privkey
2462     def generate(self, keysize=None):
2463         return defer.succeed( (self.pubkey, self.privkey) )
2464
2465 class FirstServerGetsKilled:
2466     done = False
2467     def notify(self, retval, wrapper, methname):
2468         if not self.done:
2469             wrapper.broken = True
2470             self.done = True
2471         return retval
2472
2473 class FirstServerGetsDeleted:
2474     def __init__(self):
2475         self.done = False
2476         self.silenced = None
2477     def notify(self, retval, wrapper, methname):
2478         if not self.done:
2479             # this query will work, but later queries should think the share
2480             # has been deleted
2481             self.done = True
2482             self.silenced = wrapper
2483             return retval
2484         if wrapper == self.silenced:
2485             assert methname == "slot_testv_and_readv_and_writev"
2486             return (True, {})
2487         return retval
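
# A hypothetical notifier, sketched only to illustrate the
# post_call_notifier hook that the tests below install with
# "s.get_rref().post_call_notifier = ...": instead of breaking or
# silencing a server, it simply records each remote method name.
class CallRecorder:
    def __init__(self):
        self.calls = []
    def notify(self, retval, wrapper, methname):
        self.calls.append(methname)
        return retval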
2488
2489 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2490     def test_publish_surprise(self):
2491         self.basedir = "mutable/Problems/test_publish_surprise"
2492         self.set_up_grid()
2493         nm = self.g.clients[0].nodemaker
2494         d = nm.create_mutable_file(MutableData("contents 1"))
2495         def _created(n):
2496             d = defer.succeed(None)
2497             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2498             def _got_smap1(smap):
2499                 # stash the old state of the file
2500                 self.old_map = smap
2501             d.addCallback(_got_smap1)
2502             # then modify the file, leaving the old map untouched
2503             d.addCallback(lambda res: log.msg("starting winning write"))
2504             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2505             # now attempt to modify the file with the old servermap. This
2506             # will look just like an uncoordinated write, in which every
2507             # single share got updated between our mapupdate and our publish
2508             d.addCallback(lambda res: log.msg("starting doomed write"))
2509             d.addCallback(lambda res:
2510                           self.shouldFail(UncoordinatedWriteError,
2511                                           "test_publish_surprise", None,
2512                                           n.upload,
2513                                           MutableData("contents 2a"), self.old_map))
2514             return d
2515         d.addCallback(_created)
2516         return d
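
    # A sketch (hypothetical, not exercised by this suite) of how a caller
    # could recover from the UncoordinatedWriteError provoked above: refresh
    # the servermap in MODE_WRITE and retry the upload once.
    def _sketch_retry_after_ucwe(self, n, new_contents, old_map):
        d = n.upload(MutableData(new_contents), old_map)
        def _refresh_and_retry(f):
            f.trap(UncoordinatedWriteError)
            d2 = n.get_servermap(MODE_WRITE)
            d2.addCallback(lambda smap:
                          n.upload(MutableData(new_contents), smap))
            return d2
        d.addErrback(_refresh_and_retry)
        return d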
2517
2518     def test_retrieve_surprise(self):
2519         self.basedir = "mutable/Problems/test_retrieve_surprise"
2520         self.set_up_grid()
2521         nm = self.g.clients[0].nodemaker
2522         d = nm.create_mutable_file(MutableData("contents 1"))
2523         def _created(n):
2524             d = defer.succeed(None)
2525             d.addCallback(lambda res: n.get_servermap(MODE_READ))
2526             def _got_smap1(smap):
2527                 # stash the old state of the file
2528                 self.old_map = smap
2529             d.addCallback(_got_smap1)
2530             # then modify the file, leaving the old map untouched
2531             d.addCallback(lambda res: log.msg("starting winning write"))
2532             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2533             # now attempt to retrieve the old version with the old servermap.
2534             # This will look like someone has changed the file since we
2535             # updated the servermap.
2536             d.addCallback(lambda res: n._cache._clear())
2537             d.addCallback(lambda res: log.msg("starting doomed read"))
2538             d.addCallback(lambda res:
2539                           self.shouldFail(NotEnoughSharesError,
2540                                           "test_retrieve_surprise",
2541                                           "ran out of peers: have 0 of 1",
2542                                           n.download_version,
2543                                           self.old_map,
2544                                           self.old_map.best_recoverable_version(),
2545                                           ))
2546             return d
2547         d.addCallback(_created)
2548         return d
2549
2550
2551     def test_unexpected_shares(self):
2552         # upload the file, take a servermap, shut down one of the servers,
2553         # upload it again (causing shares to appear on a new server), then
2554         # upload using the old servermap. The last upload should fail with an
2555         # UncoordinatedWriteError, because of the shares that didn't appear
2556         # in the servermap.
2557         self.basedir = "mutable/Problems/test_unexpected_shares"
2558         self.set_up_grid()
2559         nm = self.g.clients[0].nodemaker
2560         d = nm.create_mutable_file(MutableData("contents 1"))
2561         def _created(n):
2562             d = defer.succeed(None)
2563             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2564             def _got_smap1(smap):
2565                 # stash the old state of the file
2566                 self.old_map = smap
2567                 # now shut down one of the servers
2568                 peer0 = list(smap.make_sharemap()[0])[0]
2569                 self.g.remove_server(peer0)
2570                 # then modify the file, leaving the old map untouched
2571                 log.msg("starting winning write")
2572                 return n.overwrite(MutableData("contents 2"))
2573             d.addCallback(_got_smap1)
2574             # now attempt to modify the file with the old servermap. This
2575             # will look just like an uncoordinated write, in which every
2576             # single share got updated between our mapupdate and our publish
2577             d.addCallback(lambda res: log.msg("starting doomed write"))
2578             d.addCallback(lambda res:
2579                           self.shouldFail(UncoordinatedWriteError,
2580                                           "test_unexpected_shares", None,
2581                                           n.upload,
2582                                           MutableData("contents 2a"), self.old_map))
2583             return d
2584         d.addCallback(_created)
2585         return d
2586     test_unexpected_shares.timeout = 15
2587
2588     def test_bad_server(self):
2589         # Break one server, then create the file: the initial publish should
2590         # complete with an alternate server. Breaking a second server should
2591         # not prevent an update from succeeding either.
2592         self.basedir = "mutable/Problems/test_bad_server"
2593         self.set_up_grid()
2594         nm = self.g.clients[0].nodemaker
2595
2596         # to make sure that one of the initial peers is broken, we have to
2597         # get creative. We create an RSA key and compute its storage-index.
2598         # Then we make a KeyGenerator that always returns that one key, and
2599         # use it to create the mutable file. This will get easier when we can
2600         # use #467 static-server-selection to disable permutation and force
2601         # the choice of server for share[0].
2602
2603         d = nm.key_generator.generate(522)
2604         def _got_key( (pubkey, privkey) ):
2605             nm.key_generator = SameKeyGenerator(pubkey, privkey)
2606             pubkey_s = pubkey.serialize()
2607             privkey_s = privkey.serialize()
2608             u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2609                                         ssk_pubkey_fingerprint_hash(pubkey_s))
2610             self._storage_index = u.get_storage_index()
2611         d.addCallback(_got_key)
2612         def _break_peer0(res):
2613             si = self._storage_index
2614             servers = nm.storage_broker.get_servers_for_psi(si)
2615             self.g.break_server(servers[0].get_serverid())
2616             self.server1 = servers[1]
2617         d.addCallback(_break_peer0)
2618         # now "create" the file, using the pre-established key, and let the
2619         # initial publish finally happen
2620         d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2621         # that ought to work
2622         def _got_node(n):
2623             d = n.download_best_version()
2624             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2625             # now break the second peer
2626             def _break_peer1(res):
2627                 self.g.break_server(self.server1.get_serverid())
2628             d.addCallback(_break_peer1)
2629             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2630             # that ought to work too
2631             d.addCallback(lambda res: n.download_best_version())
2632             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2633             def _explain_error(f):
2634                 print f
2635                 if f.check(NotEnoughServersError):
2636                     print "first_error:", f.value.first_error
2637                 return f
2638             d.addErrback(_explain_error)
2639             return d
2640         d.addCallback(_got_node)
2641         return d
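
    # A sketch (hypothetical) of the trick used above, in one helper: given
    # a fixed keypair the storage index is deterministic, so the permuted
    # server list -- and therefore which server to break -- is known before
    # the file is ever created.
    def _sketch_first_server_for(self, nm, storage_index):
        servers = nm.storage_broker.get_servers_for_psi(storage_index)
        return servers[0].get_serverid()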
2642
2643     def test_bad_server_overlap(self):
2644         # like test_bad_server, but with no extra unused servers to fall back
2645         # upon. This means that we must re-use a server which we've already
2646         # used. If we don't remember the fact that we sent them one share
2647         # already, we'll mistakenly think we're experiencing an
2648         # UncoordinatedWriteError.
2649
2650         # Break one server, then create the file: the initial publish should
2651         # complete with an alternate server. Breaking a second server should
2652         # not prevent an update from succeeding either.
2653         self.basedir = "mutable/Problems/test_bad_server_overlap"
2654         self.set_up_grid()
2655         nm = self.g.clients[0].nodemaker
2656         sb = nm.storage_broker
2657
2658         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2659         self.g.break_server(peerids[0])
2660
2661         d = nm.create_mutable_file(MutableData("contents 1"))
2662         def _created(n):
2663             d = n.download_best_version()
2664             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2665             # now break one of the remaining servers
2666             def _break_second_server(res):
2667                 self.g.break_server(peerids[1])
2668             d.addCallback(_break_second_server)
2669             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2670             # that ought to work too
2671             d.addCallback(lambda res: n.download_best_version())
2672             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2673             return d
2674         d.addCallback(_created)
2675         return d
2676
2677     def test_publish_all_servers_bad(self):
2678         # Break all servers: the publish should fail
2679         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2680         self.set_up_grid()
2681         nm = self.g.clients[0].nodemaker
2682         for s in nm.storage_broker.get_connected_servers():
2683             s.get_rref().broken = True
2684
2685         d = self.shouldFail(NotEnoughServersError,
2686                             "test_publish_all_servers_bad",
2687                             "ran out of good servers",
2688                             nm.create_mutable_file, MutableData("contents"))
2689         return d
2690
2691     def test_publish_no_servers(self):
2692         # no servers at all: the publish should fail
2693         self.basedir = "mutable/Problems/test_publish_no_servers"
2694         self.set_up_grid(num_servers=0)
2695         nm = self.g.clients[0].nodemaker
2696
2697         d = self.shouldFail(NotEnoughServersError,
2698                             "test_publish_no_servers",
2699                             "Ran out of non-bad servers",
2700                             nm.create_mutable_file, MutableData("contents"))
2701         return d
2702     test_publish_no_servers.timeout = 30
2703
2704
2705     def test_privkey_query_error(self):
2706         # when a servermap is updated with MODE_WRITE, it tries to get the
2707         # privkey. Something might go wrong during this query attempt.
2708         # Exercise the code in _privkey_query_failed which tries to handle
2709         # such an error.
2710         self.basedir = "mutable/Problems/test_privkey_query_error"
2711         self.set_up_grid(num_servers=20)
2712         nm = self.g.clients[0].nodemaker
2713         nm._node_cache = DevNullDictionary() # disable the nodecache
2714
2715         # we need some contents that are large enough to push the privkey out
2716         # of the early part of the file
2717         LARGE = "These are Larger contents" * 2000 # about 50KB
2718         LARGE_uploadable = MutableData(LARGE)
2719         d = nm.create_mutable_file(LARGE_uploadable)
2720         def _created(n):
2721             self.uri = n.get_uri()
2722             self.n2 = nm.create_from_cap(self.uri)
2723
2724             # When a mapupdate is performed on a node that doesn't yet know
2725             # the privkey, a short read is sent to a batch of servers, to get
2726             # the verinfo and (hopefully, if the file is short enough) the
2727             # encprivkey. Our file is too large to let this first read
2728             # contain the encprivkey. Each non-encprivkey-bearing response
2729             # that arrives (until the node gets the encprivkey) will trigger
2730             # a second read to specifically read the encprivkey.
2731             #
2732             # So, to exercise this case:
2733             #  1. notice which server gets a read() call first
2734             #  2. tell that server to start throwing errors
2735             killer = FirstServerGetsKilled()
2736             for s in nm.storage_broker.get_connected_servers():
2737                 s.get_rref().post_call_notifier = killer.notify
2738         d.addCallback(_created)
2739
2740         # now we update a servermap from a new node (which doesn't have the
2741         # privkey yet, forcing it to use a separate privkey query). Note that
2742         # the map-update will succeed, since we'll just get a copy from one
2743         # of the other shares.
2744         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
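        # (One could also verify that the broken server was actually
        # exercised, e.g. with
        #   d.addCallback(lambda smap: self.failUnless(killer.done))
        # which would require stashing 'killer' on self in _created above.)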
2745
2746         return d
2747
2748     def test_privkey_query_missing(self):
2749         # like test_privkey_query_error, but the shares are deleted by the
2750         # second query, instead of raising an exception.
2751         self.basedir = "mutable/Problems/test_privkey_query_missing"
2752         self.set_up_grid(num_servers=20)
2753         nm = self.g.clients[0].nodemaker
2754         LARGE = "These are Larger contents" * 2000 # about 50KB
2755         LARGE_uploadable = MutableData(LARGE)
2756         nm._node_cache = DevNullDictionary() # disable the nodecache
2757
2758         d = nm.create_mutable_file(LARGE_uploadable)
2759         def _created(n):
2760             self.uri = n.get_uri()
2761             self.n2 = nm.create_from_cap(self.uri)
2762             deleter = FirstServerGetsDeleted()
2763             for s in nm.storage_broker.get_connected_servers():
2764                 s.get_rref().post_call_notifier = deleter.notify
2765         d.addCallback(_created)
2766         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2767         return d
2768
2769
2770     def test_block_and_hash_query_error(self):
2771         # This tests for what happens when a query to a remote server
2772         # fails in either the hash validation step or the block getting
2773         # step (because of batching, this is the same actual query).
2774         # We need to have the storage server persist up until the point
2775         # that its prefix is validated, then suddenly die. This
2776         # exercises some exception handling code in Retrieve.
2777         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2778         self.set_up_grid(num_servers=20)
2779         nm = self.g.clients[0].nodemaker
2780         CONTENTS = "contents" * 2000
2781         CONTENTS_uploadable = MutableData(CONTENTS)
2782         d = nm.create_mutable_file(CONTENTS_uploadable)
2783         def _created(node):
2784             self._node = node
2785         d.addCallback(_created)
2786         d.addCallback(lambda ignored:
2787             self._node.get_servermap(MODE_READ))
2788         def _then(servermap):
2789             # we have our servermap. Now we set up the servers like the
2790             # tests above -- the first one that gets a read call should
2791             # start throwing errors, but only after returning its prefix
2792             # for validation. Since we'll download without fetching the
2793             # private key, the next query to the remote server will be
2794             # for either a block and salt or for hashes, either of which
2795             # will exercise the error handling code.
2796             killer = FirstServerGetsKilled()
2797             for s in nm.storage_broker.get_connected_servers():
2798                 s.get_rref().post_call_notifier = killer.notify
2799             ver = servermap.best_recoverable_version()
2800             assert ver
2801             return self._node.download_version(servermap, ver)
2802         d.addCallback(_then)
2803         d.addCallback(lambda data:
2804             self.failUnlessEqual(data, CONTENTS))
2805         return d
2806
2807
2808 class FileHandle(unittest.TestCase):
2809     def setUp(self):
2810         self.test_data = "Test Data" * 50000
2811         self.sio = StringIO(self.test_data)
2812         self.uploadable = MutableFileHandle(self.sio)
2813
2814
2815     def test_filehandle_read(self):
2816         self.basedir = "mutable/FileHandle/test_filehandle_read"
2817         chunk_size = 10
2818         for i in xrange(0, len(self.test_data), chunk_size):
2819             data = self.uploadable.read(chunk_size)
2820             data = "".join(data)
2821             start = i
2822             end = i + chunk_size
2823             self.failUnlessEqual(data, self.test_data[start:end])
2824
2825
2826     def test_filehandle_get_size(self):
2827         self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2828         actual_size = len(self.test_data)
2829         size = self.uploadable.get_size()
2830         self.failUnlessEqual(size, actual_size)
2831
2832
2833     def test_filehandle_get_size_out_of_order(self):
2834         # We should be able to call get_size whenever we want without
2835         # disturbing the location of the seek pointer.
2836         chunk_size = 100
2837         data = self.uploadable.read(chunk_size)
2838         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2839
2840         # Now get the size.
2841         size = self.uploadable.get_size()
2842         self.failUnlessEqual(size, len(self.test_data))
2843
2844         # Now get more data. We should be right where we left off.
2845         more_data = self.uploadable.read(chunk_size)
2846         start = chunk_size
2847         end = chunk_size * 2
2848         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2849
2850
2851     def test_filehandle_file(self):
2852         # Make sure that the MutableFileHandle works on a file as well
2853         # as a StringIO object, since in some cases it will be asked to
2854         # deal with files.
2855         self.basedir = self.mktemp()
2856         # mktemp() only returns a pathname; it does not create the directory.
2857         os.mkdir(self.basedir)
2858         f_path = os.path.join(self.basedir, "test_file")
2859         f = open(f_path, "w")
2860         f.write(self.test_data)
2861         f.close()
2862         f = open(f_path, "r")
2863
2864         uploadable = MutableFileHandle(f)
2865
2866         data = uploadable.read(len(self.test_data))
2867         self.failUnlessEqual("".join(data), self.test_data)
2868         size = uploadable.get_size()
2869         self.failUnlessEqual(size, len(self.test_data))
2870
2871
2872     def test_close(self):
2873         # Make sure that the MutableFileHandle closes its handle when
2874         # told to do so.
2875         self.uploadable.close()
2876         self.failUnless(self.sio.closed)
2877
2878
2879 class DataHandle(unittest.TestCase):
2880     def setUp(self):
2881         self.test_data = "Test Data" * 50000
2882         self.uploadable = MutableData(self.test_data)
2883
2884
2885     def test_datahandle_read(self):
2886         chunk_size = 10
2887         for i in xrange(0, len(self.test_data), chunk_size):
2888             data = self.uploadable.read(chunk_size)
2889             data = "".join(data)
2890             start = i
2891             end = i + chunk_size
2892             self.failUnlessEqual(data, self.test_data[start:end])
2893
2894
2895     def test_datahandle_get_size(self):
2896         actual_size = len(self.test_data)
2897         size = self.uploadable.get_size()
2898         self.failUnlessEqual(size, actual_size)
2899
2900
2901     def test_datahandle_get_size_out_of_order(self):
2902         # We should be able to call get_size whenever we want without
2903         # disturbing the location of the seek pointer.
2904         chunk_size = 100
2905         data = self.uploadable.read(chunk_size)
2906         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2907
2908         # Now get the size.
2909         size = self.uploadable.get_size()
2910         self.failUnlessEqual(size, len(self.test_data))
2911
2912         # Now get more data. We should be right where we left off.
2913         more_data = self.uploadable.read(chunk_size)
2914         start = chunk_size
2915         end = chunk_size * 2
2916         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2917
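# A small helper sketch (hypothetical, not used by the tests above) that
# drains a MutableData or MutableFileHandle uploadable. Note that read()
# returns a list of strings rather than a single string, hence the join.
def drain_uploadable(uploadable, chunk_size=4096):
    pieces = []
    remaining = uploadable.get_size()
    while remaining > 0:
        data = "".join(uploadable.read(chunk_size))
        if not data:
            break
        pieces.append(data)
        remaining -= len(data)
    return "".join(pieces)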
2918
2919 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin,
2920               PublishMixin):
2921     def setUp(self):
2922         GridTestMixin.setUp(self)
2923         self.basedir = self.mktemp()
2924         self.set_up_grid()
2925         self.c = self.g.clients[0]
2926         self.nm = self.c.nodemaker
2927         self.data = "test data" * 100000 # about 900 KiB; MDMF
2928         self.small_data = "test data" * 10 # about 90 B; SDMF
2929         return self.do_upload()
2930
2931
2932     def do_upload(self):
2933         d1 = self.nm.create_mutable_file(MutableData(self.data),
2934                                          version=MDMF_VERSION)
2935         d2 = self.nm.create_mutable_file(MutableData(self.small_data))
2936         dl = gatherResults([d1, d2])
2937         def _then((n1, n2)):
2938             assert isinstance(n1, MutableFileNode)
2939             assert isinstance(n2, MutableFileNode)
2940
2941             self.mdmf_node = n1
2942             self.sdmf_node = n2
2943         dl.addCallback(_then)
2944         return dl
2945
2946
2947     def test_get_readonly_mutable_version(self):
2948         # Attempting to get a mutable version of a mutable file from a
2949         # filenode initialized with a readcap should return a readonly
2950         # version of that same node.
2951         ro = self.mdmf_node.get_readonly()
2952         d = ro.get_best_mutable_version()
2953         d.addCallback(lambda version:
2954             self.failUnless(version.is_readonly()))
2955         d.addCallback(lambda ignored:
2956             self.sdmf_node.get_readonly())
2957         d.addCallback(lambda version:
2958             self.failUnless(version.is_readonly()))
2959         return d
2960
2961
2962     def test_get_sequence_number(self):
2963         d = self.mdmf_node.get_best_readable_version()
2964         d.addCallback(lambda bv:
2965             self.failUnlessEqual(bv.get_sequence_number(), 1))
2966         d.addCallback(lambda ignored:
2967             self.sdmf_node.get_best_readable_version())
2968         d.addCallback(lambda bv:
2969             self.failUnlessEqual(bv.get_sequence_number(), 1))
2970         # Now update. After the overwrite, the sequence number should
2971         # be 2 in both cases.
2972         def _do_update(ignored):
2973             new_data = MutableData("foo bar baz" * 100000)
2974             new_small_data = MutableData("foo bar baz" * 10)
2975             d1 = self.mdmf_node.overwrite(new_data)
2976             d2 = self.sdmf_node.overwrite(new_small_data)
2977             dl = gatherResults([d1, d2])
2978             return dl
2979         d.addCallback(_do_update)
2980         d.addCallback(lambda ignored:
2981             self.mdmf_node.get_best_readable_version())
2982         d.addCallback(lambda bv:
2983             self.failUnlessEqual(bv.get_sequence_number(), 2))
2984         d.addCallback(lambda ignored:
2985             self.sdmf_node.get_best_readable_version())
2986         d.addCallback(lambda bv:
2987             self.failUnlessEqual(bv.get_sequence_number(), 2))
2988         return d
2989
2990
2991     def test_version_extension_api(self):
2992         # We need to define an API by which an uploader can set the
2993         # extension parameters, and by which a downloader can retrieve
2994         # extensions.
2995         d = self.mdmf_node.get_best_mutable_version()
2996         def _got_version(version):
2997             hints = version.get_downloader_hints()
2998             # These should have been set when the file was created.
2999             self.failUnlessIn("k", hints)
3000             self.failUnlessEqual(hints['k'], 3)
3001             self.failUnlessIn('segsize', hints)
3002             self.failUnlessEqual(hints['segsize'], 131073)
3003         d.addCallback(_got_version)
3004         return d
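
    # A sketch (hypothetical) of one way a downloader could use the hints
    # checked above: 'segsize' lets it work out which segments a byte range
    # touches (assuming size > 0) before asking any server for blocks.
    def _sketch_segments_for_range(self, hints, offset, size):
        segsize = hints["segsize"]
        first = offset // segsize
        last = (offset + size - 1) // segsize
        return range(first, last + 1)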
3005
3006
3007     def test_extensions_from_cap(self):
3008         # If we initialize a mutable file with a cap that has extension
3009         # parameters in it and then grab the extension parameters using
3010         # our API, we should see that they're set correctly.
3011         mdmf_uri = self.mdmf_node.get_uri()
3012         new_node = self.nm.create_from_cap(mdmf_uri)
3013         d = new_node.get_best_mutable_version()
3014         def _got_version(version):
3015             hints = version.get_downloader_hints()
3016             self.failUnlessIn("k", hints)
3017             self.failUnlessEqual(hints["k"], 3)
3018             self.failUnlessIn("segsize", hints)
3019             self.failUnlessEqual(hints["segsize"], 131073)
3020         d.addCallback(_got_version)
3021         return d
3022
3023
3024     def test_extensions_from_upload(self):
3025         # If we create a new mutable file with some contents, we should
3026         # get back an MDMF cap with the right hints in place.
3027         contents = "foo bar baz" * 100000
3028         d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3029         def _got_mutable_file(n):
3030             rw_uri = n.get_uri()
3031             expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3032             self.failUnlessIn(expected_k, rw_uri)
3033             # XXX: Get this more intelligently.
3034             self.failUnlessIn("131073", rw_uri)
3035
3036             ro_uri = n.get_readonly_uri()
3037             self.failUnlessIn(expected_k, ro_uri)
3038             self.failUnlessIn("131073", ro_uri)
3039         d.addCallback(_got_mutable_file)
3040         return d
3041
3042
3043     def test_cap_after_upload(self):
3044         # If we create a new mutable file and upload things to it, and
3045         # it's an MDMF file, we should get an MDMF cap back from that
3046         # file and should be able to use that.
3047         # That's essentially what MDMF node is, so just check that.
3048         mdmf_uri = self.mdmf_node.get_uri()
3049         cap = uri.from_string(mdmf_uri)
3050         self.failUnless(isinstance(cap, uri.WritableMDMFFileURI))
3051         readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3052         cap = uri.from_string(readonly_mdmf_uri)
3053         self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3054
3055
3056     def test_get_writekey(self):
3057         d = self.mdmf_node.get_best_mutable_version()
3058         d.addCallback(lambda bv:
3059             self.failUnlessEqual(bv.get_writekey(),
3060                                  self.mdmf_node.get_writekey()))
3061         d.addCallback(lambda ignored:
3062             self.sdmf_node.get_best_mutable_version())
3063         d.addCallback(lambda bv:
3064             self.failUnlessEqual(bv.get_writekey(),
3065                                  self.sdmf_node.get_writekey()))
3066         return d
3067
3068
3069     def test_get_storage_index(self):
3070         d = self.mdmf_node.get_best_mutable_version()
3071         d.addCallback(lambda bv:
3072             self.failUnlessEqual(bv.get_storage_index(),
3073                                  self.mdmf_node.get_storage_index()))
3074         d.addCallback(lambda ignored:
3075             self.sdmf_node.get_best_mutable_version())
3076         d.addCallback(lambda bv:
3077             self.failUnlessEqual(bv.get_storage_index(),
3078                                  self.sdmf_node.get_storage_index()))
3079         return d
3080
3081
3082     def test_get_readonly_version(self):
3083         d = self.mdmf_node.get_best_readable_version()
3084         d.addCallback(lambda bv:
3085             self.failUnless(bv.is_readonly()))
3086         d.addCallback(lambda ignored:
3087             self.sdmf_node.get_best_readable_version())
3088         d.addCallback(lambda bv:
3089             self.failUnless(bv.is_readonly()))
3090         return d
3091
3092
3093     def test_get_mutable_version(self):
3094         d = self.mdmf_node.get_best_mutable_version()
3095         d.addCallback(lambda bv:
3096             self.failIf(bv.is_readonly()))
3097         d.addCallback(lambda ignored:
3098             self.sdmf_node.get_best_mutable_version())
3099         d.addCallback(lambda bv:
3100             self.failIf(bv.is_readonly()))
3101         return d
3102
3103
3104     def test_toplevel_overwrite(self):
3105         new_data = MutableData("foo bar baz" * 100000)
3106         new_small_data = MutableData("foo bar baz" * 10)
3107         d = self.mdmf_node.overwrite(new_data)
3108         d.addCallback(lambda ignored:
3109             self.mdmf_node.download_best_version())
3110         d.addCallback(lambda data:
3111             self.failUnlessEqual(data, "foo bar baz" * 100000))
3112         d.addCallback(lambda ignored:
3113             self.sdmf_node.overwrite(new_small_data))
3114         d.addCallback(lambda ignored:
3115             self.sdmf_node.download_best_version())
3116         d.addCallback(lambda data:
3117             self.failUnlessEqual(data, "foo bar baz" * 10))
3118         return d
3119
3120
3121     def test_toplevel_modify(self):
3122         def modifier(old_contents, servermap, first_time):
3123             return old_contents + "modified"
3124         d = self.mdmf_node.modify(modifier)
3125         d.addCallback(lambda ignored:
3126             self.mdmf_node.download_best_version())
3127         d.addCallback(lambda data:
3128             self.failUnlessIn("modified", data))
3129         d.addCallback(lambda ignored:
3130             self.sdmf_node.modify(modifier))
3131         d.addCallback(lambda ignored:
3132             self.sdmf_node.download_best_version())
3133         d.addCallback(lambda data:
3134             self.failUnlessIn("modified", data))
3135         return d
3136
3137
3138     def test_version_modify(self):
3139         # TODO: When we can publish multiple versions, alter this test
3140         # to modify a version other than the best usable version, then
3141         # test to see that the best recoverable version is that.
3142         def modifier(old_contents, servermap, first_time):
3143             return old_contents + "modified"
3144         d = self.mdmf_node.modify(modifier)
3145         d.addCallback(lambda ignored:
3146             self.mdmf_node.download_best_version())
3147         d.addCallback(lambda data:
3148             self.failUnlessIn("modified", data))
3149         d.addCallback(lambda ignored:
3150             self.sdmf_node.modify(modifier))
3151         d.addCallback(lambda ignored:
3152             self.sdmf_node.download_best_version())
3153         d.addCallback(lambda data:
3154             self.failUnlessIn("modified", data))
3155         return d
3156
3157
3158     def test_download_version(self):
3159         d = self.publish_multiple()
3160         # We want to have two recoverable versions on the grid.
3161         d.addCallback(lambda res:
3162                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3163                                           1:1,3:1,5:1,7:1,9:1}))
3164         # Now try to download each version. We should get the plaintext
3165         # associated with that version.
3166         d.addCallback(lambda ignored:
3167             self._fn.get_servermap(mode=MODE_READ))
3168         def _got_servermap(smap):
3169             versions = smap.recoverable_versions()
3170             assert len(versions) == 2
3171
3172             self.servermap = smap
3173             self.version1, self.version2 = versions
3174             assert self.version1 != self.version2
3175
3176             self.version1_seqnum = self.version1[0]
3177             self.version2_seqnum = self.version2[0]
3178             self.version1_index = self.version1_seqnum - 1
3179             self.version2_index = self.version2_seqnum - 1
3180
3181         d.addCallback(_got_servermap)
3182         d.addCallback(lambda ignored:
3183             self._fn.download_version(self.servermap, self.version1))
3184         d.addCallback(lambda results:
3185             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3186                                  results))
3187         d.addCallback(lambda ignored:
3188             self._fn.download_version(self.servermap, self.version2))
3189         d.addCallback(lambda results:
3190             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3191                                  results))
3192         return d
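
    # A sketch (hypothetical) of the seqnum/index bookkeeping used above:
    # verinfo tuples begin with the sequence number, and PublishMixin's
    # CONTENTS list is zero-indexed, so CONTENTS[seqnum - 1] is the
    # plaintext that was published at that sequence number.
    def _sketch_contents_for(self, verinfo):
        return self.CONTENTS[verinfo[0] - 1]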
3193
3194
3195     def test_download_nonexistent_version(self):
3196         d = self.mdmf_node.get_servermap(mode=MODE_WRITE)
3197         def _set_servermap(servermap):
3198             self.servermap = servermap
3199         d.addCallback(_set_servermap)
3200         d.addCallback(lambda ignored:
3201            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3202                            None,
3203                            self.mdmf_node.download_version, self.servermap,
3204                            "not a version"))
3205         return d
3206
3207
3208     def test_partial_read(self):
3209         # read only a few bytes at a time, and see that the results are
3210         # what we expect.
3211         d = self.mdmf_node.get_best_readable_version()
3212         def _read_data(version):
3213             c = consumer.MemoryConsumer()
3214             d2 = defer.succeed(None)
3215             for i in xrange(0, len(self.data), 10000):
3216                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3217             d2.addCallback(lambda ignored:
3218                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3219             return d2
3220         d.addCallback(_read_data)
3221         return d
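
    # Sketch (not part of the original suite): fetch a single byte range
    # from a node's best readable version, using the same consumer-based
    # read() API that test_partial_read exercises above.
    def _sketch_read_range(self, node, offset, size):
        d = node.get_best_readable_version()
        def _read(version):
            c = consumer.MemoryConsumer()
            d2 = version.read(c, offset, size)
            d2.addCallback(lambda ignored: "".join(c.chunks))
            return d2
        d.addCallback(_read)
        return d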
3222
3223
3224     def test_read(self):
3225         d = self.mdmf_node.get_best_readable_version()
3226         def _read_data(version):
3227             c = consumer.MemoryConsumer()
3228             d2 = defer.succeed(None)
3229             d2.addCallback(lambda ignored: version.read(c))
3230             d2.addCallback(lambda ignored:
3231                 self.failUnlessEqual("".join(c.chunks), self.data))
3232             return d2
3233         d.addCallback(_read_data)
3234         return d
3235
3236
3237     def test_download_best_version(self):
3238         d = self.mdmf_node.download_best_version()
3239         d.addCallback(lambda data:
3240             self.failUnlessEqual(data, self.data))
3241         d.addCallback(lambda ignored:
3242             self.sdmf_node.download_best_version())
3243         d.addCallback(lambda data:
3244             self.failUnlessEqual(data, self.small_data))
3245         return d
3246
3247
3248 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3249     def setUp(self):
3250         GridTestMixin.setUp(self)
3251         self.basedir = self.mktemp()
3252         self.set_up_grid()
3253         self.c = self.g.clients[0]
3254         self.nm = self.c.nodemaker
3255         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3256         self.small_data = "test data" * 10 # about 90 B; SDMF
3257         return self.do_upload()
3258
3259
3260     def do_upload(self):
3261         d1 = self.nm.create_mutable_file(MutableData(self.data),
3262                                          version=MDMF_VERSION)
3263         d2 = self.nm.create_mutable_file(MutableData(self.small_data))
3264         dl = gatherResults([d1, d2])
3265         def _then((n1, n2)):
3266             assert isinstance(n1, MutableFileNode)
3267             assert isinstance(n2, MutableFileNode)
3268
3269             self.mdmf_node = n1
3270             self.sdmf_node = n2
3271         dl.addCallback(_then)
3272         # Make SDMF and MDMF mutable file nodes that have 255 shares.
3273         def _make_max_shares(ign):
3274             self.nm.default_encoding_parameters['n'] = 255
3275             self.nm.default_encoding_parameters['k'] = 127
3276             d1 = self.nm.create_mutable_file(MutableData(self.data),
3277                                              version=MDMF_VERSION)
3278             d2 = \
3279                 self.nm.create_mutable_file(MutableData(self.small_data))
3280             return gatherResults([d1, d2])
3281         dl.addCallback(_make_max_shares)
3282         def _stash((n1, n2)):
3283             assert isinstance(n1, MutableFileNode)
3284             assert isinstance(n2, MutableFileNode)
3285
3286             self.mdmf_max_shares_node = n1
3287             self.sdmf_max_shares_node = n2
3288         dl.addCallback(_stash)
3289         return dl
3290
3291     def test_append(self):
3292         # We should be able to append data to the end of a mutable
3293         # file and get what we expect.
3294         new_data = self.data + "appended"
3295         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3296             d = node.get_best_mutable_version()
3297             d.addCallback(lambda mv:
3298                 mv.update(MutableData("appended"), len(self.data)))
3299             d.addCallback(lambda ignored, node=node:
3300                 node.download_best_version())
3301             d.addCallback(lambda results:
3302                 self.failUnlessEqual(results, new_data))
3303         return d
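
    # Convenience sketch (hypothetical, not used by these tests): splice
    # 'data' into a mutable file node at 'offset' via the update() API
    # exercised throughout this class, then fetch the resulting contents.
    def _sketch_splice(self, node, data, offset):
        d = node.get_best_mutable_version()
        d.addCallback(lambda mv: mv.update(MutableData(data), offset))
        d.addCallback(lambda ignored: node.download_best_version())
        return d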
3304
3305     def test_replace(self):
3306         # We should be able to replace data in the middle of a mutable
3307         # file and get what we expect back. 
3308         new_data = self.data[:100]
3309         new_data += "appended"
3310         new_data += self.data[108:]
3311         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3312             d = node.get_best_mutable_version()
3313             d.addCallback(lambda mv:
3314                 mv.update(MutableData("appended"), 100))
3315             d.addCallback(lambda ignored, node=node:
3316                 node.download_best_version())
3317             d.addCallback(lambda results:
3318                 self.failUnlessEqual(results, new_data))
3319         return d
3320
3321     def test_replace_beginning(self):
3322         # We should be able to replace data at the beginning of the file
3323         # without truncating the file
3324         B = "beginning"
3325         new_data = B + self.data[len(B):]
3326         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3327             d = node.get_best_mutable_version()
3328             d.addCallback(lambda mv: mv.update(MutableData(B), 0))
3329             d.addCallback(lambda ignored, node=node:
3330                 node.download_best_version())
3331             d.addCallback(lambda results: self.failUnlessEqual(results, new_data))
3332         return d
3333
3334     def test_replace_segstart1(self):
3335         offset = 128*1024+1
3336         new_data = "NNNN"
3337         expected = self.data[:offset]+new_data+self.data[offset+4:]
3338         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3339             d = node.get_best_mutable_version()
3340             d.addCallback(lambda mv:
3341                 mv.update(MutableData(new_data), offset))
3342             # close over this iteration's 'node' via the default argument.
3343             d.addCallback(lambda ignored, node=node:
3344                 node.download_best_version())
3345             def _check(results):
3346                 if results != expected:
3347                     print
3348                     print "got: %s ... %s" % (results[:20], results[-20:])
3349                     print "exp: %s ... %s" % (expected[:20], expected[-20:])
3350                     self.fail("results != expected")
3351             d.addCallback(_check)
3352         return d
3353
3354     def _check_differences(self, got, expected):
3355         # displaying arbitrary file corruption is tricky for a
3356         # 1MB file of repeating data, so look for likely places
3357         # with problems and display them separately
3358         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3359         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3360         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3361                     for (start,end) in gotmods]
3362         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3363                     for (start,end) in expmods]
3364         #print "expecting: %s" % expspans
3365
3366         SEGSIZE = 128*1024
3367         if got != expected:
3368             print "differences:"
3369             for segnum in range(len(expected)//SEGSIZE):
3370                 start = segnum * SEGSIZE
3371                 end = (segnum+1) * SEGSIZE
3372                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3373                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3374                 if got_ends != exp_ends:
3375                     print "expected[%d]: %s" % (start, exp_ends)
3376                     print "got     [%d]: %s" % (start, got_ends)
3377             if expspans != gotspans:
3378                 print "expected: %s" % expspans
3379                 print "got     : %s" % gotspans
3380             open("EXPECTED","wb").write(expected)
3381             open("GOT","wb").write(got)
3382             print "wrote data to EXPECTED and GOT"
3383             self.fail("didn't get expected data")
3384
3385
3386     def test_replace_locations(self):
3387         # exercise fencepost conditions around the 128 KiB segment boundaries
3388         expected = self.data
3389         SEGSIZE = 128*1024
3390         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3391         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3392         d = defer.succeed(None)
3393         for offset in suspects:
3394             new_data = letters.next()*2 # "AA", then "BB", etc
3395             expected = expected[:offset]+new_data+expected[offset+2:]
3396             d.addCallback(lambda ign:
3397                           self.mdmf_node.get_best_mutable_version())
3398             def _modify(mv, offset=offset, new_data=new_data):
3399                 # close over 'offset','new_data'
3400                 md = MutableData(new_data)
3401                 return mv.update(md, offset)
3402             d.addCallback(_modify)
3403             d.addCallback(lambda ignored:
3404                           self.mdmf_node.download_best_version())
3405             d.addCallback(self._check_differences, expected)
3406         return d
3407
3408     def test_replace_locations_max_shares(self):
3409         # exercise fencepost conditions around the 128 KiB segment boundaries
3410         expected = self.data
3411         SEGSIZE = 128*1024
3412         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3413         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3414         d = defer.succeed(None)
3415         for offset in suspects:
3416             new_data = letters.next()*2 # "AA", then "BB", etc
3417             expected = expected[:offset]+new_data+expected[offset+2:]
3418             d.addCallback(lambda ign:
3419                           self.mdmf_max_shares_node.get_best_mutable_version())
3420             def _modify(mv, offset=offset, new_data=new_data):
3421                 # close over 'offset','new_data'
3422                 md = MutableData(new_data)
3423                 return mv.update(md, offset)
3424             d.addCallback(_modify)
3425             d.addCallback(lambda ignored:
3426                           self.mdmf_max_shares_node.download_best_version())
3427             d.addCallback(self._check_differences, expected)
3428         return d
3429
3430     def test_replace_and_extend(self):
3431         # We should be able to replace data in the middle of a mutable
3432         # file and extend that mutable file and get what we expect.
3433         new_data = self.data[:100]
3434         new_data += "modified " * 100000
3435         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3436             d = node.get_best_mutable_version()
3437             d.addCallback(lambda mv:
3438                 mv.update(MutableData("modified " * 100000), 100))
3439             d.addCallback(lambda ignored, node=node:
3440                 node.download_best_version())
3441             d.addCallback(lambda results:
3442                 self.failUnlessEqual(results, new_data))
3443         return d
3444
3445
3446     def test_append_power_of_two(self):
3447         # If we attempt to extend a mutable file so that its segment
3448         # count crosses a power-of-two boundary, the update operation
3449         # should know how to reencode the file.
3450
3451         # Note that the data populating self.mdmf_node is about 900 KiB
3452         # long -- this is 7 segments in the default segment size. So we
3453         # need to add 2 segments worth of data to push it over a
3454         # power-of-two boundary.
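        # Worked arithmetic (assuming the 128 KiB DEFAULT_MAX_SEGMENT_SIZE):
        # len(self.data) == 900000 bytes, which is six full segments plus a
        # tail, i.e. 7 segments; appending two more full segments gives 9,
        # crossing the power-of-two boundary at 8.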
3455         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3456         new_data = self.data + (segment * 2)
3457         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3458             d = node.get_best_mutable_version()
3459             d.addCallback(lambda mv:
3460                 mv.update(MutableData(segment * 2), len(self.data)))
3461             d.addCallback(lambda ignored, node=node:
3462                 node.download_best_version())
3463             d.addCallback(lambda results:
3464                 self.failUnlessEqual(results, new_data))
3465         return d
3466     test_append_power_of_two.timeout = 15
3467
3468
3469     def test_update_sdmf(self):
3470         # Running update on a single-segment file should still work.
3471         new_data = self.small_data + "appended"
3472         for node in (self.sdmf_node, self.sdmf_max_shares_node):
3473             d = node.get_best_mutable_version()
3474             d.addCallback(lambda mv:
3475                 mv.update(MutableData("appended"), len(self.small_data)))
3476             d.addCallback(lambda ignored, node=node:
3477                 node.download_best_version())
3478             d.addCallback(lambda results:
3479                 self.failUnlessEqual(results, new_data))
3480         return d
3481
3482     def test_replace_in_last_segment(self):
3483         # The wrapper should know how to handle the tail segment
3484         # appropriately.
3485         replace_offset = len(self.data) - 100
3486         new_data = self.data[:replace_offset] + "replaced"
3487         rest_offset = replace_offset + len("replaced")
3488         new_data += self.data[rest_offset:]
3489         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3490             d = node.get_best_mutable_version()
3491             d.addCallback(lambda mv:
3492                 mv.update(MutableData("replaced"), replace_offset))
3493             d.addCallback(lambda ignored, node=node:
3494                 node.download_best_version())
3495             d.addCallback(lambda results:
3496                 self.failUnlessEqual(results, new_data))
3497         return d
3498
3499
3500     def test_multiple_segment_replace(self):
3501         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3502         new_data = self.data[:replace_offset]
3503         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3504         new_data += 2 * new_segment
3505         new_data += "replaced"
3506         rest_offset = len(new_data)
3507         new_data += self.data[rest_offset:]
3508         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3509             d = node.get_best_mutable_version()
3510             d.addCallback(lambda mv:
3511                 mv.update(MutableData((2 * new_segment) + "replaced"),
3512                           replace_offset))
3513             d.addCallback(lambda ignored, node=node:
3514                 node.download_best_version())
3515             d.addCallback(lambda results:
3516                 self.failUnlessEqual(results, new_data))
3517         return d
3518
3519 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3520     sdmf_old_shares = {}
3521     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3522     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3523     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3524     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3525     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3526     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3527     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3528     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3529     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3530     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3531     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3532     sdmf_old_contents = "This is a test file.\n"
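    # The ten base64 strings above are verbatim "Tahoe mutable container v1"
    # (SDMF) share files produced by an earlier Tahoe-LAFS release;
    # sdmf_old_cap is the matching write cap and sdmf_old_contents is the
    # plaintext they encode. The helper below plants these shares on a
    # fresh grid so the current downloader can be exercised against them.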
3533     def copy_sdmf_shares(self):
3534         # Short-circuit the upload process: write the old share data straight into each server's storage.
3535         servernums = self.g.servers_by_number.keys()
3536         assert len(servernums) == 10
3537
3538         assignments = zip(self.sdmf_old_shares.keys(), servernums)
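        # Pair each of the ten share numbers with a distinct server, so
        # every server ends up holding exactly one of the old shares.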
3539         # Get the storage index.
3540         cap = uri.from_string(self.sdmf_old_cap)
3541         si = cap.get_storage_index()
3542
3543         # Now execute each assignment by writing the share data into the corresponding server's storage directory.
3544         for (share, servernum) in assignments:
3545             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3546             storedir = self.get_serverdir(servernum)
3547             storage_path = os.path.join(storedir, "shares",
3548                                         storage_index_to_dir(si))
3549             fileutil.make_dirs(storage_path)
3550             fileutil.write(os.path.join(storage_path, "%d" % share),
3551                            sharedata)
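        # Each share now sits where a real storage server would have put
        # it: <serverdir>/shares/<2-char prefix>/<base32 SI>/<sharenum>,
        # the layout that storage_index_to_dir produces.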
3552         # ...and verify that the shares are there.
3553         shares = self.find_uri_shares(self.sdmf_old_cap)
3554         assert len(shares) == 10
3555
3556     def test_new_downloader_can_read_old_shares(self):
3557         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3558         self.set_up_grid()
3559         self.copy_sdmf_shares()
3560         nm = self.g.clients[0].nodemaker
3561         n = nm.create_from_cap(self.sdmf_old_cap)
3562         d = n.download_best_version()
3563         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3564         return d
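
    # A natural companion test (a sketch only, not part of the original
    # suite): prove the round trip by overwriting the old file with the
    # current publisher and reading it back. MutableData is imported at
    # the top of this file; the method name and the replacement contents
    # here are illustrative.
    def test_new_uploader_can_replace_old_shares(self):
        self.basedir = "mutable/Interoperability/new_uploader_can_replace_old_shares"
        self.set_up_grid()
        self.copy_sdmf_shares()
        nm = self.g.clients[0].nodemaker
        n = nm.create_from_cap(self.sdmf_old_cap)
        new_contents = "new contents replacing the old SDMF data\n"
        d = n.overwrite(MutableData(new_contents))
        d.addCallback(lambda ignored: n.download_best_version())
        d.addCallback(self.failUnlessEqual, new_contents)
        return d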