import os, re, base64
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.internet.interfaces import IConsumer
from zope.interface import implements
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32, consumer, fileutil, mathutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
     ssk_pubkey_fingerprint_hash
from allmydata.util.deferredutil import gatherResults
from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
     NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
from allmydata.monitor import Monitor
from allmydata.test.common import ShouldFailMixin
from allmydata.test.no_network import GridTestMixin
from foolscap.api import eventually, fireEventually
from foolscap.logging import log
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.common import storage_index_to_dir

from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
from allmydata.mutable.common import ResponseCache, \
     MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
     NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
     NotEnoughServersError, CorruptShareError
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.publish import Publish, MutableFileHandle, \
                                      MutableData, \
                                      DEFAULT_MAX_SEGMENT_SIZE
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
from allmydata.mutable.repairer import MustForceRepairError

import allmydata.test.common_util as testutil
from allmydata.test.common import TEST_RSA_KEY_SIZE


# this "FakeStorage" exists to put the share data in RAM and avoid using real
# network connections, both to speed up the tests and to reduce the amount of
# non-mutable.py code being exercised.

class FakeStorage:
    # this class replaces the collection of storage servers, allowing the
    # tests to examine and manipulate the published shares. It also lets us
    # control the order in which read queries are answered, to exercise more
    # of the error-handling code in Retrieve.
    #
    # Note that we ignore the storage index: this FakeStorage instance can
    # only be used for a single storage index.


    def __init__(self):
        self._peers = {}
        # _sequence is used to cause the responses to occur in a specific
        # order. If it is in use, then we will defer queries instead of
        # answering them right away, accumulating the Deferreds in a dict. We
        # don't know exactly how many queries we'll get, so exactly one
        # second after the first query arrives, we will release them all (in
        # order).
        self._sequence = None
        self._pending = {}
        self._pending_timer = None
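        # (Tests that want a specific response order typically assign a list
        # of peerids to _sequence before starting a retrieve; reads are then
        # released in that order, one second after the first query arrives.)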

    def read(self, peerid, storage_index):
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return defer.succeed(shares)
        d = defer.Deferred()
        if not self._pending:
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        self._pending[peerid] = (d, shares)
        return d

    def _fire_readers(self):
        self._pending_timer = None
        pending = self._pending
        self._pending = {}
        for peerid in self._sequence:
            if peerid in pending:
                d, shares = pending.pop(peerid)
                eventually(d.callback, shares)
        for (d, shares) in pending.values():
            eventually(d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
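        # emulate a remote partial write: splice `data` into whatever was
        # previously stored for this shnum, starting at `offset`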
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        f = StringIO()
        f.write(shares.get(shnum, ""))
        f.seek(offset)
        f.write(data)
        shares[shnum] = f.getvalue()


class FakeStorageServer:
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0
    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        pass

    def slot_readv(self, storage_index, shnums, readv):
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
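        # (A real server would evaluate each test vector against the share's
        # current contents and refuse the write on a mismatch; this fake
        # accepts every write unconditionally.)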
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)


def flip_bit(original, byte_offset):
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x01) +
            original[byte_offset+1:])

def add_two(original, byte_offset):
    # It isn't enough to simply flip a bit in the version number, because 1
    # is a valid version number. So we flip the 0x02 bit instead, which maps
    # verbyte 0 to 2 and 1 to 3 (i.e. it adds two to any valid verbyte).
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x02) +
            original[byte_offset+1:])
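
# Illustrative examples (not part of the original tests):
#   flip_bit("\x00abc", 0) == "\x01abc"
#   add_two("\x00abc", 0) == "\x02abc"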

def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl
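
# A typical (illustrative) use from a test, exploiting addCallback's extra
# arguments: d.addCallback(corrupt, self._storage, "signature") would flip
# one bit in the signature field of every stored share.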

def make_storagebroker(s=None, num_peers=10):
    if not s:
        s = FakeStorage()
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        storage_broker.test_add_rref(peerid, fss)
    return storage_broker

def make_nodemaker(s=None, num_peers=10):
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    nodemaker = NodeMaker(storage_broker, sh, None,
                          None, None,
                          {"k": 3, "n": 10}, keygen)
    return nodemaker

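# How the helpers above combine (a sketch of what the tests below do):
#   s = FakeStorage()
#   nodemaker = make_nodemaker(s)
#   d = nodemaker.create_mutable_file(MutableData("contents"))
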
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    # OLD_MAX_SEGMENT_SIZE used to be a limit enforced in Publish, but the
    # limit was removed. Some of these tests check that the new code
    # correctly allows files larger than that old limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
    def setUp(self):
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_create(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d


    def test_create_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d

    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            # bind v now (default argument): the callback may run after the
            # loop variable has moved on
            d.addCallback(lambda ignored, v=v:
                self.nodemaker.create_mutable_file(version=v))
            def _created(n):
                self.failUnless(isinstance(n, MutableFileNode))
                self._node = n
                return n
            d.addCallback(_created)
            d.addCallback(lambda n:
                n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                self._node.download_best_version())
            d.addCallback(lambda contents:
                self.failUnlessEqual(contents, "Contents" * 50000))
        return d

    def test_max_shares(self):
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
            self.failUnlessEqual("contents" * 50000, contents))
        return d

    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        d.addCallback(lambda contents:
            self.failUnlessEqual(contents, "contents" * 50000))
        return d

    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            cap = n.get_cap()
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
        return d

    def test_create_from_mdmf_writecap_with_extensions(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode when given a writecap with extension parameters in it.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            # We need to cheat a little and delete the nodemaker's
            # cache, otherwise we'll get the same node instance back.
            self.failUnlessIn(":3:131073", s)
            n2 = self.nodemaker.create_from_cap(s)

            self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
            self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
            hints = n2._downloader_hints
            self.failUnlessEqual(hints['k'], 3)
            self.failUnlessEqual(hints['segsize'], 131073)
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))

            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap_with_extensions(self):
        # We should be able to create an MDMF filenode with the
        # extension parameters without it breaking.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            self.failUnlessIn(":3:131073", s)

            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnless(n2.is_readonly())
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            hints = n2._downloader_hints
            self.failUnlessEqual(hints["k"], 3)
            self.failUnlessEqual(hints["segsize"], 131073)
        d.addCallback(_created)
        return d


    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)

            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
        return d


    def test_serialize(self):
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        return d

    def test_upload_and_download(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d


    def test_upload_and_download_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
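            # (assuming DEFAULT_MAX_SEGMENT_SIZE is still 128 KiB, ~900 kB
            # means roughly seven segments)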
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d


    def test_retrieve_pause(self):
        # We should make sure that the retriever is able to pause
        # correctly.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.node = node

            return node.overwrite(MutableData("contents1" * 100000))
        d.addCallback(_created)
        # Now we'll retrieve it into a pausing consumer.
        d.addCallback(lambda ignored:
            self.node.get_best_mutable_version())
        def _got_version(version):
            self.c = PausingConsumer()
            return version.read(self.c)
        d.addCallback(_got_version)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.c.data, "contents1" * 100000))
        return d


    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.uri = node.get_uri()

            return node.overwrite(MutableData("contents1" * 100000))
        def _then(ignored):
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_then)
        d.addCallback(_downloaded)
        return d


    def test_create_and_download_from_bare_mdmf_cap(self):
        # MDMF caps have extension parameters on them by default. We
        # need to make sure that they work without extension parameters.
        contents = MutableData("contents" * 100000)
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
                                               contents=contents)
        def _created(node):
            uri = node.get_uri()
            self._created = node
            self.failUnlessIn(":3:131073", uri)
            # Now strip that off the end of the uri, then try creating
            # and downloading the node again.
            bare_uri = uri.replace(":3:131073", "")
            assert ":3:131073" not in bare_uri

            return self.nodemaker.create_from_cap(bare_uri)
        d.addCallback(_created)
        def _created_bare(node):
            self.failUnlessEqual(node.get_writekey(),
                                 self._created.get_writekey())
            self.failUnlessEqual(node.get_readkey(),
                                 self._created.get_readkey())
            self.failUnlessEqual(node.get_storage_index(),
                                 self._created.get_storage_index())
            return node.download_best_version()
        d.addCallback(_created_bare)
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "contents" * 100000))
        return d


    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
        return d


    def test_create_with_initial_contents(self):
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            return d
        d.addCallback(_created)
        return d


    def test_create_mdmf_with_initial_contents(self):
        initial_contents = "foobarbaz" * 131072 # about 1.1 MiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents +
                                           "foobarbaz"))
            return d
        d.addCallback(_created)
        return d


    def test_response_cache_memory_leak(self):
        d = self.nodemaker.create_mutable_file("contents")
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
                return d2
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
            return d
        d.addCallback(_created)
        return d

    def test_create_with_initial_contents_function(self):
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
        def _created(n):
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
        return d


    def test_create_mdmf_with_initial_contents_function(self):
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
            n.download_best_version())
        d.addCallback(lambda data2:
            self.failUnlessEqual(data2, data))
        return d


    def test_create_with_too_large_contents(self):
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
        def _created(n):
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
            return d
        d.addCallback(_created)
        return d

    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
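        # verinfo[0] is the seqnum (see the verinfo tuple unpacked in
        # corrupt(), above)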
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        return d

    def test_modify(self):
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d


    def test_modify_backoffer(self):
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            return f
        def _backoff_pauser(node, f):
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d

    def test_upload_and_download_full_size_keys(self):
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d


    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.n = n
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
            self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
            self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        return d


class PublishMixin:
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS should have more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_multiple(self, version=0):
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d


    def _copy_shares(self, ignored, index):
        shares = self._storage._peers
        # we need a deep copy
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
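        # For example (illustrative): after publish_multiple(),
        # _set_versions({0: 2}) rolls share 0 on every peer back to the
        # copy saved of version 2, leaving the other shares untouched.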
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]

class PausingConsumer:
    implements(IConsumer)
    def __init__(self):
        self.data = ""
        self.already_paused = False

    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def _unpause(self, ignored):
        self.producer.resumeProducing()

    def write(self, data):
        self.data += data
        if not self.already_paused:
            self.producer.pauseProducing()
            self.already_paused = True
            reactor.callLater(15, self._unpause, None)


class Servermap(unittest.TestCase, PublishMixin):
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        return sm

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares in it any more, and
1172             # new shares should be found to replace the missing ones.
1173             for (shnum, peerid, timestamp) in shares:
1174                 if shnum < 5:
1175                     self._corrupted.add( (peerid, shnum) )
1176                     sm.mark_bad_share(peerid, shnum, "")
1177             return self.update_servermap(sm, MODE_WRITE)
1178         d.addCallback(_made_map)
1179         def _check_map(sm):
1180             # this should find all 5 shares that weren't marked bad
1181             v = sm.best_recoverable_version()
1182             vm = sm.make_versionmap()
1183             shares = list(vm[v])
1184             for (peerid, shnum) in self._corrupted:
1185                 peer_shares = sm.shares_on_peer(peerid)
1186                 self.failIf(shnum in peer_shares,
1187                             "%d was in %s" % (shnum, peer_shares))
1188             self.failUnlessEqual(len(shares), 5)
1189         d.addCallback(_check_map)
1190         return d
1191
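    # A helper sketch (not used by the tests above; it relies only on the
    # servermap calls exercised in test_mark_bad): count the distinct shares
    # backing the best recoverable version.
    def count_best_version_shares(self, sm):
        v = sm.best_recoverable_version()
        if v is None:
            return 0
        # make_versionmap() maps verinfo to (shnum, peerid, timestamp) tuples
        shares = sm.make_versionmap()[v]
        return len(set([shnum for (shnum, peerid, timestamp) in shares]))
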
1192     def failUnlessNoneRecoverable(self, sm):
1193         self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1194         self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1195         best = sm.best_recoverable_version()
1196         self.failUnlessEqual(best, None)
1197         self.failUnlessEqual(len(sm.shares_available()), 0)
1198
1199     def test_no_shares(self):
1200         self._storage._peers = {} # delete all shares
1201         ms = self.make_servermap
1202         d = defer.succeed(None)
1203
1204         d.addCallback(lambda res: ms(mode=MODE_CHECK))
1205         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1206
1207         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1208         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1209
1210         d.addCallback(lambda res: ms(mode=MODE_WRITE))
1211         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1212
1213         d.addCallback(lambda res: ms(mode=MODE_READ))
1214         d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1215
1216         return d
1217
1218     def failUnlessNotQuiteEnough(self, sm):
1219         self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1220         self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
1221         best = sm.best_recoverable_version()
1222         self.failUnlessEqual(best, None)
1223         self.failUnlessEqual(len(sm.shares_available()), 1)
1224         self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10)) # (shares found, k, N)
1225         return sm
1226
1227     def test_not_quite_enough_shares(self):
1228         s = self._storage
1229         ms = self.make_servermap
1230         num_shares = len(s._peers)
1231         for peerid in s._peers:
1232             s._peers[peerid] = {}
1233             num_shares -= 1
1234             if num_shares == 2:
1235                 break
1236         # now there ought to be only two shares left
1237         assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
1238
1239         d = defer.succeed(None)
1240
1241         d.addCallback(lambda res: ms(mode=MODE_CHECK))
1242         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1243         d.addCallback(lambda sm:
1244                       self.failUnlessEqual(len(sm.make_sharemap()), 2))
1245         d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1246         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1247         d.addCallback(lambda res: ms(mode=MODE_WRITE))
1248         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1249         d.addCallback(lambda res: ms(mode=MODE_READ))
1250         d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1251
1252         return d
1253
1254
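    # A sketch of flattening the sharemap asserted on above, assuming
    # make_sharemap() maps shnum to the set of peerids holding that share
    # (consistent with the length check in test_not_quite_enough_shares):
    def flatten_sharemap(self, sm):
        pairs = []
        for shnum, peerids in sm.make_sharemap().items():
            for peerid in peerids:
                pairs.append((peerid, shnum))
        return pairs
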
1255     def test_servermapupdater_finds_mdmf_files(self):
1256         # setUp already published an MDMF file for us. We just need to
1257         # make sure that when we run the ServermapUpdater, the file is
1258         # reported to have one recoverable version.
1259         d = defer.succeed(None)
1260         d.addCallback(lambda ignored:
1261             self.publish_mdmf())
1262         d.addCallback(lambda ignored:
1263             self.make_servermap(mode=MODE_CHECK))
1264         # Calling make_servermap also updates the servermap in the mode
1265         # that we specify, so we just need to see what it says.
1266         def _check_servermap(sm):
1267             self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1268         d.addCallback(_check_servermap)
1269         return d
1270
1271
1272     def test_fetch_update(self):
1273         d = defer.succeed(None)
1274         d.addCallback(lambda ignored:
1275             self.publish_mdmf())
1276         d.addCallback(lambda ignored:
1277             self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1278         def _check_servermap(sm):
1279             # 10 shares
1280             self.failUnlessEqual(len(sm.update_data), 10)
1281             # one version
1282             for data in sm.update_data.itervalues():
1283                 self.failUnlessEqual(len(data), 1)
1284         d.addCallback(_check_servermap)
1285         return d
1286
1287
1288     def test_servermapupdater_finds_sdmf_files(self):
1289         d = defer.succeed(None)
1290         d.addCallback(lambda ignored:
1291             self.publish_sdmf())
1292         d.addCallback(lambda ignored:
1293             self.make_servermap(mode=MODE_CHECK))
1294         d.addCallback(lambda servermap:
1295             self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1296         return d
1297
1298
1299 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1300     def setUp(self):
1301         return self.publish_one()
1302
1303     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1304         if oldmap is None:
1305             oldmap = ServerMap()
1306         if sb is None:
1307             sb = self._storage_broker
1308         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1309         d = smu.update()
1310         return d
1311
1312     def abbrev_verinfo(self, verinfo):
1313         if verinfo is None:
1314             return None
1315         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1316          offsets_tuple) = verinfo
1317         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1318
1319     def abbrev_verinfo_dict(self, verinfo_d):
1320         output = {}
1321         for verinfo,value in verinfo_d.items():
1322             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1323              offsets_tuple) = verinfo
1324             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1325         return output
1326
1327     def dump_servermap(self, servermap):
1328         print "SERVERMAP", servermap
1329         print "RECOVERABLE", [self.abbrev_verinfo(v)
1330                               for v in servermap.recoverable_versions()]
1331         print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1332         print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1333
1334     def do_download(self, servermap, version=None):
1335         if version is None:
1336             version = servermap.best_recoverable_version()
1337         r = Retrieve(self._fn, servermap, version)
1338         c = consumer.MemoryConsumer()
1339         d = r.download(consumer=c)
1340         d.addCallback(lambda mc: "".join(mc.chunks))
1341         return d
1342
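    # A companion sketch to do_download(): fetch a specific (oldest) version
    # instead of the best recoverable one. verinfo tuples begin with seqnum,
    # so sorting them orders versions oldest-first.
    def do_download_oldest(self, servermap):
        oldest = sorted(servermap.recoverable_versions())[0]
        return self.do_download(servermap, version=oldest)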
1343
1344     def test_basic(self):
1345         d = self.make_servermap()
1346         def _do_retrieve(servermap):
1347             self._smap = servermap
1348             #self.dump_servermap(servermap)
1349             self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1350             return self.do_download(servermap)
1351         d.addCallback(_do_retrieve)
1352         def _retrieved(new_contents):
1353             self.failUnlessEqual(new_contents, self.CONTENTS)
1354         d.addCallback(_retrieved)
1355         # we should be able to re-use the same servermap, both with and
1356         # without updating it.
1357         d.addCallback(lambda res: self.do_download(self._smap))
1358         d.addCallback(_retrieved)
1359         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1360         d.addCallback(lambda res: self.do_download(self._smap))
1361         d.addCallback(_retrieved)
1362         # clobbering the pubkey should make the servermap updater re-fetch it
1363         def _clobber_pubkey(res):
1364             self._fn._pubkey = None
1365         d.addCallback(_clobber_pubkey)
1366         d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1367         d.addCallback(lambda res: self.do_download(self._smap))
1368         d.addCallback(_retrieved)
1369         return d
1370
1371     def test_all_shares_vanished(self):
1372         d = self.make_servermap()
1373         def _remove_shares(servermap):
1374             for shares in self._storage._peers.values():
1375                 shares.clear()
1376             d1 = self.shouldFail(NotEnoughSharesError,
1377                                  "test_all_shares_vanished",
1378                                  "ran out of peers",
1379                                  self.do_download, servermap)
1380             return d1
1381         d.addCallback(_remove_shares)
1382         return d
1383
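    # The share-clearing pattern above, extracted as a callback-friendly
    # sketch for reuse:
    def _wipe_all_shares(self, ignored=None):
        for shares in self._storage._peers.values():
            shares.clear()
        return ignored
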
1384     def test_no_servers(self):
1385         sb2 = make_storagebroker(num_peers=0)
1386         # if there are no servers, then a MODE_READ servermap should come
1387         # back empty
1388         d = self.make_servermap(sb=sb2)
1389         def _check_servermap(servermap):
1390             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1391             self.failIf(servermap.recoverable_versions())
1392             self.failIf(servermap.unrecoverable_versions())
1393             self.failIf(servermap.all_peers())
1394         d.addCallback(_check_servermap)
1395         return d
1396
1397     def test_no_servers_download(self):
1398         sb2 = make_storagebroker(num_peers=0)
1399         self._fn._storage_broker = sb2
1400         d = self.shouldFail(UnrecoverableFileError,
1401                             "test_no_servers_download",
1402                             "no recoverable versions",
1403                             self._fn.download_best_version)
1404         def _restore(res):
1405             # a failed download that occurs while we aren't connected to
1406             # anybody should not prevent a subsequent download from working.
1407             # This isn't quite the webapi-driven test that #463 wants, but it
1408             # should be close enough.
1409             self._fn._storage_broker = self._storage_broker
1410             return self._fn.download_best_version()
1411         def _retrieved(new_contents):
1412             self.failUnlessEqual(new_contents, self.CONTENTS)
1413         d.addCallback(_restore)
1414         d.addCallback(_retrieved)
1415         return d
1416
1417
1418     def _test_corrupt_all(self, offset, substring,
1419                           should_succeed=False,
1420                           corrupt_early=True,
1421                           failure_checker=None,
1422                           fetch_privkey=False):
1423         d = defer.succeed(None)
1424         if corrupt_early:
1425             d.addCallback(corrupt, self._storage, offset)
1426         d.addCallback(lambda res: self.make_servermap())
1427         if not corrupt_early:
1428             d.addCallback(corrupt, self._storage, offset)
1429         def _do_retrieve(servermap):
1430             ver = servermap.best_recoverable_version()
1431             if ver is None and not should_succeed:
1432                 # no recoverable versions == not succeeding. The problem
1433                 # should be noted in the servermap's list of problems.
1434                 if substring:
1435                     allproblems = [str(f) for f in servermap.problems]
1436                     self.failUnlessIn(substring, "".join(allproblems))
1437                 return servermap
1438             if should_succeed:
1439                 d1 = self._fn.download_version(servermap, ver,
1440                                                fetch_privkey)
1441                 d1.addCallback(lambda new_contents:
1442                                self.failUnlessEqual(new_contents, self.CONTENTS))
1443             else:
1444                 d1 = self.shouldFail(NotEnoughSharesError,
1445                                      "_corrupt_all(offset=%s)" % (offset,),
1446                                      substring,
1447                                      self._fn.download_version, servermap,
1448                                                                 ver,
1449                                                                 fetch_privkey)
1450             if failure_checker:
1451                 d1.addCallback(failure_checker)
1452             d1.addCallback(lambda res: servermap)
1453             return d1
1454         d.addCallback(_do_retrieve)
1455         return d
1456
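    # For orientation: the numeric offsets the tests below pass to
    # _test_corrupt_all line up with the SDMF share-header layout (1-byte
    # version, 8-byte seqnum, 32-byte root hash, 16-byte IV, one byte each
    # for k and N, then 8-byte segsize and datalen). A reference sketch,
    # not used by the tests themselves:
    SDMF_HEADER_FIELD_OFFSETS = {
        "version": 0, "seqnum": 1, "root_hash": 9, "IV": 41,
        "k": 57, "N": 58, "segsize": 59, "datalen": 67,
    }
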
1457     def test_corrupt_all_verbyte(self):
1458         # when the version byte is not 0 or 1, we hit an
1459         # UnknownVersionError in unpack_share().
1460         d = self._test_corrupt_all(0, "UnknownVersionError")
1461         def _check_servermap(servermap):
1462             # and the dump should mention the problems
1463             s = StringIO()
1464             dump = servermap.dump(s).getvalue()
1465             self.failUnless("30 PROBLEMS" in dump, dump)
1466         d.addCallback(_check_servermap)
1467         return d
1468
1469     def test_corrupt_all_seqnum(self):
1470         # a corrupt sequence number will trigger a bad signature
1471         return self._test_corrupt_all(1, "signature is invalid")
1472
1473     def test_corrupt_all_R(self):
1474         # a corrupt root hash will trigger a bad signature
1475         return self._test_corrupt_all(9, "signature is invalid")
1476
1477     def test_corrupt_all_IV(self):
1478         # a corrupt salt/IV will trigger a bad signature
1479         return self._test_corrupt_all(41, "signature is invalid")
1480
1481     def test_corrupt_all_k(self):
1482         # a corrupt 'k' will trigger a bad signature
1483         return self._test_corrupt_all(57, "signature is invalid")
1484
1485     def test_corrupt_all_N(self):
1486         # a corrupt 'N' will trigger a bad signature
1487         return self._test_corrupt_all(58, "signature is invalid")
1488
1489     def test_corrupt_all_segsize(self):
1490         # a corrupt segsize will trigger a bad signature
1491         return self._test_corrupt_all(59, "signature is invalid")
1492
1493     def test_corrupt_all_datalen(self):
1494         # a corrupt data length will trigger a bad signature
1495         return self._test_corrupt_all(67, "signature is invalid")
1496
1497     def test_corrupt_all_pubkey(self):
1498         # a corrupt pubkey won't match the URI's fingerprint. We need to
1499         # remove the pubkey from the filenode, or else it won't bother trying
1500         # to update it.
1501         self._fn._pubkey = None
1502         return self._test_corrupt_all("pubkey",
1503                                       "pubkey doesn't match fingerprint")
1504
1505     def test_corrupt_all_sig(self):
1506         # a corrupted signature will fail verification
1507         # the signature runs from about [543:799], depending upon the length
1508         # of the pubkey
1509         return self._test_corrupt_all("signature", "signature is invalid")
1510
1511     def test_corrupt_all_share_hash_chain_number(self):
1512         # a corrupt share hash chain entry will show up as a bad hash. If we
1513         # mangle the first byte, that will look like a bad hash number,
1514         # causing an IndexError
1515         return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1516
1517     def test_corrupt_all_share_hash_chain_hash(self):
1518         # a corrupt share hash chain entry will show up as a bad hash. If we
1519         # mangle a few bytes in, that will look like a bad hash.
1520         return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1521
1522     def test_corrupt_all_block_hash_tree(self):
1523         return self._test_corrupt_all("block_hash_tree",
1524                                       "block hash tree failure")
1525
1526     def test_corrupt_all_block(self):
1527         return self._test_corrupt_all("share_data", "block hash tree failure")
1528
1529     def test_corrupt_all_encprivkey(self):
1530         # a corrupted privkey won't even be noticed by the reader, only by a
1531         # writer.
1532         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1533
1534
1535     def test_corrupt_all_encprivkey_late(self):
1536         # this should work for the same reason as above, but we corrupt
1537         # after the servermap update to exercise the error handling
1538         # code.
1539         # We need to remove the privkey from the node, or the retrieve
1540         # process won't know to update it.
1541         self._fn._privkey = None
1542         return self._test_corrupt_all("enc_privkey",
1543                                       None, # this shouldn't fail
1544                                       should_succeed=True,
1545                                       corrupt_early=False,
1546                                       fetch_privkey=True)
1547
1548
1549     def test_corrupt_all_seqnum_late(self):
1550         # corrupting the seqnum between mapupdate and retrieve should result
1551         # in NotEnoughSharesError, since each share will look invalid
1552         def _check(res):
1553             f = res[0]
1554             self.failUnless(f.check(NotEnoughSharesError))
1555             self.failUnless("uncoordinated write" in str(f))
1556         return self._test_corrupt_all(1, "ran out of peers",
1557                                       corrupt_early=False,
1558                                       failure_checker=_check)
1559
1560     def test_corrupt_all_block_hash_tree_late(self):
1561         def _check(res):
1562             f = res[0]
1563             self.failUnless(f.check(NotEnoughSharesError))
1564         return self._test_corrupt_all("block_hash_tree",
1565                                       "block hash tree failure",
1566                                       corrupt_early=False,
1567                                       failure_checker=_check)
1568
1569
1570     def test_corrupt_all_block_late(self):
1571         def _check(res):
1572             f = res[0]
1573             self.failUnless(f.check(NotEnoughSharesError))
1574         return self._test_corrupt_all("share_data", "block hash tree failure",
1575                                       corrupt_early=False,
1576                                       failure_checker=_check)
1577
1578
1579     def test_basic_pubkey_at_end(self):
1580         # we corrupt the pubkey in all but the last 'k' shares, allowing the
1581         # download to succeed but forcing a bunch of retries first. Note that
1582         # this is rather pessimistic: our Retrieve process will throw away
1583         # the whole share if the pubkey is bad, even though the rest of the
1584         # share might be good.
1585
1586         self._fn._pubkey = None
1587         k = self._fn.get_required_shares()
1588         N = self._fn.get_total_shares()
1589         d = defer.succeed(None)
1590         d.addCallback(corrupt, self._storage, "pubkey",
1591                       shnums_to_corrupt=range(0, N-k))
1592         d.addCallback(lambda res: self.make_servermap())
1593         def _do_retrieve(servermap):
1594             self.failUnless(servermap.problems)
1595             self.failUnless("pubkey doesn't match fingerprint"
1596                             in str(servermap.problems[0]))
1597             ver = servermap.best_recoverable_version()
1598             r = Retrieve(self._fn, servermap, ver)
1599             c = consumer.MemoryConsumer()
1600             return r.download(c)
1601         d.addCallback(_do_retrieve)
1602         d.addCallback(lambda mc: "".join(mc.chunks))
1603         d.addCallback(lambda new_contents:
1604                       self.failUnlessEqual(new_contents, self.CONTENTS))
1605         return d
1606
1607
1608     def _test_corrupt_some(self, offset, mdmf=False):
1609         if mdmf:
1610             d = self.publish_mdmf()
1611         else:
1612             d = defer.succeed(None)
1613         d.addCallback(lambda ignored:
1614             corrupt(None, self._storage, offset, range(5)))
1615         d.addCallback(lambda ignored:
1616             self.make_servermap())
1617         def _do_retrieve(servermap):
1618             ver = servermap.best_recoverable_version()
1619             self.failUnless(ver)
1620             return self._fn.download_best_version()
1621         d.addCallback(_do_retrieve)
1622         d.addCallback(lambda new_contents:
1623             self.failUnlessEqual(new_contents, self.CONTENTS))
1624         return d
1625
1626
1627     def test_corrupt_some(self):
1628         # corrupt the data of the first five shares (so the servermap thinks
1629         # they're good but retrieve marks them as bad), so that the
1630         # MODE_READ set of 6 will be insufficient, forcing node.download to
1631         # retry with more servers.
1632         return self._test_corrupt_some("share_data")
1633
1634
1635     def test_download_fails(self):
1636         d = corrupt(None, self._storage, "signature")
1637         d.addCallback(lambda ignored:
1638             self.shouldFail(UnrecoverableFileError, "test_download_fails",
1639                             "no recoverable versions",
1640                             self._fn.download_best_version))
1641         return d
1642
1643
1644
1645     def test_corrupt_mdmf_block_hash_tree(self):
1646         d = self.publish_mdmf()
1647         d.addCallback(lambda ignored:
1648             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1649                                    "block hash tree failure",
1650                                    corrupt_early=False,
1651                                    should_succeed=False))
1652         return d
1653
1654
1655     def test_corrupt_mdmf_block_hash_tree_late(self):
1656         d = self.publish_mdmf()
1657         d.addCallback(lambda ignored:
1658             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1659                                    "block hash tree failure",
1660                                    corrupt_early=True,
1661                                    should_succeed=False))
1662         return d
1663
1664
1665     def test_corrupt_mdmf_share_data(self):
1666         d = self.publish_mdmf()
1667         d.addCallback(lambda ignored:
1668             # TODO: Find out what the block size is and corrupt a
1669             # specific block, rather than just guessing.
1670             self._test_corrupt_all(("share_data", 12 * 40),
1671                                     "block hash tree failure",
1672                                     corrupt_early=True,
1673                                     should_succeed=False))
1674         return d
1675
1676
1677     def test_corrupt_some_mdmf(self):
1678         return self._test_corrupt_some(("share_data", 12 * 40),
1679                                        mdmf=True)
1680
1681
1682 class CheckerMixin:
1683     def check_good(self, r, where):
1684         self.failUnless(r.is_healthy(), where)
1685         return r
1686
1687     def check_bad(self, r, where):
1688         self.failIf(r.is_healthy(), where)
1689         return r
1690
1691     def check_expected_failure(self, r, expected_exception, substring, where):
1692         for (peerid, storage_index, shnum, f) in r.problems:
1693             if f.check(expected_exception):
1694                 self.failUnless(substring in str(f),
1695                                 "%s: substring '%s' not in '%s'" %
1696                                 (where, substring, str(f)))
1697                 return
1698         self.fail("%s: didn't see expected exception %s in problems %s" %
1699                   (where, expected_exception, r.problems))
1700
1701
1702 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1703     def setUp(self):
1704         return self.publish_one()
1705
1706
1707     def test_check_good(self):
1708         d = self._fn.check(Monitor())
1709         d.addCallback(self.check_good, "test_check_good")
1710         return d
1711
1712     def test_check_mdmf_good(self):
1713         d = self.publish_mdmf()
1714         d.addCallback(lambda ignored:
1715             self._fn.check(Monitor()))
1716         d.addCallback(self.check_good, "test_check_mdmf_good")
1717         return d
1718
1719     def test_check_no_shares(self):
1720         for shares in self._storage._peers.values():
1721             shares.clear()
1722         d = self._fn.check(Monitor())
1723         d.addCallback(self.check_bad, "test_check_no_shares")
1724         return d
1725
1726     def test_check_mdmf_no_shares(self):
1727         d = self.publish_mdmf()
1728         def _then(ignored):
1729             for shares in self._storage._peers.values():
1730                 shares.clear()
1731         d.addCallback(_then)
1732         d.addCallback(lambda ignored:
1733             self._fn.check(Monitor()))
1734         d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1735         return d
1736
1737     def test_check_not_enough_shares(self):
1738         for shares in self._storage._peers.values():
1739             for shnum in shares.keys():
1740                 if shnum > 0:
1741                     del shares[shnum]
1742         d = self._fn.check(Monitor())
1743         d.addCallback(self.check_bad, "test_check_not_enough_shares")
1744         return d
1745
1746     def test_check_mdmf_not_enough_shares(self):
1747         d = self.publish_mdmf()
1748         def _then(ignored):
1749             for shares in self._storage._peers.values():
1750                 for shnum in shares.keys():
1751                     if shnum > 0:
1752                         del shares[shnum]
1753         d.addCallback(_then)
1754         d.addCallback(lambda ignored:
1755             self._fn.check(Monitor()))
1756         d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
1757         return d
1758
1759
1760     def test_check_all_bad_sig(self):
1761         d = corrupt(None, self._storage, 1) # bad sig
1762         d.addCallback(lambda ignored:
1763             self._fn.check(Monitor()))
1764         d.addCallback(self.check_bad, "test_check_all_bad_sig")
1765         return d
1766
1767     def test_check_mdmf_all_bad_sig(self):
1768         d = self.publish_mdmf()
1769         d.addCallback(lambda ignored:
1770             corrupt(None, self._storage, 1))
1771         d.addCallback(lambda ignored:
1772             self._fn.check(Monitor()))
1773         d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1774         return d
1775
1776     def test_check_all_bad_blocks(self):
1777         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1778         # the Checker won't notice this; it doesn't look at actual data
1779         d.addCallback(lambda ignored:
1780             self._fn.check(Monitor()))
1781         d.addCallback(self.check_good, "test_check_all_bad_blocks")
1782         return d
1783
1784
1785     def test_check_mdmf_all_bad_blocks(self):
1786         d = self.publish_mdmf()
1787         d.addCallback(lambda ignored:
1788             corrupt(None, self._storage, "share_data"))
1789         d.addCallback(lambda ignored:
1790             self._fn.check(Monitor()))
1791         d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1792         return d
1793
1794     def test_verify_good(self):
1795         d = self._fn.check(Monitor(), verify=True)
1796         d.addCallback(self.check_good, "test_verify_good")
1797         return d
1798
1799     def test_verify_all_bad_sig(self):
1800         d = corrupt(None, self._storage, 1) # bad sig
1801         d.addCallback(lambda ignored:
1802             self._fn.check(Monitor(), verify=True))
1803         d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1804         return d
1805
1806     def test_verify_one_bad_sig(self):
1807         d = corrupt(None, self._storage, 1, [9]) # bad sig
1808         d.addCallback(lambda ignored:
1809             self._fn.check(Monitor(), verify=True))
1810         d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1811         return d
1812
1813     def test_verify_one_bad_block(self):
1814         d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1815         # the Verifier *will* notice this, since it examines every byte
1816         d.addCallback(lambda ignored:
1817             self._fn.check(Monitor(), verify=True))
1818         d.addCallback(self.check_bad, "test_verify_one_bad_block")
1819         d.addCallback(self.check_expected_failure,
1820                       CorruptShareError, "block hash tree failure",
1821                       "test_verify_one_bad_block")
1822         return d
1823
1824     def test_verify_one_bad_sharehash(self):
1825         d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1826         d.addCallback(lambda ignored:
1827             self._fn.check(Monitor(), verify=True))
1828         d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1829         d.addCallback(self.check_expected_failure,
1830                       CorruptShareError, "corrupt hashes",
1831                       "test_verify_one_bad_sharehash")
1832         return d
1833
1834     def test_verify_one_bad_encprivkey(self):
1835         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1836         d.addCallback(lambda ignored:
1837             self._fn.check(Monitor(), verify=True))
1838         d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1839         d.addCallback(self.check_expected_failure,
1840                       CorruptShareError, "invalid privkey",
1841                       "test_verify_one_bad_encprivkey")
1842         return d
1843
1844     def test_verify_one_bad_encprivkey_uncheckable(self):
1845         d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1846         readonly_fn = self._fn.get_readonly()
1847         # a read-only node has no way to validate the privkey
1848         d.addCallback(lambda ignored:
1849             readonly_fn.check(Monitor(), verify=True))
1850         d.addCallback(self.check_good,
1851                       "test_verify_one_bad_encprivkey_uncheckable")
1852         return d
1853
1854
1855     def test_verify_mdmf_good(self):
1856         d = self.publish_mdmf()
1857         d.addCallback(lambda ignored:
1858             self._fn.check(Monitor(), verify=True))
1859         d.addCallback(self.check_good, "test_verify_mdmf_good")
1860         return d
1861
1862
1863     def test_verify_mdmf_one_bad_block(self):
1864         d = self.publish_mdmf()
1865         d.addCallback(lambda ignored:
1866             corrupt(None, self._storage, "share_data", [1]))
1867         d.addCallback(lambda ignored:
1868             self._fn.check(Monitor(), verify=True))
1869         # We should find one bad block here
1870         d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1871         d.addCallback(self.check_expected_failure,
1872                       CorruptShareError, "block hash tree failure",
1873                       "test_verify_mdmf_one_bad_block")
1874         return d
1875
1876
1877     def test_verify_mdmf_bad_encprivkey(self):
1878         d = self.publish_mdmf()
1879         d.addCallback(lambda ignored:
1880             corrupt(None, self._storage, "enc_privkey", [0]))
1881         d.addCallback(lambda ignored:
1882             self._fn.check(Monitor(), verify=True))
1883         d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1884         d.addCallback(self.check_expected_failure,
1885                       CorruptShareError, "privkey",
1886                       "test_verify_mdmf_bad_encprivkey")
1887         return d
1888
1889
1890     def test_verify_mdmf_bad_sig(self):
1891         d = self.publish_mdmf()
1892         d.addCallback(lambda ignored:
1893             corrupt(None, self._storage, 1, [1]))
1894         d.addCallback(lambda ignored:
1895             self._fn.check(Monitor(), verify=True))
1896         d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
1897         return d
1898
1899
1900     def test_verify_mdmf_bad_encprivkey_uncheckable(self):
1901         d = self.publish_mdmf()
1902         d.addCallback(lambda ignored:
1903             corrupt(None, self._storage, "enc_privkey", [1]))
1904         d.addCallback(lambda ignored:
1905             self._fn.get_readonly())
1906         d.addCallback(lambda fn:
1907             fn.check(Monitor(), verify=True))
1908         d.addCallback(self.check_good,
1909                       "test_verify_mdmf_bad_encprivkey_uncheckable")
1910         return d
1911
1912
1913 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1914
1915     def get_shares(self, s):
1916         all_shares = {} # maps (peerid, shnum) to share data
1917         for peerid in s._peers:
1918             shares = s._peers[peerid]
1919             for shnum in shares:
1920                 data = shares[shnum]
1921                 all_shares[ (peerid, shnum) ] = data
1922         return all_shares
1923
1924     def copy_shares(self, ignored=None):
1925         self.old_shares.append(self.get_shares(self._storage))
1926
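    # A sketch that pulls just the sequence number out of a raw share, using
    # the same unpack_header() call that test_repair_nop performs inline:
    def get_seqnum(self, share_data):
        (version, seqnum, root_hash, IV,
         k, N, segsize, datalen, offsets) = unpack_header(share_data)
        return seqnum
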
1927     def test_repair_nop(self):
1928         self.old_shares = []
1929         d = self.publish_one()
1930         d.addCallback(self.copy_shares)
1931         d.addCallback(lambda res: self._fn.check(Monitor()))
1932         d.addCallback(lambda check_results: self._fn.repair(check_results))
1933         def _check_results(rres):
1934             self.failUnless(IRepairResults.providedBy(rres))
1935             self.failUnless(rres.get_successful())
1936             # TODO: examine results
1937
1938             self.copy_shares()
1939
1940             initial_shares = self.old_shares[0]
1941             new_shares = self.old_shares[1]
1942             # TODO: this really shouldn't change anything. When we implement
1943             # a "minimal-bandwidth" repairer, change this test to assert:
1944             #self.failUnlessEqual(new_shares, initial_shares)
1945
1946             # all shares should be in the same place as before
1947             self.failUnlessEqual(set(initial_shares.keys()),
1948                                  set(new_shares.keys()))
1949             # but they should all be at a newer seqnum. The IV will be
1950             # different, so the roothash will be too.
1951             for key in initial_shares:
1952                 (version0,
1953                  seqnum0,
1954                  root_hash0,
1955                  IV0,
1956                  k0, N0, segsize0, datalen0,
1957                  o0) = unpack_header(initial_shares[key])
1958                 (version1,
1959                  seqnum1,
1960                  root_hash1,
1961                  IV1,
1962                  k1, N1, segsize1, datalen1,
1963                  o1) = unpack_header(new_shares[key])
1964                 self.failUnlessEqual(version0, version1)
1965                 self.failUnlessEqual(seqnum0+1, seqnum1)
1966                 self.failUnlessEqual(k0, k1)
1967                 self.failUnlessEqual(N0, N1)
1968                 self.failUnlessEqual(segsize0, segsize1)
1969                 self.failUnlessEqual(datalen0, datalen1)
1970         d.addCallback(_check_results)
1971         return d
1972
1973     def failIfSharesChanged(self, ignored=None):
1974         old_shares = self.old_shares[-2]
1975         current_shares = self.old_shares[-1]
1976         self.failUnlessEqual(old_shares, current_shares)
1977
1978
1979     def test_unrepairable_0shares(self):
1980         d = self.publish_one()
1981         def _delete_all_shares(ign):
1982             shares = self._storage._peers
1983             for peerid in shares:
1984                 shares[peerid] = {}
1985         d.addCallback(_delete_all_shares)
1986         d.addCallback(lambda ign: self._fn.check(Monitor()))
1987         d.addCallback(lambda check_results: self._fn.repair(check_results))
1988         def _check(crr):
1989             self.failUnlessEqual(crr.get_successful(), False)
1990         d.addCallback(_check)
1991         return d
1992
1993     def test_mdmf_unrepairable_0shares(self):
1994         d = self.publish_mdmf()
1995         def _delete_all_shares(ign):
1996             shares = self._storage._peers
1997             for peerid in shares:
1998                 shares[peerid] = {}
1999         d.addCallback(_delete_all_shares)
2000         d.addCallback(lambda ign: self._fn.check(Monitor()))
2001         d.addCallback(lambda check_results: self._fn.repair(check_results))
2002         d.addCallback(lambda crr: self.failIf(crr.get_successful()))
2003         return d
2004
2005
2006     def test_unrepairable_1share(self):
2007         d = self.publish_one()
2008         def _delete_all_shares(ign):
2009             shares = self._storage._peers
2010             for peerid in shares:
2011                 for shnum in list(shares[peerid]):
2012                     if shnum > 0:
2013                         del shares[peerid][shnum]
2014         d.addCallback(_delete_all_shares)
2015         d.addCallback(lambda ign: self._fn.check(Monitor()))
2016         d.addCallback(lambda check_results: self._fn.repair(check_results))
2017         def _check(crr):
2018             self.failUnlessEqual(crr.get_successful(), False)
2019         d.addCallback(_check)
2020         return d
2021
2022     def test_mdmf_unrepairable_1share(self):
2023         d = self.publish_mdmf()
2024         def _delete_all_shares(ign):
2025             shares = self._storage._peers
2026             for peerid in shares:
2027                 for shnum in list(shares[peerid]):
2028                     if shnum > 0:
2029                         del shares[peerid][shnum]
2030         d.addCallback(_delete_all_shares)
2031         d.addCallback(lambda ign: self._fn.check(Monitor()))
2032         d.addCallback(lambda check_results: self._fn.repair(check_results))
2033         def _check(crr):
2034             self.failUnlessEqual(crr.get_successful(), False)
2035         d.addCallback(_check)
2036         return d
2037
2038     def test_repairable_5shares(self):
2039         d = self.publish_mdmf()
2040         def _delete_some_shares(ign):
2041             shares = self._storage._peers
2042             for peerid in shares:
2043                 for shnum in list(shares[peerid]):
2044                     if shnum > 4:
2045                         del shares[peerid][shnum]
2046         d.addCallback(_delete_some_shares)
2047         d.addCallback(lambda ign: self._fn.check(Monitor()))
2048         d.addCallback(lambda check_results: self._fn.repair(check_results))
2049         def _check(crr):
2050             self.failUnlessEqual(crr.get_successful(), True)
2051         d.addCallback(_check)
2052         return d
2053
2054     def test_mdmf_repairable_5shares(self):
2055         d = self.publish_mdmf()
2056         def _delete_some_shares(ign):
2057             shares = self._storage._peers
2058             for peerid in shares:
2059                 for shnum in list(shares[peerid]):
2060                     if shnum > 5:
2061                         del shares[peerid][shnum]
2062         d.addCallback(_delete_some_shares)
2063         d.addCallback(lambda ign: self._fn.check(Monitor()))
2064         def _check(cr):
2065             self.failIf(cr.is_healthy())
2066             self.failUnless(cr.is_recoverable())
2067             return cr
2068         d.addCallback(_check)
2069         d.addCallback(lambda check_results: self._fn.repair(check_results))
2070         def _check1(crr):
2071             self.failUnlessEqual(crr.get_successful(), True)
2072         d.addCallback(_check1)
2073         return d
2074
2075
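    # The check-then-repair flow above, condensed into a sketch that uses
    # only the filenode check()/repair() calls exercised in this class:
    def repair_if_unhealthy(self, fn):
        d = fn.check(Monitor())
        def _maybe_repair(cr):
            if cr.is_healthy():
                return None
            return fn.repair(cr)
        d.addCallback(_maybe_repair)
        return d
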
2076     def test_merge(self):
2077         self.old_shares = []
2078         d = self.publish_multiple()
2079         # repair will refuse to merge multiple highest seqnums unless you
2080         # pass force=True
2081         d.addCallback(lambda res:
2082                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2083                                           1:4,3:4,5:4,7:4,9:4}))
2084         d.addCallback(self.copy_shares)
2085         d.addCallback(lambda res: self._fn.check(Monitor()))
2086         def _try_repair(check_results):
2087             ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2088             d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2089                                  self._fn.repair, check_results)
2090             d2.addCallback(self.copy_shares)
2091             d2.addCallback(self.failIfSharesChanged)
2092             d2.addCallback(lambda res: check_results)
2093             return d2
2094         d.addCallback(_try_repair)
2095         d.addCallback(lambda check_results:
2096                       self._fn.repair(check_results, force=True))
2097         # this should give us 10 shares of the highest roothash
2098         def _check_repair_results(rres):
2099             self.failUnless(rres.get_successful())
2100             # TODO: examine rres in more detail
2101         d.addCallback(_check_repair_results)
2102         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2103         def _check_smap(smap):
2104             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2105             self.failIf(smap.unrecoverable_versions())
2106             # now, which should have won?
2107             roothash_s4a = self.get_roothash_for(3)
2108             roothash_s4b = self.get_roothash_for(4)
2109             if roothash_s4b > roothash_s4a:
2110                 expected_contents = self.CONTENTS[4]
2111             else:
2112                 expected_contents = self.CONTENTS[3]
2113             new_versionid = smap.best_recoverable_version()
2114             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2115             d2 = self._fn.download_version(smap, new_versionid)
2116             d2.addCallback(self.failUnlessEqual, expected_contents)
2117             return d2
2118         d.addCallback(_check_smap)
2119         return d
2120
2121     def test_non_merge(self):
2122         self.old_shares = []
2123         d = self.publish_multiple()
2124         # repair should not refuse a repair that doesn't need to merge. In
2125         # this case, we combine v2 with v3. The repair should ignore v2 and
2126         # copy v3 into a new v5.
2127         d.addCallback(lambda res:
2128                       self._set_versions({0:2,2:2,4:2,6:2,8:2,
2129                                           1:3,3:3,5:3,7:3,9:3}))
2130         d.addCallback(lambda res: self._fn.check(Monitor()))
2131         d.addCallback(lambda check_results: self._fn.repair(check_results))
2132         # this should give us 10 shares of v3
2133         def _check_repair_results(rres):
2134             self.failUnless(rres.get_successful())
2135             # TODO: examine rres in more detail
2136         d.addCallback(_check_repair_results)
2137         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2138         def _check_smap(smap):
2139             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2140             self.failIf(smap.unrecoverable_versions())
2141             # now, which should have won?
2142             expected_contents = self.CONTENTS[3]
2143             new_versionid = smap.best_recoverable_version()
2144             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2145             d2 = self._fn.download_version(smap, new_versionid)
2146             d2.addCallback(self.failUnlessEqual, expected_contents)
2147             return d2
2148         d.addCallback(_check_smap)
2149         return d
2150
2151     def get_roothash_for(self, index):
2152         # return the roothash for the first share we see in the saved set
2153         shares = self._copied_shares[index]
2154         for peerid in shares:
2155             for shnum in shares[peerid]:
2156                 share = shares[peerid][shnum]
2157                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2158                           unpack_header(share)
2159                 return root_hash
2160
2161     def test_check_and_repair_readcap(self):
2162         # we can't currently repair from a mutable readcap: #625
2163         self.old_shares = []
2164         d = self.publish_one()
2165         d.addCallback(self.copy_shares)
2166         def _get_readcap(res):
2167             self._fn3 = self._fn.get_readonly()
2168             # also delete some shares
2169             for peerid,shares in self._storage._peers.items():
2170                 shares.pop(0, None)
2171         d.addCallback(_get_readcap)
2172         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2173         def _check_results(crr):
2174             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2175             # we should detect the unhealthy, but skip over mutable-readcap
2176             # repairs until #625 is fixed
2177             self.failIf(crr.get_pre_repair_results().is_healthy())
2178             self.failIf(crr.get_repair_attempted())
2179             self.failIf(crr.get_post_repair_results().is_healthy())
2180         d.addCallback(_check_results)
2181         return d
2182
2183 class DevNullDictionary(dict): # a dict that silently discards all writes
2184     def __setitem__(self, key, value):
2185         return
2186
2187 class MultipleEncodings(unittest.TestCase):
2188     def setUp(self):
2189         self.CONTENTS = "New contents go here"
2190         self.uploadable = MutableData(self.CONTENTS)
2191         self._storage = FakeStorage()
2192         self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2193         self._storage_broker = self._nodemaker.storage_broker
2194         d = self._nodemaker.create_mutable_file(self.uploadable)
2195         def _created(node):
2196             self._fn = node
2197         d.addCallback(_created)
2198         return d
2199
2200     def _encode(self, k, n, data, version=SDMF_VERSION):
2201         # encode 'data' into a peerid->shares dict.
2202
2203         fn = self._fn
2204         # disable the nodecache, since for these tests we explicitly need
2205         # multiple nodes pointing at the same file
2206         self._nodemaker._node_cache = DevNullDictionary()
2207         fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2208         # then we copy over other fields that are normally fetched from the
2209         # existing shares
2210         fn2._pubkey = fn._pubkey
2211         fn2._privkey = fn._privkey
2212         fn2._encprivkey = fn._encprivkey
2213         # and set the encoding parameters to something completely different
2214         fn2._required_shares = k
2215         fn2._total_shares = n
2216
2217         s = self._storage
2218         s._peers = {} # clear existing storage
2219         p2 = Publish(fn2, self._storage_broker, None)
2220         uploadable = MutableData(data)
2221         d = p2.publish(uploadable)
2222         def _published(res):
2223             shares = s._peers
2224             s._peers = {}
2225             return shares
2226         d.addCallback(_published)
2227         return d
2228
2229     def make_servermap(self, mode=MODE_READ, oldmap=None):
2230         if oldmap is None:
2231             oldmap = ServerMap()
2232         smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
2233                                oldmap, mode)
2234         d = smu.update()
2235         return d
2236
2237     def test_multiple_encodings(self):
2238         # we encode the same file in three different ways (3-of-10, 4-of-9,
2239         # and 4-of-7), then mix up the shares, to make sure that download
2240         # survives seeing a variety of encodings. This is kind of tricky to set up.
2241
2242         contents1 = "Contents for encoding 1 (3-of-10) go here"
2243         contents2 = "Contents for encoding 2 (4-of-9) go here"
2244         contents3 = "Contents for encoding 3 (4-of-7) go here"
2245
2246         # we make a filenode that doesn't know what encoding
2247         # parameters to use
2248         fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2249
2250         # now we upload a file through the original filenode and grab its shares
2251         d = self._encode(3, 10, contents1)
2252         def _encoded_1(shares):
2253             self._shares1 = shares
2254         d.addCallback(_encoded_1)
2255         d.addCallback(lambda res: self._encode(4, 9, contents2))
2256         def _encoded_2(shares):
2257             self._shares2 = shares
2258         d.addCallback(_encoded_2)
2259         d.addCallback(lambda res: self._encode(4, 7, contents3))
2260         def _encoded_3(shares):
2261             self._shares3 = shares
2262         d.addCallback(_encoded_3)
2263
2264         def _merge(res):
2265             log.msg("merging sharelists")
2266             # we merge the shares from the three sets, leaving each shnum in
2267             # its original location, but using a share from set 1, 2, or 3
2268             # according to the following sequence:
2269             #
2270             #  4-of-9  a  s2
2271             #  4-of-9  b  s2
2272             #  4-of-7  c   s3
2273             #  4-of-9  d  s2
2274             #  3-of-10 e s1
2275             #  3-of-10 f s1
2276             #  3-of-10 g s1
2277             #  4-of-9  h  s2
2278             #
2279             # so that neither form can be recovered until fetch [g], at which
2280             # point version-s1 (the 3-of-10 form) gets its third share and
2281             # becomes recoverable. If the implementation latches on to the first
2282             # version it sees (s2), that version cannot be recovered until [h].
2283
2284             # Later, when we implement code that handles multiple versions,
2285             # we can use this framework to assert that all recoverable
2286             # versions are retrieved, and test that 'epsilon' does its job
2287
2288             places = [2, 2, 3, 2, 1, 1, 1, 2]
2289
2290             sharemap = {}
2291             sb = self._storage_broker
2292
2293             for peerid in sorted(sb.get_all_serverids()):
2294                 for shnum in self._shares1.get(peerid, {}):
2295                     if shnum < len(places):
2296                         which = places[shnum]
2297                     else:
2298                         which = "x"
2299                     self._storage._peers[peerid] = peers = {}
2300                     in_1 = shnum in self._shares1[peerid]
2301                     in_2 = shnum in self._shares2.get(peerid, {})
2302                     in_3 = shnum in self._shares3.get(peerid, {})
2303                     if which == 1:
2304                         if in_1:
2305                             peers[shnum] = self._shares1[peerid][shnum]
2306                             sharemap[shnum] = peerid
2307                     elif which == 2:
2308                         if in_2:
2309                             peers[shnum] = self._shares2[peerid][shnum]
2310                             sharemap[shnum] = peerid
2311                     elif which == 3:
2312                         if in_3:
2313                             peers[shnum] = self._shares3[peerid][shnum]
2314                             sharemap[shnum] = peerid
2315
2316             # we don't bother placing any other shares
2317             # now sort the sequence so that share 0 is returned first
2318             new_sequence = [sharemap[shnum]
2319                             for shnum in sorted(sharemap.keys())]
2320             self._storage._sequence = new_sequence
2321             log.msg("merge done")
2322         d.addCallback(_merge)
2323         d.addCallback(lambda res: fn3.download_best_version())
2324         def _retrieved(new_contents):
2325             # the current specified behavior is "first version recoverable"
2326             self.failUnlessEqual(new_contents, contents1)
2327         d.addCallback(_retrieved)
2328         return d
2329
2330
2331 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
2332
2333     def setUp(self):
2334         return self.publish_multiple()
2335
2336     def test_multiple_versions(self):
2337         # if we see a mix of versions in the grid, download_best_version
2338         # should get the latest one
2339         self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2340         d = self._fn.download_best_version()
2341         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2342         # and the checker should report problems
2343         d.addCallback(lambda res: self._fn.check(Monitor()))
2344         d.addCallback(self.check_bad, "test_multiple_versions")
2345
2346         # but if everything is at version 2, that's what we should download
2347         d.addCallback(lambda res:
2348                       self._set_versions(dict([(i,2) for i in range(10)])))
2349         d.addCallback(lambda res: self._fn.download_best_version())
2350         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2351         # if exactly one share is at version 3, we should still get v2
2352         d.addCallback(lambda res:
2353                       self._set_versions({0:3}))
2354         d.addCallback(lambda res: self._fn.download_best_version())
2355         d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2356         # but the servermap should see the unrecoverable version. This
2357         # depends upon the single newer share being queried early.
2358         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2359         def _check_smap(smap):
2360             self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2361             newer = smap.unrecoverable_newer_versions()
2362             self.failUnlessEqual(len(newer), 1)
2363             verinfo, health = newer.items()[0]
2364             self.failUnlessEqual(verinfo[0], 4)
2365             self.failUnlessEqual(health, (1,3))
2366             self.failIf(smap.needs_merge())
2367         d.addCallback(_check_smap)
2368         # if we have a mix of two parallel versions (s4a and s4b), we could
2369         # recover either
2370         d.addCallback(lambda res:
2371                       self._set_versions({0:3,2:3,4:3,6:3,8:3,
2372                                           1:4,3:4,5:4,7:4,9:4}))
2373         d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2374         def _check_smap_mixed(smap):
2375             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2376             newer = smap.unrecoverable_newer_versions()
2377             self.failUnlessEqual(len(newer), 0)
2378             self.failUnless(smap.needs_merge())
2379         d.addCallback(_check_smap_mixed)
2380         d.addCallback(lambda res: self._fn.download_best_version())
2381         d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2382                                                   res == self.CONTENTS[4]))
2383         return d
2384
2385     def test_replace(self):
2386         # if we see a mix of versions in the grid, we should be able to
2387         # replace them all with a newer version
2388
2389         # if exactly one share is at version 3, we should download (and
2390         # replace) v2, and the result should be v4. Note that the index we
2391         # give to _set_versions is different than the sequence number.
2392         target = dict([(i,2) for i in range(10)]) # seqnum3
2393         target[0] = 3 # seqnum4
2394         self._set_versions(target)
2395
2396         def _modify(oldversion, servermap, first_time):
2397             return oldversion + " modified"
2398         d = self._fn.modify(_modify)
2399         d.addCallback(lambda res: self._fn.download_best_version())
2400         expected = self.CONTENTS[2] + " modified"
2401         d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2402         # and the servermap should indicate that the outlier was replaced too
2403         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2404         def _check_smap(smap):
2405             self.failUnlessEqual(smap.highest_seqnum(), 5)
2406             self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2407             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2408         d.addCallback(_check_smap)
2409         return d
2410
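    # fn.modify() invokes its modifier as modifier(old_contents, servermap,
    # first_time), and may call it again if an uncoordinated write is
    # detected; a sketch of another modifier in that shape:
    def _sketch_append_modifier(self, old_contents, servermap, first_time):
        return old_contents + " (appended)"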
2411
2412 class Utils(unittest.TestCase):
2413     def test_cache(self):
2414         c = ResponseCache()
2415         # xdata = base62.b2a(os.urandom(100))[:100]
2416         xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2417         ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
2418         c.add("v1", 1, 0, xdata)
2419         c.add("v1", 1, 2000, ydata)
2420         self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2421         self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2422         self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2423         self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2424         self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2425         self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2426         self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2427         self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2428         self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
2429         self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2430         self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2431         self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2432         self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2433         self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2434         self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2435         self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2436         self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2437         self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
2438
2439         # test joining fragments
2440         c = ResponseCache()
2441         c.add("v1", 1, 0, xdata[:10])
2442         c.add("v1", 1, 10, xdata[10:20])
2443         self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2444
2445 class Exceptions(unittest.TestCase):
2446     def test_repr(self):
2447         nmde = NeedMoreDataError(100, 50, 100)
2448         self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde))
2449         ucwe = UncoordinatedWriteError()
2450         self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
2451
2452 class SameKeyGenerator:
2453     def __init__(self, pubkey, privkey):
2454         self.pubkey = pubkey
2455         self.privkey = privkey
2456     def generate(self, keysize=None):
2457         return defer.succeed( (self.pubkey, self.privkey) )
2458
2459 class FirstServerGetsKilled:
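         # Used as a remote-reference post_call_notifier: the first call is
         # allowed to complete normally, then the wrapper is marked broken
         # so every subsequent call to that server fails.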
2460     done = False
2461     def notify(self, retval, wrapper, methname):
2462         if not self.done:
2463             wrapper.broken = True
2464             self.done = True
2465         return retval
2466
2467 class FirstServerGetsDeleted:
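         # Also a post_call_notifier: the first call completes normally;
         # afterwards the same server answers slot_testv_and_readv_and_writev
         # with (True, {}), i.e. "no shares here", as if its share had been
         # deleted.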
2468     def __init__(self):
2469         self.done = False
2470         self.silenced = None
2471     def notify(self, retval, wrapper, methname):
2472         if not self.done:
2473             # this query will work, but later queries should think the share
2474             # has been deleted
2475             self.done = True
2476             self.silenced = wrapper
2477             return retval
2478         if wrapper == self.silenced:
2479             assert methname == "slot_testv_and_readv_and_writev"
2480             return (True, {})
2481         return retval
2482
2483 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2484     def test_publish_surprise(self):
2485         self.basedir = "mutable/Problems/test_publish_surprise"
2486         self.set_up_grid()
2487         nm = self.g.clients[0].nodemaker
2488         d = nm.create_mutable_file(MutableData("contents 1"))
2489         def _created(n):
2490             d = defer.succeed(None)
2491             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2492             def _got_smap1(smap):
2493                 # stash the old state of the file
2494                 self.old_map = smap
2495             d.addCallback(_got_smap1)
2496             # then modify the file, leaving the old map untouched
2497             d.addCallback(lambda res: log.msg("starting winning write"))
2498             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2499             # now attempt to modify the file with the old servermap. This
2500             # will look just like an uncoordinated write, in which every
2501             # single share got updated between our mapupdate and our publish
2502             d.addCallback(lambda res: log.msg("starting doomed write"))
2503             d.addCallback(lambda res:
2504                           self.shouldFail(UncoordinatedWriteError,
2505                                           "test_publish_surprise", None,
2506                                           n.upload,
2507                                           MutableData("contents 2a"), self.old_map))
2508             return d
2509         d.addCallback(_created)
2510         return d
2511
2512     def test_retrieve_surprise(self):
2513         self.basedir = "mutable/Problems/test_retrieve_surprise"
2514         self.set_up_grid()
2515         nm = self.g.clients[0].nodemaker
2516         d = nm.create_mutable_file(MutableData("contents 1"))
2517         def _created(n):
2518             d = defer.succeed(None)
2519             d.addCallback(lambda res: n.get_servermap(MODE_READ))
2520             def _got_smap1(smap):
2521                 # stash the old state of the file
2522                 self.old_map = smap
2523             d.addCallback(_got_smap1)
2524             # then modify the file, leaving the old map untouched
2525             d.addCallback(lambda res: log.msg("starting winning write"))
2526             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2527             # now attempt to retrieve the old version with the old servermap.
2528             # This will look like someone has changed the file since we
2529             # updated the servermap.
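                 # drop the cached responses so the retrieve must actually
                 # hit the servers, which now hold only the newer version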
2530             d.addCallback(lambda res: n._cache._clear())
2531             d.addCallback(lambda res: log.msg("starting doomed read"))
2532             d.addCallback(lambda res:
2533                           self.shouldFail(NotEnoughSharesError,
2534                                           "test_retrieve_surprise",
2535                                           "ran out of peers: have 0 of 1",
2536                                           n.download_version,
2537                                           self.old_map,
2538                                           self.old_map.best_recoverable_version(),
2539                                           ))
2540             return d
2541         d.addCallback(_created)
2542         return d
2543
2544
2545     def test_unexpected_shares(self):
2546         # upload the file, take a servermap, shut down one of the servers,
2547         # upload it again (causing shares to appear on a new server), then
2548         # upload using the old servermap. The last upload should fail with an
2549         # UncoordinatedWriteError, because of the shares that didn't appear
2550         # in the servermap.
2551         self.basedir = "mutable/Problems/test_unexpected_shares"
2552         self.set_up_grid()
2553         nm = self.g.clients[0].nodemaker
2554         d = nm.create_mutable_file(MutableData("contents 1"))
2555         def _created(n):
2556             d = defer.succeed(None)
2557             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2558             def _got_smap1(smap):
2559                 # stash the old state of the file
2560                 self.old_map = smap
2561                 # now shut down one of the servers
2562                 peer0 = list(smap.make_sharemap()[0])[0]
2563                 self.g.remove_server(peer0)
2564                 # then modify the file, leaving the old map untouched
2565                 log.msg("starting winning write")
2566                 return n.overwrite(MutableData("contents 2"))
2567             d.addCallback(_got_smap1)
2568             # now attempt to modify the file with the old servermap. This
2569             # will look just like an uncoordinated write, in which every
2570             # single share got updated between our mapupdate and our publish
2571             d.addCallback(lambda res: log.msg("starting doomed write"))
2572             d.addCallback(lambda res:
2573                           self.shouldFail(UncoordinatedWriteError,
2574                                           "test_unexpected_shares", None,
2575                                           n.upload,
2576                                           MutableData("contents 2a"), self.old_map))
2577             return d
2578         d.addCallback(_created)
2579         return d
2580
2581     def test_bad_server(self):
2582         # Break one server, then create the file: the initial publish should
2583         # complete with an alternate server. Breaking a second server should
2584         # not prevent an update from succeeding either.
2585         self.basedir = "mutable/Problems/test_bad_server"
2586         self.set_up_grid()
2587         nm = self.g.clients[0].nodemaker
2588
2589         # to make sure that one of the initial peers is broken, we have to
2590         # get creative. We create an RSA key and compute its storage-index.
2591         # Then we make a KeyGenerator that always returns that one key, and
2592         # use it to create the mutable file. This will get easier when we can
2593         # use #467 static-server-selection to disable permutation and force
2594         # the choice of server for share[0].
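             # Knowing the storage index in advance lets us ask the storage
             # broker for the permuted server list, so we can break exactly
             # the server that will be offered share[0].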
2595
2596         d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2597         def _got_key( (pubkey, privkey) ):
2598             nm.key_generator = SameKeyGenerator(pubkey, privkey)
2599             pubkey_s = pubkey.serialize()
2600             privkey_s = privkey.serialize()
2601             u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2602                                         ssk_pubkey_fingerprint_hash(pubkey_s))
2603             self._storage_index = u.get_storage_index()
2604         d.addCallback(_got_key)
2605         def _break_peer0(res):
2606             si = self._storage_index
2607             servers = nm.storage_broker.get_servers_for_psi(si)
2608             self.g.break_server(servers[0].get_serverid())
2609             self.server1 = servers[1]
2610         d.addCallback(_break_peer0)
2611         # now "create" the file, using the pre-established key, and let the
2612         # initial publish finally happen
2613         d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2614         # that ought to work
2615         def _got_node(n):
2616             d = n.download_best_version()
2617             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2618             # now break the second peer
2619             def _break_peer1(res):
2620                 self.g.break_server(self.server1.get_serverid())
2621             d.addCallback(_break_peer1)
2622             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2623             # that ought to work too
2624             d.addCallback(lambda res: n.download_best_version())
2625             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2626             def _explain_error(f):
2627                 print f
2628                 if f.check(NotEnoughServersError):
2629                     print "first_error:", f.value.first_error
2630                 return f
2631             d.addErrback(_explain_error)
2632             return d
2633         d.addCallback(_got_node)
2634         return d
2635
2636     def test_bad_server_overlap(self):
2637         # like test_bad_server, but with no extra unused servers to fall back
2638         # upon. This means that we must re-use a server which we've already
2639         # used. If we don't remember the fact that we sent them one share
2640         # already, we'll mistakenly think we're experiencing an
2641         # UncoordinatedWriteError.
2642
2643         # Break one server, then create the file: the initial publish should
2644         # complete with an alternate server. Breaking a second server should
2645         # not prevent an update from succeeding either.
2646         self.basedir = "mutable/Problems/test_bad_server_overlap"
2647         self.set_up_grid()
2648         nm = self.g.clients[0].nodemaker
2649         sb = nm.storage_broker
2650
2651         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2652         self.g.break_server(peerids[0])
2653
2654         d = nm.create_mutable_file(MutableData("contents 1"))
2655         def _created(n):
2656             d = n.download_best_version()
2657             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2658             # now break one of the remaining servers
2659             def _break_second_server(res):
2660                 self.g.break_server(peerids[1])
2661             d.addCallback(_break_second_server)
2662             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2663             # that ought to work too
2664             d.addCallback(lambda res: n.download_best_version())
2665             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2666             return d
2667         d.addCallback(_created)
2668         return d
2669
2670     def test_publish_all_servers_bad(self):
2671         # Break all servers: the publish should fail
2672         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2673         self.set_up_grid()
2674         nm = self.g.clients[0].nodemaker
2675         for s in nm.storage_broker.get_connected_servers():
2676             s.get_rref().broken = True
2677
2678         d = self.shouldFail(NotEnoughServersError,
2679                             "test_publish_all_servers_bad",
2680                             "ran out of good servers",
2681                             nm.create_mutable_file, MutableData("contents"))
2682         return d
2683
2684     def test_publish_no_servers(self):
2685         # no servers at all: the publish should fail
2686         self.basedir = "mutable/Problems/test_publish_no_servers"
2687         self.set_up_grid(num_servers=0)
2688         nm = self.g.clients[0].nodemaker
2689
2690         d = self.shouldFail(NotEnoughServersError,
2691                             "test_publish_no_servers",
2692                             "Ran out of non-bad servers",
2693                             nm.create_mutable_file, MutableData("contents"))
2694         return d
2695
2696
2697     def test_privkey_query_error(self):
2698         # when a servermap is updated with MODE_WRITE, it tries to get the
2699         # privkey. Something might go wrong during this query attempt.
2700         # Exercise the code in _privkey_query_failed which tries to handle
2701         # such an error.
2702         self.basedir = "mutable/Problems/test_privkey_query_error"
2703         self.set_up_grid(num_servers=20)
2704         nm = self.g.clients[0].nodemaker
2705         nm._node_cache = DevNullDictionary() # disable the nodecache
2706
2707         # we need some contents that are large enough to push the privkey out
2708         # of the early part of the file
2709         LARGE = "These are Larger contents" * 2000 # about 50KB
2710         LARGE_uploadable = MutableData(LARGE)
2711         d = nm.create_mutable_file(LARGE_uploadable)
2712         def _created(n):
2713             self.uri = n.get_uri()
2714             self.n2 = nm.create_from_cap(self.uri)
2715
2716             # When a mapupdate is performed on a node that doesn't yet know
2717             # the privkey, a short read is sent to a batch of servers, to get
2718             # the verinfo and (hopefully, if the file is short enough) the
2719             # encprivkey. Our file is too large to let this first read
2720             # contain the encprivkey. Each non-encprivkey-bearing response
2721             # that arrives (until the node gets the encprivkey) will trigger
2722             # a second read to specifically read the encprivkey.
2723             #
2724             # So, to exercise this case:
2725             #  1. notice which server gets a read() call first
2726             #  2. tell that server to start throwing errors
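                 # FirstServerGetsKilled does both: it lets that first
                 # read() complete, then marks the server broken so the
                 # follow-up encprivkey query fails.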
2727             killer = FirstServerGetsKilled()
2728             for s in nm.storage_broker.get_connected_servers():
2729                 s.get_rref().post_call_notifier = killer.notify
2730         d.addCallback(_created)
2731
2732         # now we update a servermap from a new node (which doesn't have the
2733         # privkey yet, forcing it to use a separate privkey query). Note that
2734         # the map-update will succeed, since we'll just get a copy from one
2735         # of the other shares.
2736         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2737
2738         return d
2739
2740     def test_privkey_query_missing(self):
2741         # like test_privkey_query_error, but the shares are deleted by the
2742         # second query, instead of raising an exception.
2743         self.basedir = "mutable/Problems/test_privkey_query_missing"
2744         self.set_up_grid(num_servers=20)
2745         nm = self.g.clients[0].nodemaker
2746         LARGE = "These are Larger contents" * 2000 # about 50KB
2747         LARGE_uploadable = MutableData(LARGE)
2748         nm._node_cache = DevNullDictionary() # disable the nodecache
2749
2750         d = nm.create_mutable_file(LARGE_uploadable)
2751         def _created(n):
2752             self.uri = n.get_uri()
2753             self.n2 = nm.create_from_cap(self.uri)
2754             deleter = FirstServerGetsDeleted()
2755             for s in nm.storage_broker.get_connected_servers():
2756                 s.get_rref().post_call_notifier = deleter.notify
2757         d.addCallback(_created)
2758         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2759         return d
2760
2761
2762     def test_block_and_hash_query_error(self):
2763         # This tests for what happens when a query to a remote server
2764         # fails in either the hash validation step or the block getting
2765         # step (because of batching, this is the same actual query).
2766         # We need the storage server to keep responding normally up until
2767         # the point that its prefix is validated, then suddenly die. This
2768         # exercises some exception handling code in Retrieve.
2769         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2770         self.set_up_grid(num_servers=20)
2771         nm = self.g.clients[0].nodemaker
2772         CONTENTS = "contents" * 2000
2773         CONTENTS_uploadable = MutableData(CONTENTS)
2774         d = nm.create_mutable_file(CONTENTS_uploadable)
2775         def _created(node):
2776             self._node = node
2777         d.addCallback(_created)
2778         d.addCallback(lambda ignored:
2779             self._node.get_servermap(MODE_READ))
2780         def _then(servermap):
2781             # we have our servermap. Now we set up the servers like the
2782             # tests above -- the first one that gets a read call should
2783             # start throwing errors, but only after returning its prefix
2784             # for validation. Since we'll download without fetching the
2785             # private key, the next query to the remote server will be
2786             # for either a block and salt or for hashes, either of which
2787             # will exercise the error handling code.
2788             killer = FirstServerGetsKilled()
2789             for s in nm.storage_broker.get_connected_servers():
2790                 s.get_rref().post_call_notifier = killer.notify
2791             ver = servermap.best_recoverable_version()
2792             assert ver
2793             return self._node.download_version(servermap, ver)
2794         d.addCallback(_then)
2795         d.addCallback(lambda data:
2796             self.failUnlessEqual(data, CONTENTS))
2797         return d
2798
2799
2800 class FileHandle(unittest.TestCase):
2801     def setUp(self):
2802         self.test_data = "Test Data" * 50000
2803         self.sio = StringIO(self.test_data)
2804         self.uploadable = MutableFileHandle(self.sio)
2805
2806
2807     def test_filehandle_read(self):
2808         self.basedir = "mutable/FileHandle/test_filehandle_read"
2809         chunk_size = 10
2810         for i in xrange(0, len(self.test_data), chunk_size):
2811             data = self.uploadable.read(chunk_size)
2812             data = "".join(data)
2813             start = i
2814             end = i + chunk_size
2815             self.failUnlessEqual(data, self.test_data[start:end])
2816
2817
2818     def test_filehandle_get_size(self):
2819         self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2820         actual_size = len(self.test_data)
2821         size = self.uploadable.get_size()
2822         self.failUnlessEqual(size, actual_size)
2823
2824
2825     def test_filehandle_get_size_out_of_order(self):
2826         # We should be able to call get_size whenever we want without
2827         # disturbing the location of the seek pointer.
2828         chunk_size = 100
2829         data = self.uploadable.read(chunk_size)
2830         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2831
2832         # Now get the size.
2833         size = self.uploadable.get_size()
2834         self.failUnlessEqual(size, len(self.test_data))
2835
2836         # Now get more data. We should be right where we left off.
2837         more_data = self.uploadable.read(chunk_size)
2838         start = chunk_size
2839         end = chunk_size * 2
2840         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2841
2842
2843     def test_filehandle_file(self):
2844         # Make sure that the MutableFileHandle works on a file as well
2845         # as a StringIO object, since in some cases it will be asked to
2846         # deal with files.
2847         self.basedir = self.mktemp()
2848         # mktemp() only returns a path, so we must create the directory.
2849         os.mkdir(self.basedir)
2850         f_path = os.path.join(self.basedir, "test_file")
2851         f = open(f_path, "w")
2852         f.write(self.test_data)
2853         f.close()
2854         f = open(f_path, "r")
2855
2856         uploadable = MutableFileHandle(f)
2857
2858         data = uploadable.read(len(self.test_data))
2859         self.failUnlessEqual("".join(data), self.test_data)
2860         size = uploadable.get_size()
2861         self.failUnlessEqual(size, len(self.test_data))
2862
2863
2864     def test_close(self):
2865         # Make sure that the MutableFileHandle closes its handle when
2866         # told to do so.
2867         self.uploadable.close()
2868         self.failUnless(self.sio.closed)
2869
2870
2871 class DataHandle(unittest.TestCase):
2872     def setUp(self):
2873         self.test_data = "Test Data" * 50000
2874         self.uploadable = MutableData(self.test_data)
2875
2876
2877     def test_datahandle_read(self):
2878         chunk_size = 10
2879         for i in xrange(0, len(self.test_data), chunk_size):
2880             data = self.uploadable.read(chunk_size)
2881             data = "".join(data)
2882             start = i
2883             end = i + chunk_size
2884             self.failUnlessEqual(data, self.test_data[start:end])
2885
2886
2887     def test_datahandle_get_size(self):
2888         actual_size = len(self.test_data)
2889         size = self.uploadable.get_size()
2890         self.failUnlessEqual(size, actual_size)
2891
2892
2893     def test_datahandle_get_size_out_of_order(self):
2894         # We should be able to call get_size whenever we want without
2895         # disturbing the location of the seek pointer.
2896         chunk_size = 100
2897         data = self.uploadable.read(chunk_size)
2898         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2899
2900         # Now get the size.
2901         size = self.uploadable.get_size()
2902         self.failUnlessEqual(size, len(self.test_data))
2903
2904         # Now get more data. We should be right where we left off.
2905         more_data = self.uploadable.read(chunk_size)
2906         start = chunk_size
2907         end = chunk_size * 2
2908         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2909
2910
2911 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin,
2912               PublishMixin):
2913     def setUp(self):
2914         GridTestMixin.setUp(self)
2915         self.basedir = self.mktemp()
2916         self.set_up_grid()
2917         self.c = self.g.clients[0]
2918         self.nm = self.c.nodemaker
2919         self.data = "test data" * 100000 # about 900 KiB; MDMF
2920         self.small_data = "test data" * 10 # about 90 B; SDMF
2921         return self.do_upload()
2922
2923
2924     def do_upload(self):
2925         d1 = self.nm.create_mutable_file(MutableData(self.data),
2926                                          version=MDMF_VERSION)
2927         d2 = self.nm.create_mutable_file(MutableData(self.small_data))
2928         dl = gatherResults([d1, d2])
2929         def _then((n1, n2)):
2930             assert isinstance(n1, MutableFileNode)
2931             assert isinstance(n2, MutableFileNode)
2932
2933             self.mdmf_node = n1
2934             self.sdmf_node = n2
2935         dl.addCallback(_then)
2936         return dl
2937
2938
2939     def test_get_readonly_mutable_version(self):
2940         # Attempting to get a mutable version of a mutable file from a
2941         # filenode initialized with a readcap should return a readonly
2942         # version of that same node.
2943         ro = self.mdmf_node.get_readonly()
2944         d = ro.get_best_mutable_version()
2945         d.addCallback(lambda version:
2946             self.failUnless(version.is_readonly()))
2947         d.addCallback(lambda ignored:
2948             self.sdmf_node.get_readonly())
2949         d.addCallback(lambda version:
2950             self.failUnless(version.is_readonly()))
2951         return d
2952
2953
2954     def test_get_sequence_number(self):
2955         d = self.mdmf_node.get_best_readable_version()
2956         d.addCallback(lambda bv:
2957             self.failUnlessEqual(bv.get_sequence_number(), 1))
2958         d.addCallback(lambda ignored:
2959             self.sdmf_node.get_best_readable_version())
2960         d.addCallback(lambda bv:
2961             self.failUnlessEqual(bv.get_sequence_number(), 1))
2962         # Now update. The sequence number should then be 2 in
2963         # both cases.
2964         def _do_update(ignored):
2965             new_data = MutableData("foo bar baz" * 100000)
2966             new_small_data = MutableData("foo bar baz" * 10)
2967             d1 = self.mdmf_node.overwrite(new_data)
2968             d2 = self.sdmf_node.overwrite(new_small_data)
2969             dl = gatherResults([d1, d2])
2970             return dl
2971         d.addCallback(_do_update)
2972         d.addCallback(lambda ignored:
2973             self.mdmf_node.get_best_readable_version())
2974         d.addCallback(lambda bv:
2975             self.failUnlessEqual(bv.get_sequence_number(), 2))
2976         d.addCallback(lambda ignored:
2977             self.sdmf_node.get_best_readable_version())
2978         d.addCallback(lambda bv:
2979             self.failUnlessEqual(bv.get_sequence_number(), 2))
2980         return d
2981
2982
2983     def test_version_extension_api(self):
2984         # Exercise the API by which an uploader can set the extension
2985         # parameters, and by which a downloader can retrieve those
2986         # extensions.
2987         d = self.mdmf_node.get_best_mutable_version()
2988         def _got_version(version):
2989             hints = version.get_downloader_hints()
2990             # The hints should carry k and segsize; 131073 is 128 KiB rounded up to a multiple of k=3.
2991             self.failUnlessIn("k", hints)
2992             self.failUnlessEqual(hints['k'], 3)
2993             self.failUnlessIn('segsize', hints)
2994             self.failUnlessEqual(hints['segsize'], 131073)
2995         d.addCallback(_got_version)
2996         return d
2997
2998
2999     def test_extensions_from_cap(self):
3000         # If we initialize a mutable file with a cap that has extension
3001         # parameters in it and then grab the extension parameters using
3002         # our API, we should see that they're set correctly.
3003         mdmf_uri = self.mdmf_node.get_uri()
3004         new_node = self.nm.create_from_cap(mdmf_uri)
3005         d = new_node.get_best_mutable_version()
3006         def _got_version(version):
3007             hints = version.get_downloader_hints()
3008             self.failUnlessIn("k", hints)
3009             self.failUnlessEqual(hints["k"], 3)
3010             self.failUnlessIn("segsize", hints)
3011             self.failUnlessEqual(hints["segsize"], 131073)
3012         d.addCallback(_got_version)
3013         return d
3014
3015
3016     def test_extensions_from_upload(self):
3017         # If we create a new mutable file with some contents, we should
3018         # get back an MDMF cap with the right hints in place.
3019         contents = "foo bar baz" * 100000
3020         d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3021         def _got_mutable_file(n):
3022             rw_uri = n.get_uri()
3023             expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3024             self.failUnlessIn(expected_k, rw_uri)
3025             # XXX: Get this more intelligently.
3026             self.failUnlessIn("131073", rw_uri)
3027
3028             ro_uri = n.get_readonly_uri()
3029             self.failUnlessIn(expected_k, ro_uri)
3030             self.failUnlessIn("131073", ro_uri)
3031         d.addCallback(_got_mutable_file)
3032         return d
3033
3034
3035     def test_cap_after_upload(self):
3036         # If we create a new mutable file and upload things to it, and
3037         # it's an MDMF file, we should get an MDMF cap back from that
3038         # file and should be able to use that.
3039         # That's essentially what self.mdmf_node is, so just check that.
3040         mdmf_uri = self.mdmf_node.get_uri()
3041         cap = uri.from_string(mdmf_uri)
3042         self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3043         readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3044         cap = uri.from_string(readonly_mdmf_uri)
3045         self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3046
3047
3048     def test_get_writekey(self):
3049         d = self.mdmf_node.get_best_mutable_version()
3050         d.addCallback(lambda bv:
3051             self.failUnlessEqual(bv.get_writekey(),
3052                                  self.mdmf_node.get_writekey()))
3053         d.addCallback(lambda ignored:
3054             self.sdmf_node.get_best_mutable_version())
3055         d.addCallback(lambda bv:
3056             self.failUnlessEqual(bv.get_writekey(),
3057                                  self.sdmf_node.get_writekey()))
3058         return d
3059
3060
3061     def test_get_storage_index(self):
3062         d = self.mdmf_node.get_best_mutable_version()
3063         d.addCallback(lambda bv:
3064             self.failUnlessEqual(bv.get_storage_index(),
3065                                  self.mdmf_node.get_storage_index()))
3066         d.addCallback(lambda ignored:
3067             self.sdmf_node.get_best_mutable_version())
3068         d.addCallback(lambda bv:
3069             self.failUnlessEqual(bv.get_storage_index(),
3070                                  self.sdmf_node.get_storage_index()))
3071         return d
3072
3073
3074     def test_get_readonly_version(self):
3075         d = self.mdmf_node.get_best_readable_version()
3076         d.addCallback(lambda bv:
3077             self.failUnless(bv.is_readonly()))
3078         d.addCallback(lambda ignored:
3079             self.sdmf_node.get_best_readable_version())
3080         d.addCallback(lambda bv:
3081             self.failUnless(bv.is_readonly()))
3082         return d
3083
3084
3085     def test_get_mutable_version(self):
3086         d = self.mdmf_node.get_best_mutable_version()
3087         d.addCallback(lambda bv:
3088             self.failIf(bv.is_readonly()))
3089         d.addCallback(lambda ignored:
3090             self.sdmf_node.get_best_mutable_version())
3091         d.addCallback(lambda bv:
3092             self.failIf(bv.is_readonly()))
3093         return d
3094
3095
3096     def test_toplevel_overwrite(self):
3097         new_data = MutableData("foo bar baz" * 100000)
3098         new_small_data = MutableData("foo bar baz" * 10)
3099         d = self.mdmf_node.overwrite(new_data)
3100         d.addCallback(lambda ignored:
3101             self.mdmf_node.download_best_version())
3102         d.addCallback(lambda data:
3103             self.failUnlessEqual(data, "foo bar baz" * 100000))
3104         d.addCallback(lambda ignored:
3105             self.sdmf_node.overwrite(new_small_data))
3106         d.addCallback(lambda ignored:
3107             self.sdmf_node.download_best_version())
3108         d.addCallback(lambda data:
3109             self.failUnlessEqual(data, "foo bar baz" * 10))
3110         return d
3111
3112
3113     def test_toplevel_modify(self):
3114         def modifier(old_contents, servermap, first_time):
3115             return old_contents + "modified"
3116         d = self.mdmf_node.modify(modifier)
3117         d.addCallback(lambda ignored:
3118             self.mdmf_node.download_best_version())
3119         d.addCallback(lambda data:
3120             self.failUnlessIn("modified", data))
3121         d.addCallback(lambda ignored:
3122             self.sdmf_node.modify(modifier))
3123         d.addCallback(lambda ignored:
3124             self.sdmf_node.download_best_version())
3125         d.addCallback(lambda data:
3126             self.failUnlessIn("modified", data))
3127         return d
3128
3129
3130     def test_version_modify(self):
3131         # TODO: When we can publish multiple versions, alter this test
3132         # to modify a version other than the best usable version, then
3133         # test that the best recoverable version is the modified one.
3134         def modifier(old_contents, servermap, first_time):
3135             return old_contents + "modified"
3136         d = self.mdmf_node.modify(modifier)
3137         d.addCallback(lambda ignored:
3138             self.mdmf_node.download_best_version())
3139         d.addCallback(lambda data:
3140             self.failUnlessIn("modified", data))
3141         d.addCallback(lambda ignored:
3142             self.sdmf_node.modify(modifier))
3143         d.addCallback(lambda ignored:
3144             self.sdmf_node.download_best_version())
3145         d.addCallback(lambda data:
3146             self.failUnlessIn("modified", data))
3147         return d
3148
3149
3150     def test_download_version(self):
3151         d = self.publish_multiple()
3152         # We want to have two recoverable versions on the grid.
3153         d.addCallback(lambda res:
3154                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3155                                           1:1,3:1,5:1,7:1,9:1}))
3156         # Now try to download each version. We should get the plaintext
3157         # associated with that version.
3158         d.addCallback(lambda ignored:
3159             self._fn.get_servermap(mode=MODE_READ))
3160         def _got_servermap(smap):
3161             versions = smap.recoverable_versions()
3162             assert len(versions) == 2
3163
3164             self.servermap = smap
3165             self.version1, self.version2 = versions
3166             assert self.version1 != self.version2
3167
3168             self.version1_seqnum = self.version1[0]
3169             self.version2_seqnum = self.version2[0]
3170             self.version1_index = self.version1_seqnum - 1
3171             self.version2_index = self.version2_seqnum - 1
3172
3173         d.addCallback(_got_servermap)
3174         d.addCallback(lambda ignored:
3175             self._fn.download_version(self.servermap, self.version1))
3176         d.addCallback(lambda results:
3177             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3178                                  results))
3179         d.addCallback(lambda ignored:
3180             self._fn.download_version(self.servermap, self.version2))
3181         d.addCallback(lambda results:
3182             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3183                                  results))
3184         return d
3185
3186
3187     def test_download_nonexistent_version(self):
3188         d = self.mdmf_node.get_servermap(mode=MODE_WRITE)
3189         def _set_servermap(servermap):
3190             self.servermap = servermap
3191         d.addCallback(_set_servermap)
3192         d.addCallback(lambda ignored:
3193            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3194                            None,
3195                            self.mdmf_node.download_version, self.servermap,
3196                            "not a version"))
3197         return d
3198
3199
3200     def test_partial_read(self):
3201         # read only a few bytes at a time, and see that the results are
3202         # what we expect.
3203         d = self.mdmf_node.get_best_readable_version()
3204         def _read_data(version):
3205             c = consumer.MemoryConsumer()
3206             d2 = defer.succeed(None)
3207             for i in xrange(0, len(self.data), 10000):
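                     # i=i captures the current loop value as a default
                     # argument; a bare closure would see only the final
                     # value of i by the time the Deferred chain runs.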
3208                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3209             d2.addCallback(lambda ignored:
3210                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3211             return d2
3212         d.addCallback(_read_data)
3213         return d
3214
3215     def test_partial_read_starting_on_segment_boundary(self):
3216         d = self.mdmf_node.get_best_readable_version()
3217         c = consumer.MemoryConsumer()
3218         offset = mathutil.next_multiple(128 * 1024, 3)
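             # MDMF rounds the 128 KiB default segment size up to a
             # multiple of k (3 here, giving 131073), so this offset is
             # exactly the start of the second segment.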
3219         d.addCallback(lambda version:
3220             version.read(c, offset, 50))
3221         expected = self.data[offset:offset+50]
3222         d.addCallback(lambda ignored:
3223             self.failUnlessEqual(expected, "".join(c.chunks)))
3224         return d
3225
3226     def test_partial_read_ending_on_segment_boundary(self):
3227         d = self.mdmf_node.get_best_readable_version()
3228         c = consumer.MemoryConsumer()
3229         offset = mathutil.next_multiple(128 * 1024, 3)
3230         start = offset - 50
3231         d.addCallback(lambda version:
3232             version.read(c, start, 51))
3233         expected = self.data[offset-50:offset+1]
3234         d.addCallback(lambda ignored:
3235             self.failUnlessEqual(expected, "".join(c.chunks)))
3236         return d
3237
3238     def test_read(self):
3239         d = self.mdmf_node.get_best_readable_version()
3240         def _read_data(version):
3241             c = consumer.MemoryConsumer()
3242             d2 = defer.succeed(None)
3243             d2.addCallback(lambda ignored: version.read(c))
3244             d2.addCallback(lambda ignored:
3245                 self.failUnlessEqual("".join(c.chunks), self.data))
3246             return d2
3247         d.addCallback(_read_data)
3248         return d
3249
3250
3251     def test_download_best_version(self):
3252         d = self.mdmf_node.download_best_version()
3253         d.addCallback(lambda data:
3254             self.failUnlessEqual(data, self.data))
3255         d.addCallback(lambda ignored:
3256             self.sdmf_node.download_best_version())
3257         d.addCallback(lambda data:
3258             self.failUnlessEqual(data, self.small_data))
3259         return d
3260
3261
3262 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3263     timeout = 400 # these tests are too big, 120s is not enough on slow
3264                   # platforms
3265     def setUp(self):
3266         GridTestMixin.setUp(self)
3267         self.basedir = self.mktemp()
3268         self.set_up_grid()
3269         self.c = self.g.clients[0]
3270         self.nm = self.c.nodemaker
3271         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3272         self.small_data = "test data" * 10 # about 90 B; SDMF
3273         return self.do_upload()
3274
3275
3276     def do_upload(self):
3277         d1 = self.nm.create_mutable_file(MutableData(self.data),
3278                                          version=MDMF_VERSION)
3279         d2 = self.nm.create_mutable_file(MutableData(self.small_data))
3280         dl = gatherResults([d1, d2])
3281         def _then((n1, n2)):
3282             assert isinstance(n1, MutableFileNode)
3283             assert isinstance(n2, MutableFileNode)
3284
3285             self.mdmf_node = n1
3286             self.sdmf_node = n2
3287         dl.addCallback(_then)
3288         # Make SDMF and MDMF mutable file nodes that have 255 shares.
3289         def _make_max_shares(ign):
3290             self.nm.default_encoding_parameters['n'] = 255
3291             self.nm.default_encoding_parameters['k'] = 127
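                 # (255 shares appears to be the encoder's upper bound, so
                 # this exercises the widest possible share layout)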
3292             d1 = self.nm.create_mutable_file(MutableData(self.data),
3293                                              version=MDMF_VERSION)
3294             d2 = \
3295                 self.nm.create_mutable_file(MutableData(self.small_data))
3296             return gatherResults([d1, d2])
3297         dl.addCallback(_make_max_shares)
3298         def _stash((n1, n2)):
3299             assert isinstance(n1, MutableFileNode)
3300             assert isinstance(n2, MutableFileNode)
3301
3302             self.mdmf_max_shares_node = n1
3303             self.sdmf_max_shares_node = n2
3304         dl.addCallback(_stash)
3305         return dl
3306
3307     def test_append(self):
3308         # We should be able to append data to the end of a mutable
3309         # file and get what we expect.
3310         new_data = self.data + "appended"
3311         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3312             d = node.get_best_mutable_version()
3313             d.addCallback(lambda mv:
3314                 mv.update(MutableData("appended"), len(self.data)))
3315             d.addCallback(lambda ignored, node=node:
3316                 node.download_best_version())
3317             d.addCallback(lambda results:
3318                 self.failUnlessEqual(results, new_data))
3319         return d
3320
3321     def test_replace(self):
3322         # We should be able to replace data in the middle of a mutable
3323         # file and get what we expect back.
3324         new_data = self.data[:100]
3325         new_data += "appended"
3326         new_data += self.data[108:]
3327         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3328             d = node.get_best_mutable_version()
3329             d.addCallback(lambda mv:
3330                 mv.update(MutableData("appended"), 100))
3331             d.addCallback(lambda ignored, node=node:
3332                 node.download_best_version())
3333             d.addCallback(lambda results:
3334                 self.failUnlessEqual(results, new_data))
3335         return d
3336
3337     def test_replace_beginning(self):
3338         # We should be able to replace data at the beginning of the file
3339         # without truncating the file
3340         B = "beginning"
3341         new_data = B + self.data[len(B):]
3342         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3343             d = node.get_best_mutable_version()
3344             d.addCallback(lambda mv: mv.update(MutableData(B), 0))
3345             d.addCallback(lambda ignored, node=node:
3346                 node.download_best_version())
3347             d.addCallback(lambda results: self.failUnlessEqual(results, new_data))
3348         return d
3349
3350     def test_replace_segstart1(self):
3351         offset = 128*1024+1
3352         new_data = "NNNN"
3353         expected = self.data[:offset]+new_data+self.data[offset+4:]
3354         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3355             d = node.get_best_mutable_version()
3356             d.addCallback(lambda mv:
3357                 mv.update(MutableData(new_data), offset))
3358             # close over 'node'.
3359             d.addCallback(lambda ignored, node=node:
3360                 node.download_best_version())
3361             def _check(results):
3362                 if results != expected:
3363                     print
3364                     print "got: %s ... %s" % (results[:20], results[-20:])
3365                     print "exp: %s ... %s" % (expected[:20], expected[-20:])
3366                     self.fail("results != expected")
3367             d.addCallback(_check)
3368         return d
3369
3370     def _check_differences(self, got, expected):
3371         # displaying arbitrary file corruption is tricky for a
3372         # 1MB file of repeating data, so look for likely places
3373         # with problems and display them separately
3374         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3375         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3376         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3377                     for (start,end) in gotmods]
3378         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3379                     for (start,end) in expmods]
3380         #print "expecting: %s" % expspans
3381
3382         SEGSIZE = 128*1024
3383         if got != expected:
3384             print "differences:"
3385             for segnum in range(len(expected)//SEGSIZE):
3386                 start = segnum * SEGSIZE
3387                 end = (segnum+1) * SEGSIZE
3388                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3389                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3390                 if got_ends != exp_ends:
3391                     print "expected[%d]: %s" % (start, exp_ends)
3392                     print "got     [%d]: %s" % (start, got_ends)
3393             if expspans != gotspans:
3394                 print "expected: %s" % expspans
3395                 print "got     : %s" % gotspans
3396             open("EXPECTED","wb").write(expected)
3397             open("GOT","wb").write(got)
3398             print "wrote data to EXPECTED and GOT"
3399             self.fail("didn't get expected data")
3400
3401
3402     def test_replace_locations(self):
3403         # exercise fencepost conditions
3404         expected = self.data
3405         SEGSIZE = 128*1024
3406         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
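             # each two-byte write lands just before, straddling, or just
             # after one of the first two segment boundaries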
3407         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3408         d = defer.succeed(None)
3409         for offset in suspects:
3410             new_data = letters.next()*2 # "AA", then "BB", etc
3411             expected = expected[:offset]+new_data+expected[offset+2:]
3412             d.addCallback(lambda ign:
3413                           self.mdmf_node.get_best_mutable_version())
3414             def _modify(mv, offset=offset, new_data=new_data):
3415                 # close over 'offset','new_data'
3416                 md = MutableData(new_data)
3417                 return mv.update(md, offset)
3418             d.addCallback(_modify)
3419             d.addCallback(lambda ignored:
3420                           self.mdmf_node.download_best_version())
3421             d.addCallback(self._check_differences, expected)
3422         return d
3423
3424     def test_replace_locations_max_shares(self):
3425         # exercise fencepost conditions
3426         expected = self.data
3427         SEGSIZE = 128*1024
3428         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3429         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3430         d = defer.succeed(None)
3431         for offset in suspects:
3432             new_data = letters.next()*2 # "AA", then "BB", etc
3433             expected = expected[:offset]+new_data+expected[offset+2:]
3434             d.addCallback(lambda ign:
3435                           self.mdmf_max_shares_node.get_best_mutable_version())
3436             def _modify(mv, offset=offset, new_data=new_data):
3437                 # close over 'offset','new_data'
3438                 md = MutableData(new_data)
3439                 return mv.update(md, offset)
3440             d.addCallback(_modify)
3441             d.addCallback(lambda ignored:
3442                           self.mdmf_max_shares_node.download_best_version())
3443             d.addCallback(self._check_differences, expected)
3444         return d
3445
3446     def test_replace_and_extend(self):
3447         # We should be able to replace data in the middle of a mutable
3448         # file and extend that mutable file and get what we expect.
3449         new_data = self.data[:100]
3450         new_data += "modified " * 100000
3451         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3452             d = node.get_best_mutable_version()
3453             d.addCallback(lambda mv:
3454                 mv.update(MutableData("modified " * 100000), 100))
3455             d.addCallback(lambda ignored, node=node:
3456                 node.download_best_version())
3457             d.addCallback(lambda results:
3458                 self.failUnlessEqual(results, new_data))
3459         return d
3460
3461
3462     def test_append_power_of_two(self):
3463         # If we attempt to extend a mutable file so that its segment
3464         # count crosses a power-of-two boundary, the update operation
3465         # should know how to reencode the file.
3466
3467         # Note that the data populating self.mdmf_node is about 900 KiB
3468         # long -- this is 7 segments in the default segment size. So we
3469         # need to add 2 segments worth of data to push it over a
3470         # power-of-two boundary.
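             # (~900 KiB over ~128 KiB segments = 7 segments; 7 + 2 = 9
             # crosses the 8-segment power-of-two boundary)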
3471         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3472         new_data = self.data + (segment * 2)
3473         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3474             d = node.get_best_mutable_version()
3475             d.addCallback(lambda mv:
3476                 mv.update(MutableData(segment * 2), len(self.data)))
3477             d.addCallback(lambda ignored, node=node:
3478                 node.download_best_version())
3479             d.addCallback(lambda results:
3480                 self.failUnlessEqual(results, new_data))
3481         return d
3482
3483
3484     def test_update_sdmf(self):
3485         # Running update on a single-segment file should still work.
3486         new_data = self.small_data + "appended"
3487         for node in (self.sdmf_node, self.sdmf_max_shares_node):
3488             d = node.get_best_mutable_version()
3489             d.addCallback(lambda mv:
3490                 mv.update(MutableData("appended"), len(self.small_data)))
3491             d.addCallback(lambda ignored, node=node:
3492                 node.download_best_version())
3493             d.addCallback(lambda results:
3494                 self.failUnlessEqual(results, new_data))
3495         return d
3496
3497     def test_replace_in_last_segment(self):
3498         # The wrapper should know how to handle the tail segment
3499         # appropriately.
3500         replace_offset = len(self.data) - 100
3501         new_data = self.data[:replace_offset] + "replaced"
3502         rest_offset = replace_offset + len("replaced")
3503         new_data += self.data[rest_offset:]
3504         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3505             d = node.get_best_mutable_version()
3506             d.addCallback(lambda mv:
3507                 mv.update(MutableData("replaced"), replace_offset))
3508             d.addCallback(lambda ignored, node=node:
3509                 node.download_best_version())
3510             d.addCallback(lambda results:
3511                 self.failUnlessEqual(results, new_data))
3512         return d
3513
3514
3515     def test_multiple_segment_replace(self):
3516         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3517         new_data = self.data[:replace_offset]
3518         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3519         new_data += 2 * new_segment
3520         new_data += "replaced"
3521         rest_offset = len(new_data)
3522         new_data += self.data[rest_offset:]
3523         for node in (self.mdmf_node, self.mdmf_max_shares_node):
3524             d = node.get_best_mutable_version()
3525             d.addCallback(lambda mv:
3526                 mv.update(MutableData((2 * new_segment) + "replaced"),
3527                           replace_offset))
3528             d.addCallback(lambda ignored, node=node:
3529                 node.download_best_version())
3530             d.addCallback(lambda results:
3531                 self.failUnlessEqual(results, new_data))
3532         return d
3533
3534 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3535     sdmf_old_shares = {}
3536     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3537     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3538     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3539     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3540     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3541     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3542     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3543     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3544     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3545     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3546     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3547     sdmf_old_contents = "This is a test file.\n"
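    # The ten strings above are base64-encoded SDMF shares, presumably
    # captured from a grid run by an earlier release of Tahoe-LAFS;
    # sdmf_old_cap is the matching write cap and sdmf_old_contents is the
    # plaintext they encode. Planting these shares on a fresh grid lets the
    # test below confirm that the current downloader still understands the
    # old on-disk share format.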
3548     def copy_sdmf_shares(self):
3549         # Short-circuit the normal upload process: write the canned share data directly into each server's storage directory.
3550         servernums = self.g.servers_by_number.keys()
3551         assert len(servernums) == 10
3552
3553         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3554         # Derive the storage index from the cap; it names the on-disk share directory.
3555         cap = uri.from_string(self.sdmf_old_cap)
3556         si = cap.get_storage_index()
3557
3558         # Now execute each assignment by writing the decoded share data into the server's storage directory.
3559         for (share, servernum) in assignments:
3560             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3561             storedir = self.get_serverdir(servernum)
3562             storage_path = os.path.join(storedir, "shares",
3563                                         storage_index_to_dir(si))
3564             fileutil.make_dirs(storage_path)
3565             fileutil.write(os.path.join(storage_path, "%d" % share),
3566                            sharedata)
3567         # ...and verify that the shares are there.
3568         shares = self.find_uri_shares(self.sdmf_old_cap)
3569         assert len(shares) == 10
3570
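    # A minimal sketch (not called by any test in the suite) of how the
    # canned shares could be sanity-checked before planting them, using the
    # unpack_header import at the top of this module. The 3-of-10 encoding
    # parameters are an assumption about how these old shares were created.
    def _check_sdmf_share_header(self, sharenum):
        # decode one canned share and inspect its SDMF header fields
        data = base64.b64decode(self.sdmf_old_shares[sharenum])
        (version, seqnum, root_hash, IV,
         k, N, segsize, datalen, offsets) = unpack_header(data)
        self.failUnlessEqual(version, 0) # SDMF shares carry version byte 0
        self.failUnlessEqual((k, N), (3, 10)) # assumed 3-of-10 encoding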
3571     def test_new_downloader_can_read_old_shares(self):
3572         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3573         self.set_up_grid()
3574         self.copy_sdmf_shares()
3575         nm = self.g.clients[0].nodemaker
3576         n = nm.create_from_cap(self.sdmf_old_cap)
3577         d = n.download_best_version()
3578         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3579         return d
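    # A hedged companion sketch, not part of the original suite: it assumes
    # the same GridTestMixin/nodemaker API as the download test above and
    # uses MutableData (imported at the top of this module) to check that
    # the modern publish path can overwrite shares written in the old
    # format, then read the new contents back.
    def test_new_uploader_can_overwrite_old_shares(self):
        self.basedir = "mutable/Interoperability/new_uploader_can_overwrite_old_shares"
        self.set_up_grid()
        self.copy_sdmf_shares()
        nm = self.g.clients[0].nodemaker
        n = nm.create_from_cap(self.sdmf_old_cap)
        NEW_CONTENTS = "new contents for the interoperability check\n"
        d = n.overwrite(MutableData(NEW_CONTENTS))
        d.addCallback(lambda ignored: n.download_best_version())
        d.addCallback(self.failUnlessEqual, NEW_CONTENTS)
        return d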