]> git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/blob - src/allmydata/test/test_mutable.py
147f7de5adf383224ff525d0a566e8df5b66228f
[tahoe-lafs/tahoe-lafs.git] / src / allmydata / test / test_mutable.py
1
2 import os, re, base64
3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from twisted.internet.interfaces import IConsumer
7 from zope.interface import implements
8 from allmydata import uri, client
9 from allmydata.nodemaker import NodeMaker
10 from allmydata.util import base32, consumer, fileutil, mathutil
11 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
12      ssk_pubkey_fingerprint_hash
13 from allmydata.util.deferredutil import gatherResults
14 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
15      NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
16 from allmydata.monitor import Monitor
17 from allmydata.test.common import ShouldFailMixin
18 from allmydata.test.no_network import GridTestMixin
19 from foolscap.api import eventually, fireEventually
20 from foolscap.logging import log
21 from allmydata.storage_client import StorageFarmBroker
22 from allmydata.storage.common import storage_index_to_dir
23 from allmydata.scripts import debug
24
25 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
26 from allmydata.mutable.common import ResponseCache, \
27      MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
28      NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
29      NotEnoughServersError, CorruptShareError
30 from allmydata.mutable.retrieve import Retrieve
31 from allmydata.mutable.publish import Publish, MutableFileHandle, \
32                                       MutableData, \
33                                       DEFAULT_MAX_SEGMENT_SIZE
34 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
35 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
36 from allmydata.mutable.repairer import MustForceRepairError
37
38 import allmydata.test.common_util as testutil
39 from allmydata.test.common import TEST_RSA_KEY_SIZE
40
41
42 # this "FakeStorage" exists to put the share data in RAM and avoid using real
43 # network connections, both to speed up the tests and to reduce the amount of
44 # non-mutable.py code being exercised.
45
class FakeStorage:
    # Stand-in for the collection of storage servers: all share data
    # lives in RAM, so tests can examine and tamper with published
    # shares without real network connections. It also lets a test
    # dictate the order in which read queries are answered, to exercise
    # more of the error-handling code in Retrieve.
    #
    # Note that the storage index is ignored: a single FakeStorage
    # instance serves exactly one storage index.


    def __init__(self):
        self._peers = {}
        # When _sequence is set (to a list of peerids), read queries are
        # not answered immediately: each one is parked as a Deferred in
        # _pending. We don't know how many queries will arrive, so
        # exactly one second after the first one, they are all released
        # in the order given by _sequence.
        self._sequence = None
        self._pending = {}
        self._pending_timer = None

    def read(self, peerid, storage_index):
        # Return (possibly deferred) the share dict for 'peerid'.
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return defer.succeed(shares)
        d = defer.Deferred()
        if not self._pending:
            # first parked query starts the one-second release timer
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        self._pending[peerid] = (d, shares)
        return d

    def _fire_readers(self):
        # Release all parked read queries: first those named in
        # _sequence (in that order), then any stragglers.
        self._pending_timer = None
        pending, self._pending = self._pending, {}
        for peerid in self._sequence:
            if peerid in pending:
                d, shares = pending.pop(peerid)
                eventually(d.callback, shares)
        for (d, shares) in pending.values():
            eventually(d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
        # Splice 'data' into share 'shnum' at 'offset', extending the
        # share if necessary (StringIO pads the gap).
        shares = self._peers.setdefault(peerid, {})
        buf = StringIO()
        buf.write(shares.get(shnum, ""))
        buf.seek(offset)
        buf.write(data)
        shares[shnum] = buf.getvalue()
98
99
class FakeStorageServer:
    # Wraps a shared FakeStorage instance with the remote-invocation
    # surface (callRemote et al.) that the mutable-file code expects
    # from a storage server, counting the queries made to it.
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0  # number of remote calls made to this server

    def callRemote(self, methname, *args, **kwargs):
        # Dispatch to the local method of the same name on a later
        # reactor turn, mimicking a remote call.
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        # Fire-and-forget variant: the result (and any error) is
        # discarded and nothing is returned.
        # NOTE(review): this increments self.queries and then calls
        # callRemote, which increments it again, so one callRemoteOnly
        # counts as two queries — confirm whether any counting test
        # relies on that before changing it.
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        # Corruption advice is accepted and ignored.
        pass

    def slot_readv(self, storage_index, shnums, readv):
        # Read the requested (offset, length) vectors from each share,
        # restricted to 'shnums' when it is non-empty.
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)
156
157
def flip_bit(original, byte_offset):
    """Return a copy of 'original' with the low bit of one byte inverted."""
    flipped = chr(ord(original[byte_offset]) ^ 0x01)
    return original[:byte_offset] + flipped + original[byte_offset+1:]
162
def add_two(original, byte_offset):
    # Flipping a single bit isn't enough to corrupt the version number,
    # because 1 is a valid version number. Toggling the 0x02 bit instead
    # maps the valid verbytes 0 and 1 to the invalid 2 and 3 (for those
    # inputs this is the same as adding two, hence the name).
    changed = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + changed + original[byte_offset+1:]
169
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    """Corrupt one byte of selected shares held in FakeStorage 's'.

    If shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    list of shnums to corrupt. 'offset' is either a key into the share's
    offset table, a raw byte offset, or an (offset, delta) tuple.
    Returns a Deferred that fires with 'res', so this can be used
    directly as a callback in a test's Deferred chain.
    """
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            # 'shares' is passed as an explicit callback argument (like
            # 'data' and 'shnum') instead of being closed over: the loop
            # rebinds it for each peer, so a closure would corrupt the
            # last peer's shares if the Deferred fired after the loop
            # had moved on.
            def _do_corruption(verinfo, data, shnum, shares):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    # SDMF shares keep the pubkey at a fixed position
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum, shares)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl
218
def make_storagebroker(s=None, num_peers=10):
    # Build a StorageFarmBroker whose servers are FakeStorageServers,
    # all backed by the single FakeStorage instance 's'.
    if not s:
        s = FakeStorage()
    storage_broker = StorageFarmBroker(None, True)
    for i in range(num_peers):
        peerid = tagged_hash("peerid", "%d" % i)[:20]
        storage_broker.test_add_rref(peerid, FakeStorageServer(peerid, s))
    return storage_broker
229
def make_nodemaker(s=None, num_peers=10):
    # Construct a NodeMaker wired to a fake storage grid, using 3-of-10
    # default encoding and the small test RSA key size.
    storage_broker = make_storagebroker(s, num_peers)
    secret_holder = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    return NodeMaker(storage_broker, secret_holder, None,
                     None, None,
                     {"k": 3, "n": 10}, keygen)
239
240 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
241     # this used to be in Publish, but we removed the limit. Some of
242     # these tests test whether the new code correctly allows files
243     # larger than the limit.
244     OLD_MAX_SEGMENT_SIZE = 3500000
245     def setUp(self):
246         self._storage = s = FakeStorage()
247         self.nodemaker = make_nodemaker(s)
248
249     def test_create(self):
250         d = self.nodemaker.create_mutable_file()
251         def _created(n):
252             self.failUnless(isinstance(n, MutableFileNode))
253             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
254             sb = self.nodemaker.storage_broker
255             peer0 = sorted(sb.get_all_serverids())[0]
256             shnums = self._storage._peers[peer0].keys()
257             self.failUnlessEqual(len(shnums), 1)
258         d.addCallback(_created)
259         return d
260
261
262     def test_create_mdmf(self):
263         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
264         def _created(n):
265             self.failUnless(isinstance(n, MutableFileNode))
266             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
267             sb = self.nodemaker.storage_broker
268             peer0 = sorted(sb.get_all_serverids())[0]
269             shnums = self._storage._peers[peer0].keys()
270             self.failUnlessEqual(len(shnums), 1)
271         d.addCallback(_created)
272         return d
273
274     def test_single_share(self):
275         # Make sure that we tolerate publishing a single share.
276         self.nodemaker.default_encoding_parameters['k'] = 1
277         self.nodemaker.default_encoding_parameters['happy'] = 1
278         self.nodemaker.default_encoding_parameters['n'] = 1
279         d = defer.succeed(None)
280         for v in (SDMF_VERSION, MDMF_VERSION):
281             d.addCallback(lambda ignored:
282                 self.nodemaker.create_mutable_file(version=v))
283             def _created(n):
284                 self.failUnless(isinstance(n, MutableFileNode))
285                 self._node = n
286                 return n
287             d.addCallback(_created)
288             d.addCallback(lambda n:
289                 n.overwrite(MutableData("Contents" * 50000)))
290             d.addCallback(lambda ignored:
291                 self._node.download_best_version())
292             d.addCallback(lambda contents:
293                 self.failUnlessEqual(contents, "Contents" * 50000))
294         return d
295
296     def test_max_shares(self):
297         self.nodemaker.default_encoding_parameters['n'] = 255
298         d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
299         def _created(n):
300             self.failUnless(isinstance(n, MutableFileNode))
301             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
302             sb = self.nodemaker.storage_broker
303             num_shares = sum([len(self._storage._peers[x].keys()) for x \
304                               in sb.get_all_serverids()])
305             self.failUnlessEqual(num_shares, 255)
306             self._node = n
307             return n
308         d.addCallback(_created)
309         # Now we upload some contents
310         d.addCallback(lambda n:
311             n.overwrite(MutableData("contents" * 50000)))
312         # ...then download contents
313         d.addCallback(lambda ignored:
314             self._node.download_best_version())
315         # ...and check to make sure everything went okay.
316         d.addCallback(lambda contents:
317             self.failUnlessEqual("contents" * 50000, contents))
318         return d
319
320     def test_max_shares_mdmf(self):
321         # Test how files behave when there are 255 shares.
322         self.nodemaker.default_encoding_parameters['n'] = 255
323         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
324         def _created(n):
325             self.failUnless(isinstance(n, MutableFileNode))
326             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
327             sb = self.nodemaker.storage_broker
328             num_shares = sum([len(self._storage._peers[x].keys()) for x \
329                               in sb.get_all_serverids()])
330             self.failUnlessEqual(num_shares, 255)
331             self._node = n
332             return n
333         d.addCallback(_created)
334         d.addCallback(lambda n:
335             n.overwrite(MutableData("contents" * 50000)))
336         d.addCallback(lambda ignored:
337             self._node.download_best_version())
338         d.addCallback(lambda contents:
339             self.failUnlessEqual(contents, "contents" * 50000))
340         return d
341
342     def test_mdmf_filenode_cap(self):
343         # Test that an MDMF filenode, once created, returns an MDMF URI.
344         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
345         def _created(n):
346             self.failUnless(isinstance(n, MutableFileNode))
347             cap = n.get_cap()
348             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
349             rcap = n.get_readcap()
350             self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
351             vcap = n.get_verify_cap()
352             self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
353         d.addCallback(_created)
354         return d
355
356
357     def test_create_from_mdmf_writecap(self):
358         # Test that the nodemaker is capable of creating an MDMF
359         # filenode given an MDMF cap.
360         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
361         def _created(n):
362             self.failUnless(isinstance(n, MutableFileNode))
363             s = n.get_uri()
364             self.failUnless(s.startswith("URI:MDMF"))
365             n2 = self.nodemaker.create_from_cap(s)
366             self.failUnless(isinstance(n2, MutableFileNode))
367             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
368             self.failUnlessEqual(n.get_uri(), n2.get_uri())
369         d.addCallback(_created)
370         return d
371
372
    def test_create_from_mdmf_writecap_with_extensions(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode when given a writecap with extension parameters in
        # them.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            # The writecap should carry the extension parameters
            # (k=3, segsize=131073) appended to the cap string.
            self.failUnlessIn(":3:131073", s)
            n2 = self.nodemaker.create_from_cap(s)

            # The node built from the cap must reference the same slot
            # and share the same write authority as the original...
            self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
            self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
            # ...and must have parsed the extensions into its
            # downloader hints.
            hints = n2._downloader_hints
            self.failUnlessEqual(hints['k'], 3)
            self.failUnlessEqual(hints['segsize'], 131073)
        d.addCallback(_created)
        return d
393
394
395     def test_create_from_mdmf_readcap(self):
396         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
397         def _created(n):
398             self.failUnless(isinstance(n, MutableFileNode))
399             s = n.get_readonly_uri()
400             n2 = self.nodemaker.create_from_cap(s)
401             self.failUnless(isinstance(n2, MutableFileNode))
402
403             # Check that it's a readonly node
404             self.failUnless(n2.is_readonly())
405         d.addCallback(_created)
406         return d
407
408
409     def test_create_from_mdmf_readcap_with_extensions(self):
410         # We should be able to create an MDMF filenode with the
411         # extension parameters without it breaking.
412         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
413         def _created(n):
414             self.failUnless(isinstance(n, MutableFileNode))
415             s = n.get_readonly_uri()
416             self.failUnlessIn(":3:131073", s)
417
418             n2 = self.nodemaker.create_from_cap(s)
419             self.failUnless(isinstance(n2, MutableFileNode))
420             self.failUnless(n2.is_readonly())
421             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
422             hints = n2._downloader_hints
423             self.failUnlessEqual(hints["k"], 3)
424             self.failUnlessEqual(hints["segsize"], 131073)
425         d.addCallback(_created)
426         return d
427
428
429     def test_internal_version_from_cap(self):
430         # MutableFileNodes and MutableFileVersions have an internal
431         # switch that tells them whether they're dealing with an SDMF or
432         # MDMF mutable file when they start doing stuff. We want to make
433         # sure that this is set appropriately given an MDMF cap.
434         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
435         def _created(n):
436             self.uri = n.get_uri()
437             self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
438
439             n2 = self.nodemaker.create_from_cap(self.uri)
440             self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
441         d.addCallback(_created)
442         return d
443
444
    def test_serialize(self):
        # _do_serialized() must run operations with the given arguments,
        # deliver their return values, and surface exceptions raised
        # inside them as Failures.
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            # record that we ran, and check the arguments came through
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            # the callback's return value is delivered, and it ran once
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        # an exception raised inside the serialized operation must
        # arrive as a ValueError from _do_serialized
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        return d
465
    def test_upload_and_download(self):
        # Exercise the full SDMF cycle: servermap generation,
        # overwrite(), upload() with an explicit servermap,
        # download_best_version(), and download_version().
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            # a MODE_READ servermap should reflect the 3-of-10 encoding
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            # overwrite() returns None, and the new contents (and size)
            # are then visible via download_best_version()
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            # upload() against a freshly-fetched MODE_WRITE servermap
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # download_version() of the best recoverable version should
            # return the same contents
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d
504
505
    def test_upload_and_download_mdmf(self):
        # Same upload/download cycle for MDMF, with contents big enough
        # to require multiple segments in both directions.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                # the servermap should reflect the 3-of-10 encoding
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d
542
543
544     def test_retrieve_pause(self):
545         # We should make sure that the retriever is able to pause
546         # correctly.
547         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
548         def _created(node):
549             self.node = node
550
551             return node.overwrite(MutableData("contents1" * 100000))
552         d.addCallback(_created)
553         # Now we'll retrieve it into a pausing consumer.
554         d.addCallback(lambda ignored:
555             self.node.get_best_mutable_version())
556         def _got_version(version):
557             self.c = PausingConsumer()
558             return version.read(self.c)
559         d.addCallback(_got_version)
560         d.addCallback(lambda ignored:
561             self.failUnlessEqual(self.c.data, "contents1" * 100000))
562         return d
563
564
565     def test_download_from_mdmf_cap(self):
566         # We should be able to download an MDMF file given its cap
567         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
568         def _created(node):
569             self.uri = node.get_uri()
570
571             return node.overwrite(MutableData("contents1" * 100000))
572         def _then(ignored):
573             node = self.nodemaker.create_from_cap(self.uri)
574             return node.download_best_version()
575         def _downloaded(data):
576             self.failUnlessEqual(data, "contents1" * 100000)
577         d.addCallback(_created)
578         d.addCallback(_then)
579         d.addCallback(_downloaded)
580         return d
581
582
    def test_create_and_download_from_bare_mdmf_cap(self):
        # MDMF caps have extension parameters on them by default. We
        # need to make sure that they work without extension parameters.
        contents = MutableData("contents" * 100000)
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
                                               contents=contents)
        def _created(node):
            uri = node.get_uri()
            self._created = node
            self.failUnlessIn(":3:131073", uri)
            # Now strip that off the end of the uri, then try creating
            # and downloading the node again.
            bare_uri = uri.replace(":3:131073", "")
            assert ":3:131073" not in bare_uri

            return self.nodemaker.create_from_cap(bare_uri)
        d.addCallback(_created)
        def _created_bare(node):
            # the bare-cap node must agree with the original on keys and
            # storage index...
            self.failUnlessEqual(node.get_writekey(),
                                 self._created.get_writekey())
            self.failUnlessEqual(node.get_readkey(),
                                 self._created.get_readkey())
            self.failUnlessEqual(node.get_storage_index(),
                                 self._created.get_storage_index())
            # ...and must still be able to download the contents
            return node.download_best_version()
        d.addCallback(_created_bare)
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "contents" * 100000))
        return d
612
613
614     def test_mdmf_write_count(self):
615         # Publishing an MDMF file should only cause one write for each
616         # share that is to be published. Otherwise, we introduce
617         # undesirable semantics that are a regression from SDMF
618         upload = MutableData("MDMF" * 100000) # about 400 KiB
619         d = self.nodemaker.create_mutable_file(upload,
620                                                version=MDMF_VERSION)
621         def _check_server_write_counts(ignored):
622             sb = self.nodemaker.storage_broker
623             for server in sb.servers.itervalues():
624                 self.failUnlessEqual(server.get_rref().queries, 1)
625         d.addCallback(_check_server_write_counts)
626         return d
627
628
629     def test_create_with_initial_contents(self):
630         upload1 = MutableData("contents 1")
631         d = self.nodemaker.create_mutable_file(upload1)
632         def _created(n):
633             d = n.download_best_version()
634             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
635             upload2 = MutableData("contents 2")
636             d.addCallback(lambda res: n.overwrite(upload2))
637             d.addCallback(lambda res: n.download_best_version())
638             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
639             return d
640         d.addCallback(_created)
641         return d
642
643
644     def test_create_mdmf_with_initial_contents(self):
645         initial_contents = "foobarbaz" * 131072 # 900KiB
646         initial_contents_uploadable = MutableData(initial_contents)
647         d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
648                                                version=MDMF_VERSION)
649         def _created(n):
650             d = n.download_best_version()
651             d.addCallback(lambda data:
652                 self.failUnlessEqual(data, initial_contents))
653             uploadable2 = MutableData(initial_contents + "foobarbaz")
654             d.addCallback(lambda ignored:
655                 n.overwrite(uploadable2))
656             d.addCallback(lambda ignored:
657                 n.download_best_version())
658             d.addCallback(lambda data:
659                 self.failUnlessEqual(data, initial_contents +
660                                            "foobarbaz"))
661             return d
662         d.addCallback(_created)
663         return d
664
665
    def test_response_cache_memory_leak(self):
        # A second download of the same file must not grow the node's
        # ResponseCache: the cache contents should be identical before
        # and after.
        d = self.nodemaker.create_mutable_file("contents")
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
                return d2
            # snapshot the cache (via repr) after the first download
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
            return d
        d.addCallback(_created)
        return d
683
684     def test_create_with_initial_contents_function(self):
685         data = "initial contents"
686         def _make_contents(n):
687             self.failUnless(isinstance(n, MutableFileNode))
688             key = n.get_writekey()
689             self.failUnless(isinstance(key, str), key)
690             self.failUnlessEqual(len(key), 16) # AES key size
691             return MutableData(data)
692         d = self.nodemaker.create_mutable_file(_make_contents)
693         def _created(n):
694             return n.download_best_version()
695         d.addCallback(_created)
696         d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
697         return d
698
699
700     def test_create_mdmf_with_initial_contents_function(self):
701         data = "initial contents" * 100000
702         def _make_contents(n):
703             self.failUnless(isinstance(n, MutableFileNode))
704             key = n.get_writekey()
705             self.failUnless(isinstance(key, str), key)
706             self.failUnlessEqual(len(key), 16)
707             return MutableData(data)
708         d = self.nodemaker.create_mutable_file(_make_contents,
709                                                version=MDMF_VERSION)
710         d.addCallback(lambda n:
711             n.download_best_version())
712         d.addCallback(lambda data2:
713             self.failUnlessEqual(data2, data))
714         return d
715
716
717     def test_create_with_too_large_contents(self):
718         BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
719         BIG_uploadable = MutableData(BIG)
720         d = self.nodemaker.create_mutable_file(BIG_uploadable)
721         def _created(n):
722             other_BIG_uploadable = MutableData(BIG)
723             d = n.overwrite(other_BIG_uploadable)
724             return d
725         d.addCallback(_created)
726         return d
727
728     def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
729         d = n.get_servermap(MODE_READ)
730         d.addCallback(lambda servermap: servermap.best_recoverable_version())
731         d.addCallback(lambda verinfo:
732                       self.failUnlessEqual(verinfo[0], expected_seqnum, which))
733         return d
734
    def test_modify(self):
        """Exercise MutableFileNode.modify() against a gallery of modifier
        callables: a real appender, no-op modifiers (same contents / None),
        one that raises, one that returns oversized contents, and two that
        simulate a single UncoordinatedWriteError. After each call we check
        both the downloaded contents and the on-grid sequence number."""
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            # returning the unchanged contents should not trigger a publish
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            # returning None means "no change needed"
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            # one byte over the old SDMF segment-size limit
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            # a real modification bumps the seqnum (1 -> 2)
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # a no-op modifier leaves contents and seqnum untouched
            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            # so does a modifier that returns None
            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            # an exception raised by the modifier propagates out of modify()
            # and must not publish anything
            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


            # NOTE(review): these "big" checks re-verify the same state as
            # the "err" step; the _toobig_modifier itself is only invoked at
            # the very end, apparently expected to succeed (presumably the
            # old size limit no longer applies) -- TODO confirm against the
            # file's history.
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            # a single simulated UCWE is retried: two modifier invocations,
            # one publish, seqnum 2 -> 3
            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d
822
823
    def test_modify_backoffer(self):
        """Exercise the 'backoffer' hook of MutableFileNode.modify(): a
        stopper that re-raises immediately (modify fails), a pauser that
        waits before retrying (modify eventually succeeds), and a
        BackoffAgent that retries with delays until it gives up."""
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            # every invocation fails, so any retry policy must give up
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            # returning the failure aborts the retry loop immediately
            return f
        def _backoff_pauser(node, f):
            # wait half a second, then let modify() retry
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            # baseline: a plain modify works and bumps the seqnum to 2
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # the stopper aborts on the first UCWE: no publish, seqnum stays 2
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            # the pauser retries after a delay; the second attempt succeeds
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            # the BackoffAgent retries a few times, then surfaces the UCWE;
            # nothing is published, so contents and seqnum are unchanged
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d
888
    def test_upload_and_download_full_size_keys(self):
        """Run a full create/overwrite/upload/download cycle using the real
        (full-size RSA) key generator instead of the small test keys."""
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            # the servermap dump should show the default 3-of-10 encoding
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            # overwrite() fires with None on success
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            # upload() with an explicit MODE_WRITE servermap also works
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # download_version() of the best recoverable version matches
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d
917
918
919     def test_size_after_servermap_update(self):
920         # a mutable file node should have something to say about how big
921         # it is after a servermap update is performed, since this tells
922         # us how large the best version of that mutable file is.
923         d = self.nodemaker.create_mutable_file()
924         def _created(n):
925             self.n = n
926             return n.get_servermap(MODE_READ)
927         d.addCallback(_created)
928         d.addCallback(lambda ignored:
929             self.failUnlessEqual(self.n.get_size(), 0))
930         d.addCallback(lambda ignored:
931             self.n.overwrite(MutableData("foobarbaz")))
932         d.addCallback(lambda ignored:
933             self.failUnlessEqual(self.n.get_size(), 9))
934         d.addCallback(lambda ignored:
935             self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
936         d.addCallback(_created)
937         d.addCallback(lambda ignored:
938             self.failUnlessEqual(self.n.get_size(), 9))
939         return d
940
941
class PublishMixin:
    """Test fixture: publish one or more versions of a mutable file into a
    FakeStorage grid, stashing the filenode (self._fn), a sibling node made
    from the same cap (self._fn2), the storage (self._storage), and the
    contents (self.CONTENTS) for later manipulation by tests."""
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
            # a second filenode from the same cap, with no cached state
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS should have more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_multiple(self, version=0):
        # publish five successive versions of one file, snapshotting the
        # shares after each publish (into self._copied_shares) so tests can
        # mix versions with _set_versions()
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d


    def _copy_shares(self, ignored, index):
        # snapshot the current grid contents into self._copied_shares[index]
        shares = self._storage._peers
        # we need a deep copy (two levels: peerid -> shnum -> share; the
        # share values themselves are not copied, which presumably suffices
        # because they are replaced wholesale, not mutated -- TODO confirm)
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]
1047
class PausingConsumer:
    """An IConsumer that accumulates everything written into self.data, but
    pauses its producer after the very first write and resumes it only much
    later (15s), to verify that downloaders honor pauseProducing()."""
    implements(IConsumer)
    def __init__(self):
        self.data = ""               # everything written so far
        self.already_paused = False  # only pause once, on the first write

    def registerProducer(self, producer, streaming):
        self.producer = producer
        # kick off production immediately
        self.producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def _unpause(self, ignored):
        self.producer.resumeProducing()

    def write(self, data):
        self.data += data
        if not self.already_paused:
           # pause on the first chunk, and schedule a resume far enough in
           # the future that a producer ignoring the pause would finish first
           self.producer.pauseProducing()
           self.already_paused = True
           reactor.callLater(15, self._unpause, None)
1070
1071
class Servermap(unittest.TestCase, PublishMixin):
    """Tests for ServermapUpdater: building, updating, and re-using
    ServerMaps in the various MODE_* modes, against the fake in-memory
    storage populated by PublishMixin.publish_one()."""
    def setUp(self):
        # every test starts with one published SDMF file (self._fn)
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        # build a brand-new ServerMap for 'fn' (default: the published file)
        # with a one-shot ServermapUpdater in the given mode
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        # refresh an existing ServerMap in the given mode
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        # assert that 'sm' shows exactly one version, recoverable, with
        # 'num_shares' shares of a 3-of-10 encoding; returns 'sm' so the
        # assertion can sit in the middle of a callback chain
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        # an unknown shnum maps to no version
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        return sm

    def test_basic(self):
        # each mode stops querying at a different share count
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares in it any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, peerid, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (peerid, shnum) )
                    sm.mark_bad_share(peerid, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (peerid, shnum) in self._corrupted:
                peer_shares = sm.shares_on_peer(peerid)
                self.failIf(shnum in peer_shares,
                            "%d was in %s" % (shnum, peer_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

    def failUnlessNoneRecoverable(self, sm):
        # assert that 'sm' found no versions at all, recoverable or not
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        # with all shares deleted, every mode yields an empty map
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

    def failUnlessNotQuiteEnough(self, sm):
        # assert that 'sm' found exactly one version, unrecoverable, with
        # only 2 of the k=3 shares needed; returns 'sm'
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        return sm

    def test_not_quite_enough_shares(self):
        s = self._storage
        ms = self.make_servermap
        num_shares = len(s._peers)
        # empty peers one at a time until only two still hold a share
        for peerid in s._peers:
            s._peers[peerid] = {}
            num_shares -= 1
            if num_shares == 2:
                break
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))

        return d


    def test_servermapupdater_finds_mdmf_files(self):
        # publish an MDMF version of the file, then make sure that when we
        # run the ServermapUpdater, the file is reported to have one
        # recoverable version.
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        # Calling make_servermap also updates the servermap in the mode
        # that we specify, so we just need to see what it says.
        def _check_servermap(sm):
            self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        d.addCallback(_check_servermap)
        return d


    def test_fetch_update(self):
        # a MODE_WRITE map with an update_range should collect per-share
        # update data for the requested segment range
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
        def _check_servermap(sm):
            # 10 shares
            self.failUnlessEqual(len(sm.update_data), 10)
            # one version
            for data in sm.update_data.itervalues():
                self.failUnlessEqual(len(data), 1)
        d.addCallback(_check_servermap)
        return d


    def test_servermapupdater_finds_sdmf_files(self):
        # same as the MDMF variant above, for an SDMF file
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_sdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        d.addCallback(lambda servermap:
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
        return d
1298
1299
1300 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
    def setUp(self):
        # every test in this class starts with one freshly-published SDMF
        # file (see PublishMixin.publish_one)
        return self.publish_one()
1303
1304     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1305         if oldmap is None:
1306             oldmap = ServerMap()
1307         if sb is None:
1308             sb = self._storage_broker
1309         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1310         d = smu.update()
1311         return d
1312
1313     def abbrev_verinfo(self, verinfo):
1314         if verinfo is None:
1315             return None
1316         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1317          offsets_tuple) = verinfo
1318         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1319
1320     def abbrev_verinfo_dict(self, verinfo_d):
1321         output = {}
1322         for verinfo,value in verinfo_d.items():
1323             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1324              offsets_tuple) = verinfo
1325             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1326         return output
1327
    def dump_servermap(self, servermap):
        # debugging helper (not called by default): print a human-readable
        # summary of the servermap to stdout
        print "SERVERMAP", servermap
        print "RECOVERABLE", [self.abbrev_verinfo(v)
                              for v in servermap.recoverable_versions()]
        print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
        print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1334
1335     def do_download(self, servermap, version=None):
1336         if version is None:
1337             version = servermap.best_recoverable_version()
1338         r = Retrieve(self._fn, servermap, version)
1339         c = consumer.MemoryConsumer()
1340         d = r.download(consumer=c)
1341         d.addCallback(lambda mc: "".join(mc.chunks))
1342         return d
1343
1344
    def test_basic(self):
        """Map, download, and verify the published contents; then confirm
        the same servermap can be re-used (with and without an update), and
        that clobbering the cached pubkey forces a re-fetch."""
        d = self.make_servermap()
        def _do_retrieve(servermap):
            self._smap = servermap
            #self.dump_servermap(servermap)
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
            return self.do_download(servermap)
        d.addCallback(_do_retrieve)
        def _retrieved(new_contents):
            self.failUnlessEqual(new_contents, self.CONTENTS)
        d.addCallback(_retrieved)
        # we should be able to re-use the same servermap, both with and
        # without updating it.
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        # clobbering the pubkey should make the servermap updater re-fetch it
        def _clobber_pubkey(res):
            self._fn._pubkey = None
        d.addCallback(_clobber_pubkey)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        return d
1371
1372     def test_all_shares_vanished(self):
1373         d = self.make_servermap()
1374         def _remove_shares(servermap):
1375             for shares in self._storage._peers.values():
1376                 shares.clear()
1377             d1 = self.shouldFail(NotEnoughSharesError,
1378                                  "test_all_shares_vanished",
1379                                  "ran out of peers",
1380                                  self.do_download, servermap)
1381             return d1
1382         d.addCallback(_remove_shares)
1383         return d
1384
1385     def test_no_servers(self):
1386         sb2 = make_storagebroker(num_peers=0)
1387         # if there are no servers, then a MODE_READ servermap should come
1388         # back empty
1389         d = self.make_servermap(sb=sb2)
1390         def _check_servermap(servermap):
1391             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1392             self.failIf(servermap.recoverable_versions())
1393             self.failIf(servermap.unrecoverable_versions())
1394             self.failIf(servermap.all_peers())
1395         d.addCallback(_check_servermap)
1396         return d
1397
1398     def test_no_servers_download(self):
1399         sb2 = make_storagebroker(num_peers=0)
1400         self._fn._storage_broker = sb2
1401         d = self.shouldFail(UnrecoverableFileError,
1402                             "test_no_servers_download",
1403                             "no recoverable versions",
1404                             self._fn.download_best_version)
1405         def _restore(res):
1406             # a failed download that occurs while we aren't connected to
1407             # anybody should not prevent a subsequent download from working.
1408             # This isn't quite the webapi-driven test that #463 wants, but it
1409             # should be close enough.
1410             self._fn._storage_broker = self._storage_broker
1411             return self._fn.download_best_version()
1412         def _retrieved(new_contents):
1413             self.failUnlessEqual(new_contents, self.CONTENTS)
1414         d.addCallback(_restore)
1415         d.addCallback(_retrieved)
1416         return d
1417
1418
    def _test_corrupt_all(self, offset, substring,
                          should_succeed=False,
                          corrupt_early=True,
                          failure_checker=None,
                          fetch_privkey=False):
        """Corrupt every share at 'offset' and attempt a download.

        corrupt_early=True corrupts before the servermap update (so the
        mapupdate itself may notice the damage); False corrupts after, so
        only the retrieve sees it. If should_succeed, expects the original
        contents back; otherwise expects NotEnoughSharesError whose message
        contains 'substring'. failure_checker, if given, is called with the
        result of the failed download. Returns a Deferred firing with the
        servermap.
        """
        d = defer.succeed(None)
        if corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        d.addCallback(lambda res: self.make_servermap())
        if not corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        def _do_retrieve(servermap):
            ver = servermap.best_recoverable_version()
            if ver is None and not should_succeed:
                # no recoverable versions == not succeeding. The problem
                # should be noted in the servermap's list of problems.
                if substring:
                    allproblems = [str(f) for f in servermap.problems]
                    self.failUnlessIn(substring, "".join(allproblems))
                return servermap
            if should_succeed:
                d1 = self._fn.download_version(servermap, ver,
                                               fetch_privkey)
                d1.addCallback(lambda new_contents:
                               self.failUnlessEqual(new_contents, self.CONTENTS))
            else:
                d1 = self.shouldFail(NotEnoughSharesError,
                                     "_corrupt_all(offset=%s)" % (offset,),
                                     substring,
                                     self._fn.download_version, servermap,
                                                                ver,
                                                                fetch_privkey)
            if failure_checker:
                d1.addCallback(failure_checker)
            d1.addCallback(lambda res: servermap)
            return d1
        d.addCallback(_do_retrieve)
        return d
1457
1458     def test_corrupt_all_verbyte(self):
1459         # when the version byte is not 0 or 1, we hit an UnknownVersionError
1460         # error in unpack_share().
1461         d = self._test_corrupt_all(0, "UnknownVersionError")
1462         def _check_servermap(servermap):
1463             # and the dump should mention the problems
1464             s = StringIO()
1465             dump = servermap.dump(s).getvalue()
1466             self.failUnless("30 PROBLEMS" in dump, dump)
1467         d.addCallback(_check_servermap)
1468         return d
1469
    def test_corrupt_all_seqnum(self):
        """Corrupting the seqnum field makes every signature check fail."""
        # a corrupt sequence number will trigger a bad signature
        return self._test_corrupt_all(1, "signature is invalid")
1473
    def test_corrupt_all_R(self):
        """Corrupting the root hash (offset 9) makes signatures fail."""
        # a corrupt root hash will trigger a bad signature
        return self._test_corrupt_all(9, "signature is invalid")
1477
    def test_corrupt_all_IV(self):
        """Corrupting the salt/IV (offset 41) makes signatures fail."""
        # a corrupt salt/IV will trigger a bad signature
        return self._test_corrupt_all(41, "signature is invalid")
1481
    def test_corrupt_all_k(self):
        """Corrupting the encoding parameter 'k' makes signatures fail."""
        # a corrupt 'k' will trigger a bad signature
        return self._test_corrupt_all(57, "signature is invalid")
1485
    def test_corrupt_all_N(self):
        """Corrupting the encoding parameter 'N' makes signatures fail."""
        # a corrupt 'N' will trigger a bad signature
        return self._test_corrupt_all(58, "signature is invalid")
1489
    def test_corrupt_all_segsize(self):
        """Corrupting the segment-size field makes signatures fail."""
        # a corrupt segsize will trigger a bad signature
        return self._test_corrupt_all(59, "signature is invalid")
1493
    def test_corrupt_all_datalen(self):
        """Corrupting the data-length field makes signatures fail."""
        # a corrupt data length will trigger a bad signature
        return self._test_corrupt_all(67, "signature is invalid")
1497
    def test_corrupt_all_pubkey(self):
        """A corrupted pubkey fails the URI fingerprint check."""
        # a corrupt pubkey won't match the URI's fingerprint. We need to
        # remove the pubkey from the filenode, or else it won't bother trying
        # to update it.
        self._fn._pubkey = None
        return self._test_corrupt_all("pubkey",
                                      "pubkey doesn't match fingerprint")
1505
    def test_corrupt_all_sig(self):
        """A corrupted signature is reported as invalid."""
        # a corrupt signature is a bad one
        # the signature runs from about [543:799], depending upon the length
        # of the pubkey
        return self._test_corrupt_all("signature", "signature is invalid")
1511
    def test_corrupt_all_share_hash_chain_number(self):
        """Mangling the first byte of a share-hash-chain entry corrupts the
        hash *number*, which surfaces as 'corrupt hashes'."""
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle the first byte, that will look like a bad hash number,
        # causing an IndexError
        return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1517
    def test_corrupt_all_share_hash_chain_hash(self):
        """Mangling bytes inside a share-hash-chain entry corrupts the hash
        *value*, also surfacing as 'corrupt hashes'."""
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle a few bytes in, that will look like a bad hash.
        return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1522
    def test_corrupt_all_block_hash_tree(self):
        """A corrupted block hash tree fails the block-hash-tree check."""
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure")
1526
    def test_corrupt_all_block(self):
        """Corrupted share data is caught by the block hash tree."""
        return self._test_corrupt_all("share_data", "block hash tree failure")
1529
    def test_corrupt_all_encprivkey(self):
        """A corrupted encrypted privkey is ignored by readers, so the
        download still succeeds."""
        # a corrupted privkey won't even be noticed by the reader, only by a
        # writer.
        return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1534
1535
    def test_corrupt_all_encprivkey_late(self):
        """Same as test_corrupt_all_encprivkey, but corrupting after the
        servermap update to exercise the retrieve-side error handling."""
        # this should work for the same reason as above, but we corrupt 
        # after the servermap update to exercise the error handling
        # code.
        # We need to remove the privkey from the node, or the retrieve
        # process won't know to update it.
        self._fn._privkey = None
        return self._test_corrupt_all("enc_privkey",
                                      None, # this shouldn't fail
                                      should_succeed=True,
                                      corrupt_early=False,
                                      fetch_privkey=True)
1548
1549
    def test_corrupt_all_seqnum_late(self):
        """Corrupting the seqnum between mapupdate and retrieve makes every
        share look invalid, yielding NotEnoughSharesError that mentions an
        uncoordinated write."""
        def _check(res):
            # res[0] is the Failure captured by shouldFail
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
            self.failUnless("uncoordinated write" in str(f))
        return self._test_corrupt_all(1, "ran out of peers",
                                      corrupt_early=False,
                                      failure_checker=_check)
1560
    def test_corrupt_all_block_hash_tree_late(self):
        """Block hash tree corrupted after the servermap update: retrieve
        must fail with NotEnoughSharesError."""
        def _check(res):
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure",
                                      corrupt_early=False,
                                      failure_checker=_check)
1569
1570
    def test_corrupt_all_block_late(self):
        """Share data corrupted after the servermap update: retrieve must
        fail with NotEnoughSharesError."""
        def _check(res):
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
        return self._test_corrupt_all("share_data", "block hash tree failure",
                                      corrupt_early=False,
                                      failure_checker=_check)
1578
1579
    def test_basic_pubkey_at_end(self):
        """Corrupt the pubkey in all but the last k shares; the download must
        still succeed after retrying, and the servermap records the
        fingerprint-mismatch problems."""
        # we corrupt the pubkey in all but the last 'k' shares, allowing the
        # download to succeed but forcing a bunch of retries first. Note that
        # this is rather pessimistic: our Retrieve process will throw away
        # the whole share if the pubkey is bad, even though the rest of the
        # share might be good.

        self._fn._pubkey = None
        k = self._fn.get_required_shares()
        N = self._fn.get_total_shares()
        d = defer.succeed(None)
        d.addCallback(corrupt, self._storage, "pubkey",
                      shnums_to_corrupt=range(0, N-k))
        d.addCallback(lambda res: self.make_servermap())
        def _do_retrieve(servermap):
            self.failUnless(servermap.problems)
            self.failUnless("pubkey doesn't match fingerprint"
                            in str(servermap.problems[0]))
            ver = servermap.best_recoverable_version()
            r = Retrieve(self._fn, servermap, ver)
            c = consumer.MemoryConsumer()
            return r.download(c)
        d.addCallback(_do_retrieve)
        d.addCallback(lambda mc: "".join(mc.chunks))
        d.addCallback(lambda new_contents:
                      self.failUnlessEqual(new_contents, self.CONTENTS))
        return d
1607
1608
1609     def _test_corrupt_some(self, offset, mdmf=False):
1610         if mdmf:
1611             d = self.publish_mdmf()
1612         else:
1613             d = defer.succeed(None)
1614         d.addCallback(lambda ignored:
1615             corrupt(None, self._storage, offset, range(5)))
1616         d.addCallback(lambda ignored:
1617             self.make_servermap())
1618         def _do_retrieve(servermap):
1619             ver = servermap.best_recoverable_version()
1620             self.failUnless(ver)
1621             return self._fn.download_best_version()
1622         d.addCallback(_do_retrieve)
1623         d.addCallback(lambda new_contents:
1624             self.failUnlessEqual(new_contents, self.CONTENTS))
1625         return d
1626
1627
    def test_corrupt_some(self):
        """Corrupt five shares so the MODE_READ set of 6 is insufficient,
        forcing the download to retry with more servers."""
        # corrupt the data of first five shares (so the servermap thinks
        # they're good but retrieve marks them as bad), so that the
        # MODE_READ set of 6 will be insufficient, forcing node.download to
        # retry with more servers.
        return self._test_corrupt_some("share_data")
1634
1635
1636     def test_download_fails(self):
1637         d = corrupt(None, self._storage, "signature")
1638         d.addCallback(lambda ignored:
1639             self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1640                             "no recoverable versions",
1641                             self._fn.download_best_version))
1642         return d
1643
1644
1645
1646     def test_corrupt_mdmf_block_hash_tree(self):
1647         d = self.publish_mdmf()
1648         d.addCallback(lambda ignored:
1649             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1650                                    "block hash tree failure",
1651                                    corrupt_early=False,
1652                                    should_succeed=False))
1653         return d
1654
1655
1656     def test_corrupt_mdmf_block_hash_tree_late(self):
1657         d = self.publish_mdmf()
1658         d.addCallback(lambda ignored:
1659             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1660                                    "block hash tree failure",
1661                                    corrupt_early=True,
1662                                    should_succeed=False))
1663         return d
1664
1665
    def test_corrupt_mdmf_share_data(self):
        """Corrupted MDMF share data must fail the block hash tree check."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            # TODO: Find out what the block size is and corrupt a
            # specific block, rather than just guessing.
            self._test_corrupt_all(("share_data", 12 * 40),
                                    "block hash tree failure",
                                    corrupt_early=True,
                                    should_succeed=False))
        return d
1676
1677
    def test_corrupt_some_mdmf(self):
        """MDMF variant of test_corrupt_some: corrupt the first five shares'
        data and confirm the download still succeeds."""
        return self._test_corrupt_some(("share_data", 12 * 40),
                                       mdmf=True)
1681
1682
class CheckerMixin:
    """Assertion helpers shared by the checker/verifier test cases."""

    def check_good(self, r, where):
        """Assert that check results 'r' report a healthy file; return r."""
        self.failUnless(r.is_healthy(), where)
        return r

    def check_bad(self, r, where):
        """Assert that check results 'r' report an unhealthy file; return r."""
        self.failIf(r.is_healthy(), where)
        return r

    def check_expected_failure(self, r, expected_exception, substring, where):
        """Assert that r.problems contains a failure of expected_exception
        whose string form mentions 'substring'."""
        for (peerid, storage_index, shnum, f) in r.problems:
            if not f.check(expected_exception):
                continue
            self.failUnless(substring in str(f),
                            "%s: substring '%s' not in '%s'" %
                            (where, substring, str(f)))
            return
        self.fail("%s: didn't see expected exception %s in problems %s" %
                  (where, expected_exception, r.problems))
1701
1702
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    """Tests for mutable-file check() and check(verify=True) against
    healthy, share-less, and deliberately corrupted SDMF/MDMF files.

    Fixes: corrected the typo'd failure label in
    test_check_mdmf_not_enough_shares ('not_enougH'); renamed a misleading
    loop variable; made dict-key iteration explicitly copy via list(...)
    for consistency with the Repair tests.
    """

    def setUp(self):
        # publish a known SDMF file; MDMF tests re-publish via publish_mdmf()
        return self.publish_one()

    def test_check_good(self):
        d = self._fn.check(Monitor())
        d.addCallback(self.check_good, "test_check_good")
        return d

    def test_check_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_good")
        return d

    def test_check_no_shares(self):
        for shares in self._storage._peers.values():
            shares.clear()
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_no_shares")
        return d

    def test_check_mdmf_no_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            # each value is a per-peer dict of shnum->share data
            for shares in self._storage._peers.values():
                shares.clear()
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        return d

    def test_check_not_enough_shares(self):
        # leave only share 0 on each server: fewer than k shares overall
        for shares in self._storage._peers.values():
            for shnum in list(shares.keys()):
                if shnum > 0:
                    del shares[shnum]
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_not_enough_shares")
        return d

    def test_check_mdmf_not_enough_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            for shares in self._storage._peers.values():
                for shnum in list(shares.keys()):
                    if shnum > 0:
                        del shares[shnum]
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
        return d

    def test_check_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_all_bad_sig")
        return d

    def test_check_mdmf_all_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
        return d

    def test_check_all_bad_blocks(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Checker won't notice this.. it doesn't look at actual data
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_all_bad_blocks")
        return d

    def test_check_mdmf_all_bad_blocks(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data"))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
        return d

    def test_verify_good(self):
        d = self._fn.check(Monitor(), verify=True)
        d.addCallback(self.check_good, "test_verify_good")
        return d

    def test_verify_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_all_bad_sig")
        return d

    def test_verify_one_bad_sig(self):
        d = corrupt(None, self._storage, 1, [9]) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sig")
        return d

    def test_verify_one_bad_block(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Verifier *will* notice this, since it examines every byte
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_one_bad_block")
        return d

    def test_verify_one_bad_sharehash(self):
        d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "corrupt hashes",
                      "test_verify_one_bad_sharehash")
        return d

    def test_verify_one_bad_encprivkey(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "invalid privkey",
                      "test_verify_one_bad_encprivkey")
        return d

    def test_verify_one_bad_encprivkey_uncheckable(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        readonly_fn = self._fn.get_readonly()
        # a read-only node has no way to validate the privkey
        d.addCallback(lambda ignored:
            readonly_fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_one_bad_encprivkey_uncheckable")
        return d

    def test_verify_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf_good")
        return d

    def test_verify_mdmf_one_bad_block(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data", [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        # We should find one bad block here
        d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_mdmf_one_bad_block")
        return d

    def test_verify_mdmf_bad_encprivkey(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [0]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "privkey",
                      "test_verify_mdmf_bad_encprivkey")
        return d

    def test_verify_mdmf_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1, [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
        return d

    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [1]))
        d.addCallback(lambda ignored:
            self._fn.get_readonly())
        d.addCallback(lambda fn:
            fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
        return d
1912
1913
1914 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1915
1916     def get_shares(self, s):
1917         all_shares = {} # maps (peerid, shnum) to share data
1918         for peerid in s._peers:
1919             shares = s._peers[peerid]
1920             for shnum in shares:
1921                 data = shares[shnum]
1922                 all_shares[ (peerid, shnum) ] = data
1923         return all_shares
1924
    def copy_shares(self, ignored=None):
        """Snapshot the current share contents onto self.old_shares."""
        self.old_shares.append(self.get_shares(self._storage))
1927
    def test_repair_nop(self):
        """Repairing a healthy file must succeed, keep every share in the
        same place, and bump each share's seqnum by one."""
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results

            self.copy_shares()

            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)

            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                (version0,
                 seqnum0,
                 root_hash0,
                 IV0,
                 k0, N0, segsize0, datalen0,
                 o0) = unpack_header(initial_shares[key])
                (version1,
                 seqnum1,
                 root_hash1,
                 IV1,
                 k1, N1, segsize1, datalen1,
                 o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)
        return d
1973
1974     def failIfSharesChanged(self, ignored=None):
1975         old_shares = self.old_shares[-2]
1976         current_shares = self.old_shares[-1]
1977         self.failUnlessEqual(old_shares, current_shares)
1978
1979
1980     def test_unrepairable_0shares(self):
1981         d = self.publish_one()
1982         def _delete_all_shares(ign):
1983             shares = self._storage._peers
1984             for peerid in shares:
1985                 shares[peerid] = {}
1986         d.addCallback(_delete_all_shares)
1987         d.addCallback(lambda ign: self._fn.check(Monitor()))
1988         d.addCallback(lambda check_results: self._fn.repair(check_results))
1989         def _check(crr):
1990             self.failUnlessEqual(crr.get_successful(), False)
1991         d.addCallback(_check)
1992         return d
1993
1994     def test_mdmf_unrepairable_0shares(self):
1995         d = self.publish_mdmf()
1996         def _delete_all_shares(ign):
1997             shares = self._storage._peers
1998             for peerid in shares:
1999                 shares[peerid] = {}
2000         d.addCallback(_delete_all_shares)
2001         d.addCallback(lambda ign: self._fn.check(Monitor()))
2002         d.addCallback(lambda check_results: self._fn.repair(check_results))
2003         d.addCallback(lambda crr: self.failIf(crr.get_successful()))
2004         return d
2005
2006
2007     def test_unrepairable_1share(self):
2008         d = self.publish_one()
2009         def _delete_all_shares(ign):
2010             shares = self._storage._peers
2011             for peerid in shares:
2012                 for shnum in list(shares[peerid]):
2013                     if shnum > 0:
2014                         del shares[peerid][shnum]
2015         d.addCallback(_delete_all_shares)
2016         d.addCallback(lambda ign: self._fn.check(Monitor()))
2017         d.addCallback(lambda check_results: self._fn.repair(check_results))
2018         def _check(crr):
2019             self.failUnlessEqual(crr.get_successful(), False)
2020         d.addCallback(_check)
2021         return d
2022
2023     def test_mdmf_unrepairable_1share(self):
2024         d = self.publish_mdmf()
2025         def _delete_all_shares(ign):
2026             shares = self._storage._peers
2027             for peerid in shares:
2028                 for shnum in list(shares[peerid]):
2029                     if shnum > 0:
2030                         del shares[peerid][shnum]
2031         d.addCallback(_delete_all_shares)
2032         d.addCallback(lambda ign: self._fn.check(Monitor()))
2033         d.addCallback(lambda check_results: self._fn.repair(check_results))
2034         def _check(crr):
2035             self.failUnlessEqual(crr.get_successful(), False)
2036         d.addCallback(_check)
2037         return d
2038
2039     def test_repairable_5shares(self):
2040         d = self.publish_mdmf()
2041         def _delete_all_shares(ign):
2042             shares = self._storage._peers
2043             for peerid in shares:
2044                 for shnum in list(shares[peerid]):
2045                     if shnum > 4:
2046                         del shares[peerid][shnum]
2047         d.addCallback(_delete_all_shares)
2048         d.addCallback(lambda ign: self._fn.check(Monitor()))
2049         d.addCallback(lambda check_results: self._fn.repair(check_results))
2050         def _check(crr):
2051             self.failUnlessEqual(crr.get_successful(), True)
2052         d.addCallback(_check)
2053         return d
2054
    def test_mdmf_repairable_5shares(self):
        """An MDMF file reduced to a recoverable-but-unhealthy share count
        must check as such, and repair must succeed."""
        d = self.publish_mdmf()
        def _delete_some_shares(ign):
            # NOTE(review): this deletes shnum > 5, leaving six shares
            # (0-5), despite the '5shares' name -- confirm intent
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum > 5:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        def _check(cr):
            self.failIf(cr.is_healthy())
            self.failUnless(cr.is_recoverable())
            return cr
        d.addCallback(_check)
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check1(crr):
            self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check1)
        return d
2075
2076
    def test_merge(self):
        """repair() must refuse to merge tied highest seqnums without force.

        Shares are split between two parallel versions with the same seqnum
        (s4a on even shnums, s4b on odd). A plain repair() must fail with
        MustForceRepairError and leave the shares untouched; a
        repair(force=True) must then publish a single merged seqnum-5
        version whose contents come from whichever tied version has the
        larger roothash.
        """
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # pass force=True
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        # snapshot the shares so we can prove the failed repair is a no-op
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            # the refused repair must not have modified any shares
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            return d2
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            # the tie between equal seqnums is expected to be broken in
            # favor of the larger roothash
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            else:
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d
2121
2122     def test_non_merge(self):
2123         self.old_shares = []
2124         d = self.publish_multiple()
2125         # repair should not refuse a repair that doesn't need to merge. In
2126         # this case, we combine v2 with v3. The repair should ignore v2 and
2127         # copy v3 into a new v5.
2128         d.addCallback(lambda res:
2129                       self._set_versions({0:2,2:2,4:2,6:2,8:2,
2130                                           1:3,3:3,5:3,7:3,9:3}))
2131         d.addCallback(lambda res: self._fn.check(Monitor()))
2132         d.addCallback(lambda check_results: self._fn.repair(check_results))
2133         # this should give us 10 shares of v3
2134         def _check_repair_results(rres):
2135             self.failUnless(rres.get_successful())
2136             pass # TODO
2137         d.addCallback(_check_repair_results)
2138         d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2139         def _check_smap(smap):
2140             self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2141             self.failIf(smap.unrecoverable_versions())
2142             # now, which should have won?
2143             expected_contents = self.CONTENTS[3]
2144             new_versionid = smap.best_recoverable_version()
2145             self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2146             d2 = self._fn.download_version(smap, new_versionid)
2147             d2.addCallback(self.failUnlessEqual, expected_contents)
2148             return d2
2149         d.addCallback(_check_smap)
2150         return d
2151
2152     def get_roothash_for(self, index):
2153         # return the roothash for the first share we see in the saved set
2154         shares = self._copied_shares[index]
2155         for peerid in shares:
2156             for shnum in shares[peerid]:
2157                 share = shares[peerid][shnum]
2158                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2159                           unpack_header(share)
2160                 return root_hash
2161
2162     def test_check_and_repair_readcap(self):
2163         # we can't currently repair from a mutable readcap: #625
2164         self.old_shares = []
2165         d = self.publish_one()
2166         d.addCallback(self.copy_shares)
2167         def _get_readcap(res):
2168             self._fn3 = self._fn.get_readonly()
2169             # also delete some shares
2170             for peerid,shares in self._storage._peers.items():
2171                 shares.pop(0, None)
2172         d.addCallback(_get_readcap)
2173         d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2174         def _check_results(crr):
2175             self.failUnless(ICheckAndRepairResults.providedBy(crr))
2176             # we should detect the unhealthy, but skip over mutable-readcap
2177             # repairs until #625 is fixed
2178             self.failIf(crr.get_pre_repair_results().is_healthy())
2179             self.failIf(crr.get_repair_attempted())
2180             self.failIf(crr.get_post_repair_results().is_healthy())
2181         d.addCallback(_check_results)
2182         return d
2183
class DevNullDictionary(dict):
    """A dict whose item assignments are silently discarded.

    Used to disable caches that store entries via ``cache[key] = value``.
    """
    def __setitem__(self, key, value):
        pass
2187
class MultipleEncodings(unittest.TestCase):
    """Download must survive a grid holding shares from several encodings.

    The same mutable file is published with different (k, n) encoding
    parameters, and shares from the resulting sets are mixed together
    before downloading.
    """
    def setUp(self):
        # publish one mutable file to a 20-server fake grid and keep the
        # resulting filenode in self._fn
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
        d.addCallback(_created)
        return d

    def _encode(self, k, n, data, version=SDMF_VERSION):
        """Publish 'data' with k-of-n encoding; fire with a peerid->shares dict.

        The fake storage is cleared before and after the publish, so the
        returned dict contains exactly the shares this publish created.
        NOTE(review): the 'version' parameter is accepted but not used in
        this body -- confirm whether it should be applied to fn2.
        """
        # encode 'data' into a peerid->shares dict.

        fn = self._fn
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        # existing shares
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n

        s = self._storage
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # grab (and clear) the shares this publish created
            shares = s._peers
            s._peers = {}
            return shares
        d.addCallback(_published)
        return d

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        """Build (or refresh) a ServerMap for self._fn in the given mode."""
        if oldmap is None:
            oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.

        contents1 = "Contents for encoding 1 (3-of-10) go here"
        contents2 = "Contents for encoding 2 (4-of-9) go here"
        contents3 = "Contents for encoding 3 (4-of-7) go here"

        # we make a retrieval object that doesn't know what encoding
        # parameters to use
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())

        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)

        def _merge(res):
            log.msg("merging sharelists")
            # we merge the shares from the two sets, leaving each shnum in
            # its original location, but using a share from set1 or set2
            # according to the following sequence:
            #
            #  4-of-9  a  s2
            #  4-of-9  b  s2
            #  4-of-7  c   s3
            #  4-of-9  d  s2
            #  3-of-10 e s1
            #  3-of-10 f s1
            #  3-of-10 g s1
            #  4-of-9  h  s2
            #
            # so that neither form can be recovered until fetch [f], at which
            # point version-s1 (the 3-of-10 form) should be recoverable. If
            # the implementation latches on to the first version it sees,
            # then s2 will be recoverable at fetch [g].

            # Later, when we implement code that handles multiple versions,
            # we can use this framework to assert that all recoverable
            # versions are retrieved, and test that 'epsilon' does its job

            # places[shnum] says which share set (1, 2, or 3) supplies shnum
            places = [2, 2, 3, 2, 1, 1, 1, 2]

            sharemap = {}
            sb = self._storage_broker

            for peerid in sorted(sb.get_all_serverids()):
                for shnum in self._shares1.get(peerid, {}):
                    if shnum < len(places):
                        which = places[shnum]
                    else:
                        which = "x"
                    # NOTE(review): this rebinds peers to a fresh dict on
                    # every shnum, discarding earlier placements for this
                    # peerid; with 20 servers each peer normally holds only
                    # one share, but confirm the reset is intentional.
                    self._storage._peers[peerid] = peers = {}
                    in_1 = shnum in self._shares1[peerid]
                    in_2 = shnum in self._shares2.get(peerid, {})
                    in_3 = shnum in self._shares3.get(peerid, {})
                    if which == 1:
                        if in_1:
                            peers[shnum] = self._shares1[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 2:
                        if in_2:
                            peers[shnum] = self._shares2[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 3:
                        if in_3:
                            peers[shnum] = self._shares3[peerid][shnum]
                            sharemap[shnum] = peerid

            # we don't bother placing any other shares
            # now sort the sequence so that share 0 is returned first
            new_sequence = [sharemap[shnum]
                            for shnum in sorted(sharemap.keys())]
            self._storage._sequence = new_sequence
            log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
        return d
2330
2331
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    """Download/checker/modify behavior when the grid holds mixed versions."""

    def setUp(self):
        # publish_multiple() (from PublishMixin) stores several versions'
        # worth of shares, which _set_versions() later mixes into the grid
        return self.publish_multiple()

    def test_multiple_versions(self):
        """download_best_version should return the newest recoverable version."""
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")

        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            # exactly one unrecoverable-but-newer version, with one share
            # out of a needed three
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        # recover either
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            # both tied versions are recoverable, so a merge is needed
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        return d

    def test_replace(self):
        """modify() on a mixed grid must replace every version, outlier included."""
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version

        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)

        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
        return d
2411
2412
class Utils(unittest.TestCase):
    def test_cache(self):
        """Exercise ResponseCache reads: hits, misses, bounds, and joining."""
        c = ResponseCache()
        # two 100-byte deterministic stand-ins for share data
        # (originally generated via base62.b2a(os.urandom(100))[:100])
        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
        c.add("v1", 1, 0, xdata)
        c.add("v1", 1, 2000, ydata)
        # each case is ((verinfo, shnum, offset, length), expected result)
        cases = [
            (("v2", 1, 10, 11), None),       # unknown verinfo
            (("v1", 2, 10, 11), None),       # unknown shnum
            (("v1", 1, 0, 10), xdata[:10]),
            (("v1", 1, 90, 10), xdata[90:]),
            (("v1", 1, 300, 10), None),      # gap between the two entries
            (("v1", 1, 2050, 5), ydata[50:55]),
            (("v1", 1, 0, 101), None),       # runs past the cached data
            (("v1", 1, 99, 1), xdata[99:100]),
            (("v1", 1, 100, 1), None),
            (("v1", 1, 1990, 9), None),      # reads near the 2000 boundary
            (("v1", 1, 1990, 10), None),
            (("v1", 1, 1990, 11), None),
            (("v1", 1, 1990, 15), None),
            (("v1", 1, 1990, 19), None),
            (("v1", 1, 1990, 20), None),
            (("v1", 1, 1990, 21), None),
            (("v1", 1, 1990, 25), None),
            (("v1", 1, 1999, 25), None),
        ]
        for ((verinfo, shnum, offset, length), expected) in cases:
            self.failUnlessEqual(c.read(verinfo, shnum, offset, length),
                                 expected)

        # adjacent fragments should be joined on read
        c = ResponseCache()
        c.add("v1", 1, 0, xdata[:10])
        c.add("v1", 1, 10, xdata[10:20])
        self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2445
class Exceptions(unittest.TestCase):
    def test_repr(self):
        """repr() of mutable-file exceptions should name the exception class."""
        for (exc, name) in [(NeedMoreDataError(100, 50, 100),
                             "NeedMoreDataError"),
                            (UncoordinatedWriteError(),
                             "UncoordinatedWriteError")]:
            self.failUnless(name in repr(exc), repr(exc))
2452
class SameKeyGenerator:
    """A key generator that always hands back one fixed (pubkey, privkey) pair."""
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # keysize is accepted for interface compatibility but ignored
        return defer.succeed((self.pubkey, self.privkey))
2459
class FirstServerGetsKilled:
    """Proxy hook that breaks the first server it sees.

    notify() always passes retval through untouched; on the very first
    call it also marks that call's wrapper as broken.
    """
    done = False
    def notify(self, retval, wrapper, methname):
        if self.done:
            return retval
        wrapper.broken = True
        self.done = True
        return retval
2467
class FirstServerGetsDeleted:
    """Proxy hook simulating a server that loses its share after one query.

    The first call succeeds normally and its wrapper is remembered; later
    write calls to that same wrapper pretend the share has been deleted by
    answering (True, {}).
    """
    def __init__(self):
        self.done = False
        self.silenced = None

    def notify(self, retval, wrapper, methname):
        if not self.done:
            # this query will work, but later queries should think the
            # share has been deleted
            self.done = True
            self.silenced = wrapper
            return retval
        if wrapper != self.silenced:
            return retval
        assert methname == "slot_testv_and_readv_and_writev"
        return (True, {})
2483
2484 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2485     def test_publish_surprise(self):
2486         self.basedir = "mutable/Problems/test_publish_surprise"
2487         self.set_up_grid()
2488         nm = self.g.clients[0].nodemaker
2489         d = nm.create_mutable_file(MutableData("contents 1"))
2490         def _created(n):
2491             d = defer.succeed(None)
2492             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2493             def _got_smap1(smap):
2494                 # stash the old state of the file
2495                 self.old_map = smap
2496             d.addCallback(_got_smap1)
2497             # then modify the file, leaving the old map untouched
2498             d.addCallback(lambda res: log.msg("starting winning write"))
2499             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2500             # now attempt to modify the file with the old servermap. This
2501             # will look just like an uncoordinated write, in which every
2502             # single share got updated between our mapupdate and our publish
2503             d.addCallback(lambda res: log.msg("starting doomed write"))
2504             d.addCallback(lambda res:
2505                           self.shouldFail(UncoordinatedWriteError,
2506                                           "test_publish_surprise", None,
2507                                           n.upload,
2508                                           MutableData("contents 2a"), self.old_map))
2509             return d
2510         d.addCallback(_created)
2511         return d
2512
2513     def test_retrieve_surprise(self):
2514         self.basedir = "mutable/Problems/test_retrieve_surprise"
2515         self.set_up_grid()
2516         nm = self.g.clients[0].nodemaker
2517         d = nm.create_mutable_file(MutableData("contents 1"))
2518         def _created(n):
2519             d = defer.succeed(None)
2520             d.addCallback(lambda res: n.get_servermap(MODE_READ))
2521             def _got_smap1(smap):
2522                 # stash the old state of the file
2523                 self.old_map = smap
2524             d.addCallback(_got_smap1)
2525             # then modify the file, leaving the old map untouched
2526             d.addCallback(lambda res: log.msg("starting winning write"))
2527             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2528             # now attempt to retrieve the old version with the old servermap.
2529             # This will look like someone has changed the file since we
2530             # updated the servermap.
2531             d.addCallback(lambda res: n._cache._clear())
2532             d.addCallback(lambda res: log.msg("starting doomed read"))
2533             d.addCallback(lambda res:
2534                           self.shouldFail(NotEnoughSharesError,
2535                                           "test_retrieve_surprise",
2536                                           "ran out of peers: have 0 of 1",
2537                                           n.download_version,
2538                                           self.old_map,
2539                                           self.old_map.best_recoverable_version(),
2540                                           ))
2541             return d
2542         d.addCallback(_created)
2543         return d
2544
2545
2546     def test_unexpected_shares(self):
2547         # upload the file, take a servermap, shut down one of the servers,
2548         # upload it again (causing shares to appear on a new server), then
2549         # upload using the old servermap. The last upload should fail with an
2550         # UncoordinatedWriteError, because of the shares that didn't appear
2551         # in the servermap.
2552         self.basedir = "mutable/Problems/test_unexpected_shares"
2553         self.set_up_grid()
2554         nm = self.g.clients[0].nodemaker
2555         d = nm.create_mutable_file(MutableData("contents 1"))
2556         def _created(n):
2557             d = defer.succeed(None)
2558             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2559             def _got_smap1(smap):
2560                 # stash the old state of the file
2561                 self.old_map = smap
2562                 # now shut down one of the servers
2563                 peer0 = list(smap.make_sharemap()[0])[0]
2564                 self.g.remove_server(peer0)
2565                 # then modify the file, leaving the old map untouched
2566                 log.msg("starting winning write")
2567                 return n.overwrite(MutableData("contents 2"))
2568             d.addCallback(_got_smap1)
2569             # now attempt to modify the file with the old servermap. This
2570             # will look just like an uncoordinated write, in which every
2571             # single share got updated between our mapupdate and our publish
2572             d.addCallback(lambda res: log.msg("starting doomed write"))
2573             d.addCallback(lambda res:
2574                           self.shouldFail(UncoordinatedWriteError,
2575                                           "test_surprise", None,
2576                                           n.upload,
2577                                           MutableData("contents 2a"), self.old_map))
2578             return d
2579         d.addCallback(_created)
2580         return d
2581
2582     def test_bad_server(self):
2583         # Break one server, then create the file: the initial publish should
2584         # complete with an alternate server. Breaking a second server should
2585         # not prevent an update from succeeding either.
2586         self.basedir = "mutable/Problems/test_bad_server"
2587         self.set_up_grid()
2588         nm = self.g.clients[0].nodemaker
2589
2590         # to make sure that one of the initial peers is broken, we have to
2591         # get creative. We create an RSA key and compute its storage-index.
2592         # Then we make a KeyGenerator that always returns that one key, and
2593         # use it to create the mutable file. This will get easier when we can
2594         # use #467 static-server-selection to disable permutation and force
2595         # the choice of server for share[0].
2596
2597         d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2598         def _got_key( (pubkey, privkey) ):
2599             nm.key_generator = SameKeyGenerator(pubkey, privkey)
2600             pubkey_s = pubkey.serialize()
2601             privkey_s = privkey.serialize()
2602             u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2603                                         ssk_pubkey_fingerprint_hash(pubkey_s))
2604             self._storage_index = u.get_storage_index()
2605         d.addCallback(_got_key)
2606         def _break_peer0(res):
2607             si = self._storage_index
2608             servers = nm.storage_broker.get_servers_for_psi(si)
2609             self.g.break_server(servers[0].get_serverid())
2610             self.server1 = servers[1]
2611         d.addCallback(_break_peer0)
2612         # now "create" the file, using the pre-established key, and let the
2613         # initial publish finally happen
2614         d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2615         # that ought to work
2616         def _got_node(n):
2617             d = n.download_best_version()
2618             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2619             # now break the second peer
2620             def _break_peer1(res):
2621                 self.g.break_server(self.server1.get_serverid())
2622             d.addCallback(_break_peer1)
2623             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2624             # that ought to work too
2625             d.addCallback(lambda res: n.download_best_version())
2626             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2627             def _explain_error(f):
2628                 print f
2629                 if f.check(NotEnoughServersError):
2630                     print "first_error:", f.value.first_error
2631                 return f
2632             d.addErrback(_explain_error)
2633             return d
2634         d.addCallback(_got_node)
2635         return d
2636
2637     def test_bad_server_overlap(self):
2638         # like test_bad_server, but with no extra unused servers to fall back
2639         # upon. This means that we must re-use a server which we've already
2640         # used. If we don't remember the fact that we sent them one share
2641         # already, we'll mistakenly think we're experiencing an
2642         # UncoordinatedWriteError.
2643
2644         # Break one server, then create the file: the initial publish should
2645         # complete with an alternate server. Breaking a second server should
2646         # not prevent an update from succeeding either.
2647         self.basedir = "mutable/Problems/test_bad_server_overlap"
2648         self.set_up_grid()
2649         nm = self.g.clients[0].nodemaker
2650         sb = nm.storage_broker
2651
2652         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2653         self.g.break_server(peerids[0])
2654
2655         d = nm.create_mutable_file(MutableData("contents 1"))
2656         def _created(n):
2657             d = n.download_best_version()
2658             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2659             # now break one of the remaining servers
2660             def _break_second_server(res):
2661                 self.g.break_server(peerids[1])
2662             d.addCallback(_break_second_server)
2663             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2664             # that ought to work too
2665             d.addCallback(lambda res: n.download_best_version())
2666             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2667             return d
2668         d.addCallback(_created)
2669         return d
2670
2671     def test_publish_all_servers_bad(self):
2672         # Break all servers: the publish should fail
2673         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2674         self.set_up_grid()
2675         nm = self.g.clients[0].nodemaker
2676         for s in nm.storage_broker.get_connected_servers():
2677             s.get_rref().broken = True
2678
2679         d = self.shouldFail(NotEnoughServersError,
2680                             "test_publish_all_servers_bad",
2681                             "ran out of good servers",
2682                             nm.create_mutable_file, MutableData("contents"))
2683         return d
2684
2685     def test_publish_no_servers(self):
2686         # no servers at all: the publish should fail
2687         self.basedir = "mutable/Problems/test_publish_no_servers"
2688         self.set_up_grid(num_servers=0)
2689         nm = self.g.clients[0].nodemaker
2690
2691         d = self.shouldFail(NotEnoughServersError,
2692                             "test_publish_no_servers",
2693                             "Ran out of non-bad servers",
2694                             nm.create_mutable_file, MutableData("contents"))
2695         return d
2696
2697
2698     def test_privkey_query_error(self):
2699         # when a servermap is updated with MODE_WRITE, it tries to get the
2700         # privkey. Something might go wrong during this query attempt.
2701         # Exercise the code in _privkey_query_failed which tries to handle
2702         # such an error.
2703         self.basedir = "mutable/Problems/test_privkey_query_error"
2704         self.set_up_grid(num_servers=20)
2705         nm = self.g.clients[0].nodemaker
2706         nm._node_cache = DevNullDictionary() # disable the nodecache
2707
2708         # we need some contents that are large enough to push the privkey out
2709         # of the early part of the file
2710         LARGE = "These are Larger contents" * 2000 # about 50KB
2711         LARGE_uploadable = MutableData(LARGE)
2712         d = nm.create_mutable_file(LARGE_uploadable)
2713         def _created(n):
2714             self.uri = n.get_uri()
2715             self.n2 = nm.create_from_cap(self.uri)
2716
2717             # When a mapupdate is performed on a node that doesn't yet know
2718             # the privkey, a short read is sent to a batch of servers, to get
2719             # the verinfo and (hopefully, if the file is short enough) the
2720             # encprivkey. Our file is too large to let this first read
2721             # contain the encprivkey. Each non-encprivkey-bearing response
2722             # that arrives (until the node gets the encprivkey) will trigger
2723             # a second read to specifically read the encprivkey.
2724             #
2725             # So, to exercise this case:
2726             #  1. notice which server gets a read() call first
2727             #  2. tell that server to start throwing errors
2728             killer = FirstServerGetsKilled()
2729             for s in nm.storage_broker.get_connected_servers():
2730                 s.get_rref().post_call_notifier = killer.notify
2731         d.addCallback(_created)
2732
2733         # now we update a servermap from a new node (which doesn't have the
2734         # privkey yet, forcing it to use a separate privkey query). Note that
2735         # the map-update will succeed, since we'll just get a copy from one
2736         # of the other shares.
2737         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2738
2739         return d
2740
2741     def test_privkey_query_missing(self):
2742         # like test_privkey_query_error, but the shares are deleted by the
2743         # second query, instead of raising an exception.
2744         self.basedir = "mutable/Problems/test_privkey_query_missing"
2745         self.set_up_grid(num_servers=20)
2746         nm = self.g.clients[0].nodemaker
2747         LARGE = "These are Larger contents" * 2000 # about 50KiB
2748         LARGE_uploadable = MutableData(LARGE)
2749         nm._node_cache = DevNullDictionary() # disable the nodecache
2750
2751         d = nm.create_mutable_file(LARGE_uploadable)
2752         def _created(n):
2753             self.uri = n.get_uri()
2754             self.n2 = nm.create_from_cap(self.uri)
2755             deleter = FirstServerGetsDeleted()
2756             for s in nm.storage_broker.get_connected_servers():
2757                 s.get_rref().post_call_notifier = deleter.notify
2758         d.addCallback(_created)
2759         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2760         return d
2761
2762
2763     def test_block_and_hash_query_error(self):
2764         # This tests for what happens when a query to a remote server
2765         # fails in either the hash validation step or the block getting
2766         # step (because of batching, this is the same actual query).
2767         # We need to have the storage server persist up until the point
2768         # that its prefix is validated, then suddenly die. This
2769         # exercises some exception handling code in Retrieve.
2770         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2771         self.set_up_grid(num_servers=20)
2772         nm = self.g.clients[0].nodemaker
2773         CONTENTS = "contents" * 2000
2774         CONTENTS_uploadable = MutableData(CONTENTS)
2775         d = nm.create_mutable_file(CONTENTS_uploadable)
2776         def _created(node):
2777             self._node = node
2778         d.addCallback(_created)
2779         d.addCallback(lambda ignored:
2780             self._node.get_servermap(MODE_READ))
2781         def _then(servermap):
2782             # we have our servermap. Now we set up the servers like the
2783             # tests above -- the first one that gets a read call should
2784             # start throwing errors, but only after returning its prefix
2785             # for validation. Since we'll download without fetching the
2786             # private key, the next query to the remote server will be
2787             # for either a block and salt or for hashes, either of which
2788             # will exercise the error handling code.
2789             killer = FirstServerGetsKilled()
2790             for s in nm.storage_broker.get_connected_servers():
2791                 s.get_rref().post_call_notifier = killer.notify
2792             ver = servermap.best_recoverable_version()
2793             assert ver
2794             return self._node.download_version(servermap, ver)
2795         d.addCallback(_then)
2796         d.addCallback(lambda data:
2797             self.failUnlessEqual(data, CONTENTS))
2798         return d
2799
2800
class FileHandle(unittest.TestCase):
    """Tests for MutableFileHandle, the uploadable wrapper around
    seekable filehandles (StringIO objects and real files)."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)


    def test_filehandle_read(self):
        # read() in fixed-size chunks must walk the data in order.
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])


    def test_filehandle_get_size(self):
        # get_size() must report the full length of the wrapped data.
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)


    def test_filehandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])


    def test_filehandle_file(self):
        # Make sure that the MutableFileHandle works on a file as well
        # as a StringIO object, since in some cases it will be asked to
        # deal with files.
        self.basedir = self.mktemp()
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        f.close()
        f = open(f_path, "r")

        uploadable = MutableFileHandle(f)
        try:
            data = uploadable.read(len(self.test_data))
            self.failUnlessEqual("".join(data), self.test_data)
            size = uploadable.get_size()
            self.failUnlessEqual(size, len(self.test_data))
        finally:
            # the original version leaked the open filehandle; close it
            # via the uploadable (MutableFileHandle.close closes the
            # wrapped handle, as test_close below demonstrates)
            uploadable.close()


    def test_close(self):
        # Make sure that the MutableFileHandle closes its handle when
        # told to do so.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
2870
2871
class DataHandle(unittest.TestCase):
    """Tests for MutableData, the uploadable wrapper around in-memory
    strings."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)


    def test_datahandle_read(self):
        # Reading in fixed-size chunks should walk the data in order.
        chunk_size = 10
        for offset in xrange(0, len(self.test_data), chunk_size):
            chunk = "".join(self.uploadable.read(chunk_size))
            expected = self.test_data[offset:offset + chunk_size]
            self.failUnlessEqual(chunk, expected)


    def test_datahandle_get_size(self):
        # get_size() reports the full length of the wrapped string.
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))


    def test_datahandle_get_size_out_of_order(self):
        # Calling get_size() must not disturb the read position.
        chunk_size = 100
        first = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(first, self.test_data[:chunk_size])

        # Query the size mid-read...
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))

        # ...then keep reading: we should pick up exactly where we
        # left off.
        second = "".join(self.uploadable.read(chunk_size))
        self.failUnlessEqual(second,
                             self.test_data[chunk_size:chunk_size * 2])
2910
2911
2912 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
2913               PublishMixin):
2914     def setUp(self):
2915         GridTestMixin.setUp(self)
2916         self.basedir = self.mktemp()
2917         self.set_up_grid()
2918         self.c = self.g.clients[0]
2919         self.nm = self.c.nodemaker
2920         self.data = "test data" * 100000 # about 900 KiB; MDMF
2921         self.small_data = "test data" * 10 # about 90 B; SDMF
2922
2923
2924     def do_upload_mdmf(self):
2925         d = self.nm.create_mutable_file(MutableData(self.data),
2926                                         version=MDMF_VERSION)
2927         def _then(n):
2928             assert isinstance(n, MutableFileNode)
2929             assert n._protocol_version == MDMF_VERSION
2930             self.mdmf_node = n
2931             return n
2932         d.addCallback(_then)
2933         return d
2934
2935     def do_upload_sdmf(self):
2936         d = self.nm.create_mutable_file(MutableData(self.small_data))
2937         def _then(n):
2938             assert isinstance(n, MutableFileNode)
2939             assert n._protocol_version == SDMF_VERSION
2940             self.sdmf_node = n
2941             return n
2942         d.addCallback(_then)
2943         return d
2944
2945     def do_upload_empty_sdmf(self):
2946         d = self.nm.create_mutable_file(MutableData(""))
2947         def _then(n):
2948             assert isinstance(n, MutableFileNode)
2949             self.sdmf_zero_length_node = n
2950             assert n._protocol_version == SDMF_VERSION
2951             return n
2952         d.addCallback(_then)
2953         return d
2954
2955     def do_upload(self):
2956         d = self.do_upload_mdmf()
2957         d.addCallback(lambda ign: self.do_upload_sdmf())
2958         return d
2959
2960     def test_debug(self):
2961         d = self.do_upload_mdmf()
2962         def _debug(n):
2963             fso = debug.FindSharesOptions()
2964             storage_index = base32.b2a(n.get_storage_index())
2965             fso.si_s = storage_index
2966             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
2967                             for (i,ss,storedir)
2968                             in self.iterate_servers()]
2969             fso.stdout = StringIO()
2970             fso.stderr = StringIO()
2971             debug.find_shares(fso)
2972             sharefiles = fso.stdout.getvalue().splitlines()
2973             expected = self.nm.default_encoding_parameters["n"]
2974             self.failUnlessEqual(len(sharefiles), expected)
2975
2976             do = debug.DumpOptions()
2977             do["filename"] = sharefiles[0]
2978             do.stdout = StringIO()
2979             debug.dump_share(do)
2980             output = do.stdout.getvalue()
2981             lines = set(output.splitlines())
2982             self.failUnless("Mutable slot found:" in lines, output)
2983             self.failUnless(" share_type: MDMF" in lines, output)
2984             self.failUnless(" num_extra_leases: 0" in lines, output)
2985             self.failUnless(" MDMF contents:" in lines, output)
2986             self.failUnless("  seqnum: 1" in lines, output)
2987             self.failUnless("  required_shares: 3" in lines, output)
2988             self.failUnless("  total_shares: 10" in lines, output)
2989             self.failUnless("  segsize: 131073" in lines, output)
2990             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
2991             vcap = n.get_verify_cap().to_string()
2992             self.failUnless("  verify-cap: %s" % vcap in lines, output)
2993
2994             cso = debug.CatalogSharesOptions()
2995             cso.nodedirs = fso.nodedirs
2996             cso.stdout = StringIO()
2997             cso.stderr = StringIO()
2998             debug.catalog_shares(cso)
2999             shares = cso.stdout.getvalue().splitlines()
3000             oneshare = shares[0] # all shares should be MDMF
3001             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3002             self.failUnless(oneshare.startswith("MDMF"), oneshare)
3003             fields = oneshare.split()
3004             self.failUnlessEqual(fields[0], "MDMF")
3005             self.failUnlessEqual(fields[1], storage_index)
3006             self.failUnlessEqual(fields[2], "3/10")
3007             self.failUnlessEqual(fields[3], "%d" % len(self.data))
3008             self.failUnless(fields[4].startswith("#1:"), fields[3])
3009             # the rest of fields[4] is the roothash, which depends upon
3010             # encryption salts and is not constant. fields[5] is the
3011             # remaining time on the longest lease, which is timing dependent.
3012             # The rest of the line is the quoted pathname to the share.
3013         d.addCallback(_debug)
3014         return d
3015
3016     def test_get_sequence_number(self):
3017         d = self.do_upload()
3018         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3019         d.addCallback(lambda bv:
3020             self.failUnlessEqual(bv.get_sequence_number(), 1))
3021         d.addCallback(lambda ignored:
3022             self.sdmf_node.get_best_readable_version())
3023         d.addCallback(lambda bv:
3024             self.failUnlessEqual(bv.get_sequence_number(), 1))
3025         # Now update. The sequence number in both cases should be 1 in
3026         # both cases.
3027         def _do_update(ignored):
3028             new_data = MutableData("foo bar baz" * 100000)
3029             new_small_data = MutableData("foo bar baz" * 10)
3030             d1 = self.mdmf_node.overwrite(new_data)
3031             d2 = self.sdmf_node.overwrite(new_small_data)
3032             dl = gatherResults([d1, d2])
3033             return dl
3034         d.addCallback(_do_update)
3035         d.addCallback(lambda ignored:
3036             self.mdmf_node.get_best_readable_version())
3037         d.addCallback(lambda bv:
3038             self.failUnlessEqual(bv.get_sequence_number(), 2))
3039         d.addCallback(lambda ignored:
3040             self.sdmf_node.get_best_readable_version())
3041         d.addCallback(lambda bv:
3042             self.failUnlessEqual(bv.get_sequence_number(), 2))
3043         return d
3044
3045
3046     def test_version_extension_api(self):
3047         # We need to define an API by which an uploader can set the
3048         # extension parameters, and by which a downloader can retrieve
3049         # extensions.
3050         d = self.do_upload_mdmf()
3051         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3052         def _got_version(version):
3053             hints = version.get_downloader_hints()
3054             # Should be empty at this point.
3055             self.failUnlessIn("k", hints)
3056             self.failUnlessEqual(hints['k'], 3)
3057             self.failUnlessIn('segsize', hints)
3058             self.failUnlessEqual(hints['segsize'], 131073)
3059         d.addCallback(_got_version)
3060         return d
3061
3062
3063     def test_extensions_from_cap(self):
3064         # If we initialize a mutable file with a cap that has extension
3065         # parameters in it and then grab the extension parameters using
3066         # our API, we should see that they're set correctly.
3067         d = self.do_upload_mdmf()
3068         def _then(ign):
3069             mdmf_uri = self.mdmf_node.get_uri()
3070             new_node = self.nm.create_from_cap(mdmf_uri)
3071             return new_node.get_best_mutable_version()
3072         d.addCallback(_then)
3073         def _got_version(version):
3074             hints = version.get_downloader_hints()
3075             self.failUnlessIn("k", hints)
3076             self.failUnlessEqual(hints["k"], 3)
3077             self.failUnlessIn("segsize", hints)
3078             self.failUnlessEqual(hints["segsize"], 131073)
3079         d.addCallback(_got_version)
3080         return d
3081
3082
3083     def test_extensions_from_upload(self):
3084         # If we create a new mutable file with some contents, we should
3085         # get back an MDMF cap with the right hints in place.
3086         contents = "foo bar baz" * 100000
3087         d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3088         def _got_mutable_file(n):
3089             rw_uri = n.get_uri()
3090             expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3091             self.failUnlessIn(expected_k, rw_uri)
3092             # XXX: Get this more intelligently.
3093             self.failUnlessIn("131073", rw_uri)
3094
3095             ro_uri = n.get_readonly_uri()
3096             self.failUnlessIn(expected_k, ro_uri)
3097             self.failUnlessIn("131073", ro_uri)
3098         d.addCallback(_got_mutable_file)
3099         return d
3100
3101
3102     def test_cap_after_upload(self):
3103         # If we create a new mutable file and upload things to it, and
3104         # it's an MDMF file, we should get an MDMF cap back from that
3105         # file and should be able to use that.
3106         # That's essentially what MDMF node is, so just check that.
3107         d = self.do_upload_mdmf()
3108         def _then(ign):
3109             mdmf_uri = self.mdmf_node.get_uri()
3110             cap = uri.from_string(mdmf_uri)
3111             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3112             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3113             cap = uri.from_string(readonly_mdmf_uri)
3114             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3115         d.addCallback(_then)
3116         return d
3117
3118     def test_mutable_version(self):
3119         # assert that getting parameters from the IMutableVersion object
3120         # gives us the same data as getting them from the filenode itself
3121         d = self.do_upload()
3122         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3123         def _check_mdmf(bv):
3124             n = self.mdmf_node
3125             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3126             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3127             self.failIf(bv.is_readonly())
3128         d.addCallback(_check_mdmf)
3129         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3130         def _check_sdmf(bv):
3131             n = self.sdmf_node
3132             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3133             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3134             self.failIf(bv.is_readonly())
3135         d.addCallback(_check_sdmf)
3136         return d
3137
3138
3139     def test_get_readonly_version(self):
3140         d = self.do_upload()
3141         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3142         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3143
3144         # Attempting to get a mutable version of a mutable file from a
3145         # filenode initialized with a readcap should return a readonly
3146         # version of that same node.
3147         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3148         d.addCallback(lambda ro: ro.get_best_mutable_version())
3149         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3150
3151         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3152         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3153
3154         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3155         d.addCallback(lambda ro: ro.get_best_mutable_version())
3156         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3157         return d
3158
3159
3160     def test_toplevel_overwrite(self):
3161         new_data = MutableData("foo bar baz" * 100000)
3162         new_small_data = MutableData("foo bar baz" * 10)
3163         d = self.do_upload()
3164         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3165         d.addCallback(lambda ignored:
3166             self.mdmf_node.download_best_version())
3167         d.addCallback(lambda data:
3168             self.failUnlessEqual(data, "foo bar baz" * 100000))
3169         d.addCallback(lambda ignored:
3170             self.sdmf_node.overwrite(new_small_data))
3171         d.addCallback(lambda ignored:
3172             self.sdmf_node.download_best_version())
3173         d.addCallback(lambda data:
3174             self.failUnlessEqual(data, "foo bar baz" * 10))
3175         return d
3176
3177
3178     def test_toplevel_modify(self):
3179         d = self.do_upload()
3180         def modifier(old_contents, servermap, first_time):
3181             return old_contents + "modified"
3182         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3183         d.addCallback(lambda ignored:
3184             self.mdmf_node.download_best_version())
3185         d.addCallback(lambda data:
3186             self.failUnlessIn("modified", data))
3187         d.addCallback(lambda ignored:
3188             self.sdmf_node.modify(modifier))
3189         d.addCallback(lambda ignored:
3190             self.sdmf_node.download_best_version())
3191         d.addCallback(lambda data:
3192             self.failUnlessIn("modified", data))
3193         return d
3194
3195
3196     def test_version_modify(self):
3197         # TODO: When we can publish multiple versions, alter this test
3198         # to modify a version other than the best usable version, then
3199         # test to see that the best recoverable version is that.
3200         d = self.do_upload()
3201         def modifier(old_contents, servermap, first_time):
3202             return old_contents + "modified"
3203         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3204         d.addCallback(lambda ignored:
3205             self.mdmf_node.download_best_version())
3206         d.addCallback(lambda data:
3207             self.failUnlessIn("modified", data))
3208         d.addCallback(lambda ignored:
3209             self.sdmf_node.modify(modifier))
3210         d.addCallback(lambda ignored:
3211             self.sdmf_node.download_best_version())
3212         d.addCallback(lambda data:
3213             self.failUnlessIn("modified", data))
3214         return d
3215
3216
3217     def test_download_version(self):
3218         d = self.publish_multiple()
3219         # We want to have two recoverable versions on the grid.
3220         d.addCallback(lambda res:
3221                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3222                                           1:1,3:1,5:1,7:1,9:1}))
3223         # Now try to download each version. We should get the plaintext
3224         # associated with that version.
3225         d.addCallback(lambda ignored:
3226             self._fn.get_servermap(mode=MODE_READ))
3227         def _got_servermap(smap):
3228             versions = smap.recoverable_versions()
3229             assert len(versions) == 2
3230
3231             self.servermap = smap
3232             self.version1, self.version2 = versions
3233             assert self.version1 != self.version2
3234
3235             self.version1_seqnum = self.version1[0]
3236             self.version2_seqnum = self.version2[0]
3237             self.version1_index = self.version1_seqnum - 1
3238             self.version2_index = self.version2_seqnum - 1
3239
3240         d.addCallback(_got_servermap)
3241         d.addCallback(lambda ignored:
3242             self._fn.download_version(self.servermap, self.version1))
3243         d.addCallback(lambda results:
3244             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3245                                  results))
3246         d.addCallback(lambda ignored:
3247             self._fn.download_version(self.servermap, self.version2))
3248         d.addCallback(lambda results:
3249             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3250                                  results))
3251         return d
3252
3253
3254     def test_download_nonexistent_version(self):
3255         d = self.do_upload_mdmf()
3256         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3257         def _set_servermap(servermap):
3258             self.servermap = servermap
3259         d.addCallback(_set_servermap)
3260         d.addCallback(lambda ignored:
3261            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3262                            None,
3263                            self.mdmf_node.download_version, self.servermap,
3264                            "not a version"))
3265         return d
3266
3267
3268     def test_partial_read(self):
3269         # read only a few bytes at a time, and see that the results are
3270         # what we expect.
3271         d = self.do_upload_mdmf()
3272         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3273         def _read_data(version):
3274             c = consumer.MemoryConsumer()
3275             d2 = defer.succeed(None)
3276             for i in xrange(0, len(self.data), 10000):
3277                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3278             d2.addCallback(lambda ignored:
3279                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3280             return d2
3281         d.addCallback(_read_data)
3282         return d
3283
3284
3285     def _test_partial_read(self, offset, length):
3286         d = self.do_upload_mdmf()
3287         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3288         c = consumer.MemoryConsumer()
3289         d.addCallback(lambda version:
3290             version.read(c, offset, length))
3291         expected = self.data[offset:offset+length]
3292         d.addCallback(lambda ignored: "".join(c.chunks))
3293         def _check(results):
3294             if results != expected:
3295                 print
3296                 print "got: %s ... %s" % (results[:20], results[-20:])
3297                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3298                 self.fail("results != expected")
3299         d.addCallback(_check)
3300         return d
3301
3302     def test_partial_read_starting_on_segment_boundary(self):
3303         return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3), 50)
3304
3305     def test_partial_read_ending_one_byte_after_segment_boundary(self):
3306         return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3)-50, 51)
3307
3308     def test_partial_read_zero_length_at_start(self):
3309         return self._test_partial_read(0, 0)
3310
3311     def test_partial_read_zero_length_in_middle(self):
3312         return self._test_partial_read(50, 0)
3313
3314     def test_partial_read_zero_length_at_segment_boundary(self):
3315         return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3), 0)
3316
3317     # XXX factor these into a single upload after they pass
3318     _broken = "zero-length reads of mutable files don't work"
3319     test_partial_read_zero_length_at_start.todo = _broken
3320     test_partial_read_zero_length_in_middle.todo = _broken
3321     test_partial_read_zero_length_at_segment_boundary.todo = _broken
3322
3323     def _test_read_and_download(self, node, expected):
3324         d = node.get_best_readable_version()
3325         def _read_data(version):
3326             c = consumer.MemoryConsumer()
3327             d2 = defer.succeed(None)
3328             d2.addCallback(lambda ignored: version.read(c))
3329             d2.addCallback(lambda ignored:
3330                 self.failUnlessEqual(expected, "".join(c.chunks)))
3331             return d2
3332         d.addCallback(_read_data)
3333         d.addCallback(lambda ignored: node.download_best_version())
3334         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3335         return d
3336
3337     def test_read_and_download_mdmf(self):
3338         d = self.do_upload_mdmf()
3339         d.addCallback(self._test_read_and_download, self.data)
3340         return d
3341
3342     def test_read_and_download_sdmf(self):
3343         d = self.do_upload_sdmf()
3344         d.addCallback(self._test_read_and_download, self.small_data)
3345         return d
3346
3347     def test_read_and_download_sdmf_zero_length(self):
3348         d = self.do_upload_empty_sdmf()
3349         d.addCallback(self._test_read_and_download, "")
3350         return d
3351
3352
3353 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3354     timeout = 400 # these tests are too big, 120s is not enough on slow
3355                   # platforms
3356     def setUp(self):
3357         GridTestMixin.setUp(self)
3358         self.basedir = self.mktemp()
3359         self.set_up_grid()
3360         self.c = self.g.clients[0]
3361         self.nm = self.c.nodemaker
3362         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3363         self.small_data = "test data" * 10 # about 90 B; SDMF
3364
3365
3366     def do_upload_sdmf(self):
3367         d = self.nm.create_mutable_file(MutableData(self.small_data))
3368         def _then(n):
3369             assert isinstance(n, MutableFileNode)
3370             self.sdmf_node = n
3371             # Make SDMF node that has 255 shares.
3372             self.nm.default_encoding_parameters['n'] = 255
3373             self.nm.default_encoding_parameters['k'] = 127
3374             return self.nm.create_mutable_file(MutableData(self.small_data))
3375         d.addCallback(_then)
3376         def _then2(n):
3377             assert isinstance(n, MutableFileNode)
3378             self.sdmf_max_shares_node = n
3379         d.addCallback(_then2)
3380         return d
3381
3382     def do_upload_mdmf(self):
3383         d = self.nm.create_mutable_file(MutableData(self.data),
3384                                         version=MDMF_VERSION)
3385         def _then(n):
3386             assert isinstance(n, MutableFileNode)
3387             self.mdmf_node = n
3388             # Make MDMF node that has 255 shares.
3389             self.nm.default_encoding_parameters['n'] = 255
3390             self.nm.default_encoding_parameters['k'] = 127
3391             return self.nm.create_mutable_file(MutableData(self.data),
3392                                                version=MDMF_VERSION)
3393         d.addCallback(_then)
3394         def _then2(n):
3395             assert isinstance(n, MutableFileNode)
3396             self.mdmf_max_shares_node = n
3397         d.addCallback(_then2)
3398         return d
3399
3400     def _test_replace(self, offset, new_data):
3401         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3402         d0 = self.do_upload_mdmf()
3403         def _run(ign):
3404             d = defer.succeed(None)
3405             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3406                 d.addCallback(lambda ign: node.get_best_mutable_version())
3407                 d.addCallback(lambda mv:
3408                     mv.update(MutableData(new_data), offset))
3409                 # close around node.
3410                 d.addCallback(lambda ignored, node=node:
3411                     node.download_best_version())
3412                 def _check(results):
3413                     if results != expected:
3414                         print
3415                         print "got: %s ... %s" % (results[:20], results[-20:])
3416                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3417                         self.fail("results != expected")
3418                 d.addCallback(_check)
3419             return d
3420         d0.addCallback(_run)
3421         return d0
3422
3423     def test_append(self):
3424         # We should be able to append data to a mutable file and get
3425         # what we expect.
3426         return self._test_replace(len(self.data), "appended")
3427
3428     def test_replace_middle(self):
3429         # We should be able to replace data in the middle of a mutable
3430         # file and get what we expect back.
3431         return self._test_replace(100, "replaced")
3432
3433     def test_replace_beginning(self):
3434         # We should be able to replace data at the beginning of the file
3435         # without truncating the file
3436         return self._test_replace(0, "beginning")
3437
3438     def test_replace_segstart1(self):
3439         return self._test_replace(128*1024+1, "NNNN")
3440
3441     def test_replace_zero_length_beginning(self):
3442         return self._test_replace(0, "")
3443
3444     def test_replace_zero_length_middle(self):
3445         return self._test_replace(50, "")
3446
3447     def test_replace_zero_length_segstart1(self):
3448         return self._test_replace(128*1024+1, "")
3449
3450     def test_replace_and_extend(self):
3451         # We should be able to replace data in the middle of a mutable
3452         # file and extend that mutable file and get what we expect.
3453         return self._test_replace(100, "modified " * 100000)
3454
3455
3456     def _check_differences(self, got, expected):
3457         # displaying arbitrary file corruption is tricky for a
3458         # 1MB file of repeating data,, so look for likely places
3459         # with problems and display them separately
3460         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3461         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3462         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3463                     for (start,end) in gotmods]
3464         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3465                     for (start,end) in expmods]
3466         #print "expecting: %s" % expspans
3467
3468         SEGSIZE = 128*1024
3469         if got != expected:
3470             print "differences:"
3471             for segnum in range(len(expected)//SEGSIZE):
3472                 start = segnum * SEGSIZE
3473                 end = (segnum+1) * SEGSIZE
3474                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3475                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3476                 if got_ends != exp_ends:
3477                     print "expected[%d]: %s" % (start, exp_ends)
3478                     print "got     [%d]: %s" % (start, got_ends)
3479             if expspans != gotspans:
3480                 print "expected: %s" % expspans
3481                 print "got     : %s" % gotspans
3482             open("EXPECTED","wb").write(expected)
3483             open("GOT","wb").write(got)
3484             print "wrote data to EXPECTED and GOT"
3485             self.fail("didn't get expected data")
3486
3487
3488     def test_replace_locations(self):
3489         # exercise fencepost conditions
3490         SEGSIZE = 128*1024
3491         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3492         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3493         d0 = self.do_upload_mdmf()
3494         def _run(ign):
3495             expected = self.data
3496             d = defer.succeed(None)
3497             for offset in suspects:
3498                 new_data = letters.next()*2 # "AA", then "BB", etc
3499                 expected = expected[:offset]+new_data+expected[offset+2:]
3500                 d.addCallback(lambda ign:
3501                               self.mdmf_node.get_best_mutable_version())
3502                 def _modify(mv, offset=offset, new_data=new_data):
3503                     # close over 'offset','new_data'
3504                     md = MutableData(new_data)
3505                     return mv.update(md, offset)
3506                 d.addCallback(_modify)
3507                 d.addCallback(lambda ignored:
3508                               self.mdmf_node.download_best_version())
3509                 d.addCallback(self._check_differences, expected)
3510             return d
3511         d0.addCallback(_run)
3512         return d0
3513
3514     def test_replace_locations_max_shares(self):
3515         # exercise fencepost conditions
3516         SEGSIZE = 128*1024
3517         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3518         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3519         d0 = self.do_upload_mdmf()
3520         def _run(ign):
3521             expected = self.data
3522             d = defer.succeed(None)
3523             for offset in suspects:
3524                 new_data = letters.next()*2 # "AA", then "BB", etc
3525                 expected = expected[:offset]+new_data+expected[offset+2:]
3526                 d.addCallback(lambda ign:
3527                               self.mdmf_max_shares_node.get_best_mutable_version())
3528                 def _modify(mv, offset=offset, new_data=new_data):
3529                     # close over 'offset','new_data'
3530                     md = MutableData(new_data)
3531                     return mv.update(md, offset)
3532                 d.addCallback(_modify)
3533                 d.addCallback(lambda ignored:
3534                               self.mdmf_max_shares_node.download_best_version())
3535                 d.addCallback(self._check_differences, expected)
3536             return d
3537         d0.addCallback(_run)
3538         return d0
3539
3540
3541     def test_append_power_of_two(self):
3542         # If we attempt to extend a mutable file so that its segment
3543         # count crosses a power-of-two boundary, the update operation
3544         # should know how to reencode the file.
3545
3546         # Note that the data populating self.mdmf_node is about 900 KiB
3547         # long -- this is 7 segments in the default segment size. So we
3548         # need to add 2 segments worth of data to push it over a
3549         # power-of-two boundary.
3550         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3551         new_data = self.data + (segment * 2)
3552         d0 = self.do_upload_mdmf()
3553         def _run(ign):
3554             d = defer.succeed(None)
3555             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3556                 d.addCallback(lambda ign: node.get_best_mutable_version())
3557                 d.addCallback(lambda mv:
3558                     mv.update(MutableData(segment * 2), len(self.data)))
3559                 d.addCallback(lambda ignored, node=node:
3560                     node.download_best_version())
3561                 d.addCallback(lambda results:
3562                     self.failUnlessEqual(results, new_data))
3563             return d
3564         d0.addCallback(_run)
3565         return d0
3566
3567     def test_update_sdmf(self):
3568         # Running update on a single-segment file should still work.
3569         new_data = self.small_data + "appended"
3570         d0 = self.do_upload_sdmf()
3571         def _run(ign):
3572             d = defer.succeed(None)
3573             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3574                 d.addCallback(lambda ign: node.get_best_mutable_version())
3575                 d.addCallback(lambda mv:
3576                     mv.update(MutableData("appended"), len(self.small_data)))
3577                 d.addCallback(lambda ignored, node=node:
3578                     node.download_best_version())
3579                 d.addCallback(lambda results:
3580                     self.failUnlessEqual(results, new_data))
3581             return d
3582         d0.addCallback(_run)
3583         return d0
3584
3585     def test_replace_in_last_segment(self):
3586         # The wrapper should know how to handle the tail segment
3587         # appropriately.
3588         replace_offset = len(self.data) - 100
3589         new_data = self.data[:replace_offset] + "replaced"
3590         rest_offset = replace_offset + len("replaced")
3591         new_data += self.data[rest_offset:]
3592         d0 = self.do_upload_mdmf()
3593         def _run(ign):
3594             d = defer.succeed(None)
3595             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3596                 d.addCallback(lambda ign: node.get_best_mutable_version())
3597                 d.addCallback(lambda mv:
3598                     mv.update(MutableData("replaced"), replace_offset))
3599                 d.addCallback(lambda ignored, node=node:
3600                     node.download_best_version())
3601                 d.addCallback(lambda results:
3602                     self.failUnlessEqual(results, new_data))
3603             return d
3604         d0.addCallback(_run)
3605         return d0
3606
3607     def test_multiple_segment_replace(self):
3608         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3609         new_data = self.data[:replace_offset]
3610         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3611         new_data += 2 * new_segment
3612         new_data += "replaced"
3613         rest_offset = len(new_data)
3614         new_data += self.data[rest_offset:]
3615         d0 = self.do_upload_mdmf()
3616         def _run(ign):
3617             d = defer.succeed(None)
3618             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3619                 d.addCallback(lambda ign: node.get_best_mutable_version())
3620                 d.addCallback(lambda mv:
3621                     mv.update(MutableData((2 * new_segment) + "replaced"),
3622                               replace_offset))
3623                 d.addCallback(lambda ignored, node=node:
3624                     node.download_best_version())
3625                 d.addCallback(lambda results:
3626                     self.failUnlessEqual(results, new_data))
3627             return d
3628         d0.addCallback(_run)
3629         return d0
3630
3631 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3632     sdmf_old_shares = {}
3633     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3634     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3635     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3636     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3637     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3638     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3639     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3640     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3641     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3642     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3643     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3644     sdmf_old_contents = "This is a test file.\n"
3645     def copy_sdmf_shares(self):
3646         # We'll basically be short-circuiting the upload process.
3647         servernums = self.g.servers_by_number.keys()
3648         assert len(servernums) == 10
3649
3650         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3651         # Get the storage index.
3652         cap = uri.from_string(self.sdmf_old_cap)
3653         si = cap.get_storage_index()
3654
3655         # Now execute each assignment by writing the storage.
3656         for (share, servernum) in assignments:
3657             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3658             storedir = self.get_serverdir(servernum)
3659             storage_path = os.path.join(storedir, "shares",
3660                                         storage_index_to_dir(si))
3661             fileutil.make_dirs(storage_path)
3662             fileutil.write(os.path.join(storage_path, "%d" % share),
3663                            sharedata)
3664         # ...and verify that the shares are there.
3665         shares = self.find_uri_shares(self.sdmf_old_cap)
3666         assert len(shares) == 10
3667
3668     def test_new_downloader_can_read_old_shares(self):
3669         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3670         self.set_up_grid()
3671         self.copy_sdmf_shares()
3672         nm = self.g.clients[0].nodemaker
3673         n = nm.create_from_cap(self.sdmf_old_cap)
3674         d = n.download_best_version()
3675         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3676         return d
3677
class DifferentEncoding(unittest.TestCase):
    def setUp(self):
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_filenode(self):
        """Create a mutable file with 3-of-20 encoding, then modify it via
        a nodemaker configured for 3-of-10. Regression test for #1510,
        which tracked a failure in this situation."""
        params = self.nodemaker.default_encoding_parameters
        params["n"] = 20
        d = self.nodemaker.create_mutable_file("old contents")
        def _refetch_node(n):
            filecap = n.get_cap().to_string()
            del n  # we want a new object, not the cached one
            params["n"] = 10
            return self.nodemaker.create_from_cap(filecap)
        d.addCallback(_refetch_node)
        # The modifier ignores the old contents and replaces them outright.
        d.addCallback(lambda node: node.modify(
            lambda old_contents, servermap, first_time: "new contents"))
        return d