# src/allmydata/test/test_mutable.py

import os, re, base64
from cStringIO import StringIO
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.internet.interfaces import IConsumer
from zope.interface import implements
from allmydata import uri, client
from allmydata.nodemaker import NodeMaker
from allmydata.util import base32, consumer, fileutil, mathutil
from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
     ssk_pubkey_fingerprint_hash
from allmydata.util.deferredutil import gatherResults
from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
     NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
from allmydata.monitor import Monitor
from allmydata.test.common import ShouldFailMixin
from allmydata.test.no_network import GridTestMixin
from foolscap.api import eventually, fireEventually
from foolscap.logging import log
from allmydata.storage_client import StorageFarmBroker
from allmydata.storage.common import storage_index_to_dir
from allmydata.scripts import debug

from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
from allmydata.mutable.common import ResponseCache, \
     MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
     NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
     NotEnoughServersError, CorruptShareError
from allmydata.mutable.retrieve import Retrieve
from allmydata.mutable.publish import Publish, MutableFileHandle, \
                                      MutableData, \
                                      DEFAULT_MAX_SEGMENT_SIZE
from allmydata.mutable.servermap import ServerMap, ServermapUpdater
from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
from allmydata.mutable.repairer import MustForceRepairError

import allmydata.test.common_util as testutil
from allmydata.test.common import TEST_RSA_KEY_SIZE


42 # this "FakeStorage" exists to put the share data in RAM and avoid using real
43 # network connections, both to speed up the tests and to reduce the amount of
44 # non-mutable.py code being exercised.
45
46 class FakeStorage:
47     # this class replaces the collection of storage servers, allowing the
48     # tests to examine and manipulate the published shares. It also lets us
49     # control the order in which read queries are answered, to exercise more
50     # of the error-handling code in Retrieve .
51     #
52     # Note that we ignore the storage index: this FakeStorage instance can
53     # only be used for a single storage index.
54
55
56     def __init__(self):
57         self._peers = {}
58         # _sequence is used to cause the responses to occur in a specific
59         # order. If it is in use, then we will defer queries instead of
60         # answering them right away, accumulating the Deferreds in a dict. We
61         # don't know exactly how many queries we'll get, so exactly one
62         # second after the first query arrives, we will release them all (in
63         # order).
64         self._sequence = None
65         self._pending = {}
66         self._pending_timer = None
67
68     def read(self, peerid, storage_index):
69         shares = self._peers.get(peerid, {})
70         if self._sequence is None:
71             return defer.succeed(shares)
72         d = defer.Deferred()
73         if not self._pending:
74             self._pending_timer = reactor.callLater(1.0, self._fire_readers)
75         if peerid not in self._pending:
76             self._pending[peerid] = []
77         self._pending[peerid].append( (d, shares) )
78         return d
79
80     def _fire_readers(self):
81         self._pending_timer = None
82         pending = self._pending
83         self._pending = {}
84         for peerid in self._sequence:
85             if peerid in pending:
86                 for (d, shares) in pending.pop(peerid):
87                     eventually(d.callback, shares)
88         for peerid in pending:
89             for (d, shares) in pending[peerid]:
90                 eventually(d.callback, shares)
91
92     def write(self, peerid, storage_index, shnum, offset, data):
93         if peerid not in self._peers:
94             self._peers[peerid] = {}
95         shares = self._peers[peerid]
96         f = StringIO()
97         f.write(shares.get(shnum, ""))
98         f.seek(offset)
99         f.write(data)
100         shares[shnum] = f.getvalue()
101
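# A sketch of how a test can drive FakeStorage's ordering knob (hypothetical
# usage; the tests that need deterministic replies set _sequence like this):
#
#   s = FakeStorage()
#   s._sequence = list_of_peerids
#   # read() now defers every query, and _fire_readers releases them in
#   # this order one second after the first query arrives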

class FakeStorageServer:
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0
    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        pass

    def slot_readv(self, storage_index, shnums, readv):
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)

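# slot_readv takes a list of (offset, length) pairs and returns, per share
# number, the list of corresponding byte ranges. A hypothetical direct call
# (the tests normally reach this through Retrieve):
#
#   d = fss.slot_readv("si", shnums=[0], readv=[(0, 100), (500, 10)])
#   # fires with {0: [share0[0:100], share0[500:510]]}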

def flip_bit(original, byte_offset):
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x01) +
            original[byte_offset+1:])

def add_two(original, byte_offset):
    # It isn't enough to simply flip a bit in the version number, because 1
    # is a valid version number. XORing the byte with 0x02 adds two to both
    # valid version bytes (0 and 1), guaranteeing an invalid value.
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x02) +
            original[byte_offset+1:])

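# For example (illustrative bytes):
#
#   flip_bit("\x00abc", 0)  ->  "\x01abc"  (lowest bit of byte 0 toggled)
#   add_two("\x01abc", 0)   ->  "\x03abc"  (verbyte 1 becomes the invalid
#                                           value 3, not the valid value 0)
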
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl

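# Typical use in a test chain (sketch): corrupt the encrypted private key of
# share 0 on every server, then exercise the download/repair path:
#
#   d.addCallback(corrupt, self._storage, "enc_privkey", shnums_to_corrupt=[0])
#
# offset may be a field name (looked up in the share's offset table o), a raw
# byte offset, or a (name, relative_offset) tuple; offset 0 is the version
# byte, which gets add_two so that it never lands on another valid verbyte.
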
def make_storagebroker(s=None, num_peers=10):
    if not s:
        s = FakeStorage()
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        storage_broker.test_add_rref(peerid, fss)
    return storage_broker

def make_nodemaker(s=None, num_peers=10):
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    nodemaker = NodeMaker(storage_broker, sh, None,
                          None, None,
                          {"k": 3, "n": 10}, keygen)
    return nodemaker

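# Together these factories give each test a ten-server in-RAM grid, encoded
# 3-of-10 by default. A minimal sketch of the pattern used throughout:
#
#   storage = FakeStorage()
#   nodemaker = make_nodemaker(storage)
#   d = nodemaker.create_mutable_file(MutableData("contents"))
#   d.addCallback(lambda n: n.download_best_version())
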
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    # this used to be in Publish, but we removed the limit. Some of
    # these tests test whether the new code correctly allows files
    # larger than the limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
    def setUp(self):
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_create(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d


    def test_create_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        return d

    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            # bind v as a default argument: most of these callbacks fire
            # after the loop has moved on, so closing over the loop variable
            # directly would be fragile
            d.addCallback(lambda ignored, v=v:
                self.nodemaker.create_mutable_file(version=v))
            def _created(n):
                self.failUnless(isinstance(n, MutableFileNode))
                self._node = n
                return n
            d.addCallback(_created)
            d.addCallback(lambda n:
                n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                self._node.download_best_version())
            d.addCallback(lambda contents:
                self.failUnlessEqual(contents, "Contents" * 50000))
        return d

    def test_max_shares(self):
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
            self.failUnlessEqual("contents" * 50000, contents))
        return d

    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
            self._node = n
            return n
        d.addCallback(_created)
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        d.addCallback(lambda contents:
            self.failUnlessEqual(contents, "contents" * 50000))
        return d

    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            cap = n.get_cap()
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_writecap_with_extensions(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode when given a writecap with extension parameters in
        # it.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            self.failUnlessIn(":3:131073", s)
            n2 = self.nodemaker.create_from_cap(s)

            self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
            self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
            hints = n2._downloader_hints
            self.failUnlessEqual(hints['k'], 3)
            self.failUnlessEqual(hints['segsize'], 131073)
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))

            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)
        return d


    def test_create_from_mdmf_readcap_with_extensions(self):
        # We should be able to create an MDMF filenode with the
        # extension parameters without it breaking.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            self.failUnlessIn(":3:131073", s)

            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnless(n2.is_readonly())
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            hints = n2._downloader_hints
            self.failUnlessEqual(hints["k"], 3)
            self.failUnlessEqual(hints["segsize"], 131073)
        d.addCallback(_created)
        return d


    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)

            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
        return d


    def test_serialize(self):
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        return d

    def test_upload_and_download(self):
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d


    def test_upload_and_download_mdmf(self):
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9 MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d


    def test_retrieve_pause(self):
        # We should make sure that the retriever is able to pause
        # correctly.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.node = node

            return node.overwrite(MutableData("contents1" * 100000))
        d.addCallback(_created)
        # Now we'll retrieve it into a pausing consumer.
        d.addCallback(lambda ignored:
            self.node.get_best_mutable_version())
        def _got_version(version):
            self.c = PausingConsumer()
            return version.read(self.c)
        d.addCallback(_got_version)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.c.data, "contents1" * 100000))
        return d


    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(node):
            self.uri = node.get_uri()

            return node.overwrite(MutableData("contents1" * 100000))
        def _then(ignored):
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_then)
        d.addCallback(_downloaded)
        return d


    def test_create_and_download_from_bare_mdmf_cap(self):
        # MDMF caps have extension parameters on them by default. We
        # need to make sure that they work without extension parameters.
        contents = MutableData("contents" * 100000)
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
                                               contents=contents)
        def _created(node):
            uri = node.get_uri()
            self._created = node
            self.failUnlessIn(":3:131073", uri)
            # Now strip that off the end of the uri, then try creating
            # and downloading the node again.
            bare_uri = uri.replace(":3:131073", "")
            assert ":3:131073" not in bare_uri

            return self.nodemaker.create_from_cap(bare_uri)
        d.addCallback(_created)
        def _created_bare(node):
            self.failUnlessEqual(node.get_writekey(),
                                 self._created.get_writekey())
            self.failUnlessEqual(node.get_readkey(),
                                 self._created.get_readkey())
            self.failUnlessEqual(node.get_storage_index(),
                                 self._created.get_storage_index())
            return node.download_best_version()
        d.addCallback(_created_bare)
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "contents" * 100000))
        return d


    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF.
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
        return d


    def test_create_with_initial_contents(self):
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            return d
        d.addCallback(_created)
        return d


    def test_create_mdmf_with_initial_contents(self):
        initial_contents = "foobarbaz" * 131072 # about 1.1 MiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents +
                                           "foobarbaz"))
            return d
        d.addCallback(_created)
        return d


    def test_response_cache_memory_leak(self):
        d = self.nodemaker.create_mutable_file("contents")
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
                return d2
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
            return d
        d.addCallback(_created)
        return d

    def test_create_with_initial_contents_function(self):
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
        def _created(n):
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
        return d


    def test_create_mdmf_with_initial_contents_function(self):
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
            n.download_best_version())
        d.addCallback(lambda data2:
            self.failUnlessEqual(data2, data))
        return d


    def test_create_with_too_large_contents(self):
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
        def _created(n):
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
            return d
        d.addCallback(_created)
        return d

    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        return d

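    # For reference: the verinfo tuple returned by
    # servermap.best_recoverable_version() is (seqnum, root_hash, IV,
    # segsize, datalen, k, N, prefix, offsets), the same shape unpacked in
    # corrupt() above, so verinfo[0] is the sequence number.
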
    def test_modify(self):
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d


    def test_modify_backoffer(self):
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            return f
        def _backoff_pauser(node, f):
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d

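    # A note on the hooks exercised above (a sketch of the contract, as
    # inferred from this test): n.modify(modifier, backoffer) calls
    # backoffer(node, failure) after each UncoordinatedWriteError. Returning
    # the failure (like _backoff_stopper) aborts the retry loop; returning a
    # Deferred (like _backoff_pauser) postpones the retry until it fires.
    # BackoffAgent.delay behaves like _backoff_pauser but gives up once its
    # retry limit is hit, which is what the giveuper checks rely on.
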
    def test_upload_and_download_full_size_keys(self):
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            return d
        d.addCallback(_created)
        return d


    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            self.n = n
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
            self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
            self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self.failUnlessEqual(self.n.get_size(), 9))
        return d


class PublishMixin:
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS should have more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d


    def publish_multiple(self, version=0):
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d


    def _copy_shares(self, ignored, index):
        shares = self._storage._peers
        # we need a deep copy
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]

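# A sketch of how tests combine these helpers to build mixed-version grids:
# publish five versions, snapshotting the shares after each, then splice an
# older snapshot back in for selected shnums. For example, to leave shares
# 0-4 at version 4 but roll shares 5-9 back to version 2:
#
#   d = self.publish_multiple()
#   d.addCallback(lambda ign:
#                 self._set_versions(dict([(i, 2) for i in range(5, 10)])))
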
class PausingConsumer:
    implements(IConsumer)
    def __init__(self):
        self.data = ""
        self.already_paused = False

    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def _unpause(self, ignored):
        self.producer.resumeProducing()

    def write(self, data):
        self.data += data
        if not self.already_paused:
            self.producer.pauseProducing()
            self.already_paused = True
            reactor.callLater(15, self._unpause, None)


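# PausingConsumer exercises the IConsumer/producer handshake used by
# version.read(consumer): the retriever registers itself as a producer, the
# consumer pauses it on the first write() and resumes fifteen seconds later,
# and a correct retriever buffers and delivers the remaining data after the
# pause (see test_retrieve_pause above).
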
class Servermap(unittest.TestCase, PublishMixin):
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        return sm

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

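    # To summarize the expectations above for this 3-of-10 file: MODE_CHECK
    # and MODE_WRITE query enough servers to find all 10 shares, MODE_READ
    # stops early at k+epsilon = 3+3 = 6 shares, and MODE_ANYTHING stops as
    # soon as any k = 3 shares prove a recoverable version.
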
    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares in it any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, peerid, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (peerid, shnum) )
                    sm.mark_bad_share(peerid, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (peerid, shnum) in self._corrupted:
                peer_shares = sm.shares_on_peer(peerid)
                self.failIf(shnum in peer_shares,
                            "%d was in %s" % (shnum, peer_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

1196     def failUnlessNoneRecoverable(self, sm):
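        # helper: assert that the servermap saw no versions at all, neither
        # recoverable nor unrecoverable, so there is no best version and no
        # shares available.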
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)
        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

    def failUnlessNotQuiteEnough(self, sm):
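        # helper: with only two shares of the 3-of-10 encoding left, the one
        # version should be unrecoverable. shares_available() maps verinfo to
        # a (num-shares, k, N) tuple, so we expect (2, 3, 10) here.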
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        return sm

    def test_not_quite_enough_shares(self):
        s = self._storage
        ms = self.make_servermap
        num_shares = len(s._peers)
        for peerid in s._peers:
            s._peers[peerid] = {}
            num_shares -= 1
            if num_shares == 2:
                break
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))

        return d


    def test_servermapupdater_finds_mdmf_files(self):
        # setUp already published an MDMF file for us. We just need to
        # make sure that when we run the ServermapUpdater, the file is
        # reported to have one recoverable version.
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        # Calling make_servermap also updates the servermap in the mode
        # that we specify, so we just need to see what it says.
        def _check_servermap(sm):
            self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        d.addCallback(_check_servermap)
        return d


    def test_fetch_update(self):
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
        def _check_servermap(sm):
            # 10 shares
            self.failUnlessEqual(len(sm.update_data), 10)
            # one version
            for data in sm.update_data.itervalues():
                self.failUnlessEqual(len(data), 1)
        d.addCallback(_check_servermap)
        return d


    def test_servermapupdater_finds_sdmf_files(self):
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_sdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        d.addCallback(lambda servermap:
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
        return d


class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
        if oldmap is None:
            oldmap = ServerMap()
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
        d = smu.update()
        return d

    def abbrev_verinfo(self, verinfo):
        if verinfo is None:
            return None
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = verinfo
        return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])

    def abbrev_verinfo_dict(self, verinfo_d):
        output = {}
        for verinfo,value in verinfo_d.items():
            (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
             offsets_tuple) = verinfo
            output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
        return output

    def dump_servermap(self, servermap):
        print "SERVERMAP", servermap
        print "RECOVERABLE", [self.abbrev_verinfo(v)
                              for v in servermap.recoverable_versions()]
        print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
        print "available", self.abbrev_verinfo_dict(servermap.shares_available())

    def do_download(self, servermap, version=None):
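        # drive a Retrieve directly: pick the best recoverable version if
        # none is given, stream the plaintext into a MemoryConsumer, and
        # return the joined chunks as a single string.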
        if version is None:
            version = servermap.best_recoverable_version()
        r = Retrieve(self._fn, servermap, version)
        c = consumer.MemoryConsumer()
        d = r.download(consumer=c)
        d.addCallback(lambda mc: "".join(mc.chunks))
        return d


    def test_basic(self):
        d = self.make_servermap()
        def _do_retrieve(servermap):
            self._smap = servermap
            #self.dump_servermap(servermap)
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
            return self.do_download(servermap)
        d.addCallback(_do_retrieve)
        def _retrieved(new_contents):
            self.failUnlessEqual(new_contents, self.CONTENTS)
        d.addCallback(_retrieved)
        # we should be able to re-use the same servermap, both with and
        # without updating it.
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        # clobbering the pubkey should make the servermap updater re-fetch it
        def _clobber_pubkey(res):
            self._fn._pubkey = None
        d.addCallback(_clobber_pubkey)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        return d

    def test_all_shares_vanished(self):
        d = self.make_servermap()
        def _remove_shares(servermap):
            for shares in self._storage._peers.values():
                shares.clear()
            d1 = self.shouldFail(NotEnoughSharesError,
                                 "test_all_shares_vanished",
                                 "ran out of peers",
                                 self.do_download, servermap)
            return d1
        d.addCallback(_remove_shares)
        return d

    def test_no_servers(self):
        sb2 = make_storagebroker(num_peers=0)
        # if there are no servers, then a MODE_READ servermap should come
        # back empty
        d = self.make_servermap(sb=sb2)
        def _check_servermap(servermap):
            self.failUnlessEqual(servermap.best_recoverable_version(), None)
            self.failIf(servermap.recoverable_versions())
            self.failIf(servermap.unrecoverable_versions())
            self.failIf(servermap.all_peers())
        d.addCallback(_check_servermap)
        return d

    def test_no_servers_download(self):
        sb2 = make_storagebroker(num_peers=0)
        self._fn._storage_broker = sb2
        d = self.shouldFail(UnrecoverableFileError,
                            "test_no_servers_download",
                            "no recoverable versions",
                            self._fn.download_best_version)
        def _restore(res):
            # a failed download that occurs while we aren't connected to
            # anybody should not prevent a subsequent download from working.
            # This isn't quite the webapi-driven test that #463 wants, but it
            # should be close enough.
            self._fn._storage_broker = self._storage_broker
            return self._fn.download_best_version()
        def _retrieved(new_contents):
            self.failUnlessEqual(new_contents, self.CONTENTS)
        d.addCallback(_restore)
        d.addCallback(_retrieved)
        return d


    def _test_corrupt_all(self, offset, substring,
                          should_succeed=False,
                          corrupt_early=True,
                          failure_checker=None,
                          fetch_privkey=False):
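        # corrupt every share at 'offset', either before the servermap
        # update (corrupt_early=True) or between mapupdate and retrieve.
        # 'substring' must appear in the resulting problem or failure text;
        # should_succeed=True flips the expectation to a successful download.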
        d = defer.succeed(None)
        if corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        d.addCallback(lambda res: self.make_servermap())
        if not corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        def _do_retrieve(servermap):
            ver = servermap.best_recoverable_version()
            if ver is None and not should_succeed:
                # no recoverable versions == not succeeding. The problem
                # should be noted in the servermap's list of problems.
                if substring:
                    allproblems = [str(f) for f in servermap.problems]
                    self.failUnlessIn(substring, "".join(allproblems))
                return servermap
            if should_succeed:
                d1 = self._fn.download_version(servermap, ver,
                                               fetch_privkey)
                d1.addCallback(lambda new_contents:
                               self.failUnlessEqual(new_contents, self.CONTENTS))
            else:
                d1 = self.shouldFail(NotEnoughSharesError,
                                     "_corrupt_all(offset=%s)" % (offset,),
                                     substring,
                                     self._fn.download_version, servermap,
                                                                ver,
                                                                fetch_privkey)
            if failure_checker:
                d1.addCallback(failure_checker)
            d1.addCallback(lambda res: servermap)
            return d1
        d.addCallback(_do_retrieve)
        return d
    def test_corrupt_all_verbyte(self):
        # when the version byte is not 0 or 1, we hit an UnknownVersionError
        # in unpack_share().
        d = self._test_corrupt_all(0, "UnknownVersionError")
        def _check_servermap(servermap):
            # and the dump should mention the problems
            s = StringIO()
            dump = servermap.dump(s).getvalue()
            self.failUnless("30 PROBLEMS" in dump, dump)
        d.addCallback(_check_servermap)
        return d

    def test_corrupt_all_seqnum(self):
        # a corrupt sequence number will trigger a bad signature
        return self._test_corrupt_all(1, "signature is invalid")

    def test_corrupt_all_R(self):
        # a corrupt root hash will trigger a bad signature
        return self._test_corrupt_all(9, "signature is invalid")

    def test_corrupt_all_IV(self):
        # a corrupt salt/IV will trigger a bad signature
        return self._test_corrupt_all(41, "signature is invalid")

    def test_corrupt_all_k(self):
        # a corrupt 'k' will trigger a bad signature
        return self._test_corrupt_all(57, "signature is invalid")

    def test_corrupt_all_N(self):
        # a corrupt 'N' will trigger a bad signature
        return self._test_corrupt_all(58, "signature is invalid")

    def test_corrupt_all_segsize(self):
        # a corrupt segsize will trigger a bad signature
        return self._test_corrupt_all(59, "signature is invalid")

    def test_corrupt_all_datalen(self):
        # a corrupt data length will trigger a bad signature
        return self._test_corrupt_all(67, "signature is invalid")

    def test_corrupt_all_pubkey(self):
        # a corrupt pubkey won't match the URI's fingerprint. We need to
        # remove the pubkey from the filenode, or else it won't bother trying
        # to update it.
        self._fn._pubkey = None
        return self._test_corrupt_all("pubkey",
                                      "pubkey doesn't match fingerprint")

    def test_corrupt_all_sig(self):
        # a corrupt signature is a bad one
        # the signature runs from about [543:799], depending upon the length
        # of the pubkey
        return self._test_corrupt_all("signature", "signature is invalid")

    def test_corrupt_all_share_hash_chain_number(self):
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle the first byte, that will look like a bad hash number,
        # causing an IndexError
        return self._test_corrupt_all("share_hash_chain", "corrupt hashes")

    def test_corrupt_all_share_hash_chain_hash(self):
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle a few bytes in, that will look like a bad hash.
        return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")

    def test_corrupt_all_block_hash_tree(self):
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure")

    def test_corrupt_all_block(self):
        return self._test_corrupt_all("share_data", "block hash tree failure")

    def test_corrupt_all_encprivkey(self):
        # a corrupted privkey won't even be noticed by the reader, only by a
        # writer.
        return self._test_corrupt_all("enc_privkey", None, should_succeed=True)


    def test_corrupt_all_encprivkey_late(self):
        # this should work for the same reason as above, but we corrupt
        # after the servermap update to exercise the error handling
        # code.
        # We need to remove the privkey from the node, or the retrieve
        # process won't know to update it.
        self._fn._privkey = None
        return self._test_corrupt_all("enc_privkey",
                                      None, # this shouldn't fail
                                      should_succeed=True,
                                      corrupt_early=False,
                                      fetch_privkey=True)

    def test_corrupt_all_seqnum_late(self):
        # corrupting the seqnum between mapupdate and retrieve should result
        # in NotEnoughSharesError, since each share will look invalid
        def _check(res):
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
            self.failUnless("uncoordinated write" in str(f))
        return self._test_corrupt_all(1, "ran out of peers",
                                      corrupt_early=False,
                                      failure_checker=_check)

    def test_corrupt_all_block_hash_tree_late(self):
        def _check(res):
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure",
                                      corrupt_early=False,
                                      failure_checker=_check)


    def test_corrupt_all_block_late(self):
        def _check(res):
            f = res[0]
            self.failUnless(f.check(NotEnoughSharesError))
        return self._test_corrupt_all("share_data", "block hash tree failure",
                                      corrupt_early=False,
                                      failure_checker=_check)


    def test_basic_pubkey_at_end(self):
        # we corrupt the pubkey in all but the last 'k' shares, allowing the
        # download to succeed but forcing a bunch of retries first. Note that
        # this is rather pessimistic: our Retrieve process will throw away
        # the whole share if the pubkey is bad, even though the rest of the
        # share might be good.

        self._fn._pubkey = None
        k = self._fn.get_required_shares()
        N = self._fn.get_total_shares()
        d = defer.succeed(None)
        d.addCallback(corrupt, self._storage, "pubkey",
                      shnums_to_corrupt=range(0, N-k))
        d.addCallback(lambda res: self.make_servermap())
        def _do_retrieve(servermap):
            self.failUnless(servermap.problems)
            self.failUnless("pubkey doesn't match fingerprint"
                            in str(servermap.problems[0]))
            ver = servermap.best_recoverable_version()
            r = Retrieve(self._fn, servermap, ver)
            c = consumer.MemoryConsumer()
            return r.download(c)
        d.addCallback(_do_retrieve)
        d.addCallback(lambda mc: "".join(mc.chunks))
        d.addCallback(lambda new_contents:
                      self.failUnlessEqual(new_contents, self.CONTENTS))
        return d


    def _test_corrupt_some(self, offset, mdmf=False):
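        # corrupt 'offset' in the first five shares only (shnums 0-4): the
        # servermap still looks healthy, but Retrieve must notice the bad
        # shares and fall back to the remaining good ones.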
        if mdmf:
            d = self.publish_mdmf()
        else:
            d = defer.succeed(None)
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, offset, range(5)))
        d.addCallback(lambda ignored:
            self.make_servermap())
        def _do_retrieve(servermap):
            ver = servermap.best_recoverable_version()
            self.failUnless(ver)
            return self._fn.download_best_version()
        d.addCallback(_do_retrieve)
        d.addCallback(lambda new_contents:
            self.failUnlessEqual(new_contents, self.CONTENTS))
        return d


    def test_corrupt_some(self):
        # corrupt the data of the first five shares (so the servermap thinks
        # they're good but retrieve marks them as bad), so that the
        # MODE_READ set of 6 will be insufficient, forcing node.download to
        # retry with more servers.
        return self._test_corrupt_some("share_data")


    def test_download_fails(self):
        d = corrupt(None, self._storage, "signature")
        d.addCallback(lambda ignored:
            self.shouldFail(UnrecoverableFileError, "test_download_anyway",
                            "no recoverable versions",
                            self._fn.download_best_version))
        return d



    def test_corrupt_mdmf_block_hash_tree(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._test_corrupt_all(("block_hash_tree", 12 * 32),
                                   "block hash tree failure",
                                   corrupt_early=True,
                                   should_succeed=False))
        return d


    def test_corrupt_mdmf_block_hash_tree_late(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._test_corrupt_all(("block_hash_tree", 12 * 32),
                                   "block hash tree failure",
                                   corrupt_early=False,
                                   should_succeed=False))
        return d


    def test_corrupt_mdmf_share_data(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            # TODO: Find out what the block size is and corrupt a
            # specific block, rather than just guessing.
            self._test_corrupt_all(("share_data", 12 * 40),
                                   "block hash tree failure",
                                   corrupt_early=True,
                                   should_succeed=False))
        return d


    def test_corrupt_some_mdmf(self):
        return self._test_corrupt_some(("share_data", 12 * 40),
                                       mdmf=True)


class CheckerMixin:
    def check_good(self, r, where):
        self.failUnless(r.is_healthy(), where)
        return r

    def check_bad(self, r, where):
        self.failIf(r.is_healthy(), where)
        return r

    def check_expected_failure(self, r, expected_exception, substring, where):
        for (peerid, storage_index, shnum, f) in r.problems:
            if f.check(expected_exception):
                self.failUnless(substring in str(f),
                                "%s: substring '%s' not in '%s'" %
                                (where, substring, str(f)))
                return
        self.fail("%s: didn't see expected exception %s in problems %s" %
                  (where, expected_exception, r.problems))


class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    def setUp(self):
        return self.publish_one()


    def test_check_good(self):
        d = self._fn.check(Monitor())
        d.addCallback(self.check_good, "test_check_good")
        return d

    def test_check_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_good")
        return d

    def test_check_no_shares(self):
        for shares in self._storage._peers.values():
            shares.clear()
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_no_shares")
        return d

    def test_check_mdmf_no_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            for shares in self._storage._peers.values():
                shares.clear()
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        return d

    def test_check_not_enough_shares(self):
        for shares in self._storage._peers.values():
            for shnum in shares.keys():
                if shnum > 0:
                    del shares[shnum]
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_not_enough_shares")
        return d

    def test_check_mdmf_not_enough_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            for shares in self._storage._peers.values():
                for shnum in shares.keys():
                    if shnum > 0:
                        del shares[shnum]
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
        return d


    def test_check_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_all_bad_sig")
        return d

    def test_check_mdmf_all_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
        return d

    def test_check_all_bad_blocks(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Checker won't notice this.. it doesn't look at actual data
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_all_bad_blocks")
        return d


    def test_check_mdmf_all_bad_blocks(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data"))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
        return d

    def test_verify_good(self):
        d = self._fn.check(Monitor(), verify=True)
        d.addCallback(self.check_good, "test_verify_good")
        return d

    def test_verify_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_all_bad_sig")
        return d

    def test_verify_one_bad_sig(self):
        d = corrupt(None, self._storage, 1, [9]) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sig")
        return d

    def test_verify_one_bad_block(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Verifier *will* notice this, since it examines every byte
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_one_bad_block")
        return d

    def test_verify_one_bad_sharehash(self):
        d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "corrupt hashes",
                      "test_verify_one_bad_sharehash")
        return d

    def test_verify_one_bad_encprivkey(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "invalid privkey",
                      "test_verify_one_bad_encprivkey")
        return d

    def test_verify_one_bad_encprivkey_uncheckable(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        readonly_fn = self._fn.get_readonly()
        # a read-only node has no way to validate the privkey
        d.addCallback(lambda ignored:
            readonly_fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_one_bad_encprivkey_uncheckable")
        return d


    def test_verify_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf_good")
        return d


    def test_verify_mdmf_one_bad_block(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data", [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        # We should find one bad block here
        d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_mdmf_one_bad_block")
        return d


    def test_verify_mdmf_bad_encprivkey(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [0]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "privkey",
                      "test_verify_mdmf_bad_encprivkey")
        return d


    def test_verify_mdmf_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1, [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
        return d


    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [1]))
        d.addCallback(lambda ignored:
            self._fn.get_readonly())
        d.addCallback(lambda fn:
            fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
        return d


class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):

    def get_shares(self, s):
        all_shares = {} # maps (peerid, shnum) to share data
        for peerid in s._peers:
            shares = s._peers[peerid]
            for shnum in shares:
                data = shares[shnum]
                all_shares[ (peerid, shnum) ] = data
        return all_shares

    def copy_shares(self, ignored=None):
        self.old_shares.append(self.get_shares(self._storage))

    def test_repair_nop(self):
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results

            self.copy_shares()

            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer, change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)

            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                (version0,
                 seqnum0,
                 root_hash0,
                 IV0,
                 k0, N0, segsize0, datalen0,
                 o0) = unpack_header(initial_shares[key])
                (version1,
                 seqnum1,
                 root_hash1,
                 IV1,
                 k1, N1, segsize1, datalen1,
                 o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)
        return d

    def failIfSharesChanged(self, ignored=None):
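        # compare the two most recent share snapshots saved by copy_shares
        # and assert that nothing changed in between.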
        old_shares = self.old_shares[-2]
        current_shares = self.old_shares[-1]
        self.failUnlessEqual(old_shares, current_shares)


    def test_unrepairable_0shares(self):
        d = self.publish_one()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                shares[peerid] = {}
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check(crr):
            self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)
        return d

    def test_mdmf_unrepairable_0shares(self):
        d = self.publish_mdmf()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                shares[peerid] = {}
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        d.addCallback(lambda crr: self.failIf(crr.get_successful()))
        return d


    def test_unrepairable_1share(self):
        d = self.publish_one()
        def _delete_all_but_one_share(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum > 0:
                        del shares[peerid][shnum]
        d.addCallback(_delete_all_but_one_share)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check(crr):
            self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)
        return d

    def test_mdmf_unrepairable_1share(self):
        d = self.publish_mdmf()
        def _delete_all_but_one_share(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum > 0:
                        del shares[peerid][shnum]
        d.addCallback(_delete_all_but_one_share)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check(crr):
            self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)
        return d

    def test_repairable_5shares(self):
        d = self.publish_one()
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum > 4:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check(crr):
            self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check)
        return d

    def test_mdmf_repairable_5shares(self):
        d = self.publish_mdmf()
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum > 5:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        def _check(cr):
            self.failIf(cr.is_healthy())
            self.failUnless(cr.is_recoverable())
            return cr
        d.addCallback(_check)
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check1(crr):
            self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check1)
        return d


    def test_merge(self):
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # pass force=True
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            return d2
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            else:
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d

    def test_non_merge(self):
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res:
                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                          1:3,3:3,5:3,7:3,9:3}))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d

    def get_roothash_for(self, index):
        # return the roothash for the first share we see in the saved set
        shares = self._copied_shares[index]
        for peerid in shares:
            for shnum in shares[peerid]:
                share = shares[peerid][shnum]
                (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
                          unpack_header(share)
                return root_hash

    def test_check_and_repair_readcap(self):
        # we can't currently repair from a mutable readcap: #625
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        def _get_readcap(res):
            self._fn3 = self._fn.get_readonly()
            # also delete some shares
            for peerid,shares in self._storage._peers.items():
                shares.pop(0, None)
        d.addCallback(_get_readcap)
        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
        def _check_results(crr):
            self.failUnless(ICheckAndRepairResults.providedBy(crr))
            # we should detect the unhealthy, but skip over mutable-readcap
            # repairs until #625 is fixed
            self.failIf(crr.get_pre_repair_results().is_healthy())
            self.failIf(crr.get_repair_attempted())
            self.failIf(crr.get_post_repair_results().is_healthy())
        d.addCallback(_check_results)
        return d

class DevNullDictionary(dict):
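    # a dict that silently discards all writes. We substitute it for the
    # nodemaker's node cache so that create_from_cap() always builds a
    # fresh node instead of returning a cached one.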
    def __setitem__(self, key, value):
        return

class MultipleEncodings(unittest.TestCase):
    def setUp(self):
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
        d.addCallback(_created)
        return d

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.

        fn = self._fn
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        # existing shares
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n

        s = self._storage
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            shares = s._peers
            s._peers = {}
            return shares
        d.addCallback(_published)
        return d

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        if oldmap is None:
            oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def test_multiple_encodings(self):
        # we encode the same file in three different ways (3-of-10, 4-of-9,
        # and 4-of-7), then mix up the shares, to make sure that download
        # survives seeing a variety of encodings. This is actually kind of
        # tricky to set up.

        contents1 = "Contents for encoding 1 (3-of-10) go here"
        contents2 = "Contents for encoding 2 (4-of-9) go here"
        contents3 = "Contents for encoding 3 (4-of-7) go here"

        # we make a retrieval object that doesn't know what encoding
        # parameters to use
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())

        # now we upload the file in each encoding, and grab each set of shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)

        def _merge(res):
            log.msg("merging sharelists")
            # we merge the shares from the three sets, leaving each shnum in
            # its original location, but using a share from set1, set2, or
            # set3 according to the following sequence:
            #
            #  4-of-9  a  s2
            #  4-of-9  b  s2
            #  4-of-7  c   s3
            #  4-of-9  d  s2
            #  3-of-10 e s1
            #  3-of-10 f s1
            #  3-of-10 g s1
            #  4-of-9  h  s2
            #
            # so that neither form can be recovered until fetch [g], at which
            # point version-s1 (the 3-of-10 form) should be recoverable. If
            # the implementation latches on to the first version it sees,
            # then s2 will be recoverable at fetch [h].

            # Later, when we implement code that handles multiple versions,
            # we can use this framework to assert that all recoverable
            # versions are retrieved, and test that 'epsilon' does its job

            places = [2, 2, 3, 2, 1, 1, 1, 2]
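            # places maps shnum -> which encoding supplies that share:
            # s2 (4-of-9) at shnums 0, 1, 3, and 7; s3 (4-of-7) at shnum 2;
            # s1 (3-of-10) at shnums 4, 5, and 6.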

            sharemap = {}
            sb = self._storage_broker

            for peerid in sorted(sb.get_all_serverids()):
                # reset this peer's shares once, before placing any of them;
                # resetting inside the shnum loop would discard shares we
                # had already placed for this peer.
                self._storage._peers[peerid] = peers = {}
                for shnum in self._shares1.get(peerid, {}):
                    if shnum < len(places):
                        which = places[shnum]
                    else:
                        which = "x"
                    in_1 = shnum in self._shares1[peerid]
                    in_2 = shnum in self._shares2.get(peerid, {})
                    in_3 = shnum in self._shares3.get(peerid, {})
                    if which == 1:
                        if in_1:
                            peers[shnum] = self._shares1[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 2:
                        if in_2:
                            peers[shnum] = self._shares2[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 3:
                        if in_3:
                            peers[shnum] = self._shares3[peerid][shnum]
                            sharemap[shnum] = peerid

            # we don't bother placing any other shares
            # now sort the sequence so that share 0 is returned first
            new_sequence = [sharemap[shnum]
                            for shnum in sorted(sharemap.keys())]
            self._storage._sequence = new_sequence
            log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
        return d


class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):

    def setUp(self):
        return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")

        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        # recover either
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        return d

    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version

        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)

        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
        return d


class Utils(unittest.TestCase):
    def test_cache(self):
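        # ResponseCache.read(verinfo, shnum, offset, length) should return
        # data only when the requested span is entirely covered by spans
        # previously add()ed for that exact (verinfo, shnum); a miss or a
        # partial overlap returns None.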
2418         c = ResponseCache()
2419         # xdata = base62.b2a(os.urandom(100))[:100]
2420         xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2421         ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
2422         c.add("v1", 1, 0, xdata)
2423         c.add("v1", 1, 2000, ydata)
2424         self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2425         self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2426         self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2427         self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2428         self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2429         self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2430         self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2431         self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2432         self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
2433         self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2434         self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2435         self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2436         self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2437         self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2438         self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2439         self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2440         self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2441         self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
2442
2443         # test joining fragments
2444         c = ResponseCache()
2445         c.add("v1", 1, 0, xdata[:10])
2446         c.add("v1", 1, 10, xdata[10:20])
2447         self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2448
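# The assertions above pin down the cache contract: read() hits only
# when the requested span is completely covered by previously-added
# data (adjacent fragments are joined), and any partial coverage is a
# miss. A minimal sketch of that contract -- illustrative only, not
# the real ResponseCache implementation:

class _SpanCacheSketch:
    def __init__(self):
        self._spans = {} # (verinfo, shnum) -> sorted [(offset, data)]
    def add(self, verinfo, shnum, offset, data):
        self._spans.setdefault((verinfo, shnum), []).append((offset, data))
        self._spans[(verinfo, shnum)].sort()
    def read(self, verinfo, shnum, offset, length):
        got, want = "", offset
        for (start, data) in self._spans.get((verinfo, shnum), []):
            if start <= want < start + len(data):
                piece = data[want-start : want-start + (offset+length-want)]
                got += piece
                want += len(piece)
        if want == offset + length:
            return got
        return None # any uncovered byte makes the whole read a miss
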
2449 class Exceptions(unittest.TestCase):
2450     def test_repr(self):
2451         nmde = NeedMoreDataError(100, 50, 100)
2452         self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde))
2453         ucwe = UncoordinatedWriteError()
2454         self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
2455
2456 class SameKeyGenerator:
2457     def __init__(self, pubkey, privkey):
2458         self.pubkey = pubkey
2459         self.privkey = privkey
2460     def generate(self, keysize=None):
2461         return defer.succeed( (self.pubkey, self.privkey) )
2462
2463 class FirstServerGetsKilled:
2464     done = False
2465     def notify(self, retval, wrapper, methname):
2466         if not self.done:
2467             wrapper.broken = True
2468             self.done = True
2469         return retval
2470
2471 class FirstServerGetsDeleted:
2472     def __init__(self):
2473         self.done = False
2474         self.silenced = None
2475     def notify(self, retval, wrapper, methname):
2476         if not self.done:
2477             # this query will work, but later queries should think the share
2478             # has been deleted
2479             self.done = True
2480             self.silenced = wrapper
2481             return retval
2482         if wrapper == self.silenced:
2483             assert methname == "slot_testv_and_readv_and_writev"
2484             return (True, {})
2485         return retval
2486
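# Both notifier classes above plug into the post_call_notifier hook
# that the no_network grid's server wrappers expose: after each remote
# call completes, the wrapper passes (retval, wrapper, methname)
# through the notifier, which may flip the wrapper's 'broken' flag or
# substitute a different return value. A rough sketch of that dispatch
# (the real wrapper lives in allmydata.test.no_network; _do_call is a
# hypothetical stand-in for the actual method invocation):

class _WrapperSketch:
    broken = False
    post_call_notifier = None
    def callRemote(self, methname, *args, **kwargs):
        if self.broken:
            return defer.fail(Exception("intentionally broken"))
        d = defer.maybeDeferred(self._do_call, methname, *args, **kwargs)
        if self.post_call_notifier:
            # e.g. FirstServerGetsKilled breaks us after the first
            # successful call; FirstServerGetsDeleted rewrites results
            d.addCallback(self.post_call_notifier, self, methname)
        return d
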
2487 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2488     def test_publish_surprise(self):
2489         self.basedir = "mutable/Problems/test_publish_surprise"
2490         self.set_up_grid()
2491         nm = self.g.clients[0].nodemaker
2492         d = nm.create_mutable_file(MutableData("contents 1"))
2493         def _created(n):
2494             d = defer.succeed(None)
2495             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2496             def _got_smap1(smap):
2497                 # stash the old state of the file
2498                 self.old_map = smap
2499             d.addCallback(_got_smap1)
2500             # then modify the file, leaving the old map untouched
2501             d.addCallback(lambda res: log.msg("starting winning write"))
2502             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2503             # now attempt to modify the file with the old servermap. This
2504             # will look just like an uncoordinated write, in which every
2505             # single share got updated between our mapupdate and our publish
2506             d.addCallback(lambda res: log.msg("starting doomed write"))
2507             d.addCallback(lambda res:
2508                           self.shouldFail(UncoordinatedWriteError,
2509                                           "test_publish_surprise", None,
2510                                           n.upload,
2511                                           MutableData("contents 2a"), self.old_map))
2512             return d
2513         d.addCallback(_created)
2514         return d
2515
2516     def test_retrieve_surprise(self):
2517         self.basedir = "mutable/Problems/test_retrieve_surprise"
2518         self.set_up_grid()
2519         nm = self.g.clients[0].nodemaker
2520         d = nm.create_mutable_file(MutableData("contents 1"))
2521         def _created(n):
2522             d = defer.succeed(None)
2523             d.addCallback(lambda res: n.get_servermap(MODE_READ))
2524             def _got_smap1(smap):
2525                 # stash the old state of the file
2526                 self.old_map = smap
2527             d.addCallback(_got_smap1)
2528             # then modify the file, leaving the old map untouched
2529             d.addCallback(lambda res: log.msg("starting winning write"))
2530             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2531             # now attempt to retrieve the old version with the old servermap.
2532             # This will look like someone has changed the file since we
2533             # updated the servermap.
2534             d.addCallback(lambda res: n._cache._clear())
2535             d.addCallback(lambda res: log.msg("starting doomed read"))
2536             d.addCallback(lambda res:
2537                           self.shouldFail(NotEnoughSharesError,
2538                                           "test_retrieve_surprise",
2539                                           "ran out of peers: have 0 of 1",
2540                                           n.download_version,
2541                                           self.old_map,
2542                                           self.old_map.best_recoverable_version(),
2543                                           ))
2544             return d
2545         d.addCallback(_created)
2546         return d
2547
2548
2549     def test_unexpected_shares(self):
2550         # upload the file, take a servermap, shut down one of the servers,
2551         # upload it again (causing shares to appear on a new server), then
2552         # upload using the old servermap. The last upload should fail with an
2553         # UncoordinatedWriteError, because of the shares that didn't appear
2554         # in the servermap.
2555         self.basedir = "mutable/Problems/test_unexpected_shares"
2556         self.set_up_grid()
2557         nm = self.g.clients[0].nodemaker
2558         d = nm.create_mutable_file(MutableData("contents 1"))
2559         def _created(n):
2560             d = defer.succeed(None)
2561             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2562             def _got_smap1(smap):
2563                 # stash the old state of the file
2564                 self.old_map = smap
2565                 # now shut down one of the servers
2566                 peer0 = list(smap.make_sharemap()[0])[0]
2567                 self.g.remove_server(peer0)
2568                 # then modify the file, leaving the old map untouched
2569                 log.msg("starting winning write")
2570                 return n.overwrite(MutableData("contents 2"))
2571             d.addCallback(_got_smap1)
2572             # now attempt to modify the file with the old servermap. This
2573             # will look just like an uncoordinated write, in which every
2574             # single share got updated between our mapupdate and our publish
2575             d.addCallback(lambda res: log.msg("starting doomed write"))
2576             d.addCallback(lambda res:
2577                           self.shouldFail(UncoordinatedWriteError,
2578                                           "test_unexpected_shares", None,
2579                                           n.upload,
2580                                           MutableData("contents 2a"), self.old_map))
2581             return d
2582         d.addCallback(_created)
2583         return d
2584
2585     def test_bad_server(self):
2586         # Break one server, then create the file: the initial publish should
2587         # complete with an alternate server. Breaking a second server should
2588         # not prevent an update from succeeding either.
2589         self.basedir = "mutable/Problems/test_bad_server"
2590         self.set_up_grid()
2591         nm = self.g.clients[0].nodemaker
2592
2593         # to make sure that one of the initial peers is broken, we have to
2594         # get creative. We create an RSA key and compute its storage-index.
2595         # Then we make a KeyGenerator that always returns that one key, and
2596         # use it to create the mutable file. This will get easier when we can
2597         # use #467 static-server-selection to disable permutation and force
2598         # the choice of server for share[0].
2599
2600         d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2601         def _got_key( (pubkey, privkey) ):
2602             nm.key_generator = SameKeyGenerator(pubkey, privkey)
2603             pubkey_s = pubkey.serialize()
2604             privkey_s = privkey.serialize()
2605             u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2606                                         ssk_pubkey_fingerprint_hash(pubkey_s))
2607             self._storage_index = u.get_storage_index()
2608         d.addCallback(_got_key)
2609         def _break_peer0(res):
2610             si = self._storage_index
2611             servers = nm.storage_broker.get_servers_for_psi(si)
2612             self.g.break_server(servers[0].get_serverid())
2613             self.server1 = servers[1]
2614         d.addCallback(_break_peer0)
2615         # now "create" the file, using the pre-established key, and let the
2616         # initial publish finally happen
2617         d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2618         # that ought to work
2619         def _got_node(n):
2620             d = n.download_best_version()
2621             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2622             # now break the second peer
2623             def _break_peer1(res):
2624                 self.g.break_server(self.server1.get_serverid())
2625             d.addCallback(_break_peer1)
2626             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2627             # that ought to work too
2628             d.addCallback(lambda res: n.download_best_version())
2629             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2630             def _explain_error(f):
2631                 print f
2632                 if f.check(NotEnoughServersError):
2633                     print "first_error:", f.value.first_error
2634                 return f
2635             d.addErrback(_explain_error)
2636             return d
2637         d.addCallback(_got_node)
2638         return d
2639
2640     def test_bad_server_overlap(self):
2641         # like test_bad_server, but with no extra unused servers to fall back
2642         # upon. This means that we must re-use a server which we've already
2643         # used. If we don't remember the fact that we sent them one share
2644         # already, we'll mistakenly think we're experiencing an
2645         # UncoordinatedWriteError.
2646
2647         # Break one server, then create the file: the initial publish should
2648         # complete with an alternate server. Breaking a second server should
2649         # not prevent an update from succeeding either.
2650         self.basedir = "mutable/Problems/test_bad_server_overlap"
2651         self.set_up_grid()
2652         nm = self.g.clients[0].nodemaker
2653         sb = nm.storage_broker
2654
2655         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2656         self.g.break_server(peerids[0])
2657
2658         d = nm.create_mutable_file(MutableData("contents 1"))
2659         def _created(n):
2660             d = n.download_best_version()
2661             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2662             # now break one of the remaining servers
2663             def _break_second_server(res):
2664                 self.g.break_server(peerids[1])
2665             d.addCallback(_break_second_server)
2666             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2667             # that ought to work too
2668             d.addCallback(lambda res: n.download_best_version())
2669             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2670             return d
2671         d.addCallback(_created)
2672         return d
2673
2674     def test_publish_all_servers_bad(self):
2675         # Break all servers: the publish should fail
2676         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2677         self.set_up_grid()
2678         nm = self.g.clients[0].nodemaker
2679         for s in nm.storage_broker.get_connected_servers():
2680             s.get_rref().broken = True
2681
2682         d = self.shouldFail(NotEnoughServersError,
2683                             "test_publish_all_servers_bad",
2684                             "ran out of good servers",
2685                             nm.create_mutable_file, MutableData("contents"))
2686         return d
2687
2688     def test_publish_no_servers(self):
2689         # no servers at all: the publish should fail
2690         self.basedir = "mutable/Problems/test_publish_no_servers"
2691         self.set_up_grid(num_servers=0)
2692         nm = self.g.clients[0].nodemaker
2693
2694         d = self.shouldFail(NotEnoughServersError,
2695                             "test_publish_no_servers",
2696                             "Ran out of non-bad servers",
2697                             nm.create_mutable_file, MutableData("contents"))
2698         return d
2699
2700
2701     def test_privkey_query_error(self):
2702         # when a servermap is updated with MODE_WRITE, it tries to get the
2703         # privkey. Something might go wrong during this query attempt.
2704         # Exercise the code in _privkey_query_failed which tries to handle
2705         # such an error.
2706         self.basedir = "mutable/Problems/test_privkey_query_error"
2707         self.set_up_grid(num_servers=20)
2708         nm = self.g.clients[0].nodemaker
2709         nm._node_cache = DevNullDictionary() # disable the nodecache
2710
2711         # we need some contents that are large enough to push the privkey out
2712         # of the early part of the file
2713         LARGE = "These are Larger contents" * 2000 # about 50KB
2714         LARGE_uploadable = MutableData(LARGE)
2715         d = nm.create_mutable_file(LARGE_uploadable)
2716         def _created(n):
2717             self.uri = n.get_uri()
2718             self.n2 = nm.create_from_cap(self.uri)
2719
2720             # When a mapupdate is performed on a node that doesn't yet know
2721             # the privkey, a short read is sent to a batch of servers, to get
2722             # the verinfo and (hopefully, if the file is short enough) the
2723             # encprivkey. Our file is too large to let this first read
2724             # contain the encprivkey. Each non-encprivkey-bearing response
2725             # that arrives (until the node gets the encprivkey) will trigger
2726             # a second read to specifically read the encprivkey.
2727             #
2728             # So, to exercise this case:
2729             #  1. notice which server gets a read() call first
2730             #  2. tell that server to start throwing errors
2731             killer = FirstServerGetsKilled()
2732             for s in nm.storage_broker.get_connected_servers():
2733                 s.get_rref().post_call_notifier = killer.notify
2734         d.addCallback(_created)
2735
2736         # now we update a servermap from a new node (which doesn't have the
2737         # privkey yet, forcing it to use a separate privkey query). Note that
2738         # the map-update will succeed, since we'll just get a copy from one
2739         # of the other shares.
2740         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2741
2742         return d
2743
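    # The mapupdate flow described above, in miniature: the initial
    # query reads a fixed-size prefix of each share, and only when the
    # encrypted private key did not fit inside that prefix does the
    # updater issue a second, targeted read for it -- the follow-up
    # query that test_privkey_query_error (above) and
    # test_privkey_query_missing (below) sabotage. Illustrative sketch
    # only (the names and the 4000-byte figure are assumptions, not
    # the real updater's API):
    @staticmethod
    def _fetch_privkey_sketch(do_read, privkey_offset, privkey_size,
                              initial_read_size=4000):
        prefix = do_read(0, initial_read_size)
        if privkey_offset + privkey_size <= initial_read_size:
            return prefix[privkey_offset:privkey_offset+privkey_size]
        # not in the prefix: issue the follow-up encprivkey query
        return do_read(privkey_offset, privkey_size)
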
2744     def test_privkey_query_missing(self):
2745         # like test_privkey_query_error, but the shares are deleted by the
2746         # second query, instead of raising an exception.
2747         self.basedir = "mutable/Problems/test_privkey_query_missing"
2748         self.set_up_grid(num_servers=20)
2749         nm = self.g.clients[0].nodemaker
2750         LARGE = "These are Larger contents" * 2000 # about 50KB
2751         LARGE_uploadable = MutableData(LARGE)
2752         nm._node_cache = DevNullDictionary() # disable the nodecache
2753
2754         d = nm.create_mutable_file(LARGE_uploadable)
2755         def _created(n):
2756             self.uri = n.get_uri()
2757             self.n2 = nm.create_from_cap(self.uri)
2758             deleter = FirstServerGetsDeleted()
2759             for s in nm.storage_broker.get_connected_servers():
2760                 s.get_rref().post_call_notifier = deleter.notify
2761         d.addCallback(_created)
2762         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2763         return d
2764
2765
2766     def test_block_and_hash_query_error(self):
2767         # This tests what happens when a query to a remote server
2768         # fails in either the hash-validation step or the block-fetching
2769         # step (because of batching, these are the same actual query).
2770         # We need the storage server to stay alive until the point
2771         # that its prefix is validated, then suddenly die. This
2772         # exercises some exception-handling code in Retrieve.
2773         self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2774         self.set_up_grid(num_servers=20)
2775         nm = self.g.clients[0].nodemaker
2776         CONTENTS = "contents" * 2000
2777         CONTENTS_uploadable = MutableData(CONTENTS)
2778         d = nm.create_mutable_file(CONTENTS_uploadable)
2779         def _created(node):
2780             self._node = node
2781         d.addCallback(_created)
2782         d.addCallback(lambda ignored:
2783             self._node.get_servermap(MODE_READ))
2784         def _then(servermap):
2785             # we have our servermap. Now we set up the servers like the
2786             # tests above -- the first one that gets a read call should
2787             # start throwing errors, but only after returning its prefix
2788             # for validation. Since we'll download without fetching the
2789             # private key, the next query to the remote server will be
2790             # for either a block and salt or for hashes, either of which
2791             # will exercise the error handling code.
2792             killer = FirstServerGetsKilled()
2793             for s in nm.storage_broker.get_connected_servers():
2794                 s.get_rref().post_call_notifier = killer.notify
2795             ver = servermap.best_recoverable_version()
2796             assert ver
2797             return self._node.download_version(servermap, ver)
2798         d.addCallback(_then)
2799         d.addCallback(lambda data:
2800             self.failUnlessEqual(data, CONTENTS))
2801         return d
2802
2803
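# Several Problems tests above ("surprise", "unexpected shares") rely
# on Publish noticing that the grid no longer matches a stale
# servermap: finding a share with a sequence number the map never
# recorded means another writer got in between our mapupdate and our
# publish, and the safe response is UncoordinatedWriteError. A
# conceptual sketch of that check (illustrative, not the real Publish
# internals):

def _looks_surprised(mapped_seqnum, observed_seqnums):
    # mapped_seqnum: highest seqnum our (possibly stale) servermap saw
    # observed_seqnums: seqnums of shares actually found at write time
    return any(sn > mapped_seqnum for sn in observed_seqnums)
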
2804 class FileHandle(unittest.TestCase):
2805     def setUp(self):
2806         self.test_data = "Test Data" * 50000
2807         self.sio = StringIO(self.test_data)
2808         self.uploadable = MutableFileHandle(self.sio)
2809
2810
2811     def test_filehandle_read(self):
2812         self.basedir = "mutable/FileHandle/test_filehandle_read"
2813         chunk_size = 10
2814         for i in xrange(0, len(self.test_data), chunk_size):
2815             data = self.uploadable.read(chunk_size)
2816             data = "".join(data)
2817             start = i
2818             end = i + chunk_size
2819             self.failUnlessEqual(data, self.test_data[start:end])
2820
2821
2822     def test_filehandle_get_size(self):
2823         self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2824         actual_size = len(self.test_data)
2825         size = self.uploadable.get_size()
2826         self.failUnlessEqual(size, actual_size)
2827
2828
2829     def test_filehandle_get_size_out_of_order(self):
2830         # We should be able to call get_size whenever we want without
2831         # disturbing the location of the seek pointer.
2832         chunk_size = 100
2833         data = self.uploadable.read(chunk_size)
2834         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2835
2836         # Now get the size.
2837         size = self.uploadable.get_size()
2838         self.failUnlessEqual(size, len(self.test_data))
2839
2840         # Now get more data. We should be right where we left off.
2841         more_data = self.uploadable.read(chunk_size)
2842         start = chunk_size
2843         end = chunk_size * 2
2844         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2845
2846
2847     def test_filehandle_file(self):
2848         # Make sure that the MutableFileHandle works on a file as well
2849         # as a StringIO object, since in some cases it will be asked to
2850         # deal with files.
2851         self.basedir = self.mktemp()
2852         # mktemp() returns a pathname but does not create the directory
2853         os.mkdir(self.basedir)
2854         f_path = os.path.join(self.basedir, "test_file")
2855         f = open(f_path, "w")
2856         f.write(self.test_data)
2857         f.close()
2858         f = open(f_path, "r")
2859
2860         uploadable = MutableFileHandle(f)
2861
2862         data = uploadable.read(len(self.test_data))
2863         self.failUnlessEqual("".join(data), self.test_data)
2864         size = uploadable.get_size()
2865         self.failUnlessEqual(size, len(self.test_data))
2866
2867
2868     def test_close(self):
2869         # Make sure that the MutableFileHandle closes its handle when
2870         # told to do so.
2871         self.uploadable.close()
2872         self.failUnless(self.sio.closed)
2873
2874
2875 class DataHandle(unittest.TestCase):
2876     def setUp(self):
2877         self.test_data = "Test Data" * 50000
2878         self.uploadable = MutableData(self.test_data)
2879
2880
2881     def test_datahandle_read(self):
2882         chunk_size = 10
2883         for i in xrange(0, len(self.test_data), chunk_size):
2884             data = self.uploadable.read(chunk_size)
2885             data = "".join(data)
2886             start = i
2887             end = i + chunk_size
2888             self.failUnlessEqual(data, self.test_data[start:end])
2889
2890
2891     def test_datahandle_get_size(self):
2892         actual_size = len(self.test_data)
2893         size = self.uploadable.get_size()
2894         self.failUnlessEqual(size, actual_size)
2895
2896
2897     def test_datahandle_get_size_out_of_order(self):
2898         # We should be able to call get_size whenever we want without
2899         # disturbing the location of the seek pointer.
2900         chunk_size = 100
2901         data = self.uploadable.read(chunk_size)
2902         self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2903
2904         # Now get the size.
2905         size = self.uploadable.get_size()
2906         self.failUnlessEqual(size, len(self.test_data))
2907
2908         # Now get more data. We should be right where we left off.
2909         more_data = self.uploadable.read(chunk_size)
2910         start = chunk_size
2911         end = chunk_size * 2
2912         self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2913
2914
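# The FileHandle and DataHandle tests above pin down the uploadable
# contract: read(n) returns a list of strings (callers join them),
# get_size() must not disturb the read position, and close() closes
# the underlying handle. A minimal sketch of an object honoring that
# contract (hypothetical; the real classes are MutableFileHandle and
# MutableData):

class _UploadableSketch:
    def __init__(self, filehandle):
        self._f = filehandle
        self._size = None
    def get_size(self):
        if self._size is None:
            here = self._f.tell()  # remember the read position
            self._f.seek(0, 2)     # seek to EOF to learn the size
            self._size = self._f.tell()
            self._f.seek(here)     # put the position back
        return self._size
    def read(self, length):
        return [self._f.read(length)] # list-of-strings, as asserted above
    def close(self):
        self._f.close()
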
2915 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin,
2916               PublishMixin):
2917     def setUp(self):
2918         GridTestMixin.setUp(self)
2919         self.basedir = self.mktemp()
2920         self.set_up_grid()
2921         self.c = self.g.clients[0]
2922         self.nm = self.c.nodemaker
2923         self.data = "test data" * 100000 # about 900 KiB; MDMF
2924         self.small_data = "test data" * 10 # about 90 B; SDMF
2925
2926
2927     def do_upload_mdmf(self):
2928         d = self.nm.create_mutable_file(MutableData(self.data),
2929                                         version=MDMF_VERSION)
2930         def _then(n):
2931             assert isinstance(n, MutableFileNode)
2932             assert n._protocol_version == MDMF_VERSION
2933             self.mdmf_node = n
2934             return n
2935         d.addCallback(_then)
2936         return d
2937
2938     def do_upload_sdmf(self):
2939         d = self.nm.create_mutable_file(MutableData(self.small_data))
2940         def _then(n):
2941             assert isinstance(n, MutableFileNode)
2942             assert n._protocol_version == SDMF_VERSION
2943             self.sdmf_node = n
2944             return n
2945         d.addCallback(_then)
2946         return d
2947
2948     def do_upload_empty_sdmf(self):
2949         d = self.nm.create_mutable_file(MutableData(""))
2950         def _then(n):
2951             assert isinstance(n, MutableFileNode)
2952             self.sdmf_zero_length_node = n
2953             assert n._protocol_version == SDMF_VERSION
2954             return n
2955         d.addCallback(_then)
2956         return d
2957
2958     def do_upload(self):
2959         d = self.do_upload_mdmf()
2960         d.addCallback(lambda ign: self.do_upload_sdmf())
2961         return d
2962
2963     def test_debug(self):
2964         d = self.do_upload_mdmf()
2965         def _debug(n):
2966             fso = debug.FindSharesOptions()
2967             storage_index = base32.b2a(n.get_storage_index())
2968             fso.si_s = storage_index
2969             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
2970                             for (i,ss,storedir)
2971                             in self.iterate_servers()]
2972             fso.stdout = StringIO()
2973             fso.stderr = StringIO()
2974             debug.find_shares(fso)
2975             sharefiles = fso.stdout.getvalue().splitlines()
2976             expected = self.nm.default_encoding_parameters["n"]
2977             self.failUnlessEqual(len(sharefiles), expected)
2978
2979             do = debug.DumpOptions()
2980             do["filename"] = sharefiles[0]
2981             do.stdout = StringIO()
2982             debug.dump_share(do)
2983             output = do.stdout.getvalue()
2984             lines = set(output.splitlines())
2985             self.failUnless("Mutable slot found:" in lines, output)
2986             self.failUnless(" share_type: MDMF" in lines, output)
2987             self.failUnless(" num_extra_leases: 0" in lines, output)
2988             self.failUnless(" MDMF contents:" in lines, output)
2989             self.failUnless("  seqnum: 1" in lines, output)
2990             self.failUnless("  required_shares: 3" in lines, output)
2991             self.failUnless("  total_shares: 10" in lines, output)
2992             self.failUnless("  segsize: 131073" in lines, output)
2993             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
2994             vcap = n.get_verify_cap().to_string()
2995             self.failUnless("  verify-cap: %s" % vcap in lines, output)
2996
2997             cso = debug.CatalogSharesOptions()
2998             cso.nodedirs = fso.nodedirs
2999             cso.stdout = StringIO()
3000             cso.stderr = StringIO()
3001             debug.catalog_shares(cso)
3002             shares = cso.stdout.getvalue().splitlines()
3003             oneshare = shares[0] # all shares should be MDMF
3004             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3005             self.failUnless(oneshare.startswith("MDMF"), oneshare)
3006             fields = oneshare.split()
3007             self.failUnlessEqual(fields[0], "MDMF")
3008             self.failUnlessEqual(fields[1], storage_index)
3009             self.failUnlessEqual(fields[2], "3/10")
3010             self.failUnlessEqual(fields[3], "%d" % len(self.data))
3011             self.failUnless(fields[4].startswith("#1:"), fields[4])
3012             # the rest of fields[4] is the roothash, which depends upon
3013             # encryption salts and is not constant. fields[5] is the
3014             # remaining time on the longest lease, which is timing dependent.
3015             # The rest of the line is the quoted pathname to the share.
3016         d.addCallback(_debug)
3017         return d
3018
3019     def test_get_sequence_number(self):
3020         d = self.do_upload()
3021         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3022         d.addCallback(lambda bv:
3023             self.failUnlessEqual(bv.get_sequence_number(), 1))
3024         d.addCallback(lambda ignored:
3025             self.sdmf_node.get_best_readable_version())
3026         d.addCallback(lambda bv:
3027             self.failUnlessEqual(bv.get_sequence_number(), 1))
3028         # Now update. The sequence number should then be 2 in both
3029         # cases.
3030         def _do_update(ignored):
3031             new_data = MutableData("foo bar baz" * 100000)
3032             new_small_data = MutableData("foo bar baz" * 10)
3033             d1 = self.mdmf_node.overwrite(new_data)
3034             d2 = self.sdmf_node.overwrite(new_small_data)
3035             dl = gatherResults([d1, d2])
3036             return dl
3037         d.addCallback(_do_update)
3038         d.addCallback(lambda ignored:
3039             self.mdmf_node.get_best_readable_version())
3040         d.addCallback(lambda bv:
3041             self.failUnlessEqual(bv.get_sequence_number(), 2))
3042         d.addCallback(lambda ignored:
3043             self.sdmf_node.get_best_readable_version())
3044         d.addCallback(lambda bv:
3045             self.failUnlessEqual(bv.get_sequence_number(), 2))
3046         return d
3047
3048
3049     def test_version_extension_api(self):
3050         # We need to define an API by which an uploader can set the
3051         # extension parameters, and by which a downloader can retrieve
3052         # extensions.
3053         d = self.do_upload_mdmf()
3054         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3055         def _got_version(version):
3056             hints = version.get_downloader_hints()
3057             # The upload should have filled in the downloader hints.
3058             self.failUnlessIn("k", hints)
3059             self.failUnlessEqual(hints['k'], 3)
3060             self.failUnlessIn('segsize', hints)
3061             self.failUnlessEqual(hints['segsize'], 131073)
3062         d.addCallback(_got_version)
3063         return d
3064
3065
3066     def test_extensions_from_cap(self):
3067         # If we initialize a mutable file with a cap that has extension
3068         # parameters in it and then grab the extension parameters using
3069         # our API, we should see that they're set correctly.
3070         d = self.do_upload_mdmf()
3071         def _then(ign):
3072             mdmf_uri = self.mdmf_node.get_uri()
3073             new_node = self.nm.create_from_cap(mdmf_uri)
3074             return new_node.get_best_mutable_version()
3075         d.addCallback(_then)
3076         def _got_version(version):
3077             hints = version.get_downloader_hints()
3078             self.failUnlessIn("k", hints)
3079             self.failUnlessEqual(hints["k"], 3)
3080             self.failUnlessIn("segsize", hints)
3081             self.failUnlessEqual(hints["segsize"], 131073)
3082         d.addCallback(_got_version)
3083         return d
3084
3085
3086     def test_extensions_from_upload(self):
3087         # If we create a new mutable file with some contents, we should
3088         # get back an MDMF cap with the right hints in place.
3089         contents = "foo bar baz" * 100000
3090         d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3091         def _got_mutable_file(n):
3092             rw_uri = n.get_uri()
3093             expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3094             self.failUnlessIn(expected_k, rw_uri)
3095             # XXX: Get this more intelligently.
3096             self.failUnlessIn("131073", rw_uri)
3097
3098             ro_uri = n.get_readonly_uri()
3099             self.failUnlessIn(expected_k, ro_uri)
3100             self.failUnlessIn("131073", ro_uri)
3101         d.addCallback(_got_mutable_file)
3102         return d
3103
3104
3105     def test_cap_after_upload(self):
3106         # If we create a new mutable file and upload things to it, and
3107         # it's an MDMF file, we should get an MDMF cap back from that
3108         # file and should be able to use that.
3109         # That's essentially what an MDMF node is, so just check that.
3110         d = self.do_upload_mdmf()
3111         def _then(ign):
3112             mdmf_uri = self.mdmf_node.get_uri()
3113             cap = uri.from_string(mdmf_uri)
3114             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3115             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3116             cap = uri.from_string(readonly_mdmf_uri)
3117             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3118         d.addCallback(_then)
3119         return d
3120
3121     def test_mutable_version(self):
3122         # assert that getting parameters from the IMutableVersion object
3123         # gives us the same data as getting them from the filenode itself
3124         d = self.do_upload()
3125         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3126         def _check_mdmf(bv):
3127             n = self.mdmf_node
3128             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3129             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3130             self.failIf(bv.is_readonly())
3131         d.addCallback(_check_mdmf)
3132         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3133         def _check_sdmf(bv):
3134             n = self.sdmf_node
3135             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3136             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3137             self.failIf(bv.is_readonly())
3138         d.addCallback(_check_sdmf)
3139         return d
3140
3141
3142     def test_get_readonly_version(self):
3143         d = self.do_upload()
3144         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3145         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3146
3147         # Attempting to get a mutable version of a mutable file from a
3148         # filenode initialized with a readcap should return a readonly
3149         # version of that same node.
3150         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3151         d.addCallback(lambda ro: ro.get_best_mutable_version())
3152         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3153
3154         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3155         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3156
3157         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3158         d.addCallback(lambda ro: ro.get_best_mutable_version())
3159         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3160         return d
3161
3162
3163     def test_toplevel_overwrite(self):
3164         new_data = MutableData("foo bar baz" * 100000)
3165         new_small_data = MutableData("foo bar baz" * 10)
3166         d = self.do_upload()
3167         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3168         d.addCallback(lambda ignored:
3169             self.mdmf_node.download_best_version())
3170         d.addCallback(lambda data:
3171             self.failUnlessEqual(data, "foo bar baz" * 100000))
3172         d.addCallback(lambda ignored:
3173             self.sdmf_node.overwrite(new_small_data))
3174         d.addCallback(lambda ignored:
3175             self.sdmf_node.download_best_version())
3176         d.addCallback(lambda data:
3177             self.failUnlessEqual(data, "foo bar baz" * 10))
3178         return d
3179
3180
3181     def test_toplevel_modify(self):
3182         d = self.do_upload()
3183         def modifier(old_contents, servermap, first_time):
3184             return old_contents + "modified"
3185         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3186         d.addCallback(lambda ignored:
3187             self.mdmf_node.download_best_version())
3188         d.addCallback(lambda data:
3189             self.failUnlessIn("modified", data))
3190         d.addCallback(lambda ignored:
3191             self.sdmf_node.modify(modifier))
3192         d.addCallback(lambda ignored:
3193             self.sdmf_node.download_best_version())
3194         d.addCallback(lambda data:
3195             self.failUnlessIn("modified", data))
3196         return d
3197
3198
3199     def test_version_modify(self):
3200         # TODO: When we can publish multiple versions, alter this test
3201         # to modify a version other than the best usable version, then
3202         # verify that the best recoverable version is the modified one.
3203         d = self.do_upload()
3204         def modifier(old_contents, servermap, first_time):
3205             return old_contents + "modified"
3206         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3207         d.addCallback(lambda ignored:
3208             self.mdmf_node.download_best_version())
3209         d.addCallback(lambda data:
3210             self.failUnlessIn("modified", data))
3211         d.addCallback(lambda ignored:
3212             self.sdmf_node.modify(modifier))
3213         d.addCallback(lambda ignored:
3214             self.sdmf_node.download_best_version())
3215         d.addCallback(lambda data:
3216             self.failUnlessIn("modified", data))
3217         return d
3218
3219
3220     def test_download_version(self):
3221         d = self.publish_multiple()
3222         # We want to have two recoverable versions on the grid.
3223         d.addCallback(lambda res:
3224                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3225                                           1:1,3:1,5:1,7:1,9:1}))
3226         # Now try to download each version. We should get the plaintext
3227         # associated with that version.
3228         d.addCallback(lambda ignored:
3229             self._fn.get_servermap(mode=MODE_READ))
3230         def _got_servermap(smap):
3231             versions = smap.recoverable_versions()
3232             assert len(versions) == 2
3233
3234             self.servermap = smap
3235             self.version1, self.version2 = versions
3236             assert self.version1 != self.version2
3237
3238             self.version1_seqnum = self.version1[0]
3239             self.version2_seqnum = self.version2[0]
3240             self.version1_index = self.version1_seqnum - 1
3241             self.version2_index = self.version2_seqnum - 1
3242
3243         d.addCallback(_got_servermap)
3244         d.addCallback(lambda ignored:
3245             self._fn.download_version(self.servermap, self.version1))
3246         d.addCallback(lambda results:
3247             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3248                                  results))
3249         d.addCallback(lambda ignored:
3250             self._fn.download_version(self.servermap, self.version2))
3251         d.addCallback(lambda results:
3252             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3253                                  results))
3254         return d
3255
3256
3257     def test_download_nonexistent_version(self):
3258         d = self.do_upload_mdmf()
3259         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3260         def _set_servermap(servermap):
3261             self.servermap = servermap
3262         d.addCallback(_set_servermap)
3263         d.addCallback(lambda ignored:
3264            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3265                            None,
3266                            self.mdmf_node.download_version, self.servermap,
3267                            "not a version"))
3268         return d
3269
3270
3271     def test_partial_read(self):
3272         d = self.do_upload_mdmf()
3273         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3274         modes = [("start_on_segment_boundary",
3275                   mathutil.next_multiple(128 * 1024, 3), 50),
3276                  ("ending_one_byte_after_segment_boundary",
3277                   mathutil.next_multiple(128 * 1024, 3)-50, 51),
3278                  ("zero_length_at_start", 0, 0),
3279                  ("zero_length_in_middle", 50, 0),
3280                  ("zero_length_at_segment_boundary",
3281                   mathutil.next_multiple(128 * 1024, 3), 0),
3282                  ]
3283         for (name, offset, length) in modes:
3284             d.addCallback(self._do_partial_read, name, offset, length)
3285         # then read only a few bytes at a time, and see that the results are
3286         # what we expect.
3287         def _read_data(version):
3288             c = consumer.MemoryConsumer()
3289             d2 = defer.succeed(None)
3290             for i in xrange(0, len(self.data), 10000):
3291                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3292             d2.addCallback(lambda ignored:
3293                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3294             return d2
3295         d.addCallback(_read_data)
3296         return d
3297     def _do_partial_read(self, version, name, offset, length):
3298         c = consumer.MemoryConsumer()
3299         d = version.read(c, offset, length)
3300         expected = self.data[offset:offset+length]
3301         d.addCallback(lambda ignored: "".join(c.chunks))
3302         def _check(results):
3303             if results != expected:
3304                 print
3305                 print "got: %s ... %s" % (results[:20], results[-20:])
3306                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3307                 self.fail("results[%s] != expected" % name)
3308             return version # daisy-chained to next call
3309         d.addCallback(_check)
3310         return d
3311
3312
3313     def _test_read_and_download(self, node, expected):
3314         d = node.get_best_readable_version()
3315         def _read_data(version):
3316             c = consumer.MemoryConsumer()
3317             d2 = defer.succeed(None)
3318             d2.addCallback(lambda ignored: version.read(c))
3319             d2.addCallback(lambda ignored:
3320                 self.failUnlessEqual(expected, "".join(c.chunks)))
3321             return d2
3322         d.addCallback(_read_data)
3323         d.addCallback(lambda ignored: node.download_best_version())
3324         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3325         return d
3326
3327     def test_read_and_download_mdmf(self):
3328         d = self.do_upload_mdmf()
3329         d.addCallback(self._test_read_and_download, self.data)
3330         return d
3331
3332     def test_read_and_download_sdmf(self):
3333         d = self.do_upload_sdmf()
3334         d.addCallback(self._test_read_and_download, self.small_data)
3335         return d
3336
3337     def test_read_and_download_sdmf_zero_length(self):
3338         d = self.do_upload_empty_sdmf()
3339         d.addCallback(self._test_read_and_download, "")
3340         return d
3341
3342
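# test_partial_read (above) computes its segment-boundary offsets with
# mathutil.next_multiple(128 * 1024, 3): the publisher rounds the
# default 128 KiB segment size up to a multiple of k=3, which yields
# the 131073-byte segsize that test_debug and the hint tests assert.
# A sketch of that rounding, assuming next_multiple is ceil-division
# (an assumption that matches the 131073 figure):

def _next_multiple_sketch(n, k):
    # smallest multiple of k that is >= n
    return ((n + k - 1) // k) * k

assert _next_multiple_sketch(128 * 1024, 3) == 131073
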
3343 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3344     timeout = 400 # these tests are too big, 120s is not enough on slow
3345                   # platforms
3346     def setUp(self):
3347         GridTestMixin.setUp(self)
3348         self.basedir = self.mktemp()
3349         self.set_up_grid()
3350         self.c = self.g.clients[0]
3351         self.nm = self.c.nodemaker
3352         self.data = "testdata " * 100000 # about 900 KiB; MDMF
3353         self.small_data = "test data" * 10 # about 90 B; SDMF
3354
3355
3356     def do_upload_sdmf(self):
3357         d = self.nm.create_mutable_file(MutableData(self.small_data))
3358         def _then(n):
3359             assert isinstance(n, MutableFileNode)
3360             self.sdmf_node = n
3361             # Make SDMF node that has 255 shares.
3362             self.nm.default_encoding_parameters['n'] = 255
3363             self.nm.default_encoding_parameters['k'] = 127
3364             return self.nm.create_mutable_file(MutableData(self.small_data))
3365         d.addCallback(_then)
3366         def _then2(n):
3367             assert isinstance(n, MutableFileNode)
3368             self.sdmf_max_shares_node = n
3369         d.addCallback(_then2)
3370         return d
3371
3372     def do_upload_mdmf(self):
3373         d = self.nm.create_mutable_file(MutableData(self.data),
3374                                         version=MDMF_VERSION)
3375         def _then(n):
3376             assert isinstance(n, MutableFileNode)
3377             self.mdmf_node = n
3378             # Make MDMF node that has 255 shares.
3379             self.nm.default_encoding_parameters['n'] = 255
3380             self.nm.default_encoding_parameters['k'] = 127
3381             return self.nm.create_mutable_file(MutableData(self.data),
3382                                                version=MDMF_VERSION)
3383         d.addCallback(_then)
3384         def _then2(n):
3385             assert isinstance(n, MutableFileNode)
3386             self.mdmf_max_shares_node = n
3387         d.addCallback(_then2)
3388         return d
3389
3390     def _test_replace(self, offset, new_data):
3391         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3392         d0 = self.do_upload_mdmf()
3393         def _run(ign):
3394             d = defer.succeed(None)
3395             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3396                 d.addCallback(lambda ign: node.get_best_mutable_version())
3397                 d.addCallback(lambda mv:
3398                     mv.update(MutableData(new_data), offset))
3399                 # close over 'node'.
3400                 d.addCallback(lambda ignored, node=node:
3401                     node.download_best_version())
3402                 def _check(results):
3403                     if results != expected:
3404                         print
3405                         print "got: %s ... %s" % (results[:20], results[-20:])
3406                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3407                         self.fail("results != expected")
3408                 d.addCallback(_check)
3409             return d
3410         d0.addCallback(_run)
3411         return d0
3412
3413     def test_append(self):
3414         # We should be able to append data to a mutable file and get
3415         # what we expect.
3416         return self._test_replace(len(self.data), "appended")
3417
3418     def test_replace_middle(self):
3419         # We should be able to replace data in the middle of a mutable
3420         # file and get what we expect back.
3421         return self._test_replace(100, "replaced")
3422
3423     def test_replace_beginning(self):
3424         # We should be able to replace data at the beginning of the file
3425         # without truncating the file
3426         return self._test_replace(0, "beginning")
3427
3428     def test_replace_segstart1(self):
3429         return self._test_replace(128*1024+1, "NNNN")
3430
3431     def test_replace_zero_length_beginning(self):
3432         return self._test_replace(0, "")
3433
3434     def test_replace_zero_length_middle(self):
3435         return self._test_replace(50, "")
3436
3437     def test_replace_zero_length_segstart1(self):
3438         return self._test_replace(128*1024+1, "")
3439
3440     def test_replace_and_extend(self):
3441         # We should be able to replace data in the middle of a mutable
3442         # file and extend that mutable file and get what we expect.
3443         return self._test_replace(100, "modified " * 100000)
3444
3445
3446     def _check_differences(self, got, expected):
3447         # displaying arbitrary corruption in a 1MB file of repeating
3448         # data is tricky, so look for likely trouble spots and display
3449         # them separately
3450         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3451         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3452         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3453                     for (start,end) in gotmods]
3454         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3455                     for (start,end) in expmods]
3456         #print "expecting: %s" % expspans
3457
3458         SEGSIZE = 128*1024
3459         if got != expected:
3460             print "differences:"
3461             for segnum in range(len(expected)//SEGSIZE):
3462                 start = segnum * SEGSIZE
3463                 end = (segnum+1) * SEGSIZE
3464                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3465                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3466                 if got_ends != exp_ends:
3467                     print "expected[%d]: %s" % (start, exp_ends)
3468                     print "got     [%d]: %s" % (start, got_ends)
3469             if expspans != gotspans:
3470                 print "expected: %s" % expspans
3471                 print "got     : %s" % gotspans
3472             open("EXPECTED","wb").write(expected)
3473             open("GOT","wb").write(got)
3474             print "wrote data to EXPECTED and GOT"
3475             self.fail("didn't get expected data")
3476
3477
3478     def test_replace_locations(self):
3479         # exercise fencepost conditions
3480         SEGSIZE = 128*1024
3481         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3482         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3483         d0 = self.do_upload_mdmf()
3484         def _run(ign):
3485             expected = self.data
3486             d = defer.succeed(None)
3487             for offset in suspects:
3488                 new_data = letters.next()*2 # "AA", then "BB", etc
3489                 expected = expected[:offset]+new_data+expected[offset+2:]
3490                 d.addCallback(lambda ign:
3491                               self.mdmf_node.get_best_mutable_version())
3492                 def _modify(mv, offset=offset, new_data=new_data):
3493                     # close over 'offset','new_data'
3494                     md = MutableData(new_data)
3495                     return mv.update(md, offset)
3496                 d.addCallback(_modify)
3497                 d.addCallback(lambda ignored:
3498                               self.mdmf_node.download_best_version())
3499                 d.addCallback(self._check_differences, expected)
3500             return d
3501         d0.addCallback(_run)
3502         return d0
3503
3504     def test_replace_locations_max_shares(self):
3505         # exercise fencepost conditions
3506         SEGSIZE = 128*1024
3507         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3508         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3509         d0 = self.do_upload_mdmf()
3510         def _run(ign):
3511             expected = self.data
3512             d = defer.succeed(None)
3513             for offset in suspects:
3514                 new_data = letters.next()*2 # "AA", then "BB", etc
3515                 expected = expected[:offset]+new_data+expected[offset+2:]
3516                 d.addCallback(lambda ign:
3517                               self.mdmf_max_shares_node.get_best_mutable_version())
3518                 def _modify(mv, offset=offset, new_data=new_data):
3519                     # close over 'offset','new_data'
3520                     md = MutableData(new_data)
3521                     return mv.update(md, offset)
3522                 d.addCallback(_modify)
3523                 d.addCallback(lambda ignored:
3524                               self.mdmf_max_shares_node.download_best_version())
3525                 d.addCallback(self._check_differences, expected)
3526             return d
3527         d0.addCallback(_run)
3528         return d0
3529
3530
3531     def test_append_power_of_two(self):
3532         # If we attempt to extend a mutable file so that its segment
3533         # count crosses a power-of-two boundary, the update operation
3534         # should know how to reencode the file.
3535
3536         # Note that the data populating self.mdmf_node is about 900 KiB
3537         # long -- this is 7 segments at the default segment size. So we
3538         # need to add 2 segments worth of data to push it over a
3539         # power-of-two boundary.
3540         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3541         new_data = self.data + (segment * 2)
3542         d0 = self.do_upload_mdmf()
3543         def _run(ign):
3544             d = defer.succeed(None)
3545             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3546                 d.addCallback(lambda ign: node.get_best_mutable_version())
3547                 d.addCallback(lambda mv:
3548                     mv.update(MutableData(segment * 2), len(self.data)))
3549                 d.addCallback(lambda ignored, node=node:
3550                     node.download_best_version())
3551                 d.addCallback(lambda results:
3552                     self.failUnlessEqual(results, new_data))
3553             return d
3554         d0.addCallback(_run)
3555         return d0
3556
3557     def test_update_sdmf(self):
3558         # Running update on a single-segment file should still work.
3559         new_data = self.small_data + "appended"
3560         d0 = self.do_upload_sdmf()
3561         def _run(ign):
3562             d = defer.succeed(None)
3563             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3564                 d.addCallback(lambda ign: node.get_best_mutable_version())
3565                 d.addCallback(lambda mv:
3566                     mv.update(MutableData("appended"), len(self.small_data)))
3567                 d.addCallback(lambda ignored, node=node:
3568                     node.download_best_version())
3569                 d.addCallback(lambda results:
3570                     self.failUnlessEqual(results, new_data))
3571             return d
3572         d0.addCallback(_run)
3573         return d0
3574
3575     def test_replace_in_last_segment(self):
3576         # The wrapper should know how to handle the tail segment
3577         # appropriately.
3578         replace_offset = len(self.data) - 100
3579         new_data = self.data[:replace_offset] + "replaced"
3580         rest_offset = replace_offset + len("replaced")
3581         new_data += self.data[rest_offset:]
3582         d0 = self.do_upload_mdmf()
3583         def _run(ign):
3584             d = defer.succeed(None)
3585             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3586                 d.addCallback(lambda ign: node.get_best_mutable_version())
3587                 d.addCallback(lambda mv:
3588                     mv.update(MutableData("replaced"), replace_offset))
3589                 d.addCallback(lambda ignored, node=node:
3590                     node.download_best_version())
3591                 d.addCallback(lambda results:
3592                     self.failUnlessEqual(results, new_data))
3593             return d
3594         d0.addCallback(_run)
3595         return d0
3596
3597     def test_multiple_segment_replace(self):
3598         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3599         new_data = self.data[:replace_offset]
3600         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3601         new_data += 2 * new_segment
3602         new_data += "replaced"
3603         rest_offset = len(new_data)
3604         new_data += self.data[rest_offset:]
3605         d0 = self.do_upload_mdmf()
3606         def _run(ign):
3607             d = defer.succeed(None)
3608             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3609                 d.addCallback(lambda ign, node=node: node.get_best_mutable_version())
3610                 d.addCallback(lambda mv:
3611                     mv.update(MutableData((2 * new_segment) + "replaced"),
3612                               replace_offset))
3613                 d.addCallback(lambda ignored, node=node:
3614                     node.download_best_version())
3615                 d.addCallback(lambda results:
3616                     self.failUnlessEqual(results, new_data))
3617             return d
3618         d0.addCallback(_run)
3619         return d0
3620
3621 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3622     sdmf_old_shares = {}
3623     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3624     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3625     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3626     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3627     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3628     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3629     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3630     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3631     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3632     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3633     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3634     sdmf_old_contents = "This is a test file.\n"
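    # The base64 strings above are complete SDMF share files written by an
    # older Tahoe version; copy_sdmf_shares() below decodes them into each
    # server's storage directory so the current code can read them.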
3635     def copy_sdmf_shares(self):
3636         # We short-circuit the upload process by writing pre-generated shares directly to disk.
3637         servernums = self.g.servers_by_number.keys()
3638         assert len(servernums) == 10
3639
3640         assignments = zip(sorted(self.sdmf_old_shares.keys()), servernums)
3641         # Get the storage index.
3642         cap = uri.from_string(self.sdmf_old_cap)
3643         si = cap.get_storage_index()
3644
3645         # Now execute each assignment by writing the share data to disk.
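        # (storage_index_to_dir() maps the storage index to its two-level
        # share directory, so each share lands where the storage server
        # expects it: <storedir>/shares/<prefix>/<si>/<sharenum>.)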
3646         for (share, servernum) in assignments:
3647             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3648             storedir = self.get_serverdir(servernum)
3649             storage_path = os.path.join(storedir, "shares",
3650                                         storage_index_to_dir(si))
3651             fileutil.make_dirs(storage_path)
3652             fileutil.write(os.path.join(storage_path, "%d" % share),
3653                            sharedata)
3654         # ...and verify that the shares are there.
3655         shares = self.find_uri_shares(self.sdmf_old_cap)
3656         assert len(shares) == 10
3657
3658     def test_new_downloader_can_read_old_shares(self):
3659         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3660         self.set_up_grid()
3661         self.copy_sdmf_shares()
3662         nm = self.g.clients[0].nodemaker
3663         n = nm.create_from_cap(self.sdmf_old_cap)
3664         d = n.download_best_version()
3665         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3666         return d
3667
3668 class DifferentEncoding(unittest.TestCase):
3669     def setUp(self):
3670         self._storage = s = FakeStorage()
3671         self.nodemaker = make_nodemaker(s)
3672
3673     def test_filenode(self):
3674         # Create a file with 3-of-20 encoding, then modify it with a client
3675         # configured for 3-of-10 encoding. Ticket #1510 tracks a failure here.
3676         self.nodemaker.default_encoding_parameters["n"] = 20
3677         d = self.nodemaker.create_mutable_file("old contents")
3678         def _created(n):
3679             filecap = n.get_cap().to_string()
3680             del n # we want a new object, not the cached one
3681             self.nodemaker.default_encoding_parameters["n"] = 10
3682             n2 = self.nodemaker.create_from_cap(filecap)
3683             return n2
3684         d.addCallback(_created)
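        # n.modify() calls the modifier with the current contents, the
        # current servermap, and a first_time flag; the string it returns
        # is published as the file's new contents.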
3685         def modifier(old_contents, servermap, first_time):
3686             return "new contents"
3687         d.addCallback(lambda n: n.modify(modifier))
3688         return d