]> git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/blob - src/allmydata/test/test_mutable.py
Retrieve: remove the initial prefix-is-still-good check
[tahoe-lafs/tahoe-lafs.git] / src / allmydata / test / test_mutable.py
1
2 import os, re, base64
3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from twisted.internet.interfaces import IConsumer
7 from zope.interface import implements
8 from allmydata import uri, client
9 from allmydata.nodemaker import NodeMaker
10 from allmydata.util import base32, consumer, fileutil, mathutil
11 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
12      ssk_pubkey_fingerprint_hash
13 from allmydata.util.deferredutil import gatherResults
14 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
15      NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
16 from allmydata.monitor import Monitor
17 from allmydata.test.common import ShouldFailMixin
18 from allmydata.test.no_network import GridTestMixin
19 from foolscap.api import eventually, fireEventually
20 from foolscap.logging import log
21 from allmydata.storage_client import StorageFarmBroker
22 from allmydata.storage.common import storage_index_to_dir
23 from allmydata.scripts import debug
24
25 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
26 from allmydata.mutable.common import ResponseCache, \
27      MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
28      NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
29      NotEnoughServersError, CorruptShareError
30 from allmydata.mutable.retrieve import Retrieve
31 from allmydata.mutable.publish import Publish, MutableFileHandle, \
32                                       MutableData, \
33                                       DEFAULT_MAX_SEGMENT_SIZE
34 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
35 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
36 from allmydata.mutable.repairer import MustForceRepairError
37
38 import allmydata.test.common_util as testutil
39 from allmydata.test.common import TEST_RSA_KEY_SIZE
40
41
42 # this "FakeStorage" exists to put the share data in RAM and avoid using real
43 # network connections, both to speed up the tests and to reduce the amount of
44 # non-mutable.py code being exercised.
45
class FakeStorage:
    # In-RAM stand-in for the collection of storage servers. Tests can
    # inspect and mutate the published shares directly, and can force read
    # queries to be answered in a chosen order so that more of Retrieve's
    # error-handling paths get exercised.
    #
    # The storage index argument is ignored: one FakeStorage instance only
    # ever serves a single storage index.

    def __init__(self):
        self._peers = {}
        # When _sequence is set, read queries are not answered right away.
        # Their Deferreds accumulate in _pending instead, and exactly one
        # second after the first query arrives they are all released in
        # _sequence order. (We cannot know up front how many queries will
        # arrive, hence the timer.)
        self._sequence = None
        self._pending = {}
        self._pending_timer = None

    def read(self, peerid, storage_index):
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return defer.succeed(shares)
        # sequenced mode: park the answer until _fire_readers runs
        answer_d = defer.Deferred()
        if not self._pending:
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        self._pending.setdefault(peerid, []).append((answer_d, shares))
        return answer_d

    def _fire_readers(self):
        self._pending_timer = None
        waiting = self._pending
        self._pending = {}
        # answer the peers named in _sequence first, in that order...
        for peerid in self._sequence:
            if peerid in waiting:
                for (answer_d, shares) in waiting.pop(peerid):
                    eventually(answer_d.callback, shares)
        # ...then everybody else, in arbitrary dict order
        for peerid in waiting:
            for (answer_d, shares) in waiting[peerid]:
                eventually(answer_d.callback, shares)

    def write(self, peerid, storage_index, shnum, offset, data):
        # splice 'data' into the existing share at 'offset', extending it
        # if necessary
        shares = self._peers.setdefault(peerid, {})
        f = StringIO()
        f.write(shares.get(shnum, ""))
        f.seek(offset)
        f.write(data)
        shares[shnum] = f.getvalue()
101
102
class FakeStorageServer:
    # Wraps a FakeStorage instance behind the remote-invocation interface
    # that the mutable-file code expects from a storage server. 'queries'
    # counts remote calls so tests can assert on write counts.
    def __init__(self, peerid, storage):
        self.peerid = peerid
        self.storage = storage
        self.queries = 0
    def callRemote(self, methname, *args, **kwargs):
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        # fire-and-forget, like foolscap's callRemoteOnly: the result (and
        # any failure) is deliberately discarded and None is returned.
        # (A stray dead 'pass' statement was removed here.)
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        # corruption reports are simply ignored by the fake server
        pass

    def slot_readv(self, storage_index, shnums, readv):
        # answer a read-vector query from the fake storage; an empty
        # 'shnums' means "all shares"
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by testv
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)
159
160
def flip_bit(original, byte_offset):
    """Return a copy of 'original' with the low bit of the byte at
    'byte_offset' inverted."""
    flipped = chr(ord(original[byte_offset]) ^ 0x01)
    return original[:byte_offset] + flipped + original[byte_offset+1:]
165
def add_two(original, byte_offset):
    # Corrupt a version byte. Merely flipping the low bit is not enough,
    # because 1 is also a valid version number; XORing with 0x02 maps the
    # valid version numbers 0 and 1 to the invalid 2 and 3 (i.e. it "adds
    # two" for those inputs).
    corrupted = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + corrupted + original[byte_offset+1:]
172
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    #
    # 's' is the FakeStorage holding the shares. 'offset' selects what to
    # damage: a field name from the share's offset table, the special
    # string "pubkey", a raw byte offset, or an (offset1, offset2) tuple.
    # 'offset_offset' adds a further byte delta. 'res' is returned
    # unchanged (through the DeferredList) so this can sit mid-chain.
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum):
                # 'offset' is closed over from the enclosing scope
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    # an IV is present only for SDMF; 107 is presumably the
                    # fixed byte offset of the pubkey in an SDMF share --
                    # TODO confirm against the layout module
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    # bit-flipping could turn version 0 into the
                    # still-valid version 1, so use add_two instead
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl
221
def make_storagebroker(s=None, num_peers=10):
    # Build a StorageFarmBroker whose servers are all FakeStorageServers
    # sharing the single FakeStorage instance 's' (created if not given).
    if not s:
        s = FakeStorage()
    storage_broker = StorageFarmBroker(None, True)
    for i in range(num_peers):
        peerid = tagged_hash("peerid", "%d" % i)[:20]
        storage_broker.test_add_rref(peerid, FakeStorageServer(peerid, s))
    return storage_broker
232
def make_nodemaker(s=None, num_peers=10):
    # Construct a NodeMaker wired to fake storage, using 3-of-10 encoding
    # and the small (fast) test RSA key size.
    storage_broker = make_storagebroker(s, num_peers)
    secret_holder = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    return NodeMaker(storage_broker, secret_holder, None,
                     None, None,
                     {"k": 3, "n": 10}, keygen)
242
243 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
244     # this used to be in Publish, but we removed the limit. Some of
245     # these tests test whether the new code correctly allows files
246     # larger than the limit.
247     OLD_MAX_SEGMENT_SIZE = 3500000
248     def setUp(self):
249         self._storage = s = FakeStorage()
250         self.nodemaker = make_nodemaker(s)
251
252     def test_create(self):
253         d = self.nodemaker.create_mutable_file()
254         def _created(n):
255             self.failUnless(isinstance(n, MutableFileNode))
256             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
257             sb = self.nodemaker.storage_broker
258             peer0 = sorted(sb.get_all_serverids())[0]
259             shnums = self._storage._peers[peer0].keys()
260             self.failUnlessEqual(len(shnums), 1)
261         d.addCallback(_created)
262         return d
263
264
265     def test_create_mdmf(self):
266         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
267         def _created(n):
268             self.failUnless(isinstance(n, MutableFileNode))
269             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
270             sb = self.nodemaker.storage_broker
271             peer0 = sorted(sb.get_all_serverids())[0]
272             shnums = self._storage._peers[peer0].keys()
273             self.failUnlessEqual(len(shnums), 1)
274         d.addCallback(_created)
275         return d
276
277     def test_single_share(self):
278         # Make sure that we tolerate publishing a single share.
279         self.nodemaker.default_encoding_parameters['k'] = 1
280         self.nodemaker.default_encoding_parameters['happy'] = 1
281         self.nodemaker.default_encoding_parameters['n'] = 1
282         d = defer.succeed(None)
283         for v in (SDMF_VERSION, MDMF_VERSION):
284             d.addCallback(lambda ignored:
285                 self.nodemaker.create_mutable_file(version=v))
286             def _created(n):
287                 self.failUnless(isinstance(n, MutableFileNode))
288                 self._node = n
289                 return n
290             d.addCallback(_created)
291             d.addCallback(lambda n:
292                 n.overwrite(MutableData("Contents" * 50000)))
293             d.addCallback(lambda ignored:
294                 self._node.download_best_version())
295             d.addCallback(lambda contents:
296                 self.failUnlessEqual(contents, "Contents" * 50000))
297         return d
298
299     def test_max_shares(self):
300         self.nodemaker.default_encoding_parameters['n'] = 255
301         d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
302         def _created(n):
303             self.failUnless(isinstance(n, MutableFileNode))
304             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
305             sb = self.nodemaker.storage_broker
306             num_shares = sum([len(self._storage._peers[x].keys()) for x \
307                               in sb.get_all_serverids()])
308             self.failUnlessEqual(num_shares, 255)
309             self._node = n
310             return n
311         d.addCallback(_created)
312         # Now we upload some contents
313         d.addCallback(lambda n:
314             n.overwrite(MutableData("contents" * 50000)))
315         # ...then download contents
316         d.addCallback(lambda ignored:
317             self._node.download_best_version())
318         # ...and check to make sure everything went okay.
319         d.addCallback(lambda contents:
320             self.failUnlessEqual("contents" * 50000, contents))
321         return d
322
323     def test_max_shares_mdmf(self):
324         # Test how files behave when there are 255 shares.
325         self.nodemaker.default_encoding_parameters['n'] = 255
326         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
327         def _created(n):
328             self.failUnless(isinstance(n, MutableFileNode))
329             self.failUnlessEqual(n.get_storage_index(), n._storage_index)
330             sb = self.nodemaker.storage_broker
331             num_shares = sum([len(self._storage._peers[x].keys()) for x \
332                               in sb.get_all_serverids()])
333             self.failUnlessEqual(num_shares, 255)
334             self._node = n
335             return n
336         d.addCallback(_created)
337         d.addCallback(lambda n:
338             n.overwrite(MutableData("contents" * 50000)))
339         d.addCallback(lambda ignored:
340             self._node.download_best_version())
341         d.addCallback(lambda contents:
342             self.failUnlessEqual(contents, "contents" * 50000))
343         return d
344
345     def test_mdmf_filenode_cap(self):
346         # Test that an MDMF filenode, once created, returns an MDMF URI.
347         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
348         def _created(n):
349             self.failUnless(isinstance(n, MutableFileNode))
350             cap = n.get_cap()
351             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
352             rcap = n.get_readcap()
353             self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
354             vcap = n.get_verify_cap()
355             self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
356         d.addCallback(_created)
357         return d
358
359
360     def test_create_from_mdmf_writecap(self):
361         # Test that the nodemaker is capable of creating an MDMF
362         # filenode given an MDMF cap.
363         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
364         def _created(n):
365             self.failUnless(isinstance(n, MutableFileNode))
366             s = n.get_uri()
367             self.failUnless(s.startswith("URI:MDMF"))
368             n2 = self.nodemaker.create_from_cap(s)
369             self.failUnless(isinstance(n2, MutableFileNode))
370             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
371             self.failUnlessEqual(n.get_uri(), n2.get_uri())
372         d.addCallback(_created)
373         return d
374
375
    def test_create_from_mdmf_writecap_with_extensions(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode when given a writecap with extension parameters in
        # them.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_uri()
            # ":3:131073" is the cap's extension suffix (k=3,
            # segsize=131073). NOTE(review): an earlier comment here talked
            # about deleting the nodemaker's cache, but nothing in this test
            # does that -- create_from_cap may hand back a cached node.
            self.failUnlessIn(":3:131073", s)
            n2 = self.nodemaker.create_from_cap(s)

            self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
            self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
            # the downloader hints must be parsed out of the extension
            hints = n2._downloader_hints
            self.failUnlessEqual(hints['k'], 3)
            self.failUnlessEqual(hints['segsize'], 131073)
        d.addCallback(_created)
        return d
396
397
398     def test_create_from_mdmf_readcap(self):
399         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
400         def _created(n):
401             self.failUnless(isinstance(n, MutableFileNode))
402             s = n.get_readonly_uri()
403             n2 = self.nodemaker.create_from_cap(s)
404             self.failUnless(isinstance(n2, MutableFileNode))
405
406             # Check that it's a readonly node
407             self.failUnless(n2.is_readonly())
408         d.addCallback(_created)
409         return d
410
411
412     def test_create_from_mdmf_readcap_with_extensions(self):
413         # We should be able to create an MDMF filenode with the
414         # extension parameters without it breaking.
415         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
416         def _created(n):
417             self.failUnless(isinstance(n, MutableFileNode))
418             s = n.get_readonly_uri()
419             self.failUnlessIn(":3:131073", s)
420
421             n2 = self.nodemaker.create_from_cap(s)
422             self.failUnless(isinstance(n2, MutableFileNode))
423             self.failUnless(n2.is_readonly())
424             self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
425             hints = n2._downloader_hints
426             self.failUnlessEqual(hints["k"], 3)
427             self.failUnlessEqual(hints["segsize"], 131073)
428         d.addCallback(_created)
429         return d
430
431
432     def test_internal_version_from_cap(self):
433         # MutableFileNodes and MutableFileVersions have an internal
434         # switch that tells them whether they're dealing with an SDMF or
435         # MDMF mutable file when they start doing stuff. We want to make
436         # sure that this is set appropriately given an MDMF cap.
437         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
438         def _created(n):
439             self.uri = n.get_uri()
440             self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
441
442             n2 = self.nodemaker.create_from_cap(self.uri)
443             self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
444         d.addCallback(_created)
445         return d
446
447
    def test_serialize(self):
        # _do_serialized must invoke the callable with the given
        # args/kwargs, deliver its return value through the Deferred, and
        # convert a raised exception into a Failure.
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        calls = []
        def _callback(*args, **kwargs):
            # record the invocation so we can prove it ran exactly once
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
            calls.append(1)
            return 6
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)

        def _errback():
            raise ValueError("heya")
        # the raised ValueError must surface as a Failure from _do_serialized
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        return d
468
    def test_upload_and_download(self):
        # Exercise the full SDMF round trip: servermap queries, repeated
        # overwrites, an upload against an explicit servermap, and a
        # version-specific download.
        d = self.nodemaker.create_mutable_file()
        def _created(n):
            d = defer.succeed(None)
            # a MODE_READ servermap dump should mention the 3-of-10 encoding
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            # overwrite() fires with None on success
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            # upload() against a MODE_WRITE servermap is the lower-level
            # interface that overwrite() builds on
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # download a specific (the best recoverable) version
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
            return d
        d.addCallback(_created)
        return d
507
508
    def test_upload_and_download_mdmf(self):
        # MDMF round trip with contents large enough to force
        # multi-segment upload and download paths.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                # the servermap dump should mention the 3-of-10 encoding
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            d.addCallback(_then)
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            # multiple segments.
            bigger_contents = "contents2" * 1000000 # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
            return d
        d.addCallback(_created)
        return d
545
546
547     def test_retrieve_pause(self):
548         # We should make sure that the retriever is able to pause
549         # correctly.
550         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
551         def _created(node):
552             self.node = node
553
554             return node.overwrite(MutableData("contents1" * 100000))
555         d.addCallback(_created)
556         # Now we'll retrieve it into a pausing consumer.
557         d.addCallback(lambda ignored:
558             self.node.get_best_mutable_version())
559         def _got_version(version):
560             self.c = PausingConsumer()
561             return version.read(self.c)
562         d.addCallback(_got_version)
563         d.addCallback(lambda ignored:
564             self.failUnlessEqual(self.c.data, "contents1" * 100000))
565         return d
566
567
568     def test_download_from_mdmf_cap(self):
569         # We should be able to download an MDMF file given its cap
570         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
571         def _created(node):
572             self.uri = node.get_uri()
573
574             return node.overwrite(MutableData("contents1" * 100000))
575         def _then(ignored):
576             node = self.nodemaker.create_from_cap(self.uri)
577             return node.download_best_version()
578         def _downloaded(data):
579             self.failUnlessEqual(data, "contents1" * 100000)
580         d.addCallback(_created)
581         d.addCallback(_then)
582         d.addCallback(_downloaded)
583         return d
584
585
586     def test_create_and_download_from_bare_mdmf_cap(self):
587         # MDMF caps have extension parameters on them by default. We
588         # need to make sure that they work without extension parameters.
589         contents = MutableData("contents" * 100000)
590         d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
591                                                contents=contents)
592         def _created(node):
593             uri = node.get_uri()
594             self._created = node
595             self.failUnlessIn(":3:131073", uri)
596             # Now strip that off the end of the uri, then try creating
597             # and downloading the node again.
598             bare_uri = uri.replace(":3:131073", "")
599             assert ":3:131073" not in bare_uri
600
601             return self.nodemaker.create_from_cap(bare_uri)
602         d.addCallback(_created)
603         def _created_bare(node):
604             self.failUnlessEqual(node.get_writekey(),
605                                  self._created.get_writekey())
606             self.failUnlessEqual(node.get_readkey(),
607                                  self._created.get_readkey())
608             self.failUnlessEqual(node.get_storage_index(),
609                                  self._created.get_storage_index())
610             return node.download_best_version()
611         d.addCallback(_created_bare)
612         d.addCallback(lambda data:
613             self.failUnlessEqual(data, "contents" * 100000))
614         return d
615
616
617     def test_mdmf_write_count(self):
618         # Publishing an MDMF file should only cause one write for each
619         # share that is to be published. Otherwise, we introduce
620         # undesirable semantics that are a regression from SDMF
621         upload = MutableData("MDMF" * 100000) # about 400 KiB
622         d = self.nodemaker.create_mutable_file(upload,
623                                                version=MDMF_VERSION)
624         def _check_server_write_counts(ignored):
625             sb = self.nodemaker.storage_broker
626             for server in sb.servers.itervalues():
627                 self.failUnlessEqual(server.get_rref().queries, 1)
628         d.addCallback(_check_server_write_counts)
629         return d
630
631
632     def test_create_with_initial_contents(self):
633         upload1 = MutableData("contents 1")
634         d = self.nodemaker.create_mutable_file(upload1)
635         def _created(n):
636             d = n.download_best_version()
637             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
638             upload2 = MutableData("contents 2")
639             d.addCallback(lambda res: n.overwrite(upload2))
640             d.addCallback(lambda res: n.download_best_version())
641             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
642             return d
643         d.addCallback(_created)
644         return d
645
646
647     def test_create_mdmf_with_initial_contents(self):
648         initial_contents = "foobarbaz" * 131072 # 900KiB
649         initial_contents_uploadable = MutableData(initial_contents)
650         d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
651                                                version=MDMF_VERSION)
652         def _created(n):
653             d = n.download_best_version()
654             d.addCallback(lambda data:
655                 self.failUnlessEqual(data, initial_contents))
656             uploadable2 = MutableData(initial_contents + "foobarbaz")
657             d.addCallback(lambda ignored:
658                 n.overwrite(uploadable2))
659             d.addCallback(lambda ignored:
660                 n.download_best_version())
661             d.addCallback(lambda data:
662                 self.failUnlessEqual(data, initial_contents +
663                                            "foobarbaz"))
664             return d
665         d.addCallback(_created)
666         return d
667
668
    def test_response_cache_memory_leak(self):
        # Downloading twice must not grow the node's ResponseCache: the
        # second download should leave the cache contents identical.
        d = self.nodemaker.create_mutable_file("contents")
        def _created(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
                return d2
            # snapshot the cache (via repr) after the first download, then
            # compare against it after the second
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
            return d
        d.addCallback(_created)
        return d
686
687     def test_create_with_initial_contents_function(self):
688         data = "initial contents"
689         def _make_contents(n):
690             self.failUnless(isinstance(n, MutableFileNode))
691             key = n.get_writekey()
692             self.failUnless(isinstance(key, str), key)
693             self.failUnlessEqual(len(key), 16) # AES key size
694             return MutableData(data)
695         d = self.nodemaker.create_mutable_file(_make_contents)
696         def _created(n):
697             return n.download_best_version()
698         d.addCallback(_created)
699         d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
700         return d
701
702
703     def test_create_mdmf_with_initial_contents_function(self):
704         data = "initial contents" * 100000
705         def _make_contents(n):
706             self.failUnless(isinstance(n, MutableFileNode))
707             key = n.get_writekey()
708             self.failUnless(isinstance(key, str), key)
709             self.failUnlessEqual(len(key), 16)
710             return MutableData(data)
711         d = self.nodemaker.create_mutable_file(_make_contents,
712                                                version=MDMF_VERSION)
713         d.addCallback(lambda n:
714             n.download_best_version())
715         d.addCallback(lambda data2:
716             self.failUnlessEqual(data2, data))
717         return d
718
719
720     def test_create_with_too_large_contents(self):
721         BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
722         BIG_uploadable = MutableData(BIG)
723         d = self.nodemaker.create_mutable_file(BIG_uploadable)
724         def _created(n):
725             other_BIG_uploadable = MutableData(BIG)
726             d = n.overwrite(other_BIG_uploadable)
727             return d
728         d.addCallback(_created)
729         return d
730
731     def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
732         d = n.get_servermap(MODE_READ)
733         d.addCallback(lambda servermap: servermap.best_recoverable_version())
734         d.addCallback(lambda verinfo:
735                       self.failUnlessEqual(verinfo[0], expected_seqnum, which))
736         return d
737
    def test_modify(self):
        # Exercise MutableFileNode.modify() with a variety of modifier
        # callbacks: one that appends data, ones that leave the contents
        # alone (by returning the old string, or None), one that raises,
        # two that simulate an UncoordinatedWriteError on the first
        # attempt, and one that returns contents larger than the old
        # SDMF segment-size limit (expected to succeed now).
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            return new_contents
        def _non_modifier(old_contents, servermap, first_time):
            # returning the old contents unchanged means "no change"
            return old_contents
        def _none_modifier(old_contents, servermap, first_time):
            # returning None also means "no change"
            return None
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            # bigger than the old SDMF limit
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            return new_content
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            return new_contents
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        def _created(n):
            # a real modification bumps the seqnum from 1 to 2
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # no-op modifiers leave both contents and seqnum untouched
            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            # a modifier that raises leaves the file untouched
            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))


            # contents and seqnum are still unchanged at this point
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            # a simulated UCWE makes modify() retry: the modifier runs
            # twice, and the second (successful) pass bumps the seqnum
            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            # instead of 5).
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            # an over-the-old-limit write is expected to succeed
            d.addCallback(lambda res: n.modify(_toobig_modifier))
            return d
        d.addCallback(_created)
        return d
825
826
    def test_modify_backoffer(self):
        # Exercise modify() with custom backoffer callbacks, which decide
        # whether/how to retry after an UncoordinatedWriteError.
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        calls = []
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            calls.append(1)
            if len(calls) <= 1:
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError on every attempt
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            # a backoffer that gives up immediately by returning the failure
            return f
        def _backoff_pauser(node, f):
            # a backoffer that waits 0.5s and then allows a retry
            d = defer.Deferred()
            reactor.callLater(0.5, d.callback, None)
            return d

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        giveuper.factor = 1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        def _created(n):
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            # the stopper propagates the UCWE, so the file stays unchanged
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
                                          _backoff_stopper))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
                calls[:] = []
                return res
            d.addCallback(_reset_ucw_error_modifier)
            # the pauser retries after its delay, so the second attempt wins
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                               _backoff_pauser))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            # the give-up-er exhausts its retries against a persistent UCWE,
            # so the error propagates and the file stays unchanged
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "giveuper", None,
                                          n.modify, _always_ucw_error_modifier,
                                          giveuper.delay))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
                                                           "line1line2line3"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

            return d
        d.addCallback(_created)
        return d
891
892     def test_upload_and_download_full_size_keys(self):
893         self.nodemaker.key_generator = client.KeyGenerator()
894         d = self.nodemaker.create_mutable_file()
895         def _created(n):
896             d = defer.succeed(None)
897             d.addCallback(lambda res: n.get_servermap(MODE_READ))
898             d.addCallback(lambda smap: smap.dump(StringIO()))
899             d.addCallback(lambda sio:
900                           self.failUnless("3-of-10" in sio.getvalue()))
901             d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
902             d.addCallback(lambda res: self.failUnlessIdentical(res, None))
903             d.addCallback(lambda res: n.download_best_version())
904             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
905             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
906             d.addCallback(lambda res: n.download_best_version())
907             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
908             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
909             d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
910             d.addCallback(lambda res: n.download_best_version())
911             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
912             d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
913             d.addCallback(lambda smap:
914                           n.download_version(smap,
915                                              smap.best_recoverable_version()))
916             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
917             return d
918         d.addCallback(_created)
919         return d
920
921
922     def test_size_after_servermap_update(self):
923         # a mutable file node should have something to say about how big
924         # it is after a servermap update is performed, since this tells
925         # us how large the best version of that mutable file is.
926         d = self.nodemaker.create_mutable_file()
927         def _created(n):
928             self.n = n
929             return n.get_servermap(MODE_READ)
930         d.addCallback(_created)
931         d.addCallback(lambda ignored:
932             self.failUnlessEqual(self.n.get_size(), 0))
933         d.addCallback(lambda ignored:
934             self.n.overwrite(MutableData("foobarbaz")))
935         d.addCallback(lambda ignored:
936             self.failUnlessEqual(self.n.get_size(), 9))
937         d.addCallback(lambda ignored:
938             self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
939         d.addCallback(_created)
940         d.addCallback(lambda ignored:
941             self.failUnlessEqual(self.n.get_size(), 9))
942         return d
943
944
class PublishMixin:
    """Mixin that publishes mutable files into a fresh FakeStorage-backed
    grid so tests can examine and manipulate the resulting shares.

    Each publish_* method sets: self.CONTENTS, self.uploadable,
    self._storage, self._nodemaker, self._storage_broker, self._fn (the
    created filenode), and self._fn2 (a second filenode built from the
    same cap).
    """

    def _publish(self, contents, version=None):
        # Shared path for publish_one/publish_mdmf/publish_sdmf: build a
        # fresh fake grid, upload 'contents' as a new mutable file (with
        # the given version, or the nodemaker's default when None), and
        # record the node plus a sibling node for later use.
        self.CONTENTS = contents
        self.uploadable = MutableData(contents)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        if version is None:
            d = self._nodemaker.create_mutable_file(self.uploadable)
        else:
            d = self._nodemaker.create_mutable_file(self.uploadable,
                                                    version=version)
        def _created(node):
            self._fn = node
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        return d

    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.
        return self._publish("New contents go here" * 1000)

    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file. The contents are big enough to span more than
        # one segment.
        return self._publish("This is an MDMF file" * 100000,
                             version=MDMF_VERSION)

    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file
        return self._publish("This is an SDMF file" * 1000,
                             version=SDMF_VERSION)

    def publish_multiple(self, version=0):
        """Publish five successive versions of one file, stashing a copy
        of the shares after each publish so tests can mix and match
        versions later via _set_versions(). Storage is left at state 4."""
        self.CONTENTS = ["Contents 0",
                         "Contents 1",
                         "Contents 2",
                         "Contents 3a",
                         "Contents 3b"]
        self.uploadables = [MutableData(d) for d in self.CONTENTS]
        self._copied_shares = {}
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
        def _created(node):
            self._fn = node
            # now create multiple versions of the same file, and accumulate
            # their shares, so we can mix and match them later.
            d = defer.succeed(None)
            d.addCallback(self._copy_shares, 0)
            d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
            d.addCallback(self._copy_shares, 1)
            d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
            d.addCallback(self._copy_shares, 2)
            d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
            d.addCallback(self._copy_shares, 3)
            # now we replace all the shares with version s3, and upload a new
            # version to get s4b.
            rollback = dict([(i,2) for i in range(10)])
            d.addCallback(lambda res: self._set_versions(rollback))
            d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
            d.addCallback(self._copy_shares, 4)
            # we leave the storage in state 4
            return d
        d.addCallback(_created)
        return d

    def _copy_shares(self, ignored, index):
        # Snapshot the current {peerid: {shnum: share}} dictionaries into
        # self._copied_shares[index]. Copies two levels deep; the share
        # values themselves are not copied.
        shares = self._storage._peers
        new_shares = {}
        for peerid in shares:
            new_shares[peerid] = {}
            for shnum in shares[peerid]:
                new_shares[peerid][shnum] = shares[peerid][shnum]
        self._copied_shares[index] = new_shares

    def _set_versions(self, versionmap):
        # versionmap maps shnums to which version (0,1,2,3,4) we want the
        # share to be at. Any shnum which is left out of the map will stay at
        # its current version.
        shares = self._storage._peers
        oldshares = self._copied_shares
        for peerid in shares:
            for shnum in shares[peerid]:
                if shnum in versionmap:
                    index = versionmap[shnum]
                    shares[peerid][shnum] = oldshares[index][peerid][shnum]
1050
class PausingConsumer:
    """An IConsumer that pauses its producer after the first write and
    resumes it 15 seconds later, accumulating all data in self.data."""
    implements(IConsumer)

    def __init__(self):
        self.data = ""
        self.already_paused = False

    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def _unpause(self, ignored):
        self.producer.resumeProducing()

    def write(self, data):
        self.data += data
        if self.already_paused:
            return
        # pause exactly once, scheduling the resume for later
        self.producer.pauseProducing()
        self.already_paused = True
        reactor.callLater(15, self._unpause, None)
1073
1074
class Servermap(unittest.TestCase, PublishMixin):
    """Tests for ServermapUpdater: building and refreshing maps of which
    peers hold which shares of a published mutable file."""
    def setUp(self):
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                       update_range=None):
        # build a brand-new servermap for fn (default: the published file)
        if fn is None:
            fn = self._fn
        if sb is None:
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        d = smu.update()
        return d

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        # refresh an existing servermap in the given mode
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def failUnlessOneRecoverable(self, sm, num_shares):
        # assert the map shows exactly one recoverable version with
        # 'num_shares' shares found; returns the map so callers can chain
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        # an unknown shnum should map to no version
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        return sm

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        return d

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        return d


    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        def _made_map(sm):
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares it in any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, peerid, timestamp) in shares:
                if shnum < 5:
                    self._corrupted.add( (peerid, shnum) )
                    sm.mark_bad_share(peerid, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        def _check_map(sm):
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (peerid, shnum) in self._corrupted:
                peer_shares = sm.shares_on_peer(peerid)
                self.failIf(shnum in peer_shares,
                            "%d was in %s" % (shnum, peer_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        return d

    def failUnlessNoneRecoverable(self, sm):
        # assert the map shows no versions at all, recoverable or not
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        return d

    def failUnlessNotQuiteEnough(self, sm):
        # assert the map shows one unrecoverable version with only 2 of
        # the needed 3 shares; returns the map so callers can chain
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        return sm

    def test_not_quite_enough_shares(self):
        s = self._storage
        ms = self.make_servermap
        num_shares = len(s._peers)
        # empty out peers until only two shares remain
        for peerid in s._peers:
            s._peers[peerid] = {}
            num_shares -= 1
            if num_shares == 2:
                break
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))

        return d


    def test_servermapupdater_finds_mdmf_files(self):
        # publish an MDMF file, then make sure that when we run the
        # ServermapUpdater, the file is reported to have one recoverable
        # version.
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        # Calling make_servermap also updates the servermap in the mode
        # that we specify, so we just need to see what it says.
        def _check_servermap(sm):
            self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        d.addCallback(_check_servermap)
        return d


    def test_fetch_update(self):
        # an MDMF servermap built with an update_range should carry
        # per-share update data for that range
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
        def _check_servermap(sm):
            # 10 shares
            self.failUnlessEqual(len(sm.update_data), 10)
            # one version
            for data in sm.update_data.itervalues():
                self.failUnlessEqual(len(data), 1)
        d.addCallback(_check_servermap)
        return d


    def test_servermapupdater_finds_sdmf_files(self):
        # same as the MDMF case above, but for an SDMF file
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_sdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        d.addCallback(lambda servermap:
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
        return d
1301
1302
1303 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1304     def setUp(self):
1305         return self.publish_one()
1306
1307     def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1308         if oldmap is None:
1309             oldmap = ServerMap()
1310         if sb is None:
1311             sb = self._storage_broker
1312         smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1313         d = smu.update()
1314         return d
1315
1316     def abbrev_verinfo(self, verinfo):
1317         if verinfo is None:
1318             return None
1319         (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1320          offsets_tuple) = verinfo
1321         return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1322
1323     def abbrev_verinfo_dict(self, verinfo_d):
1324         output = {}
1325         for verinfo,value in verinfo_d.items():
1326             (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1327              offsets_tuple) = verinfo
1328             output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1329         return output
1330
    def dump_servermap(self, servermap):
        # debugging helper: print a human-readable summary of a servermap
        # (used only when uncommented inside a test)
        print "SERVERMAP", servermap
        print "RECOVERABLE", [self.abbrev_verinfo(v)
                              for v in servermap.recoverable_versions()]
        print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
        print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1337
1338     def do_download(self, servermap, version=None):
1339         if version is None:
1340             version = servermap.best_recoverable_version()
1341         r = Retrieve(self._fn, servermap, version)
1342         c = consumer.MemoryConsumer()
1343         d = r.download(consumer=c)
1344         d.addCallback(lambda mc: "".join(mc.chunks))
1345         return d
1346
1347
    def test_basic(self):
        # build a servermap, download through it, and make sure the map
        # can be re-used (with and without refreshing it)
        d = self.make_servermap()
        def _do_retrieve(servermap):
            self._smap = servermap
            #self.dump_servermap(servermap)
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
            return self.do_download(servermap)
        d.addCallback(_do_retrieve)
        def _retrieved(new_contents):
            self.failUnlessEqual(new_contents, self.CONTENTS)
        d.addCallback(_retrieved)
        # we should be able to re-use the same servermap, both with and
        # without updating it.
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        # clobbering the pubkey should make the servermap updater re-fetch it
        def _clobber_pubkey(res):
            self._fn._pubkey = None
        d.addCallback(_clobber_pubkey)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        return d
1374
1375     def test_all_shares_vanished(self):
1376         d = self.make_servermap()
1377         def _remove_shares(servermap):
1378             for shares in self._storage._peers.values():
1379                 shares.clear()
1380             d1 = self.shouldFail(NotEnoughSharesError,
1381                                  "test_all_shares_vanished",
1382                                  "ran out of peers",
1383                                  self.do_download, servermap)
1384             return d1
1385         d.addCallback(_remove_shares)
1386         return d
1387
1388     def test_no_servers(self):
1389         sb2 = make_storagebroker(num_peers=0)
1390         # if there are no servers, then a MODE_READ servermap should come
1391         # back empty
1392         d = self.make_servermap(sb=sb2)
1393         def _check_servermap(servermap):
1394             self.failUnlessEqual(servermap.best_recoverable_version(), None)
1395             self.failIf(servermap.recoverable_versions())
1396             self.failIf(servermap.unrecoverable_versions())
1397             self.failIf(servermap.all_peers())
1398         d.addCallback(_check_servermap)
1399         return d
1400
1401     def test_no_servers_download(self):
1402         sb2 = make_storagebroker(num_peers=0)
1403         self._fn._storage_broker = sb2
1404         d = self.shouldFail(UnrecoverableFileError,
1405                             "test_no_servers_download",
1406                             "no recoverable versions",
1407                             self._fn.download_best_version)
1408         def _restore(res):
1409             # a failed download that occurs while we aren't connected to
1410             # anybody should not prevent a subsequent download from working.
1411             # This isn't quite the webapi-driven test that #463 wants, but it
1412             # should be close enough.
1413             self._fn._storage_broker = self._storage_broker
1414             return self._fn.download_best_version()
1415         def _retrieved(new_contents):
1416             self.failUnlessEqual(new_contents, self.CONTENTS)
1417         d.addCallback(_restore)
1418         d.addCallback(_retrieved)
1419         return d
1420
1421
    def _test_corrupt_all(self, offset, substring,
                          should_succeed=False,
                          corrupt_early=True,
                          failure_checker=None,
                          fetch_privkey=False):
        """Corrupt every share via the corrupt() helper, then run a
        mapupdate and (when possible) a download.

        'offset' is passed straight to corrupt(); callers use integer
        offsets, field names like "pubkey", or (field, offset) tuples.
        If corrupt_early is True, corruption happens before the servermap
        update; otherwise between mapupdate and retrieve. When the file is
        unrecoverable (and failure was expected), 'substring' must appear in
        the servermap's problem list; when a download is attempted and should
        fail, 'substring' must appear in the NotEnoughSharesError message.
        failure_checker, if given, is called on the failure result.
        fetch_privkey is forwarded to download_version(). Returns a Deferred
        that fires with the servermap."""
        d = defer.succeed(None)
        if corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        d.addCallback(lambda res: self.make_servermap())
        if not corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        def _do_retrieve(servermap):
            ver = servermap.best_recoverable_version()
            if ver is None and not should_succeed:
                # no recoverable versions == not succeeding. The problem
                # should be noted in the servermap's list of problems.
                if substring:
                    allproblems = [str(f) for f in servermap.problems]
                    self.failUnlessIn(substring, "".join(allproblems))
                return servermap
            if should_succeed:
                d1 = self._fn.download_version(servermap, ver,
                                               fetch_privkey)
                d1.addCallback(lambda new_contents:
                               self.failUnlessEqual(new_contents, self.CONTENTS))
            else:
                d1 = self.shouldFail(NotEnoughSharesError,
                                     "_corrupt_all(offset=%s)" % (offset,),
                                     substring,
                                     self._fn.download_version, servermap,
                                                                ver,
                                                                fetch_privkey)
            if failure_checker:
                d1.addCallback(failure_checker)
            d1.addCallback(lambda res: servermap)
            return d1
        d.addCallback(_do_retrieve)
        return d
1460
1461     def test_corrupt_all_verbyte(self):
1462         # when the version byte is not 0 or 1, we hit an UnknownVersionError
1463         # error in unpack_share().
1464         d = self._test_corrupt_all(0, "UnknownVersionError")
1465         def _check_servermap(servermap):
1466             # and the dump should mention the problems
1467             s = StringIO()
1468             dump = servermap.dump(s).getvalue()
1469             self.failUnless("30 PROBLEMS" in dump, dump)
1470         d.addCallback(_check_servermap)
1471         return d
1472
1473     def test_corrupt_all_seqnum(self):
1474         # a corrupt sequence number will trigger a bad signature
1475         return self._test_corrupt_all(1, "signature is invalid")
1476
1477     def test_corrupt_all_R(self):
1478         # a corrupt root hash will trigger a bad signature
1479         return self._test_corrupt_all(9, "signature is invalid")
1480
1481     def test_corrupt_all_IV(self):
1482         # a corrupt salt/IV will trigger a bad signature
1483         return self._test_corrupt_all(41, "signature is invalid")
1484
1485     def test_corrupt_all_k(self):
1486         # a corrupt 'k' will trigger a bad signature
1487         return self._test_corrupt_all(57, "signature is invalid")
1488
1489     def test_corrupt_all_N(self):
1490         # a corrupt 'N' will trigger a bad signature
1491         return self._test_corrupt_all(58, "signature is invalid")
1492
1493     def test_corrupt_all_segsize(self):
1494         # a corrupt segsize will trigger a bad signature
1495         return self._test_corrupt_all(59, "signature is invalid")
1496
1497     def test_corrupt_all_datalen(self):
1498         # a corrupt data length will trigger a bad signature
1499         return self._test_corrupt_all(67, "signature is invalid")
1500
1501     def test_corrupt_all_pubkey(self):
1502         # a corrupt pubkey won't match the URI's fingerprint. We need to
1503         # remove the pubkey from the filenode, or else it won't bother trying
1504         # to update it.
1505         self._fn._pubkey = None
1506         return self._test_corrupt_all("pubkey",
1507                                       "pubkey doesn't match fingerprint")
1508
1509     def test_corrupt_all_sig(self):
1510         # a corrupt signature is a bad one
1511         # the signature runs from about [543:799], depending upon the length
1512         # of the pubkey
1513         return self._test_corrupt_all("signature", "signature is invalid")
1514
1515     def test_corrupt_all_share_hash_chain_number(self):
1516         # a corrupt share hash chain entry will show up as a bad hash. If we
1517         # mangle the first byte, that will look like a bad hash number,
1518         # causing an IndexError
1519         return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1520
1521     def test_corrupt_all_share_hash_chain_hash(self):
1522         # a corrupt share hash chain entry will show up as a bad hash. If we
1523         # mangle a few bytes in, that will look like a bad hash.
1524         return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1525
1526     def test_corrupt_all_block_hash_tree(self):
1527         return self._test_corrupt_all("block_hash_tree",
1528                                       "block hash tree failure")
1529
1530     def test_corrupt_all_block(self):
1531         return self._test_corrupt_all("share_data", "block hash tree failure")
1532
1533     def test_corrupt_all_encprivkey(self):
1534         # a corrupted privkey won't even be noticed by the reader, only by a
1535         # writer.
1536         return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1537
1538
1539     def test_corrupt_all_encprivkey_late(self):
1540         # this should work for the same reason as above, but we corrupt 
1541         # after the servermap update to exercise the error handling
1542         # code.
1543         # We need to remove the privkey from the node, or the retrieve
1544         # process won't know to update it.
1545         self._fn._privkey = None
1546         return self._test_corrupt_all("enc_privkey",
1547                                       None, # this shouldn't fail
1548                                       should_succeed=True,
1549                                       corrupt_early=False,
1550                                       fetch_privkey=True)
1551
1552
1553     # disabled until retrieve tests checkstring on each blockfetch. I didn't
1554     # just use a .todo because the failing-but-ignored test emits about 30kB
1555     # of noise.
1556     def OFF_test_corrupt_all_seqnum_late(self):
1557         # corrupting the seqnum between mapupdate and retrieve should result
1558         # in NotEnoughSharesError, since each share will look invalid
1559         def _check(res):
1560             f = res[0]
1561             self.failUnless(f.check(NotEnoughSharesError))
1562             self.failUnless("uncoordinated write" in str(f))
1563         return self._test_corrupt_all(1, "ran out of peers",
1564                                       corrupt_early=False,
1565                                       failure_checker=_check)
1566
1567     def test_corrupt_all_block_hash_tree_late(self):
1568         def _check(res):
1569             f = res[0]
1570             self.failUnless(f.check(NotEnoughSharesError))
1571         return self._test_corrupt_all("block_hash_tree",
1572                                       "block hash tree failure",
1573                                       corrupt_early=False,
1574                                       failure_checker=_check)
1575
1576
1577     def test_corrupt_all_block_late(self):
1578         def _check(res):
1579             f = res[0]
1580             self.failUnless(f.check(NotEnoughSharesError))
1581         return self._test_corrupt_all("share_data", "block hash tree failure",
1582                                       corrupt_early=False,
1583                                       failure_checker=_check)
1584
1585
1586     def test_basic_pubkey_at_end(self):
1587         # we corrupt the pubkey in all but the last 'k' shares, allowing the
1588         # download to succeed but forcing a bunch of retries first. Note that
1589         # this is rather pessimistic: our Retrieve process will throw away
1590         # the whole share if the pubkey is bad, even though the rest of the
1591         # share might be good.
1592
1593         self._fn._pubkey = None
1594         k = self._fn.get_required_shares()
1595         N = self._fn.get_total_shares()
1596         d = defer.succeed(None)
1597         d.addCallback(corrupt, self._storage, "pubkey",
1598                       shnums_to_corrupt=range(0, N-k))
1599         d.addCallback(lambda res: self.make_servermap())
1600         def _do_retrieve(servermap):
1601             self.failUnless(servermap.problems)
1602             self.failUnless("pubkey doesn't match fingerprint"
1603                             in str(servermap.problems[0]))
1604             ver = servermap.best_recoverable_version()
1605             r = Retrieve(self._fn, servermap, ver)
1606             c = consumer.MemoryConsumer()
1607             return r.download(c)
1608         d.addCallback(_do_retrieve)
1609         d.addCallback(lambda mc: "".join(mc.chunks))
1610         d.addCallback(lambda new_contents:
1611                       self.failUnlessEqual(new_contents, self.CONTENTS))
1612         return d
1613
1614
1615     def _test_corrupt_some(self, offset, mdmf=False):
1616         if mdmf:
1617             d = self.publish_mdmf()
1618         else:
1619             d = defer.succeed(None)
1620         d.addCallback(lambda ignored:
1621             corrupt(None, self._storage, offset, range(5)))
1622         d.addCallback(lambda ignored:
1623             self.make_servermap())
1624         def _do_retrieve(servermap):
1625             ver = servermap.best_recoverable_version()
1626             self.failUnless(ver)
1627             return self._fn.download_best_version()
1628         d.addCallback(_do_retrieve)
1629         d.addCallback(lambda new_contents:
1630             self.failUnlessEqual(new_contents, self.CONTENTS))
1631         return d
1632
1633
1634     def test_corrupt_some(self):
1635         # corrupt the data of first five shares (so the servermap thinks
1636         # they're good but retrieve marks them as bad), so that the
1637         # MODE_READ set of 6 will be insufficient, forcing node.download to
1638         # retry with more servers.
1639         return self._test_corrupt_some("share_data")
1640
1641
1642     def test_download_fails(self):
1643         d = corrupt(None, self._storage, "signature")
1644         d.addCallback(lambda ignored:
1645             self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1646                             "no recoverable versions",
1647                             self._fn.download_best_version))
1648         return d
1649
1650
1651
1652     def test_corrupt_mdmf_block_hash_tree(self):
1653         d = self.publish_mdmf()
1654         d.addCallback(lambda ignored:
1655             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1656                                    "block hash tree failure",
1657                                    corrupt_early=False,
1658                                    should_succeed=False))
1659         return d
1660
1661
1662     def test_corrupt_mdmf_block_hash_tree_late(self):
1663         d = self.publish_mdmf()
1664         d.addCallback(lambda ignored:
1665             self._test_corrupt_all(("block_hash_tree", 12 * 32),
1666                                    "block hash tree failure",
1667                                    corrupt_early=True,
1668                                    should_succeed=False))
1669         return d
1670
1671
1672     def test_corrupt_mdmf_share_data(self):
1673         d = self.publish_mdmf()
1674         d.addCallback(lambda ignored:
1675             # TODO: Find out what the block size is and corrupt a
1676             # specific block, rather than just guessing.
1677             self._test_corrupt_all(("share_data", 12 * 40),
1678                                     "block hash tree failure",
1679                                     corrupt_early=True,
1680                                     should_succeed=False))
1681         return d
1682
1683
1684     def test_corrupt_some_mdmf(self):
1685         return self._test_corrupt_some(("share_data", 12 * 40),
1686                                        mdmf=True)
1687
1688
class CheckerMixin:
    """Assertion helpers shared by the checker/verifier test cases."""

    def check_good(self, r, where):
        # the check results must report a healthy file
        self.failUnless(r.is_healthy(), where)
        return r

    def check_bad(self, r, where):
        # the check results must report an unhealthy file
        self.failIf(r.is_healthy(), where)
        return r

    def check_expected_failure(self, r, expected_exception, substring, where):
        # scan r.problems for the expected exception type, and insist that
        # its stringification contains the given substring
        for (peerid, storage_index, shnum, f) in r.problems:
            if not f.check(expected_exception):
                continue
            self.failUnless(substring in str(f),
                            "%s: substring '%s' not in '%s'" %
                            (where, substring, str(f)))
            return
        self.fail("%s: didn't see expected exception %s in problems %s" %
                  (where, expected_exception, r.problems))
1707
1708
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    """Exercises filenode.check() (share-presence checking) and
    check(verify=True) (full share verification), against both the SDMF
    file published by setUp and MDMF files published on demand."""

    def setUp(self):
        # every test starts with one freshly-published SDMF file
        return self.publish_one()


    def test_check_good(self):
        d = self._fn.check(Monitor())
        d.addCallback(self.check_good, "test_check_good")
        return d

    def test_check_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_good")
        return d

    def test_check_no_shares(self):
        # with every share deleted, the checker must report ill health
        for shares in self._storage._peers.values():
            shares.clear()
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_no_shares")
        return d

    def test_check_mdmf_no_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            # renamed local: each value is a dict of shares, not one share
            for shares in self._storage._peers.values():
                shares.clear()
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        return d

    def test_check_not_enough_shares(self):
        # delete every share except shnum 0; too few remain for recovery
        for shares in self._storage._peers.values():
            for shnum in shares.keys():
                if shnum > 0:
                    del shares[shnum]
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_not_enough_shares")
        return d

    def test_check_mdmf_not_enough_shares(self):
        d = self.publish_mdmf()
        def _then(ignored):
            for shares in self._storage._peers.values():
                for shnum in shares.keys():
                    if shnum > 0:
                        del shares[shnum]
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        # fixed typo in the 'where' label (was "...not_enougH_shares")
        d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
        return d


    def test_check_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_all_bad_sig")
        return d

    def test_check_mdmf_all_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
        return d

    def test_check_all_bad_blocks(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Checker won't notice this.. it doesn't look at actual data
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_all_bad_blocks")
        return d


    def test_check_mdmf_all_bad_blocks(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data"))
        # like the SDMF case above, a plain check doesn't read block data
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
        return d

    def test_verify_good(self):
        d = self._fn.check(Monitor(), verify=True)
        d.addCallback(self.check_good, "test_verify_good")
        return d

    def test_verify_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_all_bad_sig")
        return d

    def test_verify_one_bad_sig(self):
        d = corrupt(None, self._storage, 1, [9]) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sig")
        return d

    def test_verify_one_bad_block(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Verifier *will* notice this, since it examines every byte
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_one_bad_block")
        return d

    def test_verify_one_bad_sharehash(self):
        d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "corrupt hashes",
                      "test_verify_one_bad_sharehash")
        return d

    def test_verify_one_bad_encprivkey(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "invalid privkey",
                      "test_verify_one_bad_encprivkey")
        return d

    def test_verify_one_bad_encprivkey_uncheckable(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        readonly_fn = self._fn.get_readonly()
        # a read-only node has no way to validate the privkey
        d.addCallback(lambda ignored:
            readonly_fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_one_bad_encprivkey_uncheckable")
        return d


    def test_verify_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf_good")
        return d


    def test_verify_mdmf_one_bad_block(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data", [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        # We should find one bad block here
        d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_mdmf_one_bad_block")
        return d


    def test_verify_mdmf_bad_encprivkey(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [0]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "privkey",
                      "test_verify_mdmf_bad_encprivkey")
        return d


    def test_verify_mdmf_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1, [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
        return d


    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [1]))
        # a read-only node can't validate the privkey, so this must pass
        d.addCallback(lambda ignored:
            self._fn.get_readonly())
        d.addCallback(lambda fn:
            fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
        return d
1918
1919
1920 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1921
1922     def get_shares(self, s):
1923         all_shares = {} # maps (peerid, shnum) to share data
1924         for peerid in s._peers:
1925             shares = s._peers[peerid]
1926             for shnum in shares:
1927                 data = shares[shnum]
1928                 all_shares[ (peerid, shnum) ] = data
1929         return all_shares
1930
1931     def copy_shares(self, ignored=None):
1932         self.old_shares.append(self.get_shares(self._storage))
1933
    def test_repair_nop(self):
        """Repairing a healthy file must report success and leave every
        share in the same place, republished at seqnum+1 (the current
        repairer re-publishes rather than copying shares verbatim)."""
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results

            self.copy_shares()

            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)

            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                (version0,
                 seqnum0,
                 root_hash0,
                 IV0,
                 k0, N0, segsize0, datalen0,
                 o0) = unpack_header(initial_shares[key])
                (version1,
                 seqnum1,
                 root_hash1,
                 IV1,
                 k1, N1, segsize1, datalen1,
                 o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)
        return d
1979
1980     def failIfSharesChanged(self, ignored=None):
1981         old_shares = self.old_shares[-2]
1982         current_shares = self.old_shares[-1]
1983         self.failUnlessEqual(old_shares, current_shares)
1984
1985
1986     def test_unrepairable_0shares(self):
1987         d = self.publish_one()
1988         def _delete_all_shares(ign):
1989             shares = self._storage._peers
1990             for peerid in shares:
1991                 shares[peerid] = {}
1992         d.addCallback(_delete_all_shares)
1993         d.addCallback(lambda ign: self._fn.check(Monitor()))
1994         d.addCallback(lambda check_results: self._fn.repair(check_results))
1995         def _check(crr):
1996             self.failUnlessEqual(crr.get_successful(), False)
1997         d.addCallback(_check)
1998         return d
1999
2000     def test_mdmf_unrepairable_0shares(self):
2001         d = self.publish_mdmf()
2002         def _delete_all_shares(ign):
2003             shares = self._storage._peers
2004             for peerid in shares:
2005                 shares[peerid] = {}
2006         d.addCallback(_delete_all_shares)
2007         d.addCallback(lambda ign: self._fn.check(Monitor()))
2008         d.addCallback(lambda check_results: self._fn.repair(check_results))
2009         d.addCallback(lambda crr: self.failIf(crr.get_successful()))
2010         return d
2011
2012
2013     def test_unrepairable_1share(self):
2014         d = self.publish_one()
2015         def _delete_all_shares(ign):
2016             shares = self._storage._peers
2017             for peerid in shares:
2018                 for shnum in list(shares[peerid]):
2019                     if shnum > 0:
2020                         del shares[peerid][shnum]
2021         d.addCallback(_delete_all_shares)
2022         d.addCallback(lambda ign: self._fn.check(Monitor()))
2023         d.addCallback(lambda check_results: self._fn.repair(check_results))
2024         def _check(crr):
2025             self.failUnlessEqual(crr.get_successful(), False)
2026         d.addCallback(_check)
2027         return d
2028
2029     def test_mdmf_unrepairable_1share(self):
2030         d = self.publish_mdmf()
2031         def _delete_all_shares(ign):
2032             shares = self._storage._peers
2033             for peerid in shares:
2034                 for shnum in list(shares[peerid]):
2035                     if shnum > 0:
2036                         del shares[peerid][shnum]
2037         d.addCallback(_delete_all_shares)
2038         d.addCallback(lambda ign: self._fn.check(Monitor()))
2039         d.addCallback(lambda check_results: self._fn.repair(check_results))
2040         def _check(crr):
2041             self.failUnlessEqual(crr.get_successful(), False)
2042         d.addCallback(_check)
2043         return d
2044
2045     def test_repairable_5shares(self):
2046         d = self.publish_mdmf()
2047         def _delete_all_shares(ign):
2048             shares = self._storage._peers
2049             for peerid in shares:
2050                 for shnum in list(shares[peerid]):
2051                     if shnum > 4:
2052                         del shares[peerid][shnum]
2053         d.addCallback(_delete_all_shares)
2054         d.addCallback(lambda ign: self._fn.check(Monitor()))
2055         d.addCallback(lambda check_results: self._fn.repair(check_results))
2056         def _check(crr):
2057             self.failUnlessEqual(crr.get_successful(), True)
2058         d.addCallback(_check)
2059         return d
2060
2061     def test_mdmf_repairable_5shares(self):
2062         d = self.publish_mdmf()
2063         def _delete_some_shares(ign):
2064             shares = self._storage._peers
2065             for peerid in shares:
2066                 for shnum in list(shares[peerid]):
2067                     if shnum > 5:
2068                         del shares[peerid][shnum]
2069         d.addCallback(_delete_some_shares)
2070         d.addCallback(lambda ign: self._fn.check(Monitor()))
2071         def _check(cr):
2072             self.failIf(cr.is_healthy())
2073             self.failUnless(cr.is_recoverable())
2074             return cr
2075         d.addCallback(_check)
2076         d.addCallback(lambda check_results: self._fn.repair(check_results))
2077         def _check1(crr):
2078             self.failUnlessEqual(crr.get_successful(), True)
2079         d.addCallback(_check1)
2080         return d
2081
2082
    def test_merge(self):
        # Two recoverable versions share the highest seqnum; repair() must
        # refuse to pick a winner unless force=True is passed, and a forced
        # repair must converge on a single version.
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # pass force=True
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            # without force=True the repair must raise MustForceRepairError
            # and leave the shares untouched
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            return d2
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            # after the forced repair, exactly one recoverable version
            # should remain
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            # the candidate with the larger roothash is expected to win
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            else:
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d
2127
    def test_non_merge(self):
        # A repair that does not actually need to merge competing versions
        # must proceed without force=True.
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res:
                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                          1:3,3:3,5:3,7:3,9:3}))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
            pass # TODO
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            # exactly one recoverable version should remain after repair
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            return d2
        d.addCallback(_check_smap)
        return d
2157
2158     def get_roothash_for(self, index):
2159         # return the roothash for the first share we see in the saved set
2160         shares = self._copied_shares[index]
2161         for peerid in shares:
2162             for shnum in shares[peerid]:
2163                 share = shares[peerid][shnum]
2164                 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2165                           unpack_header(share)
2166                 return root_hash
2167
    def test_check_and_repair_readcap(self):
        # we can't currently repair from a mutable readcap: #625
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        def _get_readcap(res):
            # make a read-only node for the same file
            self._fn3 = self._fn.get_readonly()
            # also delete some shares
            for peerid,shares in self._storage._peers.items():
                shares.pop(0, None)
        d.addCallback(_get_readcap)
        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
        def _check_results(crr):
            self.failUnless(ICheckAndRepairResults.providedBy(crr))
            # we should detect the unhealthy, but skip over mutable-readcap
            # repairs until #625 is fixed
            self.failIf(crr.get_pre_repair_results().is_healthy())
            self.failIf(crr.get_repair_attempted())
            self.failIf(crr.get_post_repair_results().is_healthy())
        d.addCallback(_check_results)
        return d
2189
class DevNullDictionary(dict):
    """A dict that silently discards every store.

    Used to disable caches: writes vanish, so every lookup behaves as if
    the key was never set.
    """
    def __setitem__(self, key, value):
        # drop the assignment on the floor
        pass
2193
class MultipleEncodings(unittest.TestCase):
    """Verify that download copes with a grid holding shares of the same
    mutable file produced under different (k, N) encodings."""

    def setUp(self):
        # publish one SDMF file onto a 20-peer fake grid; _fn is the node
        # for that file
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        def _created(node):
            self._fn = node
        d.addCallback(_created)
        return d

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.
        # NOTE(review): the 'version' parameter is accepted but never used
        # in this body -- confirm whether it was meant to be passed on.

        fn = self._fn
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        # existing shares
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n

        s = self._storage
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # capture the freshly-written shares, then empty the fake
            # storage again so the next _encode starts clean
            shares = s._peers
            s._peers = {}
            return shares
        d.addCallback(_published)
        return d

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        # build (or refresh) a servermap for self._fn in the given mode
        if oldmap is None:
            oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                               oldmap, mode)
        d = smu.update()
        return d

    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.

        contents1 = "Contents for encoding 1 (3-of-10) go here"
        contents2 = "Contents for encoding 2 (4-of-9) go here"
        contents3 = "Contents for encoding 3 (4-of-7) go here"

        # we make a retrieval object that doesn't know what encoding
        # parameters to use
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())

        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)

        def _merge(res):
            log.msg("merging sharelists")
            # we merge the shares from the two sets, leaving each shnum in
            # its original location, but using a share from set1 or set2
            # according to the following sequence:
            #
            #  4-of-9  a  s2
            #  4-of-9  b  s2
            #  4-of-7  c   s3
            #  4-of-9  d  s2
            #  3-of-9  e s1
            #  3-of-9  f s1
            #  3-of-9  g s1
            #  4-of-9  h  s2
            #
            # so that neither form can be recovered until fetch [f], at which
            # point version-s1 (the 3-of-10 form) should be recoverable. If
            # the implementation latches on to the first version it sees,
            # then s2 will be recoverable at fetch [g].

            # Later, when we implement code that handles multiple versions,
            # we can use this framework to assert that all recoverable
            # versions are retrieved, and test that 'epsilon' does its job

            # places[shnum] says which encoding's share to place at shnum
            places = [2, 2, 3, 2, 1, 1, 1, 2]

            sharemap = {}
            sb = self._storage_broker

            for peerid in sorted(sb.get_all_serverids()):
                for shnum in self._shares1.get(peerid, {}):
                    if shnum < len(places):
                        which = places[shnum]
                    else:
                        which = "x"
                    self._storage._peers[peerid] = peers = {}
                    in_1 = shnum in self._shares1[peerid]
                    in_2 = shnum in self._shares2.get(peerid, {})
                    in_3 = shnum in self._shares3.get(peerid, {})
                    if which == 1:
                        if in_1:
                            peers[shnum] = self._shares1[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 2:
                        if in_2:
                            peers[shnum] = self._shares2[peerid][shnum]
                            sharemap[shnum] = peerid
                    elif which == 3:
                        if in_3:
                            peers[shnum] = self._shares3[peerid][shnum]
                            sharemap[shnum] = peerid

            # we don't bother placing any other shares
            # now sort the sequence so that share 0 is returned first
            new_sequence = [sharemap[shnum]
                            for shnum in sorted(sharemap.keys())]
            self._storage._sequence = new_sequence
            log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
        return d
2336
2337
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    """Download and servermap behavior when the grid holds a mix of
    versions (different seqnums) of one mutable file."""

    def setUp(self):
        # publish_multiple() (from PublishMixin) stores several versions'
        # worth of shares for a single file
        return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")

        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            # exactly one newer-but-unrecoverable version should be visible
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        # recover either
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            # both parallel versions are recoverable, so nothing is
            # "unrecoverable", but a merge is needed
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        return d

    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version

        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)

        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
        return d
2417
2418
class Utils(unittest.TestCase):
    def test_cache(self):
        # Exercise ResponseCache: reads hit only when the requested range
        # lies entirely inside a previously-added extent for the same
        # (verinfo, shnum); everything else returns None.
        c = ResponseCache()
        # two fixed 100-byte blobs of random-looking base62 data
        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
        c.add("v1", 1, 0, xdata)      # caches the extent starting at 0
        c.add("v1", 1, 2000, ydata)   # caches the extent starting at 2000
        # wrong verinfo or wrong shnum: miss
        self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
        self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
        # ranges fully inside a cached extent: hit
        self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
        self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
        # range in a gap between extents: miss
        self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
        # one byte past the end of the first extent: miss
        self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
        self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
        self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
        # every read starting at 1990 overlaps uncached bytes before the
        # second extent, so all of these miss regardless of length
        for size in (9, 10, 11, 15, 19, 20, 21, 25):
            self.failUnlessEqual(c.read("v1", 1, 1990, size), None)
        self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)

        # two adjacent fragments should be joined to satisfy a single read
        c = ResponseCache()
        c.add("v1", 1, 0, xdata[:10])
        c.add("v1", 1, 10, xdata[10:20])
        self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
2451
class Exceptions(unittest.TestCase):
    def test_repr(self):
        # repr() of the mutable-file exceptions should name the class
        cases = [
            (NeedMoreDataError(100, 50, 100), "NeedMoreDataError"),
            (UncoordinatedWriteError(), "UncoordinatedWriteError"),
        ]
        for exc, expected_name in cases:
            self.failUnless(expected_name in repr(exc), repr(exc))
2458
class SameKeyGenerator:
    """A key generator that always hands back one pre-made keypair.

    Lets a test decide a new mutable file's keys (and thus its
    storage-index) in advance.
    """
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # keysize is ignored: every request gets the same fixed pair
        return defer.succeed((self.pubkey, self.privkey))
2465
class FirstServerGetsKilled:
    """Notification hook that breaks the first server it sees.

    notify() always passes retval through untouched; on the very first
    call it also sets wrapper.broken so subsequent queries to that server
    fail.
    """
    done = False

    def notify(self, retval, wrapper, methname):
        if self.done:
            return retval
        # first notification: break this server, then never act again
        wrapper.broken = True
        self.done = True
        return retval
2473
class FirstServerGetsDeleted:
    """Notification hook that makes the first server act as if its share
    was deleted after the first query.

    The first call succeeds normally but remembers the wrapper; later
    write calls through that same wrapper answer as though the share is
    gone.
    """
    def __init__(self):
        self.done = False
        self.silenced = None

    def notify(self, retval, wrapper, methname):
        if not self.done:
            # this query will work, but later queries through this wrapper
            # should think the share has been deleted
            self.done = True
            self.silenced = wrapper
            return retval
        if wrapper != self.silenced:
            return retval
        # only the slot-write API is expected to reach a silenced server
        assert methname == "slot_testv_and_readv_and_writev"
        return (True, {})
2489
2490 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2491     def test_publish_surprise(self):
2492         self.basedir = "mutable/Problems/test_publish_surprise"
2493         self.set_up_grid()
2494         nm = self.g.clients[0].nodemaker
2495         d = nm.create_mutable_file(MutableData("contents 1"))
2496         def _created(n):
2497             d = defer.succeed(None)
2498             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2499             def _got_smap1(smap):
2500                 # stash the old state of the file
2501                 self.old_map = smap
2502             d.addCallback(_got_smap1)
2503             # then modify the file, leaving the old map untouched
2504             d.addCallback(lambda res: log.msg("starting winning write"))
2505             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2506             # now attempt to modify the file with the old servermap. This
2507             # will look just like an uncoordinated write, in which every
2508             # single share got updated between our mapupdate and our publish
2509             d.addCallback(lambda res: log.msg("starting doomed write"))
2510             d.addCallback(lambda res:
2511                           self.shouldFail(UncoordinatedWriteError,
2512                                           "test_publish_surprise", None,
2513                                           n.upload,
2514                                           MutableData("contents 2a"), self.old_map))
2515             return d
2516         d.addCallback(_created)
2517         return d
2518
    def test_retrieve_surprise(self):
        # Downloading an old version with a stale servermap should fail:
        # the shares no longer match what the map promised.
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        def _created(n):
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            def _got_smap1(smap):
                # stash the old state of the file
                self.old_map = smap
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # now attempt to retrieve the old version with the old servermap.
            # This will look like someone has changed the file since we
            # updated the servermap.
            # clear the node's response cache so the read really hits the
            # (now-changed) servers
            d.addCallback(lambda res: n._cache._clear())
            d.addCallback(lambda res: log.msg("starting doomed read"))
            d.addCallback(lambda res:
                          self.shouldFail(NotEnoughSharesError,
                                          "test_retrieve_surprise",
                                          "ran out of peers: have 0 of 1",
                                          n.download_version,
                                          self.old_map,
                                          self.old_map.best_recoverable_version(),
                                          ))
            return d
        d.addCallback(_created)
        return d
2550
2551
2552     def test_unexpected_shares(self):
2553         # upload the file, take a servermap, shut down one of the servers,
2554         # upload it again (causing shares to appear on a new server), then
2555         # upload using the old servermap. The last upload should fail with an
2556         # UncoordinatedWriteError, because of the shares that didn't appear
2557         # in the servermap.
2558         self.basedir = "mutable/Problems/test_unexpected_shares"
2559         self.set_up_grid()
2560         nm = self.g.clients[0].nodemaker
2561         d = nm.create_mutable_file(MutableData("contents 1"))
2562         def _created(n):
2563             d = defer.succeed(None)
2564             d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2565             def _got_smap1(smap):
2566                 # stash the old state of the file
2567                 self.old_map = smap
2568                 # now shut down one of the servers
2569                 peer0 = list(smap.make_sharemap()[0])[0]
2570                 self.g.remove_server(peer0)
2571                 # then modify the file, leaving the old map untouched
2572                 log.msg("starting winning write")
2573                 return n.overwrite(MutableData("contents 2"))
2574             d.addCallback(_got_smap1)
2575             # now attempt to modify the file with the old servermap. This
2576             # will look just like an uncoordinated write, in which every
2577             # single share got updated between our mapupdate and our publish
2578             d.addCallback(lambda res: log.msg("starting doomed write"))
2579             d.addCallback(lambda res:
2580                           self.shouldFail(UncoordinatedWriteError,
2581                                           "test_surprise", None,
2582                                           n.upload,
2583                                           MutableData("contents 2a"), self.old_map))
2584             return d
2585         d.addCallback(_created)
2586         return d
2587
    def test_bad_server(self):
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server"
        self.set_up_grid()
        nm = self.g.clients[0].nodemaker

        # to make sure that one of the initial peers is broken, we have to
        # get creative. We create an RSA key and compute its storage-index.
        # Then we make a KeyGenerator that always returns that one key, and
        # use it to create the mutable file. This will get easier when we can
        # use #467 static-server-selection to disable permutation and force
        # the choice of server for share[0].

        d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
        def _got_key( (pubkey, privkey) ):
            # pin the nodemaker to this keypair so the storage-index (and
            # therefore the server permutation) is known in advance
            nm.key_generator = SameKeyGenerator(pubkey, privkey)
            pubkey_s = pubkey.serialize()
            privkey_s = privkey.serialize()
            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                        ssk_pubkey_fingerprint_hash(pubkey_s))
            self._storage_index = u.get_storage_index()
        d.addCallback(_got_key)
        def _break_peer0(res):
            # break the first server in the file's permuted order, and
            # remember the second for later
            si = self._storage_index
            servers = nm.storage_broker.get_servers_for_psi(si)
            self.g.break_server(servers[0].get_serverid())
            self.server1 = servers[1]
        d.addCallback(_break_peer0)
        # now "create" the file, using the pre-established key, and let the
        # initial publish finally happen
        d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
        # that ought to work
        def _got_node(n):
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            # now break the second peer
            def _break_peer1(res):
                self.g.break_server(self.server1.get_serverid())
            d.addCallback(_break_peer1)
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # that ought to work too
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            def _explain_error(f):
                # debugging aid: surface the underlying first_error when the
                # publish runs out of servers
                print f
                if f.check(NotEnoughServersError):
                    print "first_error:", f.value.first_error
                return f
            d.addErrback(_explain_error)
            return d
        d.addCallback(_got_node)
        return d
2642
2643     def test_bad_server_overlap(self):
2644         # like test_bad_server, but with no extra unused servers to fall back
2645         # upon. This means that we must re-use a server which we've already
2646         # used. If we don't remember the fact that we sent them one share
2647         # already, we'll mistakenly think we're experiencing an
2648         # UncoordinatedWriteError.
2649
2650         # Break one server, then create the file: the initial publish should
2651         # complete with an alternate server. Breaking a second server should
2652         # not prevent an update from succeeding either.
2653         self.basedir = "mutable/Problems/test_bad_server_overlap"
2654         self.set_up_grid()
2655         nm = self.g.clients[0].nodemaker
2656         sb = nm.storage_broker
2657
2658         peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2659         self.g.break_server(peerids[0])
2660
2661         d = nm.create_mutable_file(MutableData("contents 1"))
2662         def _created(n):
2663             d = n.download_best_version()
2664             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2665             # now break one of the remaining servers
2666             def _break_second_server(res):
2667                 self.g.break_server(peerids[1])
2668             d.addCallback(_break_second_server)
2669             d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2670             # that ought to work too
2671             d.addCallback(lambda res: n.download_best_version())
2672             d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2673             return d
2674         d.addCallback(_created)
2675         return d
2676
2677     def test_publish_all_servers_bad(self):
2678         # Break all servers: the publish should fail
2679         self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2680         self.set_up_grid()
2681         nm = self.g.clients[0].nodemaker
2682         for s in nm.storage_broker.get_connected_servers():
2683             s.get_rref().broken = True
2684
2685         d = self.shouldFail(NotEnoughServersError,
2686                             "test_publish_all_servers_bad",
2687                             "ran out of good servers",
2688                             nm.create_mutable_file, MutableData("contents"))
2689         return d
2690
2691     def test_publish_no_servers(self):
2692         # no servers at all: the publish should fail
2693         self.basedir = "mutable/Problems/test_publish_no_servers"
2694         self.set_up_grid(num_servers=0)
2695         nm = self.g.clients[0].nodemaker
2696
2697         d = self.shouldFail(NotEnoughServersError,
2698                             "test_publish_no_servers",
2699                             "Ran out of non-bad servers",
2700                             nm.create_mutable_file, MutableData("contents"))
2701         return d
2702
2703
2704     def test_privkey_query_error(self):
2705         # when a servermap is updated with MODE_WRITE, it tries to get the
2706         # privkey. Something might go wrong during this query attempt.
2707         # Exercise the code in _privkey_query_failed which tries to handle
2708         # such an error.
2709         self.basedir = "mutable/Problems/test_privkey_query_error"
2710         self.set_up_grid(num_servers=20)
2711         nm = self.g.clients[0].nodemaker
2712         nm._node_cache = DevNullDictionary() # disable the nodecache
2713
2714         # we need some contents that are large enough to push the privkey out
2715         # of the early part of the file
2716         LARGE = "These are Larger contents" * 2000 # about 50KB
2717         LARGE_uploadable = MutableData(LARGE)
2718         d = nm.create_mutable_file(LARGE_uploadable)
2719         def _created(n):
2720             self.uri = n.get_uri()
2721             self.n2 = nm.create_from_cap(self.uri)
2722
2723             # When a mapupdate is performed on a node that doesn't yet know
2724             # the privkey, a short read is sent to a batch of servers, to get
2725             # the verinfo and (hopefully, if the file is short enough) the
2726             # encprivkey. Our file is too large to let this first read
2727             # contain the encprivkey. Each non-encprivkey-bearing response
2728             # that arrives (until the node gets the encprivkey) will trigger
2729             # a second read to specifically read the encprivkey.
2730             #
2731             # So, to exercise this case:
2732             #  1. notice which server gets a read() call first
2733             #  2. tell that server to start throwing errors
2734             killer = FirstServerGetsKilled()
2735             for s in nm.storage_broker.get_connected_servers():
2736                 s.get_rref().post_call_notifier = killer.notify
2737         d.addCallback(_created)
2738
2739         # now we update a servermap from a new node (which doesn't have the
2740         # privkey yet, forcing it to use a separate privkey query). Note that
2741         # the map-update will succeed, since we'll just get a copy from one
2742         # of the other shares.
2743         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2744
2745         return d
2746
2747     def test_privkey_query_missing(self):
2748         # like test_privkey_query_error, but the shares are deleted by the
2749         # second query, instead of raising an exception.
2750         self.basedir = "mutable/Problems/test_privkey_query_missing"
2751         self.set_up_grid(num_servers=20)
2752         nm = self.g.clients[0].nodemaker
2753         LARGE = "These are Larger contents" * 2000 # about 50KiB
2754         LARGE_uploadable = MutableData(LARGE)
2755         nm._node_cache = DevNullDictionary() # disable the nodecache
2756
2757         d = nm.create_mutable_file(LARGE_uploadable)
2758         def _created(n):
2759             self.uri = n.get_uri()
2760             self.n2 = nm.create_from_cap(self.uri)
2761             deleter = FirstServerGetsDeleted()
2762             for s in nm.storage_broker.get_connected_servers():
2763                 s.get_rref().post_call_notifier = deleter.notify
2764         d.addCallback(_created)
2765         d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2766         return d
2767
2768
    def test_block_and_hash_query_error(self):
        # This tests for what happens when a query to a remote server
        # fails in either the hash validation step or the block getting
        # step (because of batching, this is the same actual query).
        # We need to have the storage server persist up until the point
        # that its prefix is validated, then suddenly die. This
        # exercises some exception handling code in Retrieve.
        self.basedir = "mutable/Problems/test_block_and_hash_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        # 16000 bytes of test data
        CONTENTS = "contents" * 2000
        CONTENTS_uploadable = MutableData(CONTENTS)
        d = nm.create_mutable_file(CONTENTS_uploadable)
        def _created(node):
            # remember the filenode so later callbacks can reach it
            self._node = node
        d.addCallback(_created)
        # fetch the servermap *before* breaking anything, so the killer
        # only affects the subsequent block/hash queries
        d.addCallback(lambda ignored:
            self._node.get_servermap(MODE_READ))
        def _then(servermap):
            # we have our servermap. Now we set up the servers like the
            # tests above -- the first one that gets a read call should
            # start throwing errors, but only after returning its prefix
            # for validation. Since we'll download without fetching the
            # private key, the next query to the remote server will be
            # for either a block and salt or for hashes, either of which
            # will exercise the error handling code.
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
            ver = servermap.best_recoverable_version()
            assert ver
            return self._node.download_version(servermap, ver)
        d.addCallback(_then)
        # despite the one failing server, the download must still yield
        # the full plaintext
        d.addCallback(lambda data:
            self.failUnlessEqual(data, CONTENTS))
        return d
2805
2806
class FileHandle(unittest.TestCase):
    # Exercise MutableFileHandle, the uploadable wrapper around
    # file-like objects.
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)


    def test_filehandle_read(self):
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        chunk_size = 10
        for offset in xrange(0, len(self.test_data), chunk_size):
            # read() returns a list of strings; flatten before comparing
            chunk = "".join(self.uploadable.read(chunk_size))
            expected = self.test_data[offset:offset + chunk_size]
            self.failUnlessEqual(chunk, expected)


    def test_filehandle_get_size(self):
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))


    def test_filehandle_get_size_out_of_order(self):
        # get_size must not disturb the location of the seek pointer.
        chunk_size = 100
        first = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(first), self.test_data[:chunk_size])

        # query the size mid-stream ...
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))

        # ... and the next read must resume where the first stopped
        second = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(second),
                             self.test_data[chunk_size:chunk_size * 2])


    def test_filehandle_file(self):
        # MutableFileHandle must work on a real file object as well as a
        # StringIO, since in some cases it will be handed one.
        self.basedir = self.mktemp()
        # necessary? What am I doing wrong here?
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        f.close()
        f = open(f_path, "r")

        uploadable = MutableFileHandle(f)

        self.failUnlessEqual("".join(uploadable.read(len(self.test_data))),
                             self.test_data)
        self.failUnlessEqual(uploadable.get_size(), len(self.test_data))


    def test_close(self):
        # close() must be forwarded to the underlying handle.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
2876
2877
class DataHandle(unittest.TestCase):
    # Exercise MutableData, the uploadable wrapper around in-memory
    # strings.
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)


    def test_datahandle_read(self):
        chunk_size = 10
        for offset in xrange(0, len(self.test_data), chunk_size):
            # read() returns a list of strings; flatten before comparing
            chunk = "".join(self.uploadable.read(chunk_size))
            expected = self.test_data[offset:offset + chunk_size]
            self.failUnlessEqual(chunk, expected)


    def test_datahandle_get_size(self):
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))


    def test_datahandle_get_size_out_of_order(self):
        # get_size must not disturb the location of the read pointer.
        chunk_size = 100
        first = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(first), self.test_data[:chunk_size])

        # query the size mid-stream ...
        self.failUnlessEqual(self.uploadable.get_size(),
                             len(self.test_data))

        # ... and the next read must resume where the first stopped
        second = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(second),
                             self.test_data[chunk_size:chunk_size * 2])
2916
2917
2918 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
2919               PublishMixin):
2920     def setUp(self):
2921         GridTestMixin.setUp(self)
2922         self.basedir = self.mktemp()
2923         self.set_up_grid()
2924         self.c = self.g.clients[0]
2925         self.nm = self.c.nodemaker
2926         self.data = "test data" * 100000 # about 900 KiB; MDMF
2927         self.small_data = "test data" * 10 # about 90 B; SDMF
2928
2929
2930     def do_upload_mdmf(self):
2931         d = self.nm.create_mutable_file(MutableData(self.data),
2932                                         version=MDMF_VERSION)
2933         def _then(n):
2934             assert isinstance(n, MutableFileNode)
2935             assert n._protocol_version == MDMF_VERSION
2936             self.mdmf_node = n
2937             return n
2938         d.addCallback(_then)
2939         return d
2940
2941     def do_upload_sdmf(self):
2942         d = self.nm.create_mutable_file(MutableData(self.small_data))
2943         def _then(n):
2944             assert isinstance(n, MutableFileNode)
2945             assert n._protocol_version == SDMF_VERSION
2946             self.sdmf_node = n
2947             return n
2948         d.addCallback(_then)
2949         return d
2950
2951     def do_upload_empty_sdmf(self):
2952         d = self.nm.create_mutable_file(MutableData(""))
2953         def _then(n):
2954             assert isinstance(n, MutableFileNode)
2955             self.sdmf_zero_length_node = n
2956             assert n._protocol_version == SDMF_VERSION
2957             return n
2958         d.addCallback(_then)
2959         return d
2960
2961     def do_upload(self):
2962         d = self.do_upload_mdmf()
2963         d.addCallback(lambda ign: self.do_upload_sdmf())
2964         return d
2965
2966     def test_debug(self):
2967         d = self.do_upload_mdmf()
2968         def _debug(n):
2969             fso = debug.FindSharesOptions()
2970             storage_index = base32.b2a(n.get_storage_index())
2971             fso.si_s = storage_index
2972             fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
2973                             for (i,ss,storedir)
2974                             in self.iterate_servers()]
2975             fso.stdout = StringIO()
2976             fso.stderr = StringIO()
2977             debug.find_shares(fso)
2978             sharefiles = fso.stdout.getvalue().splitlines()
2979             expected = self.nm.default_encoding_parameters["n"]
2980             self.failUnlessEqual(len(sharefiles), expected)
2981
2982             do = debug.DumpOptions()
2983             do["filename"] = sharefiles[0]
2984             do.stdout = StringIO()
2985             debug.dump_share(do)
2986             output = do.stdout.getvalue()
2987             lines = set(output.splitlines())
2988             self.failUnless("Mutable slot found:" in lines, output)
2989             self.failUnless(" share_type: MDMF" in lines, output)
2990             self.failUnless(" num_extra_leases: 0" in lines, output)
2991             self.failUnless(" MDMF contents:" in lines, output)
2992             self.failUnless("  seqnum: 1" in lines, output)
2993             self.failUnless("  required_shares: 3" in lines, output)
2994             self.failUnless("  total_shares: 10" in lines, output)
2995             self.failUnless("  segsize: 131073" in lines, output)
2996             self.failUnless("  datalen: %d" % len(self.data) in lines, output)
2997             vcap = n.get_verify_cap().to_string()
2998             self.failUnless("  verify-cap: %s" % vcap in lines, output)
2999
3000             cso = debug.CatalogSharesOptions()
3001             cso.nodedirs = fso.nodedirs
3002             cso.stdout = StringIO()
3003             cso.stderr = StringIO()
3004             debug.catalog_shares(cso)
3005             shares = cso.stdout.getvalue().splitlines()
3006             oneshare = shares[0] # all shares should be MDMF
3007             self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3008             self.failUnless(oneshare.startswith("MDMF"), oneshare)
3009             fields = oneshare.split()
3010             self.failUnlessEqual(fields[0], "MDMF")
3011             self.failUnlessEqual(fields[1], storage_index)
3012             self.failUnlessEqual(fields[2], "3/10")
3013             self.failUnlessEqual(fields[3], "%d" % len(self.data))
3014             self.failUnless(fields[4].startswith("#1:"), fields[3])
3015             # the rest of fields[4] is the roothash, which depends upon
3016             # encryption salts and is not constant. fields[5] is the
3017             # remaining time on the longest lease, which is timing dependent.
3018             # The rest of the line is the quoted pathname to the share.
3019         d.addCallback(_debug)
3020         return d
3021
3022     def test_get_sequence_number(self):
3023         d = self.do_upload()
3024         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3025         d.addCallback(lambda bv:
3026             self.failUnlessEqual(bv.get_sequence_number(), 1))
3027         d.addCallback(lambda ignored:
3028             self.sdmf_node.get_best_readable_version())
3029         d.addCallback(lambda bv:
3030             self.failUnlessEqual(bv.get_sequence_number(), 1))
3031         # Now update. The sequence number in both cases should be 1 in
3032         # both cases.
3033         def _do_update(ignored):
3034             new_data = MutableData("foo bar baz" * 100000)
3035             new_small_data = MutableData("foo bar baz" * 10)
3036             d1 = self.mdmf_node.overwrite(new_data)
3037             d2 = self.sdmf_node.overwrite(new_small_data)
3038             dl = gatherResults([d1, d2])
3039             return dl
3040         d.addCallback(_do_update)
3041         d.addCallback(lambda ignored:
3042             self.mdmf_node.get_best_readable_version())
3043         d.addCallback(lambda bv:
3044             self.failUnlessEqual(bv.get_sequence_number(), 2))
3045         d.addCallback(lambda ignored:
3046             self.sdmf_node.get_best_readable_version())
3047         d.addCallback(lambda bv:
3048             self.failUnlessEqual(bv.get_sequence_number(), 2))
3049         return d
3050
3051
3052     def test_version_extension_api(self):
3053         # We need to define an API by which an uploader can set the
3054         # extension parameters, and by which a downloader can retrieve
3055         # extensions.
3056         d = self.do_upload_mdmf()
3057         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3058         def _got_version(version):
3059             hints = version.get_downloader_hints()
3060             # Should be empty at this point.
3061             self.failUnlessIn("k", hints)
3062             self.failUnlessEqual(hints['k'], 3)
3063             self.failUnlessIn('segsize', hints)
3064             self.failUnlessEqual(hints['segsize'], 131073)
3065         d.addCallback(_got_version)
3066         return d
3067
3068
3069     def test_extensions_from_cap(self):
3070         # If we initialize a mutable file with a cap that has extension
3071         # parameters in it and then grab the extension parameters using
3072         # our API, we should see that they're set correctly.
3073         d = self.do_upload_mdmf()
3074         def _then(ign):
3075             mdmf_uri = self.mdmf_node.get_uri()
3076             new_node = self.nm.create_from_cap(mdmf_uri)
3077             return new_node.get_best_mutable_version()
3078         d.addCallback(_then)
3079         def _got_version(version):
3080             hints = version.get_downloader_hints()
3081             self.failUnlessIn("k", hints)
3082             self.failUnlessEqual(hints["k"], 3)
3083             self.failUnlessIn("segsize", hints)
3084             self.failUnlessEqual(hints["segsize"], 131073)
3085         d.addCallback(_got_version)
3086         return d
3087
3088
3089     def test_extensions_from_upload(self):
3090         # If we create a new mutable file with some contents, we should
3091         # get back an MDMF cap with the right hints in place.
3092         contents = "foo bar baz" * 100000
3093         d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3094         def _got_mutable_file(n):
3095             rw_uri = n.get_uri()
3096             expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3097             self.failUnlessIn(expected_k, rw_uri)
3098             # XXX: Get this more intelligently.
3099             self.failUnlessIn("131073", rw_uri)
3100
3101             ro_uri = n.get_readonly_uri()
3102             self.failUnlessIn(expected_k, ro_uri)
3103             self.failUnlessIn("131073", ro_uri)
3104         d.addCallback(_got_mutable_file)
3105         return d
3106
3107
3108     def test_cap_after_upload(self):
3109         # If we create a new mutable file and upload things to it, and
3110         # it's an MDMF file, we should get an MDMF cap back from that
3111         # file and should be able to use that.
3112         # That's essentially what MDMF node is, so just check that.
3113         d = self.do_upload_mdmf()
3114         def _then(ign):
3115             mdmf_uri = self.mdmf_node.get_uri()
3116             cap = uri.from_string(mdmf_uri)
3117             self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3118             readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3119             cap = uri.from_string(readonly_mdmf_uri)
3120             self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3121         d.addCallback(_then)
3122         return d
3123
3124     def test_mutable_version(self):
3125         # assert that getting parameters from the IMutableVersion object
3126         # gives us the same data as getting them from the filenode itself
3127         d = self.do_upload()
3128         d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3129         def _check_mdmf(bv):
3130             n = self.mdmf_node
3131             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3132             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3133             self.failIf(bv.is_readonly())
3134         d.addCallback(_check_mdmf)
3135         d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3136         def _check_sdmf(bv):
3137             n = self.sdmf_node
3138             self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3139             self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3140             self.failIf(bv.is_readonly())
3141         d.addCallback(_check_sdmf)
3142         return d
3143
3144
3145     def test_get_readonly_version(self):
3146         d = self.do_upload()
3147         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3148         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3149
3150         # Attempting to get a mutable version of a mutable file from a
3151         # filenode initialized with a readcap should return a readonly
3152         # version of that same node.
3153         d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3154         d.addCallback(lambda ro: ro.get_best_mutable_version())
3155         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3156
3157         d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3158         d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3159
3160         d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3161         d.addCallback(lambda ro: ro.get_best_mutable_version())
3162         d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3163         return d
3164
3165
3166     def test_toplevel_overwrite(self):
3167         new_data = MutableData("foo bar baz" * 100000)
3168         new_small_data = MutableData("foo bar baz" * 10)
3169         d = self.do_upload()
3170         d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3171         d.addCallback(lambda ignored:
3172             self.mdmf_node.download_best_version())
3173         d.addCallback(lambda data:
3174             self.failUnlessEqual(data, "foo bar baz" * 100000))
3175         d.addCallback(lambda ignored:
3176             self.sdmf_node.overwrite(new_small_data))
3177         d.addCallback(lambda ignored:
3178             self.sdmf_node.download_best_version())
3179         d.addCallback(lambda data:
3180             self.failUnlessEqual(data, "foo bar baz" * 10))
3181         return d
3182
3183
3184     def test_toplevel_modify(self):
3185         d = self.do_upload()
3186         def modifier(old_contents, servermap, first_time):
3187             return old_contents + "modified"
3188         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3189         d.addCallback(lambda ignored:
3190             self.mdmf_node.download_best_version())
3191         d.addCallback(lambda data:
3192             self.failUnlessIn("modified", data))
3193         d.addCallback(lambda ignored:
3194             self.sdmf_node.modify(modifier))
3195         d.addCallback(lambda ignored:
3196             self.sdmf_node.download_best_version())
3197         d.addCallback(lambda data:
3198             self.failUnlessIn("modified", data))
3199         return d
3200
3201
3202     def test_version_modify(self):
3203         # TODO: When we can publish multiple versions, alter this test
3204         # to modify a version other than the best usable version, then
3205         # test to see that the best recoverable version is that.
3206         d = self.do_upload()
3207         def modifier(old_contents, servermap, first_time):
3208             return old_contents + "modified"
3209         d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3210         d.addCallback(lambda ignored:
3211             self.mdmf_node.download_best_version())
3212         d.addCallback(lambda data:
3213             self.failUnlessIn("modified", data))
3214         d.addCallback(lambda ignored:
3215             self.sdmf_node.modify(modifier))
3216         d.addCallback(lambda ignored:
3217             self.sdmf_node.download_best_version())
3218         d.addCallback(lambda data:
3219             self.failUnlessIn("modified", data))
3220         return d
3221
3222
3223     def test_download_version(self):
3224         d = self.publish_multiple()
3225         # We want to have two recoverable versions on the grid.
3226         d.addCallback(lambda res:
3227                       self._set_versions({0:0,2:0,4:0,6:0,8:0,
3228                                           1:1,3:1,5:1,7:1,9:1}))
3229         # Now try to download each version. We should get the plaintext
3230         # associated with that version.
3231         d.addCallback(lambda ignored:
3232             self._fn.get_servermap(mode=MODE_READ))
3233         def _got_servermap(smap):
3234             versions = smap.recoverable_versions()
3235             assert len(versions) == 2
3236
3237             self.servermap = smap
3238             self.version1, self.version2 = versions
3239             assert self.version1 != self.version2
3240
3241             self.version1_seqnum = self.version1[0]
3242             self.version2_seqnum = self.version2[0]
3243             self.version1_index = self.version1_seqnum - 1
3244             self.version2_index = self.version2_seqnum - 1
3245
3246         d.addCallback(_got_servermap)
3247         d.addCallback(lambda ignored:
3248             self._fn.download_version(self.servermap, self.version1))
3249         d.addCallback(lambda results:
3250             self.failUnlessEqual(self.CONTENTS[self.version1_index],
3251                                  results))
3252         d.addCallback(lambda ignored:
3253             self._fn.download_version(self.servermap, self.version2))
3254         d.addCallback(lambda results:
3255             self.failUnlessEqual(self.CONTENTS[self.version2_index],
3256                                  results))
3257         return d
3258
3259
3260     def test_download_nonexistent_version(self):
3261         d = self.do_upload_mdmf()
3262         d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3263         def _set_servermap(servermap):
3264             self.servermap = servermap
3265         d.addCallback(_set_servermap)
3266         d.addCallback(lambda ignored:
3267            self.shouldFail(UnrecoverableFileError, "nonexistent version",
3268                            None,
3269                            self.mdmf_node.download_version, self.servermap,
3270                            "not a version"))
3271         return d
3272
3273
3274     def test_partial_read(self):
3275         d = self.do_upload_mdmf()
3276         d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3277         modes = [("start_on_segment_boundary",
3278                   mathutil.next_multiple(128 * 1024, 3), 50),
3279                  ("ending_one_byte_after_segment_boundary",
3280                   mathutil.next_multiple(128 * 1024, 3)-50, 51),
3281                  ("zero_length_at_start", 0, 0),
3282                  ("zero_length_in_middle", 50, 0),
3283                  ("zero_length_at_segment_boundary",
3284                   mathutil.next_multiple(128 * 1024, 3), 0),
3285                  ]
3286         for (name, offset, length) in modes:
3287             d.addCallback(self._do_partial_read, name, offset, length)
3288         # then read only a few bytes at a time, and see that the results are
3289         # what we expect.
3290         def _read_data(version):
3291             c = consumer.MemoryConsumer()
3292             d2 = defer.succeed(None)
3293             for i in xrange(0, len(self.data), 10000):
3294                 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3295             d2.addCallback(lambda ignored:
3296                 self.failUnlessEqual(self.data, "".join(c.chunks)))
3297             return d2
3298         d.addCallback(_read_data)
3299         return d
3300     def _do_partial_read(self, version, name, offset, length):
3301         c = consumer.MemoryConsumer()
3302         d = version.read(c, offset, length)
3303         expected = self.data[offset:offset+length]
3304         d.addCallback(lambda ignored: "".join(c.chunks))
3305         def _check(results):
3306             if results != expected:
3307                 print
3308                 print "got: %s ... %s" % (results[:20], results[-20:])
3309                 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3310                 self.fail("results[%s] != expected" % name)
3311             return version # daisy-chained to next call
3312         d.addCallback(_check)
3313         return d
3314
3315
3316     def _test_read_and_download(self, node, expected):
3317         d = node.get_best_readable_version()
3318         def _read_data(version):
3319             c = consumer.MemoryConsumer()
3320             d2 = defer.succeed(None)
3321             d2.addCallback(lambda ignored: version.read(c))
3322             d2.addCallback(lambda ignored:
3323                 self.failUnlessEqual(expected, "".join(c.chunks)))
3324             return d2
3325         d.addCallback(_read_data)
3326         d.addCallback(lambda ignored: node.download_best_version())
3327         d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3328         return d
3329
3330     def test_read_and_download_mdmf(self):
3331         d = self.do_upload_mdmf()
3332         d.addCallback(self._test_read_and_download, self.data)
3333         return d
3334
3335     def test_read_and_download_sdmf(self):
3336         d = self.do_upload_sdmf()
3337         d.addCallback(self._test_read_and_download, self.small_data)
3338         return d
3339
3340     def test_read_and_download_sdmf_zero_length(self):
3341         d = self.do_upload_empty_sdmf()
3342         d.addCallback(self._test_read_and_download, "")
3343         return d
3344
3345
3346 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3347     timeout = 400 # these tests are too big, 120s is not enough on slow
3348                   # platforms
    def setUp(self):
        # build a fresh grid per test and stash the common fixtures
        GridTestMixin.setUp(self)
        self.basedir = self.mktemp()
        self.set_up_grid()
        self.c = self.g.clients[0]
        self.nm = self.c.nodemaker
        self.data = "testdata " * 100000 # about 900 KiB; MDMF
        self.small_data = "test data" * 10 # about 90 B; SDMF
3357
3358
3359     def do_upload_sdmf(self):
3360         d = self.nm.create_mutable_file(MutableData(self.small_data))
3361         def _then(n):
3362             assert isinstance(n, MutableFileNode)
3363             self.sdmf_node = n
3364             # Make SDMF node that has 255 shares.
3365             self.nm.default_encoding_parameters['n'] = 255
3366             self.nm.default_encoding_parameters['k'] = 127
3367             return self.nm.create_mutable_file(MutableData(self.small_data))
3368         d.addCallback(_then)
3369         def _then2(n):
3370             assert isinstance(n, MutableFileNode)
3371             self.sdmf_max_shares_node = n
3372         d.addCallback(_then2)
3373         return d
3374
3375     def do_upload_mdmf(self):
3376         d = self.nm.create_mutable_file(MutableData(self.data),
3377                                         version=MDMF_VERSION)
3378         def _then(n):
3379             assert isinstance(n, MutableFileNode)
3380             self.mdmf_node = n
3381             # Make MDMF node that has 255 shares.
3382             self.nm.default_encoding_parameters['n'] = 255
3383             self.nm.default_encoding_parameters['k'] = 127
3384             return self.nm.create_mutable_file(MutableData(self.data),
3385                                                version=MDMF_VERSION)
3386         d.addCallback(_then)
3387         def _then2(n):
3388             assert isinstance(n, MutableFileNode)
3389             self.mdmf_max_shares_node = n
3390         d.addCallback(_then2)
3391         return d
3392
3393     def _test_replace(self, offset, new_data):
3394         expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3395         d0 = self.do_upload_mdmf()
3396         def _run(ign):
3397             d = defer.succeed(None)
3398             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3399                 d.addCallback(lambda ign: node.get_best_mutable_version())
3400                 d.addCallback(lambda mv:
3401                     mv.update(MutableData(new_data), offset))
3402                 # close around node.
3403                 d.addCallback(lambda ignored, node=node:
3404                     node.download_best_version())
3405                 def _check(results):
3406                     if results != expected:
3407                         print
3408                         print "got: %s ... %s" % (results[:20], results[-20:])
3409                         print "exp: %s ... %s" % (expected[:20], expected[-20:])
3410                         self.fail("results != expected")
3411                 d.addCallback(_check)
3412             return d
3413         d0.addCallback(_run)
3414         return d0
3415
3416     def test_append(self):
3417         # We should be able to append data to a mutable file and get
3418         # what we expect.
3419         return self._test_replace(len(self.data), "appended")
3420
3421     def test_replace_middle(self):
3422         # We should be able to replace data in the middle of a mutable
3423         # file and get what we expect back.
3424         return self._test_replace(100, "replaced")
3425
3426     def test_replace_beginning(self):
3427         # We should be able to replace data at the beginning of the file
3428         # without truncating the file
3429         return self._test_replace(0, "beginning")
3430
3431     def test_replace_segstart1(self):
3432         return self._test_replace(128*1024+1, "NNNN")
3433
3434     def test_replace_zero_length_beginning(self):
3435         return self._test_replace(0, "")
3436
3437     def test_replace_zero_length_middle(self):
3438         return self._test_replace(50, "")
3439
3440     def test_replace_zero_length_segstart1(self):
3441         return self._test_replace(128*1024+1, "")
3442
3443     def test_replace_and_extend(self):
3444         # We should be able to replace data in the middle of a mutable
3445         # file and extend that mutable file and get what we expect.
3446         return self._test_replace(100, "modified " * 100000)
3447
3448
3449     def _check_differences(self, got, expected):
3450         # displaying arbitrary file corruption is tricky for a
3451         # 1MB file of repeating data,, so look for likely places
3452         # with problems and display them separately
3453         gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3454         expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3455         gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3456                     for (start,end) in gotmods]
3457         expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3458                     for (start,end) in expmods]
3459         #print "expecting: %s" % expspans
3460
3461         SEGSIZE = 128*1024
3462         if got != expected:
3463             print "differences:"
3464             for segnum in range(len(expected)//SEGSIZE):
3465                 start = segnum * SEGSIZE
3466                 end = (segnum+1) * SEGSIZE
3467                 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3468                 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3469                 if got_ends != exp_ends:
3470                     print "expected[%d]: %s" % (start, exp_ends)
3471                     print "got     [%d]: %s" % (start, got_ends)
3472             if expspans != gotspans:
3473                 print "expected: %s" % expspans
3474                 print "got     : %s" % gotspans
3475             open("EXPECTED","wb").write(expected)
3476             open("GOT","wb").write(got)
3477             print "wrote data to EXPECTED and GOT"
3478             self.fail("didn't get expected data")
3479
3480
3481     def test_replace_locations(self):
3482         # exercise fencepost conditions
3483         SEGSIZE = 128*1024
3484         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3485         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3486         d0 = self.do_upload_mdmf()
3487         def _run(ign):
3488             expected = self.data
3489             d = defer.succeed(None)
3490             for offset in suspects:
3491                 new_data = letters.next()*2 # "AA", then "BB", etc
3492                 expected = expected[:offset]+new_data+expected[offset+2:]
3493                 d.addCallback(lambda ign:
3494                               self.mdmf_node.get_best_mutable_version())
3495                 def _modify(mv, offset=offset, new_data=new_data):
3496                     # close over 'offset','new_data'
3497                     md = MutableData(new_data)
3498                     return mv.update(md, offset)
3499                 d.addCallback(_modify)
3500                 d.addCallback(lambda ignored:
3501                               self.mdmf_node.download_best_version())
3502                 d.addCallback(self._check_differences, expected)
3503             return d
3504         d0.addCallback(_run)
3505         return d0
3506
3507     def test_replace_locations_max_shares(self):
3508         # exercise fencepost conditions
3509         SEGSIZE = 128*1024
3510         suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3511         letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3512         d0 = self.do_upload_mdmf()
3513         def _run(ign):
3514             expected = self.data
3515             d = defer.succeed(None)
3516             for offset in suspects:
3517                 new_data = letters.next()*2 # "AA", then "BB", etc
3518                 expected = expected[:offset]+new_data+expected[offset+2:]
3519                 d.addCallback(lambda ign:
3520                               self.mdmf_max_shares_node.get_best_mutable_version())
3521                 def _modify(mv, offset=offset, new_data=new_data):
3522                     # close over 'offset','new_data'
3523                     md = MutableData(new_data)
3524                     return mv.update(md, offset)
3525                 d.addCallback(_modify)
3526                 d.addCallback(lambda ignored:
3527                               self.mdmf_max_shares_node.download_best_version())
3528                 d.addCallback(self._check_differences, expected)
3529             return d
3530         d0.addCallback(_run)
3531         return d0
3532
3533
3534     def test_append_power_of_two(self):
3535         # If we attempt to extend a mutable file so that its segment
3536         # count crosses a power-of-two boundary, the update operation
3537         # should know how to reencode the file.
3538
3539         # Note that the data populating self.mdmf_node is about 900 KiB
3540         # long -- this is 7 segments in the default segment size. So we
3541         # need to add 2 segments worth of data to push it over a
3542         # power-of-two boundary.
3543         segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3544         new_data = self.data + (segment * 2)
3545         d0 = self.do_upload_mdmf()
3546         def _run(ign):
3547             d = defer.succeed(None)
3548             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3549                 d.addCallback(lambda ign: node.get_best_mutable_version())
3550                 d.addCallback(lambda mv:
3551                     mv.update(MutableData(segment * 2), len(self.data)))
3552                 d.addCallback(lambda ignored, node=node:
3553                     node.download_best_version())
3554                 d.addCallback(lambda results:
3555                     self.failUnlessEqual(results, new_data))
3556             return d
3557         d0.addCallback(_run)
3558         return d0
3559
3560     def test_update_sdmf(self):
3561         # Running update on a single-segment file should still work.
3562         new_data = self.small_data + "appended"
3563         d0 = self.do_upload_sdmf()
3564         def _run(ign):
3565             d = defer.succeed(None)
3566             for node in (self.sdmf_node, self.sdmf_max_shares_node):
3567                 d.addCallback(lambda ign: node.get_best_mutable_version())
3568                 d.addCallback(lambda mv:
3569                     mv.update(MutableData("appended"), len(self.small_data)))
3570                 d.addCallback(lambda ignored, node=node:
3571                     node.download_best_version())
3572                 d.addCallback(lambda results:
3573                     self.failUnlessEqual(results, new_data))
3574             return d
3575         d0.addCallback(_run)
3576         return d0
3577
3578     def test_replace_in_last_segment(self):
3579         # The wrapper should know how to handle the tail segment
3580         # appropriately.
3581         replace_offset = len(self.data) - 100
3582         new_data = self.data[:replace_offset] + "replaced"
3583         rest_offset = replace_offset + len("replaced")
3584         new_data += self.data[rest_offset:]
3585         d0 = self.do_upload_mdmf()
3586         def _run(ign):
3587             d = defer.succeed(None)
3588             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3589                 d.addCallback(lambda ign: node.get_best_mutable_version())
3590                 d.addCallback(lambda mv:
3591                     mv.update(MutableData("replaced"), replace_offset))
3592                 d.addCallback(lambda ignored, node=node:
3593                     node.download_best_version())
3594                 d.addCallback(lambda results:
3595                     self.failUnlessEqual(results, new_data))
3596             return d
3597         d0.addCallback(_run)
3598         return d0
3599
3600     def test_multiple_segment_replace(self):
3601         replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3602         new_data = self.data[:replace_offset]
3603         new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3604         new_data += 2 * new_segment
3605         new_data += "replaced"
3606         rest_offset = len(new_data)
3607         new_data += self.data[rest_offset:]
3608         d0 = self.do_upload_mdmf()
3609         def _run(ign):
3610             d = defer.succeed(None)
3611             for node in (self.mdmf_node, self.mdmf_max_shares_node):
3612                 d.addCallback(lambda ign: node.get_best_mutable_version())
3613                 d.addCallback(lambda mv:
3614                     mv.update(MutableData((2 * new_segment) + "replaced"),
3615                               replace_offset))
3616                 d.addCallback(lambda ignored, node=node:
3617                     node.download_best_version())
3618                 d.addCallback(lambda results:
3619                     self.failUnlessEqual(results, new_data))
3620             return d
3621         d0.addCallback(_run)
3622         return d0
3623
3624 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3625     sdmf_old_shares = {}
3626     sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3627     sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3628     sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3629     sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3630     sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3631     sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3632     sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3633     sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3634     sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3635     sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kM
DMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3636     sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3637     sdmf_old_contents = "This is a test file.\n"
3638     def copy_sdmf_shares(self):
3639         # We'll basically be short-circuiting the upload process.
3640         servernums = self.g.servers_by_number.keys()
3641         assert len(servernums) == 10
3642
3643         assignments = zip(self.sdmf_old_shares.keys(), servernums)
3644         # Get the storage index.
3645         cap = uri.from_string(self.sdmf_old_cap)
3646         si = cap.get_storage_index()
3647
3648         # Now execute each assignment by writing the storage.
3649         for (share, servernum) in assignments:
3650             sharedata = base64.b64decode(self.sdmf_old_shares[share])
3651             storedir = self.get_serverdir(servernum)
3652             storage_path = os.path.join(storedir, "shares",
3653                                         storage_index_to_dir(si))
3654             fileutil.make_dirs(storage_path)
3655             fileutil.write(os.path.join(storage_path, "%d" % share),
3656                            sharedata)
3657         # ...and verify that the shares are there.
3658         shares = self.find_uri_shares(self.sdmf_old_cap)
3659         assert len(shares) == 10
3660
3661     def test_new_downloader_can_read_old_shares(self):
3662         self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3663         self.set_up_grid()
3664         self.copy_sdmf_shares()
3665         nm = self.g.clients[0].nodemaker
3666         n = nm.create_from_cap(self.sdmf_old_cap)
3667         d = n.download_best_version()
3668         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
3669         return d
3670
class DifferentEncoding(unittest.TestCase):
    def setUp(self):
        # Keep a handle on the fake storage so tests could inspect it.
        self._storage = storage = FakeStorage()
        self.nodemaker = make_nodemaker(storage)

    def test_filenode(self):
        # create a file with 3-of-20, then modify it with a client configured
        # to do 3-of-10. #1510 tracks a failure here
        self.nodemaker.default_encoding_parameters["n"] = 20
        d = self.nodemaker.create_mutable_file("old contents")
        def _get_fresh_node(n):
            filecap = n.get_cap().to_string()
            del n # we want a new object, not the cached one
            self.nodemaker.default_encoding_parameters["n"] = 10
            return self.nodemaker.create_from_cap(filecap)
        d.addCallback(_get_fresh_node)
        def _replace_contents(old_contents, servermap, first_time):
            return "new contents"
        d.addCallback(lambda node: node.modify(_replace_contents))
        return d