3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from allmydata import uri, client
7 from allmydata.nodemaker import NodeMaker
8 from allmydata.util import base32, consumer, fileutil, mathutil
9 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
10 ssk_pubkey_fingerprint_hash
11 from allmydata.util.consumer import MemoryConsumer
12 from allmydata.util.deferredutil import gatherResults
13 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
14 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
15 from allmydata.monitor import Monitor
16 from allmydata.test.common import ShouldFailMixin
17 from allmydata.test.no_network import GridTestMixin
18 from foolscap.api import eventually, fireEventually
19 from foolscap.logging import log
20 from allmydata.storage_client import StorageFarmBroker
21 from allmydata.storage.common import storage_index_to_dir
22 from allmydata.scripts import debug
24 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
25 from allmydata.mutable.common import ResponseCache, \
26 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
27 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
28 NotEnoughServersError, CorruptShareError
29 from allmydata.mutable.retrieve import Retrieve
30 from allmydata.mutable.publish import Publish, MutableFileHandle, \
32 DEFAULT_MAX_SEGMENT_SIZE
33 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
34 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
35 from allmydata.mutable.repairer import MustForceRepairError
37 import allmydata.test.common_util as testutil
38 from allmydata.test.common import TEST_RSA_KEY_SIZE
39 from allmydata.test.test_download import PausingConsumer, \
40 PausingAndStoppingConsumer, StoppingConsumer, \
41 ImmediatelyStoppingConsumer
44 # this "FakeStorage" exists to put the share data in RAM and avoid using real
45 # network connections, both to speed up the tests and to reduce the amount of
46 # non-mutable.py code being exercised.
49 # this class replaces the collection of storage servers, allowing the
50 # tests to examine and manipulate the published shares. It also lets us
51 # control the order in which read queries are answered, to exercise more
# of the error-handling code in Retrieve.
54 # Note that we ignore the storage index: this FakeStorage instance can
55 # only be used for a single storage index.
60 # _sequence is used to cause the responses to occur in a specific
61 # order. If it is in use, then we will defer queries instead of
62 # answering them right away, accumulating the Deferreds in a dict. We
63 # don't know exactly how many queries we'll get, so exactly one
64 # second after the first query arrives, we will release them all (in
68 self._pending_timer = None
70 def read(self, peerid, storage_index):
71 shares = self._peers.get(peerid, {})
72 if self._sequence is None:
73 return defer.succeed(shares)
76 self._pending_timer = reactor.callLater(1.0, self._fire_readers)
77 if peerid not in self._pending:
78 self._pending[peerid] = []
79 self._pending[peerid].append( (d, shares) )
82 def _fire_readers(self):
83 self._pending_timer = None
84 pending = self._pending
86 for peerid in self._sequence:
88 for (d, shares) in pending.pop(peerid):
89 eventually(d.callback, shares)
90 for peerid in pending:
91 for (d, shares) in pending[peerid]:
92 eventually(d.callback, shares)
94 def write(self, peerid, storage_index, shnum, offset, data):
95 if peerid not in self._peers:
96 self._peers[peerid] = {}
97 shares = self._peers[peerid]
99 f.write(shares.get(shnum, ""))
102 shares[shnum] = f.getvalue()
105 class FakeStorageServer:
106 def __init__(self, peerid, storage):
108 self.storage = storage
110 def callRemote(self, methname, *args, **kwargs):
113 meth = getattr(self, methname)
114 return meth(*args, **kwargs)
116 d.addCallback(lambda res: _call())
119 def callRemoteOnly(self, methname, *args, **kwargs):
121 d = self.callRemote(methname, *args, **kwargs)
122 d.addBoth(lambda ignore: None)
125 def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
128 def slot_readv(self, storage_index, shnums, readv):
129 d = self.storage.read(self.peerid, storage_index)
133 if shnums and shnum not in shnums:
135 vector = response[shnum] = []
136 for (offset, length) in readv:
137 assert isinstance(offset, (int, long)), offset
138 assert isinstance(length, (int, long)), length
139 vector.append(shares[shnum][offset:offset+length])
144 def slot_testv_and_readv_and_writev(self, storage_index, secrets,
145 tw_vectors, read_vector):
146 # always-pass: parrot the test vectors back to them.
148 for shnum, (testv, writev, new_length) in tw_vectors.items():
149 for (offset, length, op, specimen) in testv:
150 assert op in ("le", "eq", "ge")
151 # TODO: this isn't right, the read is controlled by read_vector,
153 readv[shnum] = [ specimen
154 for (offset, length, op, specimen)
156 for (offset, data) in writev:
157 self.storage.write(self.peerid, storage_index, shnum,
159 answer = (True, readv)
160 return fireEventually(answer)
def flip_bit(original, byte_offset):
    # Return a copy of 'original' with the least-significant bit of the
    # byte at 'byte_offset' inverted. Used to corrupt share data in a
    # minimal, predictable way.
    flipped = chr(ord(original[byte_offset]) ^ 0x01)
    return original[:byte_offset] + flipped + original[byte_offset+1:]
def add_two(original, byte_offset):
    # Corrupt the byte at 'byte_offset' by XOR-ing it with 0x02.
    # Flipping the low bit is not enough for a version-number byte,
    # because 1 is itself a valid version number; XOR-ing with 0x02
    # changes a 0 or 1 version byte by two, guaranteeing an invalid
    # version.
    corrupted = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + corrupted + original[byte_offset+1:]
175 def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
176 # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
177 # list of shnums to corrupt.
179 for peerid in s._peers:
180 shares = s._peers[peerid]
182 if (shnums_to_corrupt is not None
183 and shnum not in shnums_to_corrupt):
186 # We're feeding the reader all of the share data, so it
187 # won't need to use the rref that we didn't provide, nor the
188 # storage index that we didn't provide. We do this because
189 # the reader will work for both MDMF and SDMF.
190 reader = MDMFSlotReadProxy(None, None, shnum, data)
191 # We need to get the offsets for the next part.
192 d = reader.get_verinfo()
193 def _do_corruption(verinfo, data, shnum, shares):
199 k, n, prefix, o) = verinfo
200 if isinstance(offset, tuple):
201 offset1, offset2 = offset
205 if offset1 == "pubkey" and IV:
208 real_offset = o[offset1]
210 real_offset = offset1
211 real_offset = int(real_offset) + offset2 + offset_offset
212 assert isinstance(real_offset, int), offset
213 if offset1 == 0: # verbyte
217 shares[shnum] = f(data, real_offset)
218 d.addCallback(_do_corruption, data, shnum, shares)
220 dl = defer.DeferredList(ds)
221 dl.addCallback(lambda ignored: res)
224 def make_storagebroker(s=None, num_peers=10):
227 peerids = [tagged_hash("peerid", "%d" % i)[:20]
228 for i in range(num_peers)]
229 storage_broker = StorageFarmBroker(None, True)
230 for peerid in peerids:
231 fss = FakeStorageServer(peerid, s)
232 storage_broker.test_add_rref(peerid, fss)
233 return storage_broker
235 def make_nodemaker(s=None, num_peers=10):
236 storage_broker = make_storagebroker(s, num_peers)
237 sh = client.SecretHolder("lease secret", "convergence secret")
238 keygen = client.KeyGenerator()
239 keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
240 nodemaker = NodeMaker(storage_broker, sh, None,
242 {"k": 3, "n": 10}, SDMF_VERSION, keygen)
245 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
246 # this used to be in Publish, but we removed the limit. Some of
247 # these tests test whether the new code correctly allows files
248 # larger than the limit.
249 OLD_MAX_SEGMENT_SIZE = 3500000
251 self._storage = s = FakeStorage()
252 self.nodemaker = make_nodemaker(s)
254 def test_create(self):
255 d = self.nodemaker.create_mutable_file()
257 self.failUnless(isinstance(n, MutableFileNode))
258 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
259 sb = self.nodemaker.storage_broker
260 peer0 = sorted(sb.get_all_serverids())[0]
261 shnums = self._storage._peers[peer0].keys()
262 self.failUnlessEqual(len(shnums), 1)
263 d.addCallback(_created)
267 def test_create_mdmf(self):
268 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
270 self.failUnless(isinstance(n, MutableFileNode))
271 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
272 sb = self.nodemaker.storage_broker
273 peer0 = sorted(sb.get_all_serverids())[0]
274 shnums = self._storage._peers[peer0].keys()
275 self.failUnlessEqual(len(shnums), 1)
276 d.addCallback(_created)
279 def test_single_share(self):
280 # Make sure that we tolerate publishing a single share.
281 self.nodemaker.default_encoding_parameters['k'] = 1
282 self.nodemaker.default_encoding_parameters['happy'] = 1
283 self.nodemaker.default_encoding_parameters['n'] = 1
284 d = defer.succeed(None)
285 for v in (SDMF_VERSION, MDMF_VERSION):
286 d.addCallback(lambda ignored, v=v:
287 self.nodemaker.create_mutable_file(version=v))
289 self.failUnless(isinstance(n, MutableFileNode))
292 d.addCallback(_created)
293 d.addCallback(lambda n:
294 n.overwrite(MutableData("Contents" * 50000)))
295 d.addCallback(lambda ignored:
296 self._node.download_best_version())
297 d.addCallback(lambda contents:
298 self.failUnlessEqual(contents, "Contents" * 50000))
301 def test_max_shares(self):
302 self.nodemaker.default_encoding_parameters['n'] = 255
303 d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
305 self.failUnless(isinstance(n, MutableFileNode))
306 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
307 sb = self.nodemaker.storage_broker
308 num_shares = sum([len(self._storage._peers[x].keys()) for x \
309 in sb.get_all_serverids()])
310 self.failUnlessEqual(num_shares, 255)
313 d.addCallback(_created)
314 # Now we upload some contents
315 d.addCallback(lambda n:
316 n.overwrite(MutableData("contents" * 50000)))
317 # ...then download contents
318 d.addCallback(lambda ignored:
319 self._node.download_best_version())
320 # ...and check to make sure everything went okay.
321 d.addCallback(lambda contents:
322 self.failUnlessEqual("contents" * 50000, contents))
325 def test_max_shares_mdmf(self):
326 # Test how files behave when there are 255 shares.
327 self.nodemaker.default_encoding_parameters['n'] = 255
328 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
330 self.failUnless(isinstance(n, MutableFileNode))
331 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
332 sb = self.nodemaker.storage_broker
333 num_shares = sum([len(self._storage._peers[x].keys()) for x \
334 in sb.get_all_serverids()])
335 self.failUnlessEqual(num_shares, 255)
338 d.addCallback(_created)
339 d.addCallback(lambda n:
340 n.overwrite(MutableData("contents" * 50000)))
341 d.addCallback(lambda ignored:
342 self._node.download_best_version())
343 d.addCallback(lambda contents:
344 self.failUnlessEqual(contents, "contents" * 50000))
347 def test_mdmf_filenode_cap(self):
348 # Test that an MDMF filenode, once created, returns an MDMF URI.
349 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
351 self.failUnless(isinstance(n, MutableFileNode))
353 self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
354 rcap = n.get_readcap()
355 self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
356 vcap = n.get_verify_cap()
357 self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
358 d.addCallback(_created)
362 def test_create_from_mdmf_writecap(self):
363 # Test that the nodemaker is capable of creating an MDMF
364 # filenode given an MDMF cap.
365 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
367 self.failUnless(isinstance(n, MutableFileNode))
369 self.failUnless(s.startswith("URI:MDMF"))
370 n2 = self.nodemaker.create_from_cap(s)
371 self.failUnless(isinstance(n2, MutableFileNode))
372 self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
373 self.failUnlessEqual(n.get_uri(), n2.get_uri())
374 d.addCallback(_created)
378 def test_create_from_mdmf_readcap(self):
379 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
381 self.failUnless(isinstance(n, MutableFileNode))
382 s = n.get_readonly_uri()
383 n2 = self.nodemaker.create_from_cap(s)
384 self.failUnless(isinstance(n2, MutableFileNode))
386 # Check that it's a readonly node
387 self.failUnless(n2.is_readonly())
388 d.addCallback(_created)
392 def test_internal_version_from_cap(self):
393 # MutableFileNodes and MutableFileVersions have an internal
394 # switch that tells them whether they're dealing with an SDMF or
395 # MDMF mutable file when they start doing stuff. We want to make
396 # sure that this is set appropriately given an MDMF cap.
397 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
399 self.uri = n.get_uri()
400 self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
402 n2 = self.nodemaker.create_from_cap(self.uri)
403 self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
404 d.addCallback(_created)
408 def test_serialize(self):
409 n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
411 def _callback(*args, **kwargs):
412 self.failUnlessEqual(args, (4,) )
413 self.failUnlessEqual(kwargs, {"foo": 5})
416 d = n._do_serialized(_callback, 4, foo=5)
417 def _check_callback(res):
418 self.failUnlessEqual(res, 6)
419 self.failUnlessEqual(calls, [1])
420 d.addCallback(_check_callback)
423 raise ValueError("heya")
424 d.addCallback(lambda res:
425 self.shouldFail(ValueError, "_check_errback", "heya",
426 n._do_serialized, _errback))
429 def test_upload_and_download(self):
430 d = self.nodemaker.create_mutable_file()
432 d = defer.succeed(None)
433 d.addCallback(lambda res: n.get_servermap(MODE_READ))
434 d.addCallback(lambda smap: smap.dump(StringIO()))
435 d.addCallback(lambda sio:
436 self.failUnless("3-of-10" in sio.getvalue()))
437 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
438 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
439 d.addCallback(lambda res: n.download_best_version())
440 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
441 d.addCallback(lambda res: n.get_size_of_best_version())
442 d.addCallback(lambda size:
443 self.failUnlessEqual(size, len("contents 1")))
444 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
445 d.addCallback(lambda res: n.download_best_version())
446 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
447 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
448 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
449 d.addCallback(lambda res: n.download_best_version())
450 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
451 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
452 d.addCallback(lambda smap:
453 n.download_version(smap,
454 smap.best_recoverable_version()))
455 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
456 # test a file that is large enough to overcome the
457 # mapupdate-to-retrieve data caching (i.e. make the shares larger
458 # than the default readsize, which is 2000 bytes). A 15kB file
459 # will have 5kB shares.
460 d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
461 d.addCallback(lambda res: n.download_best_version())
462 d.addCallback(lambda res:
463 self.failUnlessEqual(res, "large size file" * 1000))
465 d.addCallback(_created)
469 def test_upload_and_download_mdmf(self):
470 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
472 d = defer.succeed(None)
473 d.addCallback(lambda ignored:
474 n.get_servermap(MODE_READ))
475 def _then(servermap):
476 dumped = servermap.dump(StringIO())
477 self.failUnlessIn("3-of-10", dumped.getvalue())
479 # Now overwrite the contents with some new contents. We want
480 # to make them big enough to force the file to be uploaded
481 # in more than one segment.
482 big_contents = "contents1" * 100000 # about 900 KiB
483 big_contents_uploadable = MutableData(big_contents)
484 d.addCallback(lambda ignored:
485 n.overwrite(big_contents_uploadable))
486 d.addCallback(lambda ignored:
487 n.download_best_version())
488 d.addCallback(lambda data:
489 self.failUnlessEqual(data, big_contents))
490 # Overwrite the contents again with some new contents. As
491 # before, they need to be big enough to force multiple
492 # segments, so that we make the downloader deal with
494 bigger_contents = "contents2" * 1000000 # about 9MiB
495 bigger_contents_uploadable = MutableData(bigger_contents)
496 d.addCallback(lambda ignored:
497 n.overwrite(bigger_contents_uploadable))
498 d.addCallback(lambda ignored:
499 n.download_best_version())
500 d.addCallback(lambda data:
501 self.failUnlessEqual(data, bigger_contents))
503 d.addCallback(_created)
507 def test_retrieve_producer_mdmf(self):
508 # We should make sure that the retriever is able to pause and stop
510 data = "contents1" * 100000
511 d = self.nodemaker.create_mutable_file(MutableData(data),
512 version=MDMF_VERSION)
513 d.addCallback(lambda node: node.get_best_mutable_version())
514 d.addCallback(self._test_retrieve_producer, "MDMF", data)
517 # note: SDMF has only one big segment, so we can't use the usual
518 # after-the-first-write() trick to pause or stop the download.
519 # Disabled until we find a better approach.
520 def OFF_test_retrieve_producer_sdmf(self):
521 data = "contents1" * 100000
522 d = self.nodemaker.create_mutable_file(MutableData(data),
523 version=SDMF_VERSION)
524 d.addCallback(lambda node: node.get_best_mutable_version())
525 d.addCallback(self._test_retrieve_producer, "SDMF", data)
528 def _test_retrieve_producer(self, version, kind, data):
529 # Now we'll retrieve it into a pausing consumer.
530 c = PausingConsumer()
532 d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
534 c2 = PausingAndStoppingConsumer()
535 d.addCallback(lambda ign:
536 self.shouldFail(DownloadStopped, kind+"_pause_stop",
537 "our Consumer called stopProducing()",
540 c3 = StoppingConsumer()
541 d.addCallback(lambda ign:
542 self.shouldFail(DownloadStopped, kind+"_stop",
543 "our Consumer called stopProducing()",
546 c4 = ImmediatelyStoppingConsumer()
547 d.addCallback(lambda ign:
548 self.shouldFail(DownloadStopped, kind+"_stop_imm",
549 "our Consumer called stopProducing()",
553 c5 = MemoryConsumer()
554 d1 = version.read(c5)
555 c5.producer.stopProducing()
556 return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
557 "our Consumer called stopProducing()",
562 def test_download_from_mdmf_cap(self):
563 # We should be able to download an MDMF file given its cap
564 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
566 self.uri = node.get_uri()
567 # also confirm that the cap has no extension fields
568 pieces = self.uri.split(":")
569 self.failUnlessEqual(len(pieces), 4)
571 return node.overwrite(MutableData("contents1" * 100000))
573 node = self.nodemaker.create_from_cap(self.uri)
574 return node.download_best_version()
575 def _downloaded(data):
576 self.failUnlessEqual(data, "contents1" * 100000)
577 d.addCallback(_created)
579 d.addCallback(_downloaded)
583 def test_mdmf_write_count(self):
584 # Publishing an MDMF file should only cause one write for each
585 # share that is to be published. Otherwise, we introduce
586 # undesirable semantics that are a regression from SDMF
587 upload = MutableData("MDMF" * 100000) # about 400 KiB
588 d = self.nodemaker.create_mutable_file(upload,
589 version=MDMF_VERSION)
590 def _check_server_write_counts(ignored):
591 sb = self.nodemaker.storage_broker
592 for server in sb.servers.itervalues():
593 self.failUnlessEqual(server.get_rref().queries, 1)
594 d.addCallback(_check_server_write_counts)
598 def test_create_with_initial_contents(self):
599 upload1 = MutableData("contents 1")
600 d = self.nodemaker.create_mutable_file(upload1)
602 d = n.download_best_version()
603 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
604 upload2 = MutableData("contents 2")
605 d.addCallback(lambda res: n.overwrite(upload2))
606 d.addCallback(lambda res: n.download_best_version())
607 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
609 d.addCallback(_created)
613 def test_create_mdmf_with_initial_contents(self):
614 initial_contents = "foobarbaz" * 131072 # 900KiB
615 initial_contents_uploadable = MutableData(initial_contents)
616 d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
617 version=MDMF_VERSION)
619 d = n.download_best_version()
620 d.addCallback(lambda data:
621 self.failUnlessEqual(data, initial_contents))
622 uploadable2 = MutableData(initial_contents + "foobarbaz")
623 d.addCallback(lambda ignored:
624 n.overwrite(uploadable2))
625 d.addCallback(lambda ignored:
626 n.download_best_version())
627 d.addCallback(lambda data:
628 self.failUnlessEqual(data, initial_contents +
631 d.addCallback(_created)
635 def test_response_cache_memory_leak(self):
636 d = self.nodemaker.create_mutable_file("contents")
638 d = n.download_best_version()
639 d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
640 d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
642 def _check_cache(expected):
643 # The total size of cache entries should not increase on the second download;
644 # in fact the cache contents should be identical.
645 d2 = n.download_best_version()
646 d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
648 d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
650 d.addCallback(_created)
653 def test_create_with_initial_contents_function(self):
654 data = "initial contents"
655 def _make_contents(n):
656 self.failUnless(isinstance(n, MutableFileNode))
657 key = n.get_writekey()
658 self.failUnless(isinstance(key, str), key)
659 self.failUnlessEqual(len(key), 16) # AES key size
660 return MutableData(data)
661 d = self.nodemaker.create_mutable_file(_make_contents)
663 return n.download_best_version()
664 d.addCallback(_created)
665 d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
669 def test_create_mdmf_with_initial_contents_function(self):
670 data = "initial contents" * 100000
671 def _make_contents(n):
672 self.failUnless(isinstance(n, MutableFileNode))
673 key = n.get_writekey()
674 self.failUnless(isinstance(key, str), key)
675 self.failUnlessEqual(len(key), 16)
676 return MutableData(data)
677 d = self.nodemaker.create_mutable_file(_make_contents,
678 version=MDMF_VERSION)
679 d.addCallback(lambda n:
680 n.download_best_version())
681 d.addCallback(lambda data2:
682 self.failUnlessEqual(data2, data))
686 def test_create_with_too_large_contents(self):
687 BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
688 BIG_uploadable = MutableData(BIG)
689 d = self.nodemaker.create_mutable_file(BIG_uploadable)
691 other_BIG_uploadable = MutableData(BIG)
692 d = n.overwrite(other_BIG_uploadable)
694 d.addCallback(_created)
    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        # Assert that the best recoverable version of mutable node 'n'
        # currently has sequence number 'expected_seqnum'.  'which' is a
        # short label threaded into the failure message so the calling
        # test step can be identified.  verinfo[0] is the seqnum field of
        # the version-info tuple returned by best_recoverable_version().
        # NOTE(review): presumably this method returns 'd' so callers can
        # chain on the Deferred — confirm against the full file.
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
704 def test_modify(self):
705 def _modifier(old_contents, servermap, first_time):
706 new_contents = old_contents + "line2"
708 def _non_modifier(old_contents, servermap, first_time):
710 def _none_modifier(old_contents, servermap, first_time):
712 def _error_modifier(old_contents, servermap, first_time):
713 raise ValueError("oops")
714 def _toobig_modifier(old_contents, servermap, first_time):
715 new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
718 def _ucw_error_modifier(old_contents, servermap, first_time):
719 # simulate an UncoordinatedWriteError once
722 raise UncoordinatedWriteError("simulated")
723 new_contents = old_contents + "line3"
725 def _ucw_error_non_modifier(old_contents, servermap, first_time):
726 # simulate an UncoordinatedWriteError once, and don't actually
727 # modify the contents on subsequent invocations
730 raise UncoordinatedWriteError("simulated")
733 initial_contents = "line1"
734 d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
736 d = n.modify(_modifier)
737 d.addCallback(lambda res: n.download_best_version())
738 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
739 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
741 d.addCallback(lambda res: n.modify(_non_modifier))
742 d.addCallback(lambda res: n.download_best_version())
743 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
744 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
746 d.addCallback(lambda res: n.modify(_none_modifier))
747 d.addCallback(lambda res: n.download_best_version())
748 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
749 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
751 d.addCallback(lambda res:
752 self.shouldFail(ValueError, "error_modifier", None,
753 n.modify, _error_modifier))
754 d.addCallback(lambda res: n.download_best_version())
755 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
756 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
759 d.addCallback(lambda res: n.download_best_version())
760 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
761 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
763 d.addCallback(lambda res: n.modify(_ucw_error_modifier))
764 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
765 d.addCallback(lambda res: n.download_best_version())
766 d.addCallback(lambda res: self.failUnlessEqual(res,
768 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
770 def _reset_ucw_error_modifier(res):
773 d.addCallback(_reset_ucw_error_modifier)
775 # in practice, this n.modify call should publish twice: the first
776 # one gets a UCWE, the second does not. But our test jig (in
777 # which the modifier raises the UCWE) skips over the first one,
778 # so in this test there will be only one publish, and the seqnum
779 # will only be one larger than the previous test, not two (i.e. 4
781 d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
782 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
783 d.addCallback(lambda res: n.download_best_version())
784 d.addCallback(lambda res: self.failUnlessEqual(res,
786 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
787 d.addCallback(lambda res: n.modify(_toobig_modifier))
789 d.addCallback(_created)
793 def test_modify_backoffer(self):
794 def _modifier(old_contents, servermap, first_time):
795 return old_contents + "line2"
797 def _ucw_error_modifier(old_contents, servermap, first_time):
798 # simulate an UncoordinatedWriteError once
801 raise UncoordinatedWriteError("simulated")
802 return old_contents + "line3"
803 def _always_ucw_error_modifier(old_contents, servermap, first_time):
804 raise UncoordinatedWriteError("simulated")
805 def _backoff_stopper(node, f):
807 def _backoff_pauser(node, f):
809 reactor.callLater(0.5, d.callback, None)
812 # the give-up-er will hit its maximum retry count quickly
813 giveuper = BackoffAgent()
814 giveuper._delay = 0.1
817 d = self.nodemaker.create_mutable_file(MutableData("line1"))
819 d = n.modify(_modifier)
820 d.addCallback(lambda res: n.download_best_version())
821 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
822 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
824 d.addCallback(lambda res:
825 self.shouldFail(UncoordinatedWriteError,
826 "_backoff_stopper", None,
827 n.modify, _ucw_error_modifier,
829 d.addCallback(lambda res: n.download_best_version())
830 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
831 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
833 def _reset_ucw_error_modifier(res):
836 d.addCallback(_reset_ucw_error_modifier)
837 d.addCallback(lambda res: n.modify(_ucw_error_modifier,
839 d.addCallback(lambda res: n.download_best_version())
840 d.addCallback(lambda res: self.failUnlessEqual(res,
842 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
844 d.addCallback(lambda res:
845 self.shouldFail(UncoordinatedWriteError,
847 n.modify, _always_ucw_error_modifier,
849 d.addCallback(lambda res: n.download_best_version())
850 d.addCallback(lambda res: self.failUnlessEqual(res,
852 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
855 d.addCallback(_created)
858 def test_upload_and_download_full_size_keys(self):
859 self.nodemaker.key_generator = client.KeyGenerator()
860 d = self.nodemaker.create_mutable_file()
862 d = defer.succeed(None)
863 d.addCallback(lambda res: n.get_servermap(MODE_READ))
864 d.addCallback(lambda smap: smap.dump(StringIO()))
865 d.addCallback(lambda sio:
866 self.failUnless("3-of-10" in sio.getvalue()))
867 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
868 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
869 d.addCallback(lambda res: n.download_best_version())
870 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
871 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
872 d.addCallback(lambda res: n.download_best_version())
873 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
874 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
875 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
876 d.addCallback(lambda res: n.download_best_version())
877 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
878 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
879 d.addCallback(lambda smap:
880 n.download_version(smap,
881 smap.best_recoverable_version()))
882 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
884 d.addCallback(_created)
888 def test_size_after_servermap_update(self):
889 # a mutable file node should have something to say about how big
890 # it is after a servermap update is performed, since this tells
891 # us how large the best version of that mutable file is.
892 d = self.nodemaker.create_mutable_file()
895 return n.get_servermap(MODE_READ)
896 d.addCallback(_created)
897 d.addCallback(lambda ignored:
898 self.failUnlessEqual(self.n.get_size(), 0))
899 d.addCallback(lambda ignored:
900 self.n.overwrite(MutableData("foobarbaz")))
901 d.addCallback(lambda ignored:
902 self.failUnlessEqual(self.n.get_size(), 9))
903 d.addCallback(lambda ignored:
904 self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
905 d.addCallback(_created)
906 d.addCallback(lambda ignored:
907 self.failUnlessEqual(self.n.get_size(), 9))
912 def publish_one(self):
913 # publish a file and create shares, which can then be manipulated
915 self.CONTENTS = "New contents go here" * 1000
916 self.uploadable = MutableData(self.CONTENTS)
917 self._storage = FakeStorage()
918 self._nodemaker = make_nodemaker(self._storage)
919 self._storage_broker = self._nodemaker.storage_broker
920 d = self._nodemaker.create_mutable_file(self.uploadable)
923 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
924 d.addCallback(_created)
927 def publish_mdmf(self):
928 # like publish_one, except that the result is guaranteed to be
930 # self.CONTENTS should have more than one segment.
931 self.CONTENTS = "This is an MDMF file" * 100000
932 self.uploadable = MutableData(self.CONTENTS)
933 self._storage = FakeStorage()
934 self._nodemaker = make_nodemaker(self._storage)
935 self._storage_broker = self._nodemaker.storage_broker
936 d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
939 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
940 d.addCallback(_created)
def publish_sdmf(self):
    # like publish_one, except that the result is guaranteed to be
    # an SDMF file.
    self.CONTENTS = "This is an SDMF file" * 1000
    self.uploadable = MutableData(self.CONTENTS)
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage)
    self._storage_broker = self._nodemaker.storage_broker
    d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
    # NOTE(review): the '_created' callback header (binding self._fn)
    # appears elided in this copy.
    self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
    d.addCallback(_created)
    # NOTE(review): 'return d' not visible -- likely elided.
def publish_multiple(self, version=0):
    # Publish five successive versions of one mutable file, snapshotting
    # the shares after each publish so tests can mix versions later.
    self.CONTENTS = ["Contents 0",
    # NOTE(review): the remaining four content strings and the closing
    # bracket appear elided from this copy.
    self.uploadables = [MutableData(d) for d in self.CONTENTS]
    self._copied_shares = {}
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage)
    d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
    # NOTE(review): the '_created(node)' callback header (binding
    # self._fn) appears elided in this copy.
    # now create multiple versions of the same file, and accumulate
    # their shares, so we can mix and match them later.
    d = defer.succeed(None)
    d.addCallback(self._copy_shares, 0)
    d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
    d.addCallback(self._copy_shares, 1)
    d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
    d.addCallback(self._copy_shares, 2)
    d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
    d.addCallback(self._copy_shares, 3)
    # now we replace all the shares with version s3, and upload a new
    # version to get s4b.
    rollback = dict([(i,2) for i in range(10)])
    d.addCallback(lambda res: self._set_versions(rollback))
    d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
    d.addCallback(self._copy_shares, 4)
    # we leave the storage in state 4
    # NOTE(review): 'return d' appears elided inside the callback here.
    d.addCallback(_created)
def _copy_shares(self, ignored, index):
    """Snapshot the current share state into self._copied_shares[index].

    'ignored' makes this usable directly as a Deferred callback. The
    snapshot is copied two levels deep (new peer dict, new shnum dict)
    so later mutations of the live share dicts do not affect it; the
    share data strings themselves are immutable and safely shared.
    """
    shares = self._storage._peers
    # fix: 'new_shares' was referenced below without ever being
    # initialized (NameError); we need a (two-level deep) copy
    new_shares = {}
    for peerid in shares:
        new_shares[peerid] = {}
        for shnum in shares[peerid]:
            new_shares[peerid][shnum] = shares[peerid][shnum]
    self._copied_shares[index] = new_shares
def _set_versions(self, versionmap):
    """Roll selected shares to previously snapshotted versions.

    versionmap maps shnum -> version index (0..4) into
    self._copied_shares; any shnum absent from the map keeps its
    current contents.
    """
    live = self._storage._peers
    snapshots = self._copied_shares
    for peerid, shdict in live.items():
        for shnum in shdict:
            if shnum in versionmap:
                shdict[shnum] = snapshots[versionmap[shnum]][peerid][shnum]
1017 class Servermap(unittest.TestCase, PublishMixin):
1019 return self.publish_one()
def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
    # NOTE(review): the rest of the signature (likely an
    # 'update_range=None' parameter) and the default-filling guards
    # ('if fn is None: fn = self._fn', same for sb) appear elided.
    sb = self._storage_broker
    smu = ServermapUpdater(fn, sb, Monitor(),
                           ServerMap(), mode, update_range=update_range)
    # NOTE(review): 'd = smu.update(); return d' not visible -- elided.
def update_servermap(self, oldmap, mode=MODE_CHECK):
    # Re-run a ServermapUpdater over an existing map in the given mode.
    smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
    # NOTE(review): the remaining call arguments (oldmap, mode) and the
    # 'd = smu.update(); return d' tail appear elided in this copy.
def failUnlessOneRecoverable(self, sm, num_shares):
    """Assert that servermap 'sm' shows exactly one recoverable version
    backed by 'num_shares' shares (of a 3-of-10 encoding), then return
    'sm' so callers can keep chaining off the Deferred."""
    self.failUnlessEqual(len(sm.recoverable_versions()), 1)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
    best = sm.best_recoverable_version()
    self.failIfEqual(best, None)
    self.failUnlessEqual(sm.recoverable_versions(), set([best]))
    self.failUnlessEqual(len(sm.shares_available()), 1)
    self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
    shnum, servers = sm.make_sharemap().items()[0]
    server = list(servers)[0]
    self.failUnlessEqual(sm.version_on_server(server, shnum), best)
    self.failUnlessEqual(sm.version_on_server(server, 666), None)
    # fix: return the map -- test_basic chains further updates off this
    # callback's result (e.g. 'lambda sm: us(sm, mode=MODE_READ)')
    return sm
def test_basic(self):
    # Exercise servermap creation in every mode, then re-use and update
    # the same map; each mode stops querying at a different share count.
    d = defer.succeed(None)
    ms = self.make_servermap
    us = self.update_servermap
    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    # this mode stops at k+epsilon, and epsilon=k, so 6 shares
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    # this mode stops at 'k' shares
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
    # and can we re-use the same servermap? Note that these are sorted in
    # increasing order of number of servers queried, since once a server
    # gets into the servermap, we'll always ask it for an update.
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
    d.addCallback(lambda sm: us(sm, mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    # NOTE(review): 'return d' not visible -- likely elided; trial must
    # receive the Deferred. Confirm upstream.
def test_fetch_privkey(self):
    # A MODE_WRITE mapupdate should fetch the encrypted privkey.
    d = defer.succeed(None)
    # use the sibling filenode (which hasn't been used yet), and make
    # sure it can fetch the privkey. The file is small, so the privkey
    # will be fetched on the first (query) pass.
    d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    # create a new file, which is large enough to knock the privkey out
    # of the early part of the file
    LARGE = "These are Larger contents" * 200 # about 5KB
    LARGE_uploadable = MutableData(LARGE)
    d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
    def _created(large_fn):
        large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
        return self.make_servermap(MODE_WRITE, large_fn2)
    d.addCallback(_created)
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    # NOTE(review): 'return d' not visible -- likely elided.
def test_mark_bad(self):
    # Marking shares bad should drop them from the map, and a MODE_WRITE
    # update should then find replacement shares.
    d = defer.succeed(None)
    ms = self.make_servermap
    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    # NOTE(review): a 'def _made_map(sm):' callback header appears
    # elided here; the following lines are its body. Confirm upstream.
    v = sm.best_recoverable_version()
    vm = sm.make_versionmap()
    shares = list(vm[v])
    self.failUnlessEqual(len(shares), 6)
    self._corrupted = set()
    # mark the first 5 shares as corrupt, then update the servermap.
    # The map should not have the marked shares it in any more, and
    # new shares should be found to replace the missing ones.
    for (shnum, server, timestamp) in shares:
        # NOTE(review): the guard limiting this to the first 5 shares
        # (likely 'if shnum < 5:') appears elided here.
        self._corrupted.add( (server, shnum) )
        sm.mark_bad_share(server, shnum, "")
    return self.update_servermap(sm, MODE_WRITE)
    d.addCallback(_made_map)
    # NOTE(review): a 'def _check_map(sm):' callback header appears
    # elided here; the following lines are its body.
    # this should find all 5 shares that weren't marked bad
    v = sm.best_recoverable_version()
    vm = sm.make_versionmap()
    shares = list(vm[v])
    for (server, shnum) in self._corrupted:
        server_shares = sm.debug_shares_on_server(server)
        self.failIf(shnum in server_shares,
                    "%d was in %s" % (shnum, server_shares))
    self.failUnlessEqual(len(shares), 5)
    d.addCallback(_check_map)
    # NOTE(review): 'return d' not visible -- likely elided.
def failUnlessNoneRecoverable(self, sm):
    # Assert the servermap reports no versions at all: nothing
    # recoverable, nothing unrecoverable, no best version, no shares.
    self.failUnlessEqual(len(sm.recoverable_versions()), 0)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
    self.failUnlessEqual(sm.best_recoverable_version(), None)
    self.failUnlessEqual(len(sm.shares_available()), 0)
def test_no_shares(self):
    """With every share deleted, all mapupdate modes must report nothing
    recoverable (and nothing unrecoverable either)."""
    self._storage._peers = {} # delete all shares
    ms = self.make_servermap
    d = defer.succeed(None)
    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
    # fix: return the Deferred so trial waits for the callback chain
    return d
def failUnlessNotQuiteEnough(self, sm):
    """Assert 'sm' shows one unrecoverable version with only 2 of the
    required 3 (of 10) shares available, then return 'sm'."""
    self.failUnlessEqual(len(sm.recoverable_versions()), 0)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
    best = sm.best_recoverable_version()
    self.failUnlessEqual(best, None)
    self.failUnlessEqual(len(sm.shares_available()), 1)
    self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
    # fix: return the map -- test_not_quite_enough_shares chains
    # 'lambda sm: self.failUnlessEqual(len(sm.make_sharemap()), 2)'
    # off this callback's result
    return sm
def test_not_quite_enough_shares(self):
    # Delete shares until only 2 remain (k=3), so every mode reports
    # one unrecoverable version and nothing recoverable.
    # NOTE(review): 's = self._storage' appears elided in this copy;
    # 's' is used undefined below. Confirm upstream.
    ms = self.make_servermap
    num_shares = len(s._peers)
    for peerid in s._peers:
        s._peers[peerid] = {}
    # NOTE(review): the code that re-installs exactly two shares
    # appears elided here. Confirm against upstream.
    # now there ought to be only two shares left
    assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
    d = defer.succeed(None)
    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda sm:
                  self.failUnlessEqual(len(sm.make_sharemap()), 2))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    # NOTE(review): 'return d' not visible -- likely elided.
def test_servermapupdater_finds_mdmf_files(self):
    # setUp already published an MDMF file for us. We just need to
    # make sure that when we run the ServermapUpdater, the file is
    # reported to have one recoverable version.
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_mdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_CHECK))
    # Calling make_servermap also updates the servermap in the mode
    # that we specify, so we just need to see what it says.
    def _check_servermap(sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
    d.addCallback(_check_servermap)
    # NOTE(review): 'return d' not visible -- likely elided.
def test_fetch_update(self):
    # A MODE_WRITE mapupdate with an update_range should collect
    # update-in-place data (one entry per share) for that range.
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_mdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
    def _check_servermap(sm):
        # NOTE(review): a line appears elided before this assertion.
        self.failUnlessEqual(len(sm.update_data), 10)
        # NOTE(review): a line appears elided here too; also note
        # itervalues() is Python-2-only.
        for data in sm.update_data.itervalues():
            self.failUnlessEqual(len(data), 1)
    d.addCallback(_check_servermap)
    # NOTE(review): 'return d' not visible -- likely elided.
def test_servermapupdater_finds_sdmf_files(self):
    # the updater must likewise report one recoverable version for a
    # freshly published SDMF file
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_sdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_CHECK))
    d.addCallback(lambda servermap:
        self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
    # NOTE(review): 'return d' not visible -- likely elided.
1245 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1247 return self.publish_one()
def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
    # Build (or refresh) a ServerMap for self._fn in the given mode.
    # NOTE(review): the 'if oldmap is None:' guard appears elided.
    oldmap = ServerMap()
    # NOTE(review): the 'if sb is None:' guard appears elided.
    sb = self._storage_broker
    smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
    # NOTE(review): 'd = smu.update(); return d' not visible -- elided.
def abbrev_verinfo(self, verinfo):
    # Render a verinfo tuple as a short 'seqnum-roothashprefix' string
    # for debug output.
    # NOTE(review): an 'if verinfo is None: return None' guard appears
    # to have been elided here (dump_servermap may pass None). Confirm.
    (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
     offsets_tuple) = verinfo
    return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
def abbrev_verinfo_dict(self, verinfo_d):
    """Return a copy of 'verinfo_d' keyed by short 'seqnum-roothash'
    strings instead of full verinfo tuples, for readable debug dumps."""
    # fix: 'output' was referenced below without ever being initialized
    output = {}
    for verinfo, value in verinfo_d.items():
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = verinfo
        output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
    # fix: return the abbreviated dict -- dump_servermap prints it
    return output
def dump_servermap(self, servermap):
    # Debug helper (Python 2 print statements): summarize a servermap's
    # recoverable versions, best version, and share availability.
    print "SERVERMAP", servermap
    print "RECOVERABLE", [self.abbrev_verinfo(v)
                          for v in servermap.recoverable_versions()]
    print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
    print "available", self.abbrev_verinfo_dict(servermap.shares_available())
def do_download(self, servermap, version=None):
    # Retrieve the given version (default: best recoverable) and fire
    # with the plaintext contents as a single string.
    # NOTE(review): the 'if version is None:' guard appears elided.
    version = servermap.best_recoverable_version()
    r = Retrieve(self._fn, self._storage_broker, servermap, version)
    c = consumer.MemoryConsumer()
    d = r.download(consumer=c)
    d.addCallback(lambda mc: "".join(mc.chunks))
    # NOTE(review): 'return d' not visible -- likely elided.
def test_basic(self):
    # Download once, then re-use the servermap with and without
    # updating it; finally force a pubkey re-fetch.
    d = self.make_servermap()
    def _do_retrieve(servermap):
        self._smap = servermap
        #self.dump_servermap(servermap)
        self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
        return self.do_download(servermap)
    d.addCallback(_do_retrieve)
    def _retrieved(new_contents):
        self.failUnlessEqual(new_contents, self.CONTENTS)
    d.addCallback(_retrieved)
    # we should be able to re-use the same servermap, both with and
    # without updating it.
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    # clobbering the pubkey should make the servermap updater re-fetch it
    def _clobber_pubkey(res):
        self._fn._pubkey = None
    d.addCallback(_clobber_pubkey)
    d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    # NOTE(review): 'return d' not visible -- likely elided.
def test_all_shares_vanished(self):
    # Deleting every share between mapupdate and retrieve should
    # produce NotEnoughSharesError.
    d = self.make_servermap()
    def _remove_shares(servermap):
        for shares in self._storage._peers.values():
            # NOTE(review): the statement emptying 'shares' (likely
            # 'shares.clear()') appears elided here.
        d1 = self.shouldFail(NotEnoughSharesError,
                             "test_all_shares_vanished",
                             "ran out of servers",
                             self.do_download, servermap)
        # NOTE(review): 'return d1' appears elided here.
    d.addCallback(_remove_shares)
    # NOTE(review): 'return d' not visible -- likely elided.
def test_no_servers(self):
    sb2 = make_storagebroker(num_peers=0)
    # if there are no servers, then a MODE_READ servermap should come
    # back empty
    d = self.make_servermap(sb=sb2)
    def _check_servermap(servermap):
        self.failUnlessEqual(servermap.best_recoverable_version(), None)
        self.failIf(servermap.recoverable_versions())
        self.failIf(servermap.unrecoverable_versions())
        self.failIf(servermap.all_servers())
    d.addCallback(_check_servermap)
    # NOTE(review): 'return d' not visible -- likely elided.
def test_no_servers_download(self):
    # With no servers we should get UnrecoverableFileError, and a later
    # download (once servers are restored) must still work (see #463).
    sb2 = make_storagebroker(num_peers=0)
    self._fn._storage_broker = sb2
    d = self.shouldFail(UnrecoverableFileError,
                        "test_no_servers_download",
                        "no recoverable versions",
                        self._fn.download_best_version)
    # NOTE(review): a 'def _restore(res):' callback header appears
    # elided here; the following lines are its body.
    # a failed download that occurs while we aren't connected to
    # anybody should not prevent a subsequent download from working.
    # This isn't quite the webapi-driven test that #463 wants, but it
    # should be close enough.
    self._fn._storage_broker = self._storage_broker
    return self._fn.download_best_version()
    def _retrieved(new_contents):
        self.failUnlessEqual(new_contents, self.CONTENTS)
    d.addCallback(_restore)
    d.addCallback(_retrieved)
    # NOTE(review): 'return d' not visible -- likely elided.
def _test_corrupt_all(self, offset, substring,
                      should_succeed=False,
                      failure_checker=None,
                      fetch_privkey=False):
    # Corrupt the given offset in every share (before or after the
    # mapupdate), then retrieve; check either success or the expected
    # failure substring.
    # NOTE(review): a 'corrupt_early=True' keyword parameter is used
    # below but its declaration appears elided from this signature.
    d = defer.succeed(None)
    # NOTE(review): an 'if corrupt_early:' guard appears elided here.
    d.addCallback(corrupt, self._storage, offset)
    d.addCallback(lambda res: self.make_servermap())
    if not corrupt_early:
        d.addCallback(corrupt, self._storage, offset)
    def _do_retrieve(servermap):
        ver = servermap.best_recoverable_version()
        if ver is None and not should_succeed:
            # no recoverable versions == not succeeding. The problem
            # should be noted in the servermap's list of problems.
            # NOTE(review): an early 'return' plus surrounding code
            # appears elided around here.
            allproblems = [str(f) for f in servermap.get_problems()]
            self.failUnlessIn(substring, "".join(allproblems))
        # NOTE(review): the success/failure branch structure ('if
        # should_succeed:' / 'else:') and several call arguments
        # appear elided below. Confirm against upstream.
        d1 = self._fn.download_version(servermap, ver,
        d1.addCallback(lambda new_contents:
            self.failUnlessEqual(new_contents, self.CONTENTS))
        d1 = self.shouldFail(NotEnoughSharesError,
                             "_corrupt_all(offset=%s)" % (offset,),
                             self._fn.download_version, servermap,
        d1.addCallback(failure_checker)
        d1.addCallback(lambda res: servermap)
        # NOTE(review): 'return d1' appears elided here.
    d.addCallback(_do_retrieve)
    # NOTE(review): 'return d' not visible -- likely elided.
def test_corrupt_all_verbyte(self):
    # when the version byte is not 0 or 1, we hit an UnknownVersionError
    # error in unpack_share().
    d = self._test_corrupt_all(0, "UnknownVersionError")
    def _check_servermap(servermap):
        # and the dump should mention the problems
        # NOTE(review): 's = StringIO()' appears elided here.
        dump = servermap.dump(s).getvalue()
        self.failUnless("30 PROBLEMS" in dump, dump)
    d.addCallback(_check_servermap)
    # NOTE(review): 'return d' not visible -- likely elided.
# Each of the following corrupts one header field in every share; any
# header corruption invalidates the signature over the share prefix.
def test_corrupt_all_seqnum(self):
    # a corrupt sequence number will trigger a bad signature
    return self._test_corrupt_all(1, "signature is invalid")

def test_corrupt_all_R(self):
    # a corrupt root hash will trigger a bad signature
    return self._test_corrupt_all(9, "signature is invalid")

def test_corrupt_all_IV(self):
    # a corrupt salt/IV will trigger a bad signature
    return self._test_corrupt_all(41, "signature is invalid")

def test_corrupt_all_k(self):
    # a corrupt 'k' will trigger a bad signature
    return self._test_corrupt_all(57, "signature is invalid")

def test_corrupt_all_N(self):
    # a corrupt 'N' will trigger a bad signature
    return self._test_corrupt_all(58, "signature is invalid")

def test_corrupt_all_segsize(self):
    # a corrupt segsize will trigger a bad signature
    return self._test_corrupt_all(59, "signature is invalid")

def test_corrupt_all_datalen(self):
    # a corrupt data length will trigger a bad signature
    return self._test_corrupt_all(67, "signature is invalid")
def test_corrupt_all_pubkey(self):
    # a corrupt pubkey won't match the URI's fingerprint. We need to
    # remove the pubkey from the filenode, or else it won't bother trying
    # to fetch it from the shares at all.
    self._fn._pubkey = None
    return self._test_corrupt_all("pubkey",
                                  "pubkey doesn't match fingerprint")

def test_corrupt_all_sig(self):
    # a corrupt signature is a bad one
    # the signature runs from about [543:799], depending upon the length
    # of the pubkey
    return self._test_corrupt_all("signature", "signature is invalid")
def test_corrupt_all_share_hash_chain_number(self):
    # a corrupt share hash chain entry will show up as a bad hash. If we
    # mangle the first byte, that will look like a bad hash number,
    # causing an IndexError
    return self._test_corrupt_all("share_hash_chain", "corrupt hashes")

def test_corrupt_all_share_hash_chain_hash(self):
    # a corrupt share hash chain entry will show up as a bad hash. If we
    # mangle a few bytes in, that will look like a bad hash.
    return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")

def test_corrupt_all_block_hash_tree(self):
    # a corrupt block hash tree fails block validation
    return self._test_corrupt_all("block_hash_tree",
                                  "block hash tree failure")

def test_corrupt_all_block(self):
    # corrupt share data no longer matches the block hash tree
    return self._test_corrupt_all("share_data", "block hash tree failure")
def test_corrupt_all_encprivkey(self):
    # a corrupted privkey won't even be noticed by the reader, only by a
    # writer
    return self._test_corrupt_all("enc_privkey", None, should_succeed=True)

def test_corrupt_all_encprivkey_late(self):
    # this should work for the same reason as above, but we corrupt
    # after the servermap update to exercise the error handling
    # code
    # We need to remove the privkey from the node, or the retrieve
    # process won't know to update it.
    self._fn._privkey = None
    return self._test_corrupt_all("enc_privkey",
                                  None, # this shouldn't fail
                                  should_succeed=True,
                                  corrupt_early=False,
                                  # NOTE(review): a trailing keyword
                                  # (likely fetch_privkey=True) and the
                                  # closing paren appear elided here.
# disabled until retrieve tests checkstring on each blockfetch. I didn't
# just use a .todo because the failing-but-ignored test emits about 30kB
# of noise per run.
def OFF_test_corrupt_all_seqnum_late(self):
    # corrupting the seqnum between mapupdate and retrieve should result
    # in NotEnoughSharesError, since each share will look invalid
    # NOTE(review): a 'def _check(f):' header appears elided before the
    # next two assertions, which are its body.
    self.failUnless(f.check(NotEnoughSharesError))
    self.failUnless("uncoordinated write" in str(f))
    return self._test_corrupt_all(1, "ran out of servers",
                                  corrupt_early=False,
                                  failure_checker=_check)
def test_corrupt_all_block_hash_tree_late(self):
    # late block-hash-tree corruption should exhaust all shares
    # NOTE(review): a 'def _check(f):' header appears elided here.
    self.failUnless(f.check(NotEnoughSharesError))
    return self._test_corrupt_all("block_hash_tree",
                                  "block hash tree failure",
                                  corrupt_early=False,
                                  failure_checker=_check)

def test_corrupt_all_block_late(self):
    # late share-data corruption should likewise exhaust all shares
    # NOTE(review): a 'def _check(f):' header appears elided here.
    self.failUnless(f.check(NotEnoughSharesError))
    return self._test_corrupt_all("share_data", "block hash tree failure",
                                  corrupt_early=False,
                                  failure_checker=_check)
def test_basic_pubkey_at_end(self):
    # we corrupt the pubkey in all but the last 'k' shares, allowing the
    # download to succeed but forcing a bunch of retries first. Note that
    # this is rather pessimistic: our Retrieve process will throw away
    # the whole share if the pubkey is bad, even though the rest of the
    # share might be good.
    self._fn._pubkey = None
    k = self._fn.get_required_shares()
    N = self._fn.get_total_shares()
    d = defer.succeed(None)
    d.addCallback(corrupt, self._storage, "pubkey",
                  shnums_to_corrupt=range(0, N-k))
    d.addCallback(lambda res: self.make_servermap())
    def _do_retrieve(servermap):
        self.failUnless(servermap.get_problems())
        self.failUnless("pubkey doesn't match fingerprint"
                        in str(servermap.get_problems()[0]))
        ver = servermap.best_recoverable_version()
        r = Retrieve(self._fn, self._storage_broker, servermap, ver)
        c = consumer.MemoryConsumer()
        return r.download(c)
    d.addCallback(_do_retrieve)
    d.addCallback(lambda mc: "".join(mc.chunks))
    d.addCallback(lambda new_contents:
        self.failUnlessEqual(new_contents, self.CONTENTS))
    # NOTE(review): 'return d' not visible -- likely elided.
def _test_corrupt_some(self, offset, mdmf=False):
    # Corrupt only the first five shares; the download must fall back
    # to the remaining good shares and still succeed.
    # NOTE(review): an 'if mdmf:' guard appears elided here.
    d = self.publish_mdmf()
    # NOTE(review): the 'else:' branch header ('d = ...publish SDMF')
    # appears elided here.
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, offset, range(5)))
    d.addCallback(lambda ignored:
        self.make_servermap())
    def _do_retrieve(servermap):
        ver = servermap.best_recoverable_version()
        self.failUnless(ver)
        return self._fn.download_best_version()
    d.addCallback(_do_retrieve)
    d.addCallback(lambda new_contents:
        self.failUnlessEqual(new_contents, self.CONTENTS))
    # NOTE(review): 'return d' not visible -- likely elided.
def test_corrupt_some(self):
    # corrupt the data of first five shares (so the servermap thinks
    # they're good but retrieve marks them as bad), so that the
    # MODE_READ set of 6 will be insufficient, forcing node.download to
    # retry with more servers.
    return self._test_corrupt_some("share_data")
def test_download_fails(self):
    # with every signature corrupted, no version is recoverable at all
    d = corrupt(None, self._storage, "signature")
    d.addCallback(lambda ignored:
        self.shouldFail(UnrecoverableFileError, "test_download_anyway",
                        "no recoverable versions",
                        self._fn.download_best_version))
    # NOTE(review): 'return d' not visible -- likely elided.
def test_corrupt_mdmf_block_hash_tree(self):
    # MDMF block-hash-tree corruption (past the first segment) fails
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._test_corrupt_all(("block_hash_tree", 12 * 32),
                               "block hash tree failure",
                               corrupt_early=False,
                               should_succeed=False))
    # NOTE(review): 'return d' not visible -- likely elided.

def test_corrupt_mdmf_block_hash_tree_late(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._test_corrupt_all(("block_hash_tree", 12 * 32),
                               "block hash tree failure",
                               # NOTE(review): a 'corrupt_early=...'
                               # argument appears elided here.
                               should_succeed=False))
    # NOTE(review): 'return d' not visible -- likely elided.

def test_corrupt_mdmf_share_data(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        # TODO: Find out what the block size is and corrupt a
        # specific block, rather than just guessing.
        self._test_corrupt_all(("share_data", 12 * 40),
                               "block hash tree failure",
                               # NOTE(review): an argument appears
                               # elided here.
                               should_succeed=False))
    # NOTE(review): 'return d' not visible -- likely elided.

def test_corrupt_some_mdmf(self):
    return self._test_corrupt_some(("share_data", 12 * 40),
                                   # NOTE(review): the closing
                                   # 'mdmf=True)' appears elided here.
def check_good(self, r, where):
    """Assert check results 'r' report a healthy file; 'where' labels
    any failure. Returns 'r' so later callbacks can inspect it."""
    self.failUnless(r.is_healthy(), where)
    # pass the results down the Deferred chain for further checks
    return r
def check_bad(self, r, where):
    """Assert check results 'r' report an UNhealthy file.

    Returns 'r': callers chain check_expected_failure() off this
    callback (e.g. test_verify_one_bad_block) and need the results
    object passed through.
    """
    self.failIf(r.is_healthy(), where)
    return r
def check_expected_failure(self, r, expected_exception, substring, where):
    """Assert that r.problems contains a failure of 'expected_exception'
    whose text includes 'substring'; otherwise fail the test."""
    for (peerid, storage_index, shnum, f) in r.problems:
        if f.check(expected_exception):
            self.failUnless(substring in str(f),
                            "%s: substring '%s' not in '%s'" %
                            (where, substring, str(f)))
            # fix: stop once the expected failure is found; without
            # this 'return' we always fall through to self.fail() below
            return
    self.fail("%s: didn't see expected exception %s in problems %s" %
              (where, expected_exception, r.problems))
1651 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1653 return self.publish_one()
def test_check_good(self):
    # a freshly published file must check as healthy
    d = self._fn.check(Monitor())
    d.addCallback(self.check_good, "test_check_good")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_check_mdmf_good(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_mdmf_good")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_check_no_shares(self):
    for shares in self._storage._peers.values():
        # NOTE(review): the statement emptying 'shares' (likely
        # 'shares.clear()') appears elided here.
    d = self._fn.check(Monitor())
    d.addCallback(self.check_bad, "test_check_no_shares")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_check_mdmf_no_shares(self):
    d = self.publish_mdmf()
    # NOTE(review): a 'def _then(ignored):' header appears elided here;
    # the loop below is its body.
    for share in self._storage._peers.values():
        # NOTE(review): the statement emptying 'share' appears elided.
    d.addCallback(_then)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_check_not_enough_shares(self):
    for shares in self._storage._peers.values():
        for shnum in shares.keys():
            # NOTE(review): the share-deletion guard/statement (likely
            # 'if shnum > 0: del shares[shnum]') appears elided here.
    d = self._fn.check(Monitor())
    d.addCallback(self.check_bad, "test_check_not_enough_shares")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_check_mdmf_not_enough_shares(self):
    d = self.publish_mdmf()
    # NOTE(review): a 'def _then(ignored):' header appears elided here.
    for shares in self._storage._peers.values():
        for shnum in shares.keys():
            # NOTE(review): the share-deletion statement appears elided.
    d.addCallback(_then)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    # NOTE(review): 'not_enougH' below is the original diagnostic label
    # string (typo included); a doc-only pass cannot change it.
    d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_check_all_bad_sig(self):
    # corrupting every signature makes the checker report unhealthy
    d = corrupt(None, self._storage, 1) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_all_bad_sig")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_check_mdmf_all_bad_sig(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, 1))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_check_all_bad_blocks(self):
    d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
    # the Checker won't notice this.. it doesn't look at actual data
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_all_bad_blocks")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_check_mdmf_all_bad_blocks(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "share_data"))
    # a plain check (no verify) doesn't read block data, so it still
    # reports healthy
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_verify_good(self):
    # full verification of a healthy file must pass
    d = self._fn.check(Monitor(), verify=True)
    d.addCallback(self.check_good, "test_verify_good")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_verify_all_bad_sig(self):
    d = corrupt(None, self._storage, 1) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_all_bad_sig")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_verify_one_bad_sig(self):
    # even a single bad signature makes the verifier report trouble
    d = corrupt(None, self._storage, 1, [9]) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_sig")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_verify_one_bad_block(self):
    d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
    # the Verifier *will* notice this, since it examines every byte
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_block")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "block hash tree failure",
                  "test_verify_one_bad_block")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_verify_one_bad_sharehash(self):
    # one corrupt share-hash-chain entry is detected by the verifier
    d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "corrupt hashes",
                  "test_verify_one_bad_sharehash")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_verify_one_bad_encprivkey(self):
    # a writable node can validate the encrypted privkey and notice
    d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "invalid privkey",
                  "test_verify_one_bad_encprivkey")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_verify_one_bad_encprivkey_uncheckable(self):
    d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
    readonly_fn = self._fn.get_readonly()
    # a read-only node has no way to validate the privkey
    d.addCallback(lambda ignored:
        readonly_fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good,
                  "test_verify_one_bad_encprivkey_uncheckable")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_verify_mdmf_good(self):
    # full verification of a healthy MDMF file must pass
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good, "test_verify_mdmf_good")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_verify_mdmf_one_bad_block(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "share_data", [1]))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    # We should find one bad block here
    d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "block hash tree failure",
                  "test_verify_mdmf_one_bad_block")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_verify_mdmf_bad_encprivkey(self):
    # a bad MDMF encrypted privkey is caught by a writable verifier
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "enc_privkey", [0]))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "privkey",
                  "test_verify_mdmf_bad_encprivkey")
    # NOTE(review): 'return d' not visible -- likely elided.

def test_verify_mdmf_bad_sig(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, 1, [1]))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
    # NOTE(review): 'return d' not visible -- likely elided.
def test_verify_mdmf_bad_encprivkey_uncheckable(self):
    # a read-only MDMF node cannot validate the privkey, so the bad
    # privkey goes unnoticed and the check stays good
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "enc_privkey", [1]))
    d.addCallback(lambda ignored:
        self._fn.get_readonly())
    d.addCallback(lambda fn:
        fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good,
                  "test_verify_mdmf_bad_encprivkey_uncheckable")
    # NOTE(review): 'return d' not visible -- likely elided.
1862 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
def get_shares(self, s):
    """Return a flat {(peerid, shnum): share_data} snapshot of every
    share held by storage 's' (reads only s._peers)."""
    all_shares = {} # maps (peerid, shnum) to share data
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            data = shares[shnum]
            all_shares[ (peerid, shnum) ] = data
    # fix: return the snapshot -- copy_shares appends this return value
    # to self.old_shares
    return all_shares
def copy_shares(self, ignored=None):
    # Append a snapshot of the current shares to self.old_shares;
    # 'ignored' lets this be used directly as a Deferred callback.
    self.old_shares.append(self.get_shares(self._storage))
def test_repair_nop(self):
    # repairing a healthy file should succeed and leave every share in
    # place, republished at a higher sequence number
    self.old_shares = []
    d = self.publish_one()
    d.addCallback(self.copy_shares)
    d.addCallback(lambda res: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    def _check_results(rres):
        self.failUnless(IRepairResults.providedBy(rres))
        self.failUnless(rres.get_successful())
        # TODO: examine results
        # NOTE(review): a second copy_shares() snapshot appears to be
        # elided before these comparisons (old_shares[1] is read below).
        initial_shares = self.old_shares[0]
        new_shares = self.old_shares[1]
        # TODO: this really shouldn't change anything. When we implement
        # a "minimal-bandwidth" repairer", change this test to assert:
        #self.failUnlessEqual(new_shares, initial_shares)
        # all shares should be in the same place as before
        self.failUnlessEqual(set(initial_shares.keys()),
                             set(new_shares.keys()))
        # but they should all be at a newer seqnum. The IV will be
        # different, so the roothash will be too.
        for key in initial_shares:
            # NOTE(review): the '(version0, seqnum0, root_hash0, IV0,'
            # head of each unpack tuple appears elided in this copy.
            k0, N0, segsize0, datalen0,
            o0) = unpack_header(initial_shares[key])
            k1, N1, segsize1, datalen1,
            o1) = unpack_header(new_shares[key])
            self.failUnlessEqual(version0, version1)
            self.failUnlessEqual(seqnum0+1, seqnum1)
            self.failUnlessEqual(k0, k1)
            self.failUnlessEqual(N0, N1)
            self.failUnlessEqual(segsize0, segsize1)
            self.failUnlessEqual(datalen0, datalen1)
    d.addCallback(_check_results)
    # NOTE(review): 'return d' not visible -- likely elided.
def failIfSharesChanged(self, ignored=None):
    """Assert that the two most recent share snapshots are identical.

    *ignored* lets this be used directly as a Deferred callback.
    """
    previous = self.old_shares[-2]
    latest = self.old_shares[-1]
    self.failUnlessEqual(previous, latest)
1928 def test_unrepairable_0shares(self):
1929 d = self.publish_one()
1930 def _delete_all_shares(ign):
1931 shares = self._storage._peers
1932 for peerid in shares:
1934 d.addCallback(_delete_all_shares)
1935 d.addCallback(lambda ign: self._fn.check(Monitor()))
1936 d.addCallback(lambda check_results: self._fn.repair(check_results))
1938 self.failUnlessEqual(crr.get_successful(), False)
1939 d.addCallback(_check)
1942 def test_mdmf_unrepairable_0shares(self):
1943 d = self.publish_mdmf()
1944 def _delete_all_shares(ign):
1945 shares = self._storage._peers
1946 for peerid in shares:
1948 d.addCallback(_delete_all_shares)
1949 d.addCallback(lambda ign: self._fn.check(Monitor()))
1950 d.addCallback(lambda check_results: self._fn.repair(check_results))
1951 d.addCallback(lambda crr: self.failIf(crr.get_successful()))
1955 def test_unrepairable_1share(self):
1956 d = self.publish_one()
1957 def _delete_all_shares(ign):
1958 shares = self._storage._peers
1959 for peerid in shares:
1960 for shnum in list(shares[peerid]):
1962 del shares[peerid][shnum]
1963 d.addCallback(_delete_all_shares)
1964 d.addCallback(lambda ign: self._fn.check(Monitor()))
1965 d.addCallback(lambda check_results: self._fn.repair(check_results))
1967 self.failUnlessEqual(crr.get_successful(), False)
1968 d.addCallback(_check)
1971 def test_mdmf_unrepairable_1share(self):
1972 d = self.publish_mdmf()
1973 def _delete_all_shares(ign):
1974 shares = self._storage._peers
1975 for peerid in shares:
1976 for shnum in list(shares[peerid]):
1978 del shares[peerid][shnum]
1979 d.addCallback(_delete_all_shares)
1980 d.addCallback(lambda ign: self._fn.check(Monitor()))
1981 d.addCallback(lambda check_results: self._fn.repair(check_results))
1983 self.failUnlessEqual(crr.get_successful(), False)
1984 d.addCallback(_check)
1987 def test_repairable_5shares(self):
1988 d = self.publish_mdmf()
1989 def _delete_all_shares(ign):
1990 shares = self._storage._peers
1991 for peerid in shares:
1992 for shnum in list(shares[peerid]):
1994 del shares[peerid][shnum]
1995 d.addCallback(_delete_all_shares)
1996 d.addCallback(lambda ign: self._fn.check(Monitor()))
1997 d.addCallback(lambda check_results: self._fn.repair(check_results))
1999 self.failUnlessEqual(crr.get_successful(), True)
2000 d.addCallback(_check)
2003 def test_mdmf_repairable_5shares(self):
2004 d = self.publish_mdmf()
2005 def _delete_some_shares(ign):
2006 shares = self._storage._peers
2007 for peerid in shares:
2008 for shnum in list(shares[peerid]):
2010 del shares[peerid][shnum]
2011 d.addCallback(_delete_some_shares)
2012 d.addCallback(lambda ign: self._fn.check(Monitor()))
2014 self.failIf(cr.is_healthy())
2015 self.failUnless(cr.is_recoverable())
2017 d.addCallback(_check)
2018 d.addCallback(lambda check_results: self._fn.repair(check_results))
2020 self.failUnlessEqual(crr.get_successful(), True)
2021 d.addCallback(_check1)
2025 def test_merge(self):
2026 self.old_shares = []
2027 d = self.publish_multiple()
2028 # repair will refuse to merge multiple highest seqnums unless you
2030 d.addCallback(lambda res:
2031 self._set_versions({0:3,2:3,4:3,6:3,8:3,
2032 1:4,3:4,5:4,7:4,9:4}))
2033 d.addCallback(self.copy_shares)
2034 d.addCallback(lambda res: self._fn.check(Monitor()))
2035 def _try_repair(check_results):
2036 ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2037 d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2038 self._fn.repair, check_results)
2039 d2.addCallback(self.copy_shares)
2040 d2.addCallback(self.failIfSharesChanged)
2041 d2.addCallback(lambda res: check_results)
2043 d.addCallback(_try_repair)
2044 d.addCallback(lambda check_results:
2045 self._fn.repair(check_results, force=True))
2046 # this should give us 10 shares of the highest roothash
2047 def _check_repair_results(rres):
2048 self.failUnless(rres.get_successful())
2050 d.addCallback(_check_repair_results)
2051 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2052 def _check_smap(smap):
2053 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2054 self.failIf(smap.unrecoverable_versions())
2055 # now, which should have won?
2056 roothash_s4a = self.get_roothash_for(3)
2057 roothash_s4b = self.get_roothash_for(4)
2058 if roothash_s4b > roothash_s4a:
2059 expected_contents = self.CONTENTS[4]
2061 expected_contents = self.CONTENTS[3]
2062 new_versionid = smap.best_recoverable_version()
2063 self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2064 d2 = self._fn.download_version(smap, new_versionid)
2065 d2.addCallback(self.failUnlessEqual, expected_contents)
2067 d.addCallback(_check_smap)
def test_non_merge(self):
    """A repair that does not need to merge (only one highest seqnum)
    must proceed without force=True, ignoring the older version."""
    self.old_shares = []
    d = self.publish_multiple()
    # repair should not refuse a repair that doesn't need to merge. In
    # this case, we combine v2 with v3. The repair should ignore v2 and
    # copy v3 into a new v5.
    d.addCallback(lambda res:
                  self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                      1:3,3:3,5:3,7:3,9:3}))
    d.addCallback(lambda res: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    # this should give us 10 shares of v3
    def _check_repair_results(rres):
        self.failUnless(rres.get_successful())
    d.addCallback(_check_repair_results)
    d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
    def _check_smap(smap):
        self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        self.failIf(smap.unrecoverable_versions())
        # now, which should have won?
        expected_contents = self.CONTENTS[3]
        new_versionid = smap.best_recoverable_version()
        self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
        d2 = self._fn.download_version(smap, new_versionid)
        d2.addCallback(self.failUnlessEqual, expected_contents)
        # return d2 so the outer chain waits for the download check
        return d2
    d.addCallback(_check_smap)
    # return the Deferred so trial waits for the whole chain
    return d
def get_roothash_for(self, index):
    """Return the roothash of the first share found in saved set *index*.

    Returns None if the set holds no shares. The original unpacked the
    header but fell off the end of the loops without returning, so
    callers (which compare two roothashes to pick a winner) always saw
    None.
    """
    # return the roothash for the first share we see in the saved set
    shares = self._copied_shares[index]
    for peerid in shares:
        for shnum in shares[peerid]:
            share = shares[peerid][shnum]
            (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
                unpack_header(share)
            return root_hash
    return None
2110 def test_check_and_repair_readcap(self):
2111 # we can't currently repair from a mutable readcap: #625
2112 self.old_shares = []
2113 d = self.publish_one()
2114 d.addCallback(self.copy_shares)
2115 def _get_readcap(res):
2116 self._fn3 = self._fn.get_readonly()
2117 # also delete some shares
2118 for peerid,shares in self._storage._peers.items():
2120 d.addCallback(_get_readcap)
2121 d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2122 def _check_results(crr):
2123 self.failUnless(ICheckAndRepairResults.providedBy(crr))
2124 # we should detect the unhealthy, but skip over mutable-readcap
2125 # repairs until #625 is fixed
2126 self.failIf(crr.get_pre_repair_results().is_healthy())
2127 self.failIf(crr.get_repair_attempted())
2128 self.failIf(crr.get_post_repair_results().is_healthy())
2129 d.addCallback(_check_results)
class DevNullDictionary(dict):
    """A dict that silently discards every assignment.

    Used in these tests to replace the nodemaker's node cache (see the
    "disable the nodecache" call sites) so that repeated
    create_from_cap() calls yield distinct node objects for the same
    cap instead of one cached instance. Lookups behave like a normal
    (permanently empty) dict.
    """
    def __setitem__(self, key, value):
        # drop the write on the floor
        return
2136 class MultipleEncodings(unittest.TestCase):
2138 self.CONTENTS = "New contents go here"
2139 self.uploadable = MutableData(self.CONTENTS)
2140 self._storage = FakeStorage()
2141 self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2142 self._storage_broker = self._nodemaker.storage_broker
2143 d = self._nodemaker.create_mutable_file(self.uploadable)
2146 d.addCallback(_created)
2149 def _encode(self, k, n, data, version=SDMF_VERSION):
2150 # encode 'data' into a peerid->shares dict.
2153 # disable the nodecache, since for these tests we explicitly need
2154 # multiple nodes pointing at the same file
2155 self._nodemaker._node_cache = DevNullDictionary()
2156 fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2157 # then we copy over other fields that are normally fetched from the
2159 fn2._pubkey = fn._pubkey
2160 fn2._privkey = fn._privkey
2161 fn2._encprivkey = fn._encprivkey
2162 # and set the encoding parameters to something completely different
2163 fn2._required_shares = k
2164 fn2._total_shares = n
2167 s._peers = {} # clear existing storage
2168 p2 = Publish(fn2, self._storage_broker, None)
2169 uploadable = MutableData(data)
2170 d = p2.publish(uploadable)
2171 def _published(res):
2175 d.addCallback(_published)
2178 def make_servermap(self, mode=MODE_READ, oldmap=None):
2180 oldmap = ServerMap()
2181 smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
2186 def test_multiple_encodings(self):
2187 # we encode the same file in two different ways (3-of-10 and 4-of-9),
2188 # then mix up the shares, to make sure that download survives seeing
2189 # a variety of encodings. This is actually kind of tricky to set up.
2191 contents1 = "Contents for encoding 1 (3-of-10) go here"
2192 contents2 = "Contents for encoding 2 (4-of-9) go here"
2193 contents3 = "Contents for encoding 3 (4-of-7) go here"
2195 # we make a retrieval object that doesn't know what encoding
2197 fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2199 # now we upload a file through fn1, and grab its shares
2200 d = self._encode(3, 10, contents1)
2201 def _encoded_1(shares):
2202 self._shares1 = shares
2203 d.addCallback(_encoded_1)
2204 d.addCallback(lambda res: self._encode(4, 9, contents2))
2205 def _encoded_2(shares):
2206 self._shares2 = shares
2207 d.addCallback(_encoded_2)
2208 d.addCallback(lambda res: self._encode(4, 7, contents3))
2209 def _encoded_3(shares):
2210 self._shares3 = shares
2211 d.addCallback(_encoded_3)
2214 log.msg("merging sharelists")
2215 # we merge the shares from the two sets, leaving each shnum in
2216 # its original location, but using a share from set1 or set2
2217 # according to the following sequence:
2228 # so that neither form can be recovered until fetch [f], at which
2229 # point version-s1 (the 3-of-10 form) should be recoverable. If
2230 # the implementation latches on to the first version it sees,
2231 # then s2 will be recoverable at fetch [g].
2233 # Later, when we implement code that handles multiple versions,
2234 # we can use this framework to assert that all recoverable
2235 # versions are retrieved, and test that 'epsilon' does its job
2237 places = [2, 2, 3, 2, 1, 1, 1, 2]
2240 sb = self._storage_broker
2242 for peerid in sorted(sb.get_all_serverids()):
2243 for shnum in self._shares1.get(peerid, {}):
2244 if shnum < len(places):
2245 which = places[shnum]
2248 self._storage._peers[peerid] = peers = {}
2249 in_1 = shnum in self._shares1[peerid]
2250 in_2 = shnum in self._shares2.get(peerid, {})
2251 in_3 = shnum in self._shares3.get(peerid, {})
2254 peers[shnum] = self._shares1[peerid][shnum]
2255 sharemap[shnum] = peerid
2258 peers[shnum] = self._shares2[peerid][shnum]
2259 sharemap[shnum] = peerid
2262 peers[shnum] = self._shares3[peerid][shnum]
2263 sharemap[shnum] = peerid
2265 # we don't bother placing any other shares
2266 # now sort the sequence so that share 0 is returned first
2267 new_sequence = [sharemap[shnum]
2268 for shnum in sorted(sharemap.keys())]
2269 self._storage._sequence = new_sequence
2270 log.msg("merge done")
2271 d.addCallback(_merge)
2272 d.addCallback(lambda res: fn3.download_best_version())
2273 def _retrieved(new_contents):
2274 # the current specified behavior is "first version recoverable"
2275 self.failUnlessEqual(new_contents, contents1)
2276 d.addCallback(_retrieved)
2280 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
2283 return self.publish_multiple()
def test_multiple_versions(self):
    """download_best_version must return the newest recoverable
    version, and the servermap must notice unrecoverable newer
    versions and the need to merge parallel ones."""
    # if we see a mix of versions in the grid, download_best_version
    # should get the latest one
    self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
    d = self._fn.download_best_version()
    d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
    # and the checker should report problems
    d.addCallback(lambda res: self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_multiple_versions")

    # but if everything is at version 2, that's what we should download
    d.addCallback(lambda res:
                  self._set_versions(dict([(i,2) for i in range(10)])))
    d.addCallback(lambda res: self._fn.download_best_version())
    d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
    # if exactly one share is at version 3, we should still get v2
    d.addCallback(lambda res:
                  self._set_versions({0:3}))
    d.addCallback(lambda res: self._fn.download_best_version())
    d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
    # but the servermap should see the unrecoverable version. This
    # depends upon the single newer share being queried early.
    d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
    def _check_smap(smap):
        self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
        newer = smap.unrecoverable_newer_versions()
        self.failUnlessEqual(len(newer), 1)
        verinfo, health = newer.items()[0]
        self.failUnlessEqual(verinfo[0], 4)
        self.failUnlessEqual(health, (1,3))
        self.failIf(smap.needs_merge())
    d.addCallback(_check_smap)
    # if we have a mix of two parallel versions (s4a and s4b), we could
    # recover either one
    d.addCallback(lambda res:
                  self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                      1:4,3:4,5:4,7:4,9:4}))
    d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
    def _check_smap_mixed(smap):
        self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
        newer = smap.unrecoverable_newer_versions()
        self.failUnlessEqual(len(newer), 0)
        self.failUnless(smap.needs_merge())
    d.addCallback(_check_smap_mixed)
    d.addCallback(lambda res: self._fn.download_best_version())
    d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                              res == self.CONTENTS[4]))
    # the Deferred must be returned so trial waits for the whole chain
    return d
def test_replace(self):
    """modify() on a grid with mixed versions must replace all of them
    with a single newer version, including the outlier share."""
    # if we see a mix of versions in the grid, we should be able to
    # replace them all with a newer version

    # if exactly one share is at version 3, we should download (and
    # replace) v2, and the result should be v4. Note that the index we
    # give to _set_versions is different than the sequence number.
    target = dict([(i,2) for i in range(10)]) # seqnum3
    target[0] = 3 # seqnum4
    self._set_versions(target)

    def _modify(oldversion, servermap, first_time):
        return oldversion + " modified"
    d = self._fn.modify(_modify)
    d.addCallback(lambda res: self._fn.download_best_version())
    expected = self.CONTENTS[2] + " modified"
    d.addCallback(lambda res: self.failUnlessEqual(res, expected))
    # and the servermap should indicate that the outlier was replaced too
    d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
    def _check_smap(smap):
        self.failUnlessEqual(smap.highest_seqnum(), 5)
        self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
        self.failUnlessEqual(len(smap.recoverable_versions()), 1)
    d.addCallback(_check_smap)
    # the Deferred must be returned so trial waits for the whole chain
    return d
class Utils(unittest.TestCase):
    def test_cache(self):
        """Exercise ResponseCache: hits inside cached spans, misses for
        unknown verinfo/shnum, reads not fully covered by cached data,
        and joining of adjacent fragments."""
        # the original lost the two "c = ResponseCache()" bindings; the
        # c.add/c.read calls below require them (ResponseCache is
        # imported at the top of this file)
        c = ResponseCache()
        # two precomputed 100-byte spans, so the test is deterministic:
        # xdata = base62.b2a(os.urandom(100))[:100]
        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
        c.add("v1", 1, 0, xdata)
        c.add("v1", 1, 2000, ydata)
        # wrong verinfo or wrong shnum: miss
        self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
        self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
        # reads fully inside a cached span hit; anything not fully
        # covered returns None
        self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
        self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
        self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
        self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
        self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
        self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
        # reads around the uncached gap before offset 2000: all misses
        self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
        self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)

        # test joining fragments
        c = ResponseCache()
        c.add("v1", 1, 0, xdata[:10])
        c.add("v1", 1, 10, xdata[10:20])
        self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
class Exceptions(unittest.TestCase):
    """Smoke-test repr() of the mutable-file exception classes."""
    def test_repr(self):
        # each exception's repr must mention its class name
        cases = [
            (NeedMoreDataError(100, 50, 100), "NeedMoreDataError"),
            (UncoordinatedWriteError(), "UncoordinatedWriteError"),
        ]
        for exc, classname in cases:
            self.failUnless(classname in repr(exc), repr(exc))
class SameKeyGenerator:
    """A key generator that always hands back one pre-made RSA keypair.

    Lets a test know a mutable file's storage index ahead of time (it
    is derived from the key), e.g. to pre-break specific servers.
    """
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey
    def generate(self, keysize=None):
        # keysize matches the real generator's signature but is ignored
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
class FirstServerGetsKilled:
    """post_call_notifier that breaks the first server to answer a call.

    NOTE(review): the middle of this class was lost in extraction; the
    one-shot 'done' latch and the retval pass-through are reconstructed
    from the class name and the visible 'wrapper.broken = True' line --
    confirm against upstream.
    """
    done = False
    def notify(self, retval, wrapper, methname):
        if not self.done:
            # break only the first server whose call completes; later
            # calls (and other servers) are left alone
            wrapper.broken = True
            self.done = True
        # a post_call_notifier must pass the return value through
        return retval
class FirstServerGetsDeleted:
    """post_call_notifier that makes the first server appear share-less.

    The first completed call passes through normally but remembers its
    server wrapper; later calls to that same server are answered as if
    its shares had been deleted.

    NOTE(review): parts of this class were lost in extraction; the
    guard/return structure is reconstructed around the visible lines
    (the silenced attribute, the wrapper comparison, and the methname
    assertion) -- confirm against upstream.
    """
    def __init__(self):
        self.silenced = None
    def notify(self, retval, wrapper, methname):
        if self.silenced is None:
            # this query will work, but later queries should think the share
            # has been deleted
            self.silenced = wrapper
            return retval
        if wrapper == self.silenced:
            assert methname == "slot_testv_and_readv_and_writev"
            # answer as a server holding no shares would
            return (True, {})
        return retval
2432 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
2433 def do_publish_surprise(self, version):
2434 self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
2436 nm = self.g.clients[0].nodemaker
2437 d = nm.create_mutable_file(MutableData("contents 1"),
2440 d = defer.succeed(None)
2441 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2442 def _got_smap1(smap):
2443 # stash the old state of the file
2445 d.addCallback(_got_smap1)
2446 # then modify the file, leaving the old map untouched
2447 d.addCallback(lambda res: log.msg("starting winning write"))
2448 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2449 # now attempt to modify the file with the old servermap. This
2450 # will look just like an uncoordinated write, in which every
2451 # single share got updated between our mapupdate and our publish
2452 d.addCallback(lambda res: log.msg("starting doomed write"))
2453 d.addCallback(lambda res:
2454 self.shouldFail(UncoordinatedWriteError,
2455 "test_publish_surprise", None,
2457 MutableData("contents 2a"), self.old_map))
2459 d.addCallback(_created)
def test_publish_surprise_sdmf(self):
    """Run the stale-servermap publish test against an SDMF file."""
    return self.do_publish_surprise(SDMF_VERSION)
def test_publish_surprise_mdmf(self):
    """Run the stale-servermap publish test against an MDMF file."""
    return self.do_publish_surprise(MDMF_VERSION)
2468 def test_retrieve_surprise(self):
2469 self.basedir = "mutable/Problems/test_retrieve_surprise"
2471 nm = self.g.clients[0].nodemaker
2472 d = nm.create_mutable_file(MutableData("contents 1"))
2474 d = defer.succeed(None)
2475 d.addCallback(lambda res: n.get_servermap(MODE_READ))
2476 def _got_smap1(smap):
2477 # stash the old state of the file
2479 d.addCallback(_got_smap1)
2480 # then modify the file, leaving the old map untouched
2481 d.addCallback(lambda res: log.msg("starting winning write"))
2482 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2483 # now attempt to retrieve the old version with the old servermap.
2484 # This will look like someone has changed the file since we
2485 # updated the servermap.
2486 d.addCallback(lambda res: n._cache._clear())
2487 d.addCallback(lambda res: log.msg("starting doomed read"))
2488 d.addCallback(lambda res:
2489 self.shouldFail(NotEnoughSharesError,
2490 "test_retrieve_surprise",
2491 "ran out of servers: have 0 of 1",
2494 self.old_map.best_recoverable_version(),
2497 d.addCallback(_created)
2501 def test_unexpected_shares(self):
2502 # upload the file, take a servermap, shut down one of the servers,
2503 # upload it again (causing shares to appear on a new server), then
2504 # upload using the old servermap. The last upload should fail with an
2505 # UncoordinatedWriteError, because of the shares that didn't appear
2507 self.basedir = "mutable/Problems/test_unexpected_shares"
2509 nm = self.g.clients[0].nodemaker
2510 d = nm.create_mutable_file(MutableData("contents 1"))
2512 d = defer.succeed(None)
2513 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2514 def _got_smap1(smap):
2515 # stash the old state of the file
2517 # now shut down one of the servers
2518 peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
2519 self.g.remove_server(peer0)
2520 # then modify the file, leaving the old map untouched
2521 log.msg("starting winning write")
2522 return n.overwrite(MutableData("contents 2"))
2523 d.addCallback(_got_smap1)
2524 # now attempt to modify the file with the old servermap. This
2525 # will look just like an uncoordinated write, in which every
2526 # single share got updated between our mapupdate and our publish
2527 d.addCallback(lambda res: log.msg("starting doomed write"))
2528 d.addCallback(lambda res:
2529 self.shouldFail(UncoordinatedWriteError,
2530 "test_surprise", None,
2532 MutableData("contents 2a"), self.old_map))
2534 d.addCallback(_created)
2537 def test_multiply_placed_shares(self):
2538 self.basedir = "mutable/Problems/test_multiply_placed_shares"
2540 nm = self.g.clients[0].nodemaker
2541 d = nm.create_mutable_file(MutableData("contents 1"))
2542 # remove one of the servers and reupload the file.
2546 servers = self.g.get_all_serverids()
2547 self.ss = self.g.remove_server(servers[len(servers)-1])
2549 new_server = self.g.make_server(len(servers)-1)
2550 self.g.add_server(len(servers)-1, new_server)
2552 return self._node.download_best_version()
2553 d.addCallback(_created)
2554 d.addCallback(lambda data: MutableData(data))
2555 d.addCallback(lambda data: self._node.overwrite(data))
2557 # restore the server we removed earlier, then download+upload
2559 def _overwritten(ign):
2560 self.g.add_server(len(self.g.servers_by_number), self.ss)
2561 return self._node.download_best_version()
2562 d.addCallback(_overwritten)
2563 d.addCallback(lambda data: MutableData(data))
2564 d.addCallback(lambda data: self._node.overwrite(data))
2565 d.addCallback(lambda ignored:
2566 self._node.get_servermap(MODE_CHECK))
2567 def _overwritten_again(smap):
2568 # Make sure that all shares were updated by making sure that
2569 # there aren't any other versions in the sharemap.
2570 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2571 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2572 d.addCallback(_overwritten_again)
2575 def test_bad_server(self):
2576 # Break one server, then create the file: the initial publish should
2577 # complete with an alternate server. Breaking a second server should
2578 # not prevent an update from succeeding either.
2579 self.basedir = "mutable/Problems/test_bad_server"
2581 nm = self.g.clients[0].nodemaker
2583 # to make sure that one of the initial peers is broken, we have to
2584 # get creative. We create an RSA key and compute its storage-index.
2585 # Then we make a KeyGenerator that always returns that one key, and
2586 # use it to create the mutable file. This will get easier when we can
2587 # use #467 static-server-selection to disable permutation and force
2588 # the choice of server for share[0].
2590 d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2591 def _got_key( (pubkey, privkey) ):
2592 nm.key_generator = SameKeyGenerator(pubkey, privkey)
2593 pubkey_s = pubkey.serialize()
2594 privkey_s = privkey.serialize()
2595 u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2596 ssk_pubkey_fingerprint_hash(pubkey_s))
2597 self._storage_index = u.get_storage_index()
2598 d.addCallback(_got_key)
2599 def _break_peer0(res):
2600 si = self._storage_index
2601 servers = nm.storage_broker.get_servers_for_psi(si)
2602 self.g.break_server(servers[0].get_serverid())
2603 self.server1 = servers[1]
2604 d.addCallback(_break_peer0)
2605 # now "create" the file, using the pre-established key, and let the
2606 # initial publish finally happen
2607 d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2608 # that ought to work
2610 d = n.download_best_version()
2611 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2612 # now break the second peer
2613 def _break_peer1(res):
2614 self.g.break_server(self.server1.get_serverid())
2615 d.addCallback(_break_peer1)
2616 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2617 # that ought to work too
2618 d.addCallback(lambda res: n.download_best_version())
2619 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2620 def _explain_error(f):
2622 if f.check(NotEnoughServersError):
2623 print "first_error:", f.value.first_error
2625 d.addErrback(_explain_error)
2627 d.addCallback(_got_node)
2630 def test_bad_server_overlap(self):
2631 # like test_bad_server, but with no extra unused servers to fall back
2632 # upon. This means that we must re-use a server which we've already
2633 # used. If we don't remember the fact that we sent them one share
2634 # already, we'll mistakenly think we're experiencing an
2635 # UncoordinatedWriteError.
2637 # Break one server, then create the file: the initial publish should
2638 # complete with an alternate server. Breaking a second server should
2639 # not prevent an update from succeeding either.
2640 self.basedir = "mutable/Problems/test_bad_server_overlap"
2642 nm = self.g.clients[0].nodemaker
2643 sb = nm.storage_broker
2645 peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2646 self.g.break_server(peerids[0])
2648 d = nm.create_mutable_file(MutableData("contents 1"))
2650 d = n.download_best_version()
2651 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2652 # now break one of the remaining servers
2653 def _break_second_server(res):
2654 self.g.break_server(peerids[1])
2655 d.addCallback(_break_second_server)
2656 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2657 # that ought to work too
2658 d.addCallback(lambda res: n.download_best_version())
2659 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2661 d.addCallback(_created)
2664 def test_publish_all_servers_bad(self):
2665 # Break all servers: the publish should fail
2666 self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2668 nm = self.g.clients[0].nodemaker
2669 for s in nm.storage_broker.get_connected_servers():
2670 s.get_rref().broken = True
2672 d = self.shouldFail(NotEnoughServersError,
2673 "test_publish_all_servers_bad",
2674 "ran out of good servers",
2675 nm.create_mutable_file, MutableData("contents"))
def test_publish_no_servers(self):
    """With zero servers in the grid, creating a mutable file must
    fail with NotEnoughServersError."""
    # no servers at all: the publish should fail
    self.basedir = "mutable/Problems/test_publish_no_servers"
    self.set_up_grid(num_servers=0)
    nm = self.g.clients[0].nodemaker

    d = self.shouldFail(NotEnoughServersError,
                        "test_publish_no_servers",
                        "Ran out of non-bad servers",
                        nm.create_mutable_file, MutableData("contents"))
    # return the Deferred so trial waits for the shouldFail check
    return d
2691 def test_privkey_query_error(self):
2692 # when a servermap is updated with MODE_WRITE, it tries to get the
2693 # privkey. Something might go wrong during this query attempt.
2694 # Exercise the code in _privkey_query_failed which tries to handle
2696 self.basedir = "mutable/Problems/test_privkey_query_error"
2697 self.set_up_grid(num_servers=20)
2698 nm = self.g.clients[0].nodemaker
2699 nm._node_cache = DevNullDictionary() # disable the nodecache
2701 # we need some contents that are large enough to push the privkey out
2702 # of the early part of the file
2703 LARGE = "These are Larger contents" * 2000 # about 50KB
2704 LARGE_uploadable = MutableData(LARGE)
2705 d = nm.create_mutable_file(LARGE_uploadable)
2707 self.uri = n.get_uri()
2708 self.n2 = nm.create_from_cap(self.uri)
2710 # When a mapupdate is performed on a node that doesn't yet know
2711 # the privkey, a short read is sent to a batch of servers, to get
2712 # the verinfo and (hopefully, if the file is short enough) the
2713 # encprivkey. Our file is too large to let this first read
2714 # contain the encprivkey. Each non-encprivkey-bearing response
2715 # that arrives (until the node gets the encprivkey) will trigger
2716 # a second read to specifically read the encprivkey.
2718 # So, to exercise this case:
2719 # 1. notice which server gets a read() call first
2720 # 2. tell that server to start throwing errors
2721 killer = FirstServerGetsKilled()
2722 for s in nm.storage_broker.get_connected_servers():
2723 s.get_rref().post_call_notifier = killer.notify
2724 d.addCallback(_created)
2726 # now we update a servermap from a new node (which doesn't have the
2727 # privkey yet, forcing it to use a separate privkey query). Note that
2728 # the map-update will succeed, since we'll just get a copy from one
2729 # of the other shares.
2730 d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
2734 def test_privkey_query_missing(self):
2735 # like test_privkey_query_error, but the shares are deleted by the
2736 # second query, instead of raising an exception.
2737 self.basedir = "mutable/Problems/test_privkey_query_missing"
2738 self.set_up_grid(num_servers=20)
2739 nm = self.g.clients[0].nodemaker
2740 LARGE = "These are Larger contents" * 2000 # about 50KiB
2741 LARGE_uploadable = MutableData(LARGE)
2742 nm._node_cache = DevNullDictionary() # disable the nodecache
2744 d = nm.create_mutable_file(LARGE_uploadable)
2746 self.uri = n.get_uri()
2747 self.n2 = nm.create_from_cap(self.uri)
2748 deleter = FirstServerGetsDeleted()
2749 for s in nm.storage_broker.get_connected_servers():
2750 s.get_rref().post_call_notifier = deleter.notify
2751 d.addCallback(_created)
2752 d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
# Exercises Retrieve's error handling when a server dies after its prefix
# validates but before block/hash data is fetched (FirstServerGetsKilled).
# NOTE(review): numbering gaps 2769-2770 and 2786 — the `def _created(n):`
# header referenced by d.addCallback(_created) is among the missing lines.
2756 def test_block_and_hash_query_error(self):
2757 # This tests for what happens when a query to a remote server
2758 # fails in either the hash validation step or the block getting
2759 # step (because of batching, this is the same actual query).
2760 # We need to have the storage server persist up until the point
2761 # that its prefix is validated, then suddenly die. This
2762 # exercises some exception handling code in Retrieve.
2763 self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2764 self.set_up_grid(num_servers=20)
2765 nm = self.g.clients[0].nodemaker
2766 CONTENTS = "contents" * 2000
2767 CONTENTS_uploadable = MutableData(CONTENTS)
2768 d = nm.create_mutable_file(CONTENTS_uploadable)
2771 d.addCallback(_created)
2772 d.addCallback(lambda ignored:
2773 self._node.get_servermap(MODE_READ))
2774 def _then(servermap):
2775 # we have our servermap. Now we set up the servers like the
2776 # tests above -- the first one that gets a read call should
2777 # start throwing errors, but only after returning its prefix
2778 # for validation. Since we'll download without fetching the
2779 # private key, the next query to the remote server will be
2780 # for either a block and salt or for hashes, either of which
2781 # will exercise the error handling code.
2782 killer = FirstServerGetsKilled()
2783 for s in nm.storage_broker.get_connected_servers():
2784 s.get_rref().post_call_notifier = killer.notify
2785 ver = servermap.best_recoverable_version()
2787 return self._node.download_version(servermap, ver)
2788 d.addCallback(_then)
2789 d.addCallback(lambda data:
2790 self.failUnlessEqual(data, CONTENTS))
# Unit tests for MutableFileHandle, the uploadable wrapper around an
# open file-like object (here a cStringIO).
# NOTE(review): line 2795 is missing from this excerpt — presumably the
# `def setUp(self):` header for the three assignments below.
2794 class FileHandle(unittest.TestCase):
2796 self.test_data = "Test Data" * 50000
2797 self.sio = StringIO(self.test_data)
2798 self.uploadable = MutableFileHandle(self.sio)
# Reading the handle in fixed-size chunks must reproduce test_data exactly.
# NOTE(review): lines 2803 and 2807 are missing (numbering gap) — the
# `chunk_size` and `start` bindings used below.
2801 def test_filehandle_read(self):
2802 self.basedir = "mutable/FileHandle/test_filehandle_read"
2804 for i in xrange(0, len(self.test_data), chunk_size):
2805 data = self.uploadable.read(chunk_size)
2806 data = "".join(data)
2808 end = i + chunk_size
2809 self.failUnlessEqual(data, self.test_data[start:end])
2812 def test_filehandle_get_size(self):
2813 self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2814 actual_size = len(self.test_data)
2815 size = self.uploadable.get_size()
2816 self.failUnlessEqual(size, actual_size)
# get_size() must not move the handle's seek pointer: read a chunk, ask
# for the size, then confirm the next read resumes where the first ended.
# NOTE(review): the `chunk_size` and `start` bindings are among lines
# missing from this excerpt (gaps at 2822 and 2832).
2819 def test_filehandle_get_size_out_of_order(self):
2820 # We should be able to call get_size whenever we want without
2821 # disturbing the location of the seek pointer.
2823 data = self.uploadable.read(chunk_size)
2824 self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2827 size = self.uploadable.get_size()
2828 self.failUnlessEqual(size, len(self.test_data))
2830 # Now get more data. We should be right where we left off.
2831 more_data = self.uploadable.read(chunk_size)
2833 end = chunk_size * 2
2834 self.failUnlessEqual("".join(more_data), self.test_data[start:end])
# MutableFileHandle must work over a real on-disk file, not only StringIO:
# write test_data to a temp file, reopen it, wrap it, and read it back.
# NOTE(review): line 2847 is missing — presumably the f.close() between
# the write and the reopen; confirm against the full source.
2837 def test_filehandle_file(self):
2838 # Make sure that the MutableFileHandle works on a file as well
2839 # as a StringIO object, since in some cases it will be asked to
2841 self.basedir = self.mktemp()
2842 # necessary? What am I doing wrong here?
2843 os.mkdir(self.basedir)
2844 f_path = os.path.join(self.basedir, "test_file")
2845 f = open(f_path, "w")
2846 f.write(self.test_data)
2848 f = open(f_path, "r")
2850 uploadable = MutableFileHandle(f)
2852 data = uploadable.read(len(self.test_data))
2853 self.failUnlessEqual("".join(data), self.test_data)
2854 size = uploadable.get_size()
2855 self.failUnlessEqual(size, len(self.test_data))
# close() on the MutableFileHandle must close the underlying handle,
# observable here via the StringIO's `closed` flag.
2858 def test_close(self):
2859 # Make sure that the MutableFileHandle closes its handle when
2861 self.uploadable.close()
2862 self.failUnless(self.sio.closed)
# Unit tests for MutableData, the uploadable wrapper around an in-memory
# string (parallel to the FileHandle tests above).
# NOTE(review): line 2866 is missing — presumably the `def setUp(self):`
# header for the two assignments below.
2865 class DataHandle(unittest.TestCase):
2867 self.test_data = "Test Data" * 50000
2868 self.uploadable = MutableData(self.test_data)
# Chunked reads of the MutableData must reproduce test_data exactly.
# NOTE(review): lines 2872 and 2876 are missing — the `chunk_size` and
# `start` bindings used below.
2871 def test_datahandle_read(self):
2873 for i in xrange(0, len(self.test_data), chunk_size):
2874 data = self.uploadable.read(chunk_size)
2875 data = "".join(data)
2877 end = i + chunk_size
2878 self.failUnlessEqual(data, self.test_data[start:end])
2881 def test_datahandle_get_size(self):
2882 actual_size = len(self.test_data)
2883 size = self.uploadable.get_size()
2884 self.failUnlessEqual(size, actual_size)
# get_size() must not disturb the read position of the MutableData:
# read, query the size, then confirm the next read continues in sequence.
# NOTE(review): `chunk_size` and `start` bindings are among the lines
# missing from this excerpt (gaps around 2890 and 2900).
2887 def test_datahandle_get_size_out_of_order(self):
2888 # We should be able to call get_size whenever we want without
2889 # disturbing the location of the seek pointer.
2891 data = self.uploadable.read(chunk_size)
2892 self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
2895 size = self.uploadable.get_size()
2896 self.failUnlessEqual(size, len(self.test_data))
2898 # Now get more data. We should be right where we left off.
2899 more_data = self.uploadable.read(chunk_size)
2901 end = chunk_size * 2
2902 self.failUnlessEqual("".join(more_data), self.test_data[start:end])
# Tests of version-aware mutable-file behavior (MDMF vs SDMF).
# NOTE(review): the class statement's backslash continuation (line 2906,
# remaining base classes) and the `def setUp(self):` header are missing
# from this excerpt; the assignments below are setUp's body.
2905 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
2908 GridTestMixin.setUp(self)
2909 self.basedir = self.mktemp()
2911 self.c = self.g.clients[0]
2912 self.nm = self.c.nodemaker
2913 self.data = "test data" * 100000 # about 900 KiB; MDMF
2914 self.small_data = "test data" * 10 # about 90 B; SDMF
# Upload helpers: create MDMF / SDMF / empty-SDMF mutable files via the
# nodemaker and remember the resulting nodes for the tests to use.
# NOTE(review): each helper's `def _then(n):` header, the node
# assignments (e.g. self.mdmf_node), and the trailing `return d` lines
# are missing from this excerpt (numbering gaps); code left byte-identical.
2917 def do_upload_mdmf(self):
2918 d = self.nm.create_mutable_file(MutableData(self.data),
2919 version=MDMF_VERSION)
2921 assert isinstance(n, MutableFileNode)
2922 assert n._protocol_version == MDMF_VERSION
2925 d.addCallback(_then)
2928 def do_upload_sdmf(self):
2929 d = self.nm.create_mutable_file(MutableData(self.small_data))
2931 assert isinstance(n, MutableFileNode)
2932 assert n._protocol_version == SDMF_VERSION
2935 d.addCallback(_then)
2938 def do_upload_empty_sdmf(self):
2939 d = self.nm.create_mutable_file(MutableData(""))
2941 assert isinstance(n, MutableFileNode)
2942 self.sdmf_zero_length_node = n
2943 assert n._protocol_version == SDMF_VERSION
2945 d.addCallback(_then)
2948 def do_upload(self):
2949 d = self.do_upload_mdmf()
2950 d.addCallback(lambda ign: self.do_upload_sdmf())
# End-to-end check of the CLI debug tools against an uploaded MDMF file:
# find-shares must list n shares, dump-share must print the MDMF header
# fields, and catalog-shares must describe each share as MDMF k/n.
# NOTE(review): the `def _debug(n):` header (line 2955) and the storedir
# loop line (2960) are missing from this excerpt.
2953 def test_debug(self):
2954 d = self.do_upload_mdmf()
2956 fso = debug.FindSharesOptions()
2957 storage_index = base32.b2a(n.get_storage_index())
2958 fso.si_s = storage_index
2959 fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
2961 in self.iterate_servers()]
2962 fso.stdout = StringIO()
2963 fso.stderr = StringIO()
2964 debug.find_shares(fso)
2965 sharefiles = fso.stdout.getvalue().splitlines()
2966 expected = self.nm.default_encoding_parameters["n"]
2967 self.failUnlessEqual(len(sharefiles), expected)
2969 do = debug.DumpOptions()
2970 do["filename"] = sharefiles[0]
2971 do.stdout = StringIO()
2972 debug.dump_share(do)
2973 output = do.stdout.getvalue()
2974 lines = set(output.splitlines())
2975 self.failUnless("Mutable slot found:" in lines, output)
2976 self.failUnless(" share_type: MDMF" in lines, output)
2977 self.failUnless(" num_extra_leases: 0" in lines, output)
2978 self.failUnless(" MDMF contents:" in lines, output)
2979 self.failUnless(" seqnum: 1" in lines, output)
2980 self.failUnless(" required_shares: 3" in lines, output)
2981 self.failUnless(" total_shares: 10" in lines, output)
2982 self.failUnless(" segsize: 131073" in lines, output)
2983 self.failUnless(" datalen: %d" % len(self.data) in lines, output)
2984 vcap = n.get_verify_cap().to_string()
2985 self.failUnless(" verify-cap: %s" % vcap in lines, output)
2987 cso = debug.CatalogSharesOptions()
2988 cso.nodedirs = fso.nodedirs
2989 cso.stdout = StringIO()
2990 cso.stderr = StringIO()
2991 debug.catalog_shares(cso)
2992 shares = cso.stdout.getvalue().splitlines()
2993 oneshare = shares[0] # all shares should be MDMF
2994 self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
2995 self.failUnless(oneshare.startswith("MDMF"), oneshare)
2996 fields = oneshare.split()
2997 self.failUnlessEqual(fields[0], "MDMF")
2998 self.failUnlessEqual(fields[1], storage_index)
2999 self.failUnlessEqual(fields[2], "3/10")
3000 self.failUnlessEqual(fields[3], "%d" % len(self.data))
# NOTE(review): the failure message below passes fields[3]; fields[4]
# (the value actually being asserted on) was probably intended.
3001 self.failUnless(fields[4].startswith("#1:"), fields[3])
3002 # the rest of fields[4] is the roothash, which depends upon
3003 # encryption salts and is not constant. fields[5] is the
3004 # remaining time on the longest lease, which is timing dependent.
3005 # The rest of the line is the quoted pathname to the share.
3006 d.addCallback(_debug)
# Both MDMF and SDMF nodes report sequence number 1 after the initial
# upload and 2 after one overwrite.
# NOTE(review): continuation of the comment at 3018, the `return dl`
# inside _do_update, and the trailing `return d` are missing lines.
3009 def test_get_sequence_number(self):
3010 d = self.do_upload()
3011 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3012 d.addCallback(lambda bv:
3013 self.failUnlessEqual(bv.get_sequence_number(), 1))
3014 d.addCallback(lambda ignored:
3015 self.sdmf_node.get_best_readable_version())
3016 d.addCallback(lambda bv:
3017 self.failUnlessEqual(bv.get_sequence_number(), 1))
3018 # Now update. The sequence number in both cases should be 1 in
3020 def _do_update(ignored):
3021 new_data = MutableData("foo bar baz" * 100000)
3022 new_small_data = MutableData("foo bar baz" * 10)
3023 d1 = self.mdmf_node.overwrite(new_data)
3024 d2 = self.sdmf_node.overwrite(new_small_data)
3025 dl = gatherResults([d1, d2])
3027 d.addCallback(_do_update)
3028 d.addCallback(lambda ignored:
3029 self.mdmf_node.get_best_readable_version())
3030 d.addCallback(lambda bv:
3031 self.failUnlessEqual(bv.get_sequence_number(), 2))
3032 d.addCallback(lambda ignored:
3033 self.sdmf_node.get_best_readable_version())
3034 d.addCallback(lambda bv:
3035 self.failUnlessEqual(bv.get_sequence_number(), 2))
# After an MDMF upload, get_uri()/get_readonly_uri() must parse back into
# WriteableMDMFFileURI / ReadonlyMDMFFileURI cap objects respectively.
# NOTE(review): the `def _then(ign):` header (line 3045) referenced by
# d.addCallback(_then) is missing from this excerpt.
3039 def test_cap_after_upload(self):
3040 # If we create a new mutable file and upload things to it, and
3041 # it's an MDMF file, we should get an MDMF cap back from that
3042 # file and should be able to use that.
3043 # That's essentially what MDMF node is, so just check that.
3044 d = self.do_upload_mdmf()
3046 mdmf_uri = self.mdmf_node.get_uri()
3047 cap = uri.from_string(mdmf_uri)
3048 self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3049 readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3050 cap = uri.from_string(readonly_mdmf_uri)
3051 self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3052 d.addCallback(_then)
# The best mutable version of each node must report the same writekey and
# storage index as the filenode itself, and must not be read-only.
# NOTE(review): lines 3061/3068 (binding `n` inside _check_mdmf /
# _check_sdmf, presumably n = self.mdmf_node / self.sdmf_node) are missing.
3055 def test_mutable_version(self):
3056 # assert that getting parameters from the IMutableVersion object
3057 # gives us the same data as getting them from the filenode itself
3058 d = self.do_upload()
3059 d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3060 def _check_mdmf(bv):
3062 self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3063 self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3064 self.failIf(bv.is_readonly())
3065 d.addCallback(_check_mdmf)
3066 d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3067 def _check_sdmf(bv):
3069 self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3070 self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3071 self.failIf(bv.is_readonly())
3072 d.addCallback(_check_sdmf)
# Readable versions are read-only, and asking a read-only node for its
# best *mutable* version still yields a read-only version, for both
# MDMF and SDMF nodes.
3076 def test_get_readonly_version(self):
3077 d = self.do_upload()
3078 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3079 d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3081 # Attempting to get a mutable version of a mutable file from a
3082 # filenode initialized with a readcap should return a readonly
3083 # version of that same node.
3084 d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3085 d.addCallback(lambda ro: ro.get_best_mutable_version())
3086 d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3088 d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3089 d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3091 d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3092 d.addCallback(lambda ro: ro.get_best_mutable_version())
3093 d.addCallback(lambda v: self.failUnless(v.is_readonly()))
# node.overwrite() followed by download_best_version() must round-trip
# the new contents, for both the MDMF and SDMF nodes.
# NOTE(review): the trailing `return d` (line ~3112) is missing from
# this excerpt.
3097 def test_toplevel_overwrite(self):
3098 new_data = MutableData("foo bar baz" * 100000)
3099 new_small_data = MutableData("foo bar baz" * 10)
3100 d = self.do_upload()
3101 d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3102 d.addCallback(lambda ignored:
3103 self.mdmf_node.download_best_version())
3104 d.addCallback(lambda data:
3105 self.failUnlessEqual(data, "foo bar baz" * 100000))
3106 d.addCallback(lambda ignored:
3107 self.sdmf_node.overwrite(new_small_data))
3108 d.addCallback(lambda ignored:
3109 self.sdmf_node.download_best_version())
3110 d.addCallback(lambda data:
3111 self.failUnlessEqual(data, "foo bar baz" * 10))
# node.modify(modifier) must apply the modifier's transformation (append
# "modified") for both MDMF and SDMF nodes.
# NOTE(review): trailing `return d` is missing from this excerpt.
3115 def test_toplevel_modify(self):
3116 d = self.do_upload()
3117 def modifier(old_contents, servermap, first_time):
3118 return old_contents + "modified"
3119 d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3120 d.addCallback(lambda ignored:
3121 self.mdmf_node.download_best_version())
3122 d.addCallback(lambda data:
3123 self.failUnlessIn("modified", data))
3124 d.addCallback(lambda ignored:
3125 self.sdmf_node.modify(modifier))
3126 d.addCallback(lambda ignored:
3127 self.sdmf_node.download_best_version())
3128 d.addCallback(lambda data:
3129 self.failUnlessIn("modified", data))
# Currently identical in effect to test_toplevel_modify; the TODO below
# explains the intended future divergence (modify a non-best version).
# NOTE(review): trailing `return d` is missing from this excerpt.
3133 def test_version_modify(self):
3134 # TODO: When we can publish multiple versions, alter this test
3135 # to modify a version other than the best usable version, then
3136 # test to see that the best recoverable version is that.
3137 d = self.do_upload()
3138 def modifier(old_contents, servermap, first_time):
3139 return old_contents + "modified"
3140 d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3141 d.addCallback(lambda ignored:
3142 self.mdmf_node.download_best_version())
3143 d.addCallback(lambda data:
3144 self.failUnlessIn("modified", data))
3145 d.addCallback(lambda ignored:
3146 self.sdmf_node.modify(modifier))
3147 d.addCallback(lambda ignored:
3148 self.sdmf_node.download_best_version())
3149 d.addCallback(lambda data:
3150 self.failUnlessIn("modified", data))
# With two recoverable versions on the grid (shares split 50/50),
# download_version() must return the plaintext matching each version.
# NOTE(review): the second argument of both failUnlessEqual calls
# (lines 3182 and 3187+, presumably `results`) and the trailing
# `return d` are missing from this excerpt.
3154 def test_download_version(self):
3155 d = self.publish_multiple()
3156 # We want to have two recoverable versions on the grid.
3157 d.addCallback(lambda res:
3158 self._set_versions({0:0,2:0,4:0,6:0,8:0,
3159 1:1,3:1,5:1,7:1,9:1}))
3160 # Now try to download each version. We should get the plaintext
3161 # associated with that version.
3162 d.addCallback(lambda ignored:
3163 self._fn.get_servermap(mode=MODE_READ))
3164 def _got_servermap(smap):
3165 versions = smap.recoverable_versions()
3166 assert len(versions) == 2
3168 self.servermap = smap
3169 self.version1, self.version2 = versions
3170 assert self.version1 != self.version2
3172 self.version1_seqnum = self.version1[0]
3173 self.version2_seqnum = self.version2[0]
3174 self.version1_index = self.version1_seqnum - 1
3175 self.version2_index = self.version2_seqnum - 1
3177 d.addCallback(_got_servermap)
3178 d.addCallback(lambda ignored:
3179 self._fn.download_version(self.servermap, self.version1))
3180 d.addCallback(lambda results:
3181 self.failUnlessEqual(self.CONTENTS[self.version1_index],
3183 d.addCallback(lambda ignored:
3184 self._fn.download_version(self.servermap, self.version2))
3185 d.addCallback(lambda results:
3186 self.failUnlessEqual(self.CONTENTS[self.version2_index],
# Downloading a version that is not on the grid must raise
# UnrecoverableFileError (via shouldFail).
# NOTE(review): line 3199 (the expected-substring argument) and the
# bogus-version argument after line 3200 are missing from this excerpt.
3191 def test_download_nonexistent_version(self):
3192 d = self.do_upload_mdmf()
3193 d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3194 def _set_servermap(servermap):
3195 self.servermap = servermap
3196 d.addCallback(_set_servermap)
3197 d.addCallback(lambda ignored:
3198 self.shouldFail(UnrecoverableFileError, "nonexistent version",
3200 self.mdmf_node.download_version, self.servermap,
# Partial reads at segment-boundary fencepost offsets (including
# zero-length reads), then a full read in 10000-byte slices, must all
# reproduce the expected bytes of self.data.
# NOTE(review): the closing bracket of `modes` (line 3216), the
# `return d2` of _read_data, and the trailing `return d` are missing.
3205 def test_partial_read(self):
3206 d = self.do_upload_mdmf()
3207 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3208 modes = [("start_on_segment_boundary",
3209 mathutil.next_multiple(128 * 1024, 3), 50),
3210 ("ending_one_byte_after_segment_boundary",
3211 mathutil.next_multiple(128 * 1024, 3)-50, 51),
3212 ("zero_length_at_start", 0, 0),
3213 ("zero_length_in_middle", 50, 0),
3214 ("zero_length_at_segment_boundary",
3215 mathutil.next_multiple(128 * 1024, 3), 0),
3217 for (name, offset, length) in modes:
3218 d.addCallback(self._do_partial_read, name, offset, length)
3219 # then read only a few bytes at a time, and see that the results are
3221 def _read_data(version):
3222 c = consumer.MemoryConsumer()
3223 d2 = defer.succeed(None)
3224 for i in xrange(0, len(self.data), 10000):
3225 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3226 d2.addCallback(lambda ignored:
3227 self.failUnlessEqual(self.data, "".join(c.chunks)))
3229 d.addCallback(_read_data)
# Helper for test_partial_read: read [offset, offset+length) into a
# MemoryConsumer, compare against self.data, print head/tail context on
# mismatch, and return the version so calls can be daisy-chained.
# NOTE(review): line 3238 (presumably a diagnostic print before the
# got/exp prints) and the trailing `return d` are missing.
3231 def _do_partial_read(self, version, name, offset, length):
3232 c = consumer.MemoryConsumer()
3233 d = version.read(c, offset, length)
3234 expected = self.data[offset:offset+length]
3235 d.addCallback(lambda ignored: "".join(c.chunks))
3236 def _check(results):
3237 if results != expected:
3239 print "got: %s ... %s" % (results[:20], results[-20:])
3240 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3241 self.fail("results[%s] != expected" % name)
3242 return version # daisy-chained to next call
3243 d.addCallback(_check)
# Helper: both version.read() into a MemoryConsumer and
# node.download_best_version() must yield `expected`.
# NOTE(review): the `return d2` of _read_data and the trailing
# `return d` are missing from this excerpt.
3247 def _test_read_and_download(self, node, expected):
3248 d = node.get_best_readable_version()
3249 def _read_data(version):
3250 c = consumer.MemoryConsumer()
3251 d2 = defer.succeed(None)
3252 d2.addCallback(lambda ignored: version.read(c))
3253 d2.addCallback(lambda ignored:
3254 self.failUnlessEqual(expected, "".join(c.chunks)))
3256 d.addCallback(_read_data)
3257 d.addCallback(lambda ignored: node.download_best_version())
3258 d.addCallback(lambda data: self.failUnlessEqual(expected, data))
# Three thin drivers for _test_read_and_download: MDMF data, SDMF data,
# and a zero-length SDMF file.
# NOTE(review): each method's trailing `return d` is missing from this
# excerpt (numbering gaps at 3264, 3269, 3274).
3261 def test_read_and_download_mdmf(self):
3262 d = self.do_upload_mdmf()
3263 d.addCallback(self._test_read_and_download, self.data)
3266 def test_read_and_download_sdmf(self):
3267 d = self.do_upload_sdmf()
3268 d.addCallback(self._test_read_and_download, self.small_data)
3271 def test_read_and_download_sdmf_zero_length(self):
3272 d = self.do_upload_empty_sdmf()
3273 d.addCallback(self._test_read_and_download, "")
# Tests of in-place update() on mutable files, including fencepost
# offsets around segment boundaries and max-shares (k=127/n=255) nodes.
# NOTE(review): the `def setUp(self):` header (around line 3280) is
# missing from this excerpt; the assignments below are setUp's body.
3277 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3278 timeout = 400 # these tests are too big, 120s is not enough on slow
3281 GridTestMixin.setUp(self)
3282 self.basedir = self.mktemp()
3284 self.c = self.g.clients[0]
3285 self.nm = self.c.nodemaker
3286 self.data = "testdata " * 100000 # about 900 KiB; MDMF
3287 self.small_data = "test data" * 10 # about 90 B; SDMF
# Upload two SDMF files: one with default encoding and one with the
# maximum share count (n=255, k=127), remembered as sdmf_max_shares_node.
# NOTE(review): the `def _then(n):` / `def _then2(n):` headers, the
# self.sdmf_node assignment, and the trailing `return d` are missing
# from this excerpt (numbering gaps).
3290 def do_upload_sdmf(self):
3291 d = self.nm.create_mutable_file(MutableData(self.small_data))
3293 assert isinstance(n, MutableFileNode)
3295 # Make SDMF node that has 255 shares.
3296 self.nm.default_encoding_parameters['n'] = 255
3297 self.nm.default_encoding_parameters['k'] = 127
3298 return self.nm.create_mutable_file(MutableData(self.small_data))
3299 d.addCallback(_then)
3301 assert isinstance(n, MutableFileNode)
3302 self.sdmf_max_shares_node = n
3303 d.addCallback(_then2)
# Upload two MDMF files: one with default encoding and one with the
# maximum share count (n=255, k=127), remembered as mdmf_max_shares_node.
# NOTE(review): the `def _then(n):` / `def _then2(n):` headers, the
# self.mdmf_node assignment, and the trailing `return d` are missing
# from this excerpt (numbering gaps).
3306 def do_upload_mdmf(self):
3307 d = self.nm.create_mutable_file(MutableData(self.data),
3308 version=MDMF_VERSION)
3310 assert isinstance(n, MutableFileNode)
3312 # Make MDMF node that has 255 shares.
3313 self.nm.default_encoding_parameters['n'] = 255
3314 self.nm.default_encoding_parameters['k'] = 127
3315 return self.nm.create_mutable_file(MutableData(self.data),
3316 version=MDMF_VERSION)
3317 d.addCallback(_then)
3319 assert isinstance(n, MutableFileNode)
3320 self.mdmf_max_shares_node = n
3321 d.addCallback(_then2)
# Shared driver for the replace tests: splice new_data into self.data at
# `offset`, update() both the normal and max-shares MDMF nodes, and
# verify the downloaded result matches the spliced expectation.
# NOTE(review): the `def _run(ign):` header (line 3327), a diagnostic
# line at 3339, the `return d` inside _run, and the trailing
# `return d0` are missing from this excerpt.
3324 def _test_replace(self, offset, new_data):
3325 expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3326 d0 = self.do_upload_mdmf()
3328 d = defer.succeed(None)
3329 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3330 # close over 'node'.
3331 d.addCallback(lambda ign, node=node:
3332 node.get_best_mutable_version())
3333 d.addCallback(lambda mv:
3334 mv.update(MutableData(new_data), offset))
3335 d.addCallback(lambda ign, node=node:
3336 node.download_best_version())
3337 def _check(results):
3338 if results != expected:
3340 print "got: %s ... %s" % (results[:20], results[-20:])
3341 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3342 self.fail("results != expected")
3343 d.addCallback(_check)
3345 d0.addCallback(_run)
3348 def test_append(self):
3349 # We should be able to append data to a mutable file and get
3351 return self._test_replace(len(self.data), "appended")
3353 def test_replace_middle(self):
3354 # We should be able to replace data in the middle of a mutable
3355 # file and get what we expect back.
3356 return self._test_replace(100, "replaced")
3358 def test_replace_beginning(self):
3359 # We should be able to replace data at the beginning of the file
3360 # without truncating the file
3361 return self._test_replace(0, "beginning")
3363 def test_replace_segstart1(self):
3364 return self._test_replace(128*1024+1, "NNNN")
3366 def test_replace_zero_length_beginning(self):
3367 return self._test_replace(0, "")
3369 def test_replace_zero_length_middle(self):
3370 return self._test_replace(50, "")
3372 def test_replace_zero_length_segstart1(self):
3373 return self._test_replace(128*1024+1, "")
3375 def test_replace_and_extend(self):
3376 # We should be able to replace data in the middle of a mutable
3377 # file and extend that mutable file and get what we expect.
3378 return self._test_replace(100, "modified " * 100000)
# Diagnostic comparison for ~1MB of repeating data: locate runs of
# capital letters (the injected markers) in both strings, print
# per-segment head/tail mismatches, dump full copies to EXPECTED/GOT,
# and fail if anything differs.
# NOTE(review): lines 3392-3394 are missing from this excerpt —
# presumably the `if got != expected:` guard and the SEGSIZE binding
# used by the loop below.
3381 def _check_differences(self, got, expected):
3382 # displaying arbitrary file corruption is tricky for a
3383 # 1MB file of repeating data, so look for likely places
3384 # with problems and display them separately
3385 gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3386 expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3387 gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3388 for (start,end) in gotmods]
3389 expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3390 for (start,end) in expmods]
3391 #print "expecting: %s" % expspans
3395 print "differences:"
3396 for segnum in range(len(expected)//SEGSIZE):
3397 start = segnum * SEGSIZE
3398 end = (segnum+1) * SEGSIZE
3399 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3400 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3401 if got_ends != exp_ends:
3402 print "expected[%d]: %s" % (start, exp_ends)
3403 print "got [%d]: %s" % (start, got_ends)
3404 if expspans != gotspans:
3405 print "expected: %s" % expspans
3406 print "got : %s" % gotspans
3407 open("EXPECTED","wb").write(expected)
3408 open("GOT","wb").write(got)
3409 print "wrote data to EXPECTED and GOT"
3410 self.fail("didn't get expected data")
# Fencepost sweep: write two-letter markers ("AA", "BB", ...) at offsets
# straddling the first and second segment boundaries of the default
# MDMF node, then compare the download against the running expectation.
# NOTE(review): the SEGSIZE binding (line 3415), the `def _run(ign):`
# header (3419), its `return d`, and the trailing `return d0` are
# missing from this excerpt.
3413 def test_replace_locations(self):
3414 # exercise fencepost conditions
3416 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3417 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3418 d0 = self.do_upload_mdmf()
3420 expected = self.data
3421 d = defer.succeed(None)
3422 for offset in suspects:
3423 new_data = letters.next()*2 # "AA", then "BB", etc
3424 expected = expected[:offset]+new_data+expected[offset+2:]
3425 d.addCallback(lambda ign:
3426 self.mdmf_node.get_best_mutable_version())
3427 def _modify(mv, offset=offset, new_data=new_data):
3428 # close over 'offset','new_data'
3429 md = MutableData(new_data)
3430 return mv.update(md, offset)
3431 d.addCallback(_modify)
3432 d.addCallback(lambda ignored:
3433 self.mdmf_node.download_best_version())
3434 d.addCallback(self._check_differences, expected)
3436 d0.addCallback(_run)
# Same fencepost sweep as test_replace_locations, but run against the
# max-shares (n=255) MDMF node.
# NOTE(review): the SEGSIZE binding (line 3441), the `def _run(ign):`
# header (3445), its `return d`, and the trailing `return d0` are
# missing from this excerpt.
3439 def test_replace_locations_max_shares(self):
3440 # exercise fencepost conditions
3442 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3443 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3444 d0 = self.do_upload_mdmf()
3446 expected = self.data
3447 d = defer.succeed(None)
3448 for offset in suspects:
3449 new_data = letters.next()*2 # "AA", then "BB", etc
3450 expected = expected[:offset]+new_data+expected[offset+2:]
3451 d.addCallback(lambda ign:
3452 self.mdmf_max_shares_node.get_best_mutable_version())
3453 def _modify(mv, offset=offset, new_data=new_data):
3454 # close over 'offset','new_data'
3455 md = MutableData(new_data)
3456 return mv.update(md, offset)
3457 d.addCallback(_modify)
3458 d.addCallback(lambda ignored:
3459 self.mdmf_max_shares_node.download_best_version())
3460 d.addCallback(self._check_differences, expected)
3462 d0.addCallback(_run)
# Appending two full segments pushes the segment count across a
# power-of-two boundary; update() must reencode correctly and the
# download must equal the extended data, on both MDMF nodes.
# NOTE(review): the `def _run(ign):` header (line 3478), its `return d`,
# and the trailing `return d0` are missing from this excerpt.
3466 def test_append_power_of_two(self):
3467 # If we attempt to extend a mutable file so that its segment
3468 # count crosses a power-of-two boundary, the update operation
3469 # should know how to reencode the file.
3471 # Note that the data populating self.mdmf_node is about 900 KiB
3472 # long -- this is 7 segments in the default segment size. So we
3473 # need to add 2 segments worth of data to push it over a
3474 # power-of-two boundary.
3475 segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3476 new_data = self.data + (segment * 2)
3477 d0 = self.do_upload_mdmf()
3479 d = defer.succeed(None)
3480 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3481 # close over 'node'.
3482 d.addCallback(lambda ign, node=node:
3483 node.get_best_mutable_version())
3484 d.addCallback(lambda mv:
3485 mv.update(MutableData(segment * 2), len(self.data)))
3486 d.addCallback(lambda ign, node=node:
3487 node.download_best_version())
3488 d.addCallback(lambda results:
3489 self.failUnlessEqual(results, new_data))
3491 d0.addCallback(_run)
# update() must also work on single-segment (SDMF) files: append to both
# the default and max-shares SDMF nodes and verify the download.
# NOTE(review): the `def _run(ign):` header (line 3498), its `return d`,
# and the trailing `return d0` are missing from this excerpt.
3494 def test_update_sdmf(self):
3495 # Running update on a single-segment file should still work.
3496 new_data = self.small_data + "appended"
3497 d0 = self.do_upload_sdmf()
3499 d = defer.succeed(None)
3500 for node in (self.sdmf_node, self.sdmf_max_shares_node):
3501 # close over 'node'.
3502 d.addCallback(lambda ign, node=node:
3503 node.get_best_mutable_version())
3504 d.addCallback(lambda mv:
3505 mv.update(MutableData("appended"), len(self.small_data)))
3506 d.addCallback(lambda ign, node=node:
3507 node.download_best_version())
3508 d.addCallback(lambda results:
3509 self.failUnlessEqual(results, new_data))
3511 d0.addCallback(_run)
# Replacing bytes inside the (short) tail segment must be handled
# correctly on both MDMF nodes.
# NOTE(review): the `def _run(ign):` header (line 3522), its `return d`,
# and the trailing `return d0` are missing from this excerpt.
3514 def test_replace_in_last_segment(self):
3515 # The wrapper should know how to handle the tail segment
3517 replace_offset = len(self.data) - 100
3518 new_data = self.data[:replace_offset] + "replaced"
3519 rest_offset = replace_offset + len("replaced")
3520 new_data += self.data[rest_offset:]
3521 d0 = self.do_upload_mdmf()
3523 d = defer.succeed(None)
3524 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3525 # close over 'node'.
3526 d.addCallback(lambda ign, node=node:
3527 node.get_best_mutable_version())
3528 d.addCallback(lambda mv:
3529 mv.update(MutableData("replaced"), replace_offset))
3530 d.addCallback(lambda ign, node=node:
3531 node.download_best_version())
3532 d.addCallback(lambda results:
3533 self.failUnlessEqual(results, new_data))
3535 d0.addCallback(_run)
# A replacement spanning multiple whole segments (two full segments plus
# "replaced") starting at a segment boundary must round-trip on both
# MDMF nodes.
# NOTE(review): the `def _run(ign):` header (line 3547), the offset
# argument continuation after line 3554, its `return d`, and the
# trailing `return d0` are missing from this excerpt.
3538 def test_multiple_segment_replace(self):
3539 replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3540 new_data = self.data[:replace_offset]
3541 new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3542 new_data += 2 * new_segment
3543 new_data += "replaced"
3544 rest_offset = len(new_data)
3545 new_data += self.data[rest_offset:]
3546 d0 = self.do_upload_mdmf()
3548 d = defer.succeed(None)
3549 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3550 # close over 'node'.
3551 d.addCallback(lambda ign, node=node:
3552 node.get_best_mutable_version())
3553 d.addCallback(lambda mv:
3554 mv.update(MutableData((2 * new_segment) + "replaced"),
3556 d.addCallback(lambda ignored, node=node:
3557 node.download_best_version())
3558 d.addCallback(lambda results:
3559 self.failUnlessEqual(results, new_data))
3561 d0.addCallback(_run)
3564 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3565 sdmf_old_shares = {}
3566 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3567 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3568 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3569 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3570 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3571 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3572 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3573 sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3574 sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3575 sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3576 sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3577 sdmf_old_contents = "This is a test file.\n"
def copy_sdmf_shares(self):
    """Place the canned pre-generated SDMF shares onto the grid's servers.

    We short-circuit the upload process entirely: each base64-encoded
    share in self.sdmf_old_shares is decoded and written directly into
    the matching storage server's share directory, one share per server.
    Afterwards we sanity-check that all 10 shares are visible on disk.
    """
    servernums = self.g.servers_by_number.keys()
    assert len(servernums) == 10

    # Pair each old share number with a server number.
    assignments = zip(self.sdmf_old_shares.keys(), servernums)
    # Derive the storage index from the (fixed) write cap so we know
    # which per-SI directory each share file belongs in.
    cap = uri.from_string(self.sdmf_old_cap)
    si = cap.get_storage_index()

    # Now execute each assignment by writing the share data directly
    # into the server's backing store.
    for (share, servernum) in assignments:
        sharedata = base64.b64decode(self.sdmf_old_shares[share])
        storedir = self.get_serverdir(servernum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        # NOTE: restored the truncated write call — the share payload
        # argument had been dropped, leaving an incomplete call.
        fileutil.write(os.path.join(storage_path, "%d" % share),
                       sharedata)

    # ...and verify that the shares are there.
    shares = self.find_uri_shares(self.sdmf_old_cap)
    assert len(shares) == 10
def test_new_downloader_can_read_old_shares(self):
    """Verify that the current downloader can read legacy SDMF shares.

    Sets up a 10-server grid, injects the hard-coded old-format shares,
    then downloads the file through a fresh filenode and checks that the
    recovered plaintext matches the known original contents.
    """
    self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
    # set_up_grid() must run before anything touches self.g: both
    # copy_sdmf_shares() and the nodemaker lookup below depend on it.
    self.set_up_grid()
    self.copy_sdmf_shares()
    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(self.sdmf_old_cap)
    d = n.download_best_version()
    d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
    # Trial only waits for (and checks) the assertion if the Deferred
    # is returned from the test method.
    return d
3611 class DifferentEncoding(unittest.TestCase):
3613 self._storage = s = FakeStorage()
3614 self.nodemaker = make_nodemaker(s)
3616 def test_filenode(self):
3617 # create a file with 3-of-20, then modify it with a client configured
3618 # to do 3-of-10. #1510 tracks a failure here
3619 self.nodemaker.default_encoding_parameters["n"] = 20
3620 d = self.nodemaker.create_mutable_file("old contents")
3622 filecap = n.get_cap().to_string()
3623 del n # we want a new object, not the cached one
3624 self.nodemaker.default_encoding_parameters["n"] = 10
3625 n2 = self.nodemaker.create_from_cap(filecap)
3627 d.addCallback(_created)
3628 def modifier(old_contents, servermap, first_time):
3629 return "new contents"
3630 d.addCallback(lambda n: n.modify(modifier))