2 from cStringIO import StringIO
3 from twisted.trial import unittest
4 from twisted.internet import defer, reactor
5 from allmydata import uri, client
6 from allmydata.nodemaker import NodeMaker
7 from allmydata.util import base32, consumer, fileutil, mathutil
8 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
9 ssk_pubkey_fingerprint_hash
10 from allmydata.util.consumer import MemoryConsumer
11 from allmydata.util.deferredutil import gatherResults
12 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
13 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
14 from allmydata.monitor import Monitor
15 from allmydata.test.common import ShouldFailMixin
16 from allmydata.test.no_network import GridTestMixin
17 from foolscap.api import eventually, fireEventually
18 from foolscap.logging import log
19 from allmydata.storage_client import StorageFarmBroker
20 from allmydata.storage.common import storage_index_to_dir
21 from allmydata.scripts import debug
23 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
24 from allmydata.mutable.common import ResponseCache, \
25 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
26 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
27 NotEnoughServersError, CorruptShareError
28 from allmydata.mutable.retrieve import Retrieve
29 from allmydata.mutable.publish import Publish, MutableFileHandle, \
31 DEFAULT_MAX_SEGMENT_SIZE
32 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
33 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
34 from allmydata.mutable.repairer import MustForceRepairError
36 import allmydata.test.common_util as testutil
37 from allmydata.test.common import TEST_RSA_KEY_SIZE
38 from allmydata.test.test_download import PausingConsumer, \
39 PausingAndStoppingConsumer, StoppingConsumer, \
40 ImmediatelyStoppingConsumer
def eventuaaaaaly(res=None):
    """Return a Deferred that fires with `res` only after three reactor
    turns, approximating a little network latency in these tests.

    Without the `return d`, callers such as FakeStorage.read (which does
    `return eventuaaaaaly(shares)`) would receive None instead of a
    Deferred, so the result must be returned.
    """
    d = fireEventually(res)
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    return d
49 # this "FakeStorage" exists to put the share data in RAM and avoid using real
50 # network connections, both to speed up the tests and to reduce the amount of
51 # non-mutable.py code being exercised.
54 # this class replaces the collection of storage servers, allowing the
55 # tests to examine and manipulate the published shares. It also lets us
56 # control the order in which read queries are answered, to exercise more
# of the error-handling code in Retrieve.
59 # Note that we ignore the storage index: this FakeStorage instance can
60 # only be used for a single storage index.
65 # _sequence is used to cause the responses to occur in a specific
66 # order. If it is in use, then we will defer queries instead of
67 # answering them right away, accumulating the Deferreds in a dict. We
68 # don't know exactly how many queries we'll get, so exactly one
69 # second after the first query arrives, we will release them all (in
73 self._pending_timer = None
def read(self, peerid, storage_index):
    """Return a Deferred firing with the share dict for `peerid`.

    If no response ordering (`self._sequence`) is in effect, answer
    (eventually) right away. Otherwise queue the query; it will be
    released by _fire_readers one second after the first query arrives.
    NOTE: storage_index is ignored — this store serves a single SI.
    """
    shares = self._peers.get(peerid, {})
    if self._sequence is None:
        return eventuaaaaaly(shares)
    # A Deferred must be created to hold this queued answer; the visible
    # code used `d` without defining it and never returned it.
    d = defer.Deferred()
    if not self._pending:
        # first queued query: schedule the release of all queries
        self._pending_timer = reactor.callLater(1.0, self._fire_readers)
    if peerid not in self._pending:
        self._pending[peerid] = []
    self._pending[peerid].append( (d, shares) )
    return d
def _fire_readers(self):
    """Answer all queued read queries: peers named in self._sequence
    first (in that order), then any remaining peers in arbitrary order.
    """
    self._pending_timer = None
    pending = self._pending
    # Reset the queue before answering, so late-arriving queries start a
    # fresh batch instead of being answered twice or leaking.
    self._pending = {}
    for peerid in self._sequence:
        # A sequenced peer may not have issued a query; guard the pop so
        # we don't raise KeyError for absent peers.
        if peerid in pending:
            for (d, shares) in pending.pop(peerid):
                eventually(d.callback, shares)
    # everyone not named in the sequence gets answered last
    for peerid in pending:
        for (d, shares) in pending[peerid]:
            eventually(d.callback, shares)
def write(self, peerid, storage_index, shnum, offset, data):
    """Write `data` at `offset` within share `shnum` for `peerid`,
    extending the share if needed. storage_index is ignored (this fake
    store only ever holds a single SI).

    The visible code used a buffer `f` without creating or positioning
    it; a StringIO seeded with the old share contents is required so the
    write lands at `offset` and the rest of the share is preserved.
    """
    if peerid not in self._peers:
        self._peers[peerid] = {}
    shares = self._peers[peerid]
    f = StringIO()
    f.write(shares.get(shnum, ""))
    f.seek(offset)
    f.write(data)
    shares[shnum] = f.getvalue()
class FakeStorageServer:
    """In-RAM stand-in for a remote storage server rref.

    Forwards fake remote calls to the shared FakeStorage instance and
    counts queries so tests (e.g. test_mdmf_write_count) can assert how
    many remote calls were made.
    """
    def __init__(self, peerid, storage):
        # peerid must be stored: slot_readv and the write vector below
        # read self.peerid. queries counts every remote invocation.
        self.peerid = peerid
        self.storage = storage
        self.queries = 0

    def callRemote(self, methname, *args, **kwargs):
        # dispatch by method name, after a reactor turn, like a real rref
        self.queries += 1
        def _call():
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d = fireEventually()
        d.addCallback(lambda res: _call())
        return d

    def callRemoteOnly(self, methname, *args, **kwargs):
        # fire-and-forget variant: swallow the result and any failure
        self.queries += 1
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)
        return None

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        # corruption advisories are ignored by this fake
        pass

    def slot_readv(self, storage_index, shnums, readv):
        """Read the given (offset, length) vectors from each requested
        share, answering through FakeStorage's (possibly sequenced) read."""
        d = self.storage.read(self.peerid, storage_index)
        def _read(shares):
            response = {}
            for shnum in shares:
                if shnums and shnum not in shnums:
                    continue
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])
            return response
        d.addCallback(_read)
        return d

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        readv = {}
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # not by the test vector — kept as-is to match existing tests.
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             in testv ]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   offset, data)
        answer = (True, readv)
        return fireEventually(answer)
def flip_bit(original, byte_offset):
    """Return a copy of `original` with the low bit of the byte at
    `byte_offset` inverted — a one-bit share corruption."""
    prefix = original[:byte_offset]
    suffix = original[byte_offset+1:]
    flipped = chr(ord(original[byte_offset]) ^ 0x01)
    return prefix + flipped + suffix
def add_two(original, byte_offset):
    """Corrupt the byte at `byte_offset` by toggling its 0x02 bit.

    Flipping the low bit is not enough for the version-number byte,
    because both 0 and 1 are valid versions. Toggling 0x02 maps 0->2 and
    1->3 (both invalid), which for those values is the same as adding two.
    """
    corrupted = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + corrupted + original[byte_offset+1:]
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    """Corrupt shares held in FakeStorage `s` at the given offset.

    `offset` may be a field name (looked up in the share's offset table),
    a raw integer, or a (name, delta) tuple; `offset_offset` is added on
    top. If shnums_to_corrupt is None, corrupt all shares; otherwise it
    is a list of shnums to corrupt. Returns a Deferred firing with `res`
    once every share has been rewritten.
    """
    ds = []
    for peerid in s._peers:
        shares = s._peers[peerid]
        for shnum in shares:
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
                continue
            data = shares[shnum]
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum, shares):
                (seqnum,
                 root_hash,
                 IV,
                 segsize,
                 datalen,
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                else:
                    offset1 = offset
                    offset2 = 0
                if offset1 == "pubkey" and IV:
                    # fixed pubkey offset in SDMF shares — TODO confirm
                    # against allmydata.mutable.layout
                    real_offset = 107
                elif offset1 in o:
                    real_offset = o[offset1]
                else:
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                    f = add_two
                else:
                    f = flip_bit
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum, shares)
            ds.append(d)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    return dl
def make_storagebroker(s=None, num_peers=10):
    """Build a StorageFarmBroker populated with `num_peers`
    FakeStorageServers, all backed by the shared FakeStorage `s`.

    A fresh FakeStorage is created when s is None — the visible code
    would otherwise hand None straight to every FakeStorageServer.
    """
    if not s:
        s = FakeStorage()
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
               "permutation-seed-base32": base32.b2a(peerid) }
        storage_broker.test_add_rref(peerid, fss, ann)
    return storage_broker
def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
    """Build a NodeMaker wired to a fake storage grid of `num_peers`
    servers, with 3-of-10 default encoding and SDMF as the default
    mutable version. keysize=None means "leave the generator's default
    alone" (publish_empty_sdmf relies on this).
    """
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    if keysize:
        # guard: callers may pass keysize=None to skip overriding
        keygen.set_default_keysize(keysize)
    # NOTE(review): the two None placeholder args (history/uploader slots)
    # were elided from this view — confirm against NodeMaker's signature.
    nodemaker = NodeMaker(storage_broker, sh, None,
                          None, None,
                          {"k": 3, "n": 10}, SDMF_VERSION, keygen)
    return nodemaker
253 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
254 # this used to be in Publish, but we removed the limit. Some of
255 # these tests test whether the new code correctly allows files
256 # larger than the limit.
257 OLD_MAX_SEGMENT_SIZE = 3500000
259 self._storage = s = FakeStorage()
260 self.nodemaker = make_nodemaker(s)
262 def test_create(self):
263 d = self.nodemaker.create_mutable_file()
265 self.failUnless(isinstance(n, MutableFileNode))
266 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
267 sb = self.nodemaker.storage_broker
268 peer0 = sorted(sb.get_all_serverids())[0]
269 shnums = self._storage._peers[peer0].keys()
270 self.failUnlessEqual(len(shnums), 1)
271 d.addCallback(_created)
275 def test_create_mdmf(self):
276 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
278 self.failUnless(isinstance(n, MutableFileNode))
279 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
280 sb = self.nodemaker.storage_broker
281 peer0 = sorted(sb.get_all_serverids())[0]
282 shnums = self._storage._peers[peer0].keys()
283 self.failUnlessEqual(len(shnums), 1)
284 d.addCallback(_created)
287 def test_single_share(self):
288 # Make sure that we tolerate publishing a single share.
289 self.nodemaker.default_encoding_parameters['k'] = 1
290 self.nodemaker.default_encoding_parameters['happy'] = 1
291 self.nodemaker.default_encoding_parameters['n'] = 1
292 d = defer.succeed(None)
293 for v in (SDMF_VERSION, MDMF_VERSION):
294 d.addCallback(lambda ignored, v=v:
295 self.nodemaker.create_mutable_file(version=v))
297 self.failUnless(isinstance(n, MutableFileNode))
300 d.addCallback(_created)
301 d.addCallback(lambda n:
302 n.overwrite(MutableData("Contents" * 50000)))
303 d.addCallback(lambda ignored:
304 self._node.download_best_version())
305 d.addCallback(lambda contents:
306 self.failUnlessEqual(contents, "Contents" * 50000))
309 def test_max_shares(self):
310 self.nodemaker.default_encoding_parameters['n'] = 255
311 d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
313 self.failUnless(isinstance(n, MutableFileNode))
314 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
315 sb = self.nodemaker.storage_broker
316 num_shares = sum([len(self._storage._peers[x].keys()) for x \
317 in sb.get_all_serverids()])
318 self.failUnlessEqual(num_shares, 255)
321 d.addCallback(_created)
322 # Now we upload some contents
323 d.addCallback(lambda n:
324 n.overwrite(MutableData("contents" * 50000)))
325 # ...then download contents
326 d.addCallback(lambda ignored:
327 self._node.download_best_version())
328 # ...and check to make sure everything went okay.
329 d.addCallback(lambda contents:
330 self.failUnlessEqual("contents" * 50000, contents))
333 def test_max_shares_mdmf(self):
334 # Test how files behave when there are 255 shares.
335 self.nodemaker.default_encoding_parameters['n'] = 255
336 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
338 self.failUnless(isinstance(n, MutableFileNode))
339 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
340 sb = self.nodemaker.storage_broker
341 num_shares = sum([len(self._storage._peers[x].keys()) for x \
342 in sb.get_all_serverids()])
343 self.failUnlessEqual(num_shares, 255)
346 d.addCallback(_created)
347 d.addCallback(lambda n:
348 n.overwrite(MutableData("contents" * 50000)))
349 d.addCallback(lambda ignored:
350 self._node.download_best_version())
351 d.addCallback(lambda contents:
352 self.failUnlessEqual(contents, "contents" * 50000))
355 def test_mdmf_filenode_cap(self):
356 # Test that an MDMF filenode, once created, returns an MDMF URI.
357 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
359 self.failUnless(isinstance(n, MutableFileNode))
361 self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
362 rcap = n.get_readcap()
363 self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
364 vcap = n.get_verify_cap()
365 self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
366 d.addCallback(_created)
370 def test_create_from_mdmf_writecap(self):
371 # Test that the nodemaker is capable of creating an MDMF
372 # filenode given an MDMF cap.
373 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
375 self.failUnless(isinstance(n, MutableFileNode))
377 self.failUnless(s.startswith("URI:MDMF"))
378 n2 = self.nodemaker.create_from_cap(s)
379 self.failUnless(isinstance(n2, MutableFileNode))
380 self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
381 self.failUnlessEqual(n.get_uri(), n2.get_uri())
382 d.addCallback(_created)
386 def test_create_from_mdmf_readcap(self):
387 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
389 self.failUnless(isinstance(n, MutableFileNode))
390 s = n.get_readonly_uri()
391 n2 = self.nodemaker.create_from_cap(s)
392 self.failUnless(isinstance(n2, MutableFileNode))
394 # Check that it's a readonly node
395 self.failUnless(n2.is_readonly())
396 d.addCallback(_created)
400 def test_internal_version_from_cap(self):
401 # MutableFileNodes and MutableFileVersions have an internal
402 # switch that tells them whether they're dealing with an SDMF or
403 # MDMF mutable file when they start doing stuff. We want to make
404 # sure that this is set appropriately given an MDMF cap.
405 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
407 self.uri = n.get_uri()
408 self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
410 n2 = self.nodemaker.create_from_cap(self.uri)
411 self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
412 d.addCallback(_created)
416 def test_serialize(self):
417 n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
419 def _callback(*args, **kwargs):
420 self.failUnlessEqual(args, (4,) )
421 self.failUnlessEqual(kwargs, {"foo": 5})
424 d = n._do_serialized(_callback, 4, foo=5)
425 def _check_callback(res):
426 self.failUnlessEqual(res, 6)
427 self.failUnlessEqual(calls, [1])
428 d.addCallback(_check_callback)
431 raise ValueError("heya")
432 d.addCallback(lambda res:
433 self.shouldFail(ValueError, "_check_errback", "heya",
434 n._do_serialized, _errback))
437 def test_upload_and_download(self):
438 d = self.nodemaker.create_mutable_file()
440 d = defer.succeed(None)
441 d.addCallback(lambda res: n.get_servermap(MODE_READ))
442 d.addCallback(lambda smap: smap.dump(StringIO()))
443 d.addCallback(lambda sio:
444 self.failUnless("3-of-10" in sio.getvalue()))
445 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
446 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
447 d.addCallback(lambda res: n.download_best_version())
448 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
449 d.addCallback(lambda res: n.get_size_of_best_version())
450 d.addCallback(lambda size:
451 self.failUnlessEqual(size, len("contents 1")))
452 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
453 d.addCallback(lambda res: n.download_best_version())
454 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
455 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
456 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
457 d.addCallback(lambda res: n.download_best_version())
458 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
459 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
460 d.addCallback(lambda smap:
461 n.download_version(smap,
462 smap.best_recoverable_version()))
463 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
464 # test a file that is large enough to overcome the
465 # mapupdate-to-retrieve data caching (i.e. make the shares larger
466 # than the default readsize, which is 2000 bytes). A 15kB file
467 # will have 5kB shares.
468 d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
469 d.addCallback(lambda res: n.download_best_version())
470 d.addCallback(lambda res:
471 self.failUnlessEqual(res, "large size file" * 1000))
473 d.addCallback(_created)
477 def test_upload_and_download_mdmf(self):
478 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
480 d = defer.succeed(None)
481 d.addCallback(lambda ignored:
482 n.get_servermap(MODE_READ))
483 def _then(servermap):
484 dumped = servermap.dump(StringIO())
485 self.failUnlessIn("3-of-10", dumped.getvalue())
487 # Now overwrite the contents with some new contents. We want
488 # to make them big enough to force the file to be uploaded
489 # in more than one segment.
490 big_contents = "contents1" * 100000 # about 900 KiB
491 big_contents_uploadable = MutableData(big_contents)
492 d.addCallback(lambda ignored:
493 n.overwrite(big_contents_uploadable))
494 d.addCallback(lambda ignored:
495 n.download_best_version())
496 d.addCallback(lambda data:
497 self.failUnlessEqual(data, big_contents))
498 # Overwrite the contents again with some new contents. As
499 # before, they need to be big enough to force multiple
500 # segments, so that we make the downloader deal with
502 bigger_contents = "contents2" * 1000000 # about 9MiB
503 bigger_contents_uploadable = MutableData(bigger_contents)
504 d.addCallback(lambda ignored:
505 n.overwrite(bigger_contents_uploadable))
506 d.addCallback(lambda ignored:
507 n.download_best_version())
508 d.addCallback(lambda data:
509 self.failUnlessEqual(data, bigger_contents))
511 d.addCallback(_created)
515 def test_retrieve_producer_mdmf(self):
516 # We should make sure that the retriever is able to pause and stop
518 data = "contents1" * 100000
519 d = self.nodemaker.create_mutable_file(MutableData(data),
520 version=MDMF_VERSION)
521 d.addCallback(lambda node: node.get_best_mutable_version())
522 d.addCallback(self._test_retrieve_producer, "MDMF", data)
525 # note: SDMF has only one big segment, so we can't use the usual
526 # after-the-first-write() trick to pause or stop the download.
527 # Disabled until we find a better approach.
528 def OFF_test_retrieve_producer_sdmf(self):
529 data = "contents1" * 100000
530 d = self.nodemaker.create_mutable_file(MutableData(data),
531 version=SDMF_VERSION)
532 d.addCallback(lambda node: node.get_best_mutable_version())
533 d.addCallback(self._test_retrieve_producer, "SDMF", data)
536 def _test_retrieve_producer(self, version, kind, data):
537 # Now we'll retrieve it into a pausing consumer.
538 c = PausingConsumer()
540 d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
542 c2 = PausingAndStoppingConsumer()
543 d.addCallback(lambda ign:
544 self.shouldFail(DownloadStopped, kind+"_pause_stop",
545 "our Consumer called stopProducing()",
548 c3 = StoppingConsumer()
549 d.addCallback(lambda ign:
550 self.shouldFail(DownloadStopped, kind+"_stop",
551 "our Consumer called stopProducing()",
554 c4 = ImmediatelyStoppingConsumer()
555 d.addCallback(lambda ign:
556 self.shouldFail(DownloadStopped, kind+"_stop_imm",
557 "our Consumer called stopProducing()",
561 c5 = MemoryConsumer()
562 d1 = version.read(c5)
563 c5.producer.stopProducing()
564 return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
565 "our Consumer called stopProducing()",
570 def test_download_from_mdmf_cap(self):
571 # We should be able to download an MDMF file given its cap
572 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
574 self.uri = node.get_uri()
575 # also confirm that the cap has no extension fields
576 pieces = self.uri.split(":")
577 self.failUnlessEqual(len(pieces), 4)
579 return node.overwrite(MutableData("contents1" * 100000))
581 node = self.nodemaker.create_from_cap(self.uri)
582 return node.download_best_version()
583 def _downloaded(data):
584 self.failUnlessEqual(data, "contents1" * 100000)
585 d.addCallback(_created)
587 d.addCallback(_downloaded)
591 def test_mdmf_write_count(self):
592 # Publishing an MDMF file should only cause one write for each
593 # share that is to be published. Otherwise, we introduce
594 # undesirable semantics that are a regression from SDMF
595 upload = MutableData("MDMF" * 100000) # about 400 KiB
596 d = self.nodemaker.create_mutable_file(upload,
597 version=MDMF_VERSION)
598 def _check_server_write_counts(ignored):
599 sb = self.nodemaker.storage_broker
600 for server in sb.servers.itervalues():
601 self.failUnlessEqual(server.get_rref().queries, 1)
602 d.addCallback(_check_server_write_counts)
606 def test_create_with_initial_contents(self):
607 upload1 = MutableData("contents 1")
608 d = self.nodemaker.create_mutable_file(upload1)
610 d = n.download_best_version()
611 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
612 upload2 = MutableData("contents 2")
613 d.addCallback(lambda res: n.overwrite(upload2))
614 d.addCallback(lambda res: n.download_best_version())
615 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
617 d.addCallback(_created)
621 def test_create_mdmf_with_initial_contents(self):
622 initial_contents = "foobarbaz" * 131072 # 900KiB
623 initial_contents_uploadable = MutableData(initial_contents)
624 d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
625 version=MDMF_VERSION)
627 d = n.download_best_version()
628 d.addCallback(lambda data:
629 self.failUnlessEqual(data, initial_contents))
630 uploadable2 = MutableData(initial_contents + "foobarbaz")
631 d.addCallback(lambda ignored:
632 n.overwrite(uploadable2))
633 d.addCallback(lambda ignored:
634 n.download_best_version())
635 d.addCallback(lambda data:
636 self.failUnlessEqual(data, initial_contents +
639 d.addCallback(_created)
643 def test_response_cache_memory_leak(self):
644 d = self.nodemaker.create_mutable_file("contents")
646 d = n.download_best_version()
647 d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
648 d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
650 def _check_cache(expected):
651 # The total size of cache entries should not increase on the second download;
652 # in fact the cache contents should be identical.
653 d2 = n.download_best_version()
654 d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
656 d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
658 d.addCallback(_created)
661 def test_create_with_initial_contents_function(self):
662 data = "initial contents"
663 def _make_contents(n):
664 self.failUnless(isinstance(n, MutableFileNode))
665 key = n.get_writekey()
666 self.failUnless(isinstance(key, str), key)
667 self.failUnlessEqual(len(key), 16) # AES key size
668 return MutableData(data)
669 d = self.nodemaker.create_mutable_file(_make_contents)
671 return n.download_best_version()
672 d.addCallback(_created)
673 d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
677 def test_create_mdmf_with_initial_contents_function(self):
678 data = "initial contents" * 100000
679 def _make_contents(n):
680 self.failUnless(isinstance(n, MutableFileNode))
681 key = n.get_writekey()
682 self.failUnless(isinstance(key, str), key)
683 self.failUnlessEqual(len(key), 16)
684 return MutableData(data)
685 d = self.nodemaker.create_mutable_file(_make_contents,
686 version=MDMF_VERSION)
687 d.addCallback(lambda n:
688 n.download_best_version())
689 d.addCallback(lambda data2:
690 self.failUnlessEqual(data2, data))
694 def test_create_with_too_large_contents(self):
695 BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
696 BIG_uploadable = MutableData(BIG)
697 d = self.nodemaker.create_mutable_file(BIG_uploadable)
699 other_BIG_uploadable = MutableData(BIG)
700 d = n.overwrite(other_BIG_uploadable)
702 d.addCallback(_created)
def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
    """Assert (asynchronously) that the best recoverable version of
    mutable node `n` has sequence number `expected_seqnum`; `which`
    labels the failure. Returns the Deferred so callers can chain on it
    — without the return, the assertion's outcome would be dropped.
    """
    d = n.get_servermap(MODE_READ)
    d.addCallback(lambda servermap: servermap.best_recoverable_version())
    d.addCallback(lambda verinfo:
                  self.failUnlessEqual(verinfo[0], expected_seqnum, which))
    return d
712 def test_modify(self):
713 def _modifier(old_contents, servermap, first_time):
714 new_contents = old_contents + "line2"
716 def _non_modifier(old_contents, servermap, first_time):
718 def _none_modifier(old_contents, servermap, first_time):
720 def _error_modifier(old_contents, servermap, first_time):
721 raise ValueError("oops")
722 def _toobig_modifier(old_contents, servermap, first_time):
723 new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
726 def _ucw_error_modifier(old_contents, servermap, first_time):
727 # simulate an UncoordinatedWriteError once
730 raise UncoordinatedWriteError("simulated")
731 new_contents = old_contents + "line3"
733 def _ucw_error_non_modifier(old_contents, servermap, first_time):
734 # simulate an UncoordinatedWriteError once, and don't actually
735 # modify the contents on subsequent invocations
738 raise UncoordinatedWriteError("simulated")
741 initial_contents = "line1"
742 d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
744 d = n.modify(_modifier)
745 d.addCallback(lambda res: n.download_best_version())
746 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
747 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
749 d.addCallback(lambda res: n.modify(_non_modifier))
750 d.addCallback(lambda res: n.download_best_version())
751 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
752 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
754 d.addCallback(lambda res: n.modify(_none_modifier))
755 d.addCallback(lambda res: n.download_best_version())
756 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
757 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
759 d.addCallback(lambda res:
760 self.shouldFail(ValueError, "error_modifier", None,
761 n.modify, _error_modifier))
762 d.addCallback(lambda res: n.download_best_version())
763 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
764 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
767 d.addCallback(lambda res: n.download_best_version())
768 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
769 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
771 d.addCallback(lambda res: n.modify(_ucw_error_modifier))
772 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
773 d.addCallback(lambda res: n.download_best_version())
774 d.addCallback(lambda res: self.failUnlessEqual(res,
776 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
778 def _reset_ucw_error_modifier(res):
781 d.addCallback(_reset_ucw_error_modifier)
783 # in practice, this n.modify call should publish twice: the first
784 # one gets a UCWE, the second does not. But our test jig (in
785 # which the modifier raises the UCWE) skips over the first one,
786 # so in this test there will be only one publish, and the seqnum
787 # will only be one larger than the previous test, not two (i.e. 4
789 d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
790 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
791 d.addCallback(lambda res: n.download_best_version())
792 d.addCallback(lambda res: self.failUnlessEqual(res,
794 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
795 d.addCallback(lambda res: n.modify(_toobig_modifier))
797 d.addCallback(_created)
801 def test_modify_backoffer(self):
802 def _modifier(old_contents, servermap, first_time):
803 return old_contents + "line2"
805 def _ucw_error_modifier(old_contents, servermap, first_time):
806 # simulate an UncoordinatedWriteError once
809 raise UncoordinatedWriteError("simulated")
810 return old_contents + "line3"
811 def _always_ucw_error_modifier(old_contents, servermap, first_time):
812 raise UncoordinatedWriteError("simulated")
813 def _backoff_stopper(node, f):
815 def _backoff_pauser(node, f):
817 reactor.callLater(0.5, d.callback, None)
820 # the give-up-er will hit its maximum retry count quickly
821 giveuper = BackoffAgent()
822 giveuper._delay = 0.1
825 d = self.nodemaker.create_mutable_file(MutableData("line1"))
827 d = n.modify(_modifier)
828 d.addCallback(lambda res: n.download_best_version())
829 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
830 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
832 d.addCallback(lambda res:
833 self.shouldFail(UncoordinatedWriteError,
834 "_backoff_stopper", None,
835 n.modify, _ucw_error_modifier,
837 d.addCallback(lambda res: n.download_best_version())
838 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
839 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
841 def _reset_ucw_error_modifier(res):
844 d.addCallback(_reset_ucw_error_modifier)
845 d.addCallback(lambda res: n.modify(_ucw_error_modifier,
847 d.addCallback(lambda res: n.download_best_version())
848 d.addCallback(lambda res: self.failUnlessEqual(res,
850 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
852 d.addCallback(lambda res:
853 self.shouldFail(UncoordinatedWriteError,
855 n.modify, _always_ucw_error_modifier,
857 d.addCallback(lambda res: n.download_best_version())
858 d.addCallback(lambda res: self.failUnlessEqual(res,
860 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
863 d.addCallback(_created)
866 def test_upload_and_download_full_size_keys(self):
867 self.nodemaker.key_generator = client.KeyGenerator()
868 d = self.nodemaker.create_mutable_file()
870 d = defer.succeed(None)
871 d.addCallback(lambda res: n.get_servermap(MODE_READ))
872 d.addCallback(lambda smap: smap.dump(StringIO()))
873 d.addCallback(lambda sio:
874 self.failUnless("3-of-10" in sio.getvalue()))
875 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
876 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
877 d.addCallback(lambda res: n.download_best_version())
878 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
879 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
880 d.addCallback(lambda res: n.download_best_version())
881 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
882 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
883 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
884 d.addCallback(lambda res: n.download_best_version())
885 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
886 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
887 d.addCallback(lambda smap:
888 n.download_version(smap,
889 smap.best_recoverable_version()))
890 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
892 d.addCallback(_created)
896 def test_size_after_servermap_update(self):
897 # a mutable file node should have something to say about how big
898 # it is after a servermap update is performed, since this tells
899 # us how large the best version of that mutable file is.
900 d = self.nodemaker.create_mutable_file()
903 return n.get_servermap(MODE_READ)
904 d.addCallback(_created)
905 d.addCallback(lambda ignored:
906 self.failUnlessEqual(self.n.get_size(), 0))
907 d.addCallback(lambda ignored:
908 self.n.overwrite(MutableData("foobarbaz")))
909 d.addCallback(lambda ignored:
910 self.failUnlessEqual(self.n.get_size(), 9))
911 d.addCallback(lambda ignored:
912 self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
913 d.addCallback(_created)
914 d.addCallback(lambda ignored:
915 self.failUnlessEqual(self.n.get_size(), 9))
920 def publish_one(self):
921 # publish a file and create shares, which can then be manipulated
923 self.CONTENTS = "New contents go here" * 1000
924 self.uploadable = MutableData(self.CONTENTS)
925 self._storage = FakeStorage()
926 self._nodemaker = make_nodemaker(self._storage)
927 self._storage_broker = self._nodemaker.storage_broker
928 d = self._nodemaker.create_mutable_file(self.uploadable)
931 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
932 d.addCallback(_created)
def publish_mdmf(self):
    """Publish an MDMF file (CONTENTS spans several segments); sets the
    same attributes as publish_one. Returns a Deferred."""
    # like publish_one, except that the result is guaranteed to be
    # MDMF.
    # self.CONTENTS should have more than one segment.
    self.CONTENTS = "This is an MDMF file" * 100000
    self.uploadable = MutableData(self.CONTENTS)
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage)
    self._storage_broker = self._nodemaker.storage_broker
    d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
    def _created(node):
        self._fn = node
        self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
    d.addCallback(_created)
    return d
def publish_sdmf(self):
    """Publish a file pinned to SDMF; sets the same attributes as
    publish_one. Returns a Deferred."""
    # like publish_one, except that the result is guaranteed to be
    # SDMF.
    self.CONTENTS = "This is an SDMF file" * 1000
    self.uploadable = MutableData(self.CONTENTS)
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage)
    self._storage_broker = self._nodemaker.storage_broker
    d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
    def _created(node):
        self._fn = node
        self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
    d.addCallback(_created)
    return d
def publish_empty_sdmf(self):
    """Publish a zero-length SDMF file; sets the same attributes as
    publish_one. Returns a Deferred."""
    # NOTE(review): the CONTENTS assignment was missing from this copy;
    # the method name implies the empty string — confirm upstream.
    self.CONTENTS = ""
    self.uploadable = MutableData(self.CONTENTS)
    self._storage = FakeStorage()
    # keysize=None: accept whatever default key the nodemaker picks
    self._nodemaker = make_nodemaker(self._storage, keysize=None)
    self._storage_broker = self._nodemaker.storage_broker
    d = self._nodemaker.create_mutable_file(self.uploadable,
                                            version=SDMF_VERSION)
    def _created(node):
        self._fn = node
        self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
    d.addCallback(_created)
    return d
def publish_multiple(self, version=0):
    """Publish five successive versions of one file, snapshotting the
    shares after each publish into self._copied_shares[0..4].

    Leaves the storage holding version 4 (s4b). Returns a Deferred.
    """
    # NOTE(review): versions 1-4 of CONTENTS were truncated in this
    # copy; any five distinct strings preserve the test's semantics
    # (distinct versions to mix and match) — confirm exact text upstream.
    self.CONTENTS = ["Contents 0",
                     "Contents 1",
                     "Contents 2",
                     "Contents 3a",
                     "Contents 3b"]
    self.uploadables = [MutableData(d) for d in self.CONTENTS]
    self._copied_shares = {}
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage)
    d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
    def _created(node):
        self._fn = node
        # now create multiple versions of the same file, and accumulate
        # their shares, so we can mix and match them later.
        d = defer.succeed(None)
        d.addCallback(self._copy_shares, 0)
        d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
        d.addCallback(self._copy_shares, 1)
        d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
        d.addCallback(self._copy_shares, 2)
        d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
        d.addCallback(self._copy_shares, 3)
        # now we replace all the shares with version s3, and upload a new
        # version to get s4b.
        rollback = dict([(i,2) for i in range(10)])
        d.addCallback(lambda res: self._set_versions(rollback))
        d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
        d.addCallback(self._copy_shares, 4)
        # we leave the storage in state 4
        return d
    d.addCallback(_created)
    return d
1017 def _copy_shares(self, ignored, index):
1018 shares = self._storage._peers
1019 # we need a deep copy
1021 for peerid in shares:
1022 new_shares[peerid] = {}
1023 for shnum in shares[peerid]:
1024 new_shares[peerid][shnum] = shares[peerid][shnum]
1025 self._copied_shares[index] = new_shares
1027 def _set_versions(self, versionmap):
1028 # versionmap maps shnums to which version (0,1,2,3,4) we want the
1029 # share to be at. Any shnum which is left out of the map will stay at
1030 # its current version.
1031 shares = self._storage._peers
1032 oldshares = self._copied_shares
1033 for peerid in shares:
1034 for shnum in shares[peerid]:
1035 if shnum in versionmap:
1036 index = versionmap[shnum]
1037 shares[peerid][shnum] = oldshares[index][peerid][shnum]
class Servermap(unittest.TestCase, PublishMixin):
    """Exercises ServermapUpdater against the fake storage grid."""
    def setUp(self):
        # publish one SDMF file; PublishMixin sets self._fn et al.
        # (the 'def setUp' line was lost in this copy of the file)
        return self.publish_one()
def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                   update_range=None):
    """Build and run a ServermapUpdater in the given mode.

    fn/sb default to the filenode and storage broker from setUp.
    Returns a Deferred firing with the resulting ServerMap.
    """
    # default-guards and the update()/return were truncated in this
    # copy; reconstructed from the fn=None/sb=None defaults and the
    # visible 'sb = self._storage_broker' assignment
    if fn is None:
        fn = self._fn
    if sb is None:
        sb = self._storage_broker
    smu = ServermapUpdater(fn, sb, Monitor(),
                           ServerMap(), mode, update_range=update_range)
    d = smu.update()
    return d
def update_servermap(self, oldmap, mode=MODE_CHECK):
    """Re-run a servermap update on an existing map; returns a Deferred
    firing with the updated ServerMap."""
    smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                           oldmap, mode)
    d = smu.update()
    return d
def failUnlessOneRecoverable(self, sm, num_shares):
    """Assert sm shows exactly one (recoverable) version with
    num_shares shares of a 3-of-10 encoding; returns sm so the caller's
    Deferred chain can keep passing the map along."""
    self.failUnlessEqual(len(sm.recoverable_versions()), 1)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
    best = sm.best_recoverable_version()
    self.failIfEqual(best, None)
    self.failUnlessEqual(sm.recoverable_versions(), set([best]))
    self.failUnlessEqual(len(sm.shares_available()), 1)
    self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
    # list() keeps this working whether .items() is a list (py2) or a
    # view (py3)
    shnum, servers = list(sm.make_sharemap().items())[0]
    server = list(servers)[0]
    self.failUnlessEqual(sm.version_on_server(server, shnum), best)
    self.failUnlessEqual(sm.version_on_server(server, 666), None)
    # callers chain further assertions/updates on the returned map
    return sm
def test_basic(self):
    """Walk one file through every servermap mode, then re-use and
    re-update the same map."""
    d = defer.succeed(None)
    ms = self.make_servermap
    us = self.update_servermap

    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    # this mode stops at k+epsilon, and epsilon=k, so 6 shares
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    # this mode stops at 'k' shares
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

    # and can we re-use the same servermap? Note that these are sorted in
    # increasing order of number of servers queried, since once a server
    # gets into the servermap, we'll always ask it for an update.
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
    d.addCallback(lambda sm: us(sm, mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    # trial must wait on this chain, so return it
    return d
def test_fetch_privkey(self):
    """MODE_WRITE must fetch the privkey, both when it fits in the
    first read and when a large file pushes it out of reach."""
    d = defer.succeed(None)
    # use the sibling filenode (which hasn't been used yet), and make
    # sure it can fetch the privkey. The file is small, so the privkey
    # will be fetched on the first (query) pass.
    d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

    # create a new file, which is large enough to knock the privkey out
    # of the early part of the file
    LARGE = "These are Larger contents" * 200 # about 5KB
    LARGE_uploadable = MutableData(LARGE)
    d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
    def _created(large_fn):
        large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
        return self.make_servermap(MODE_WRITE, large_fn2)
    d.addCallback(_created)
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    return d
def test_mark_bad(self):
    """Shares marked bad must vanish from the map, and a MODE_WRITE
    update must find replacements for them."""
    d = defer.succeed(None)
    ms = self.make_servermap

    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    def _made_map(sm):
        v = sm.best_recoverable_version()
        vm = sm.make_versionmap()
        shares = list(vm[v])
        self.failUnlessEqual(len(shares), 6)
        self._corrupted = set()
        # mark the first 5 shares as corrupt, then update the servermap.
        # The map should not have the marked shares it in any more, and
        # new shares should be found to replace the missing ones.
        for (shnum, server, timestamp) in shares:
            # guard reconstructed: only the first five shnums are marked
            # (the final assertion expects 5 survivors)
            if shnum < 5:
                self._corrupted.add( (server, shnum) )
                sm.mark_bad_share(server, shnum, "")
        return self.update_servermap(sm, MODE_WRITE)
    d.addCallback(_made_map)
    def _check_map(sm):
        # this should find all 5 shares that weren't marked bad
        v = sm.best_recoverable_version()
        vm = sm.make_versionmap()
        shares = list(vm[v])
        for (server, shnum) in self._corrupted:
            server_shares = sm.debug_shares_on_server(server)
            self.failIf(shnum in server_shares,
                        "%d was in %s" % (shnum, server_shares))
        self.failUnlessEqual(len(shares), 5)
    d.addCallback(_check_map)
    return d
def failUnlessNoneRecoverable(self, sm):
    """Assert the map reports no versions at all, recoverable or not."""
    eq = self.failUnlessEqual
    eq(len(sm.recoverable_versions()), 0)
    eq(len(sm.unrecoverable_versions()), 0)
    eq(sm.best_recoverable_version(), None)
    eq(len(sm.shares_available()), 0)
def test_no_shares(self):
    """With every share deleted, all modes must report nothing
    recoverable and nothing unrecoverable."""
    self._storage._peers = {} # delete all shares
    ms = self.make_servermap
    d = defer.succeed(None)

    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
    return d
def failUnlessNotQuiteEnough(self, sm):
    """Assert the map sees exactly one version, unrecoverable, with only
    2 shares of a 3-of-10 encoding available."""
    eq = self.failUnlessEqual
    eq(len(sm.recoverable_versions()), 0)
    eq(len(sm.unrecoverable_versions()), 1)
    eq(sm.best_recoverable_version(), None)
    eq(len(sm.shares_available()), 1)
    eq(sm.shares_available().values()[0], (2,3,10) )
def test_not_quite_enough_shares(self):
    # NOTE(review): this copy is missing its setup lines — 's' is never
    # bound (presumably 's = self._storage'), and the code that leaves
    # exactly two shares behind after the wipe below is absent; confirm
    # against the upstream file before relying on this test.
    ms = self.make_servermap
    num_shares = len(s._peers)
    for peerid in s._peers:
        s._peers[peerid] = {}
    # now there ought to be only two shares left
    assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

    d = defer.succeed(None)

    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda sm:
                  self.failUnlessEqual(len(sm.make_sharemap()), 2))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
def test_servermapupdater_finds_mdmf_files(self):
    # setUp already published an MDMF file for us. We just need to
    # make sure that when we run the ServermapUpdater, the file is
    # reported to have one recoverable version.
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_mdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_CHECK))
    # Calling make_servermap also updates the servermap in the mode
    # that we specify, so we just need to see what it says.
    def _check_servermap(sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
    d.addCallback(_check_servermap)
    return d
def test_fetch_update(self):
    """A MODE_WRITE update with an update_range must gather one chunk
    of update data from each of the 10 shares."""
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_mdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
    def _check_servermap(sm):
        # all 10 shares should have contributed update data
        self.failUnlessEqual(len(sm.update_data), 10)
        for data in sm.update_data.itervalues():
            self.failUnlessEqual(len(data), 1)
    d.addCallback(_check_servermap)
    return d
def test_servermapupdater_finds_sdmf_files(self):
    """An SDMF publish must likewise be reported as one recoverable
    version."""
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_sdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_CHECK))
    d.addCallback(lambda servermap:
        self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
    return d
class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
    """Publish-then-retrieve round trips, including corruption cases."""
    def setUp(self):
        # (the 'def setUp' line was lost in this copy of the file)
        return self.publish_one()
def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
    """Run a ServermapUpdater over self._fn.

    oldmap defaults to a fresh ServerMap; sb defaults to the broker from
    setUp. Returns a Deferred firing with the updated map.
    """
    # the None-guards and update()/return were truncated in this copy;
    # reconstructed from the oldmap=None/sb=None defaults
    if oldmap is None:
        oldmap = ServerMap()
    if sb is None:
        sb = self._storage_broker
    smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
    d = smu.update()
    return d
def abbrev_verinfo(self, verinfo):
    """Return a short 'seqnum-roothash4' label for a verinfo tuple, or
    None when verinfo is None (dump_servermap feeds it
    best_recoverable_version(), which can be None)."""
    if verinfo is None:
        return None
    (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
     offsets_tuple) = verinfo
    return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
def abbrev_verinfo_dict(self, verinfo_d):
    """Return a copy of verinfo_d with its verinfo-tuple keys replaced
    by short 'seqnum-roothash4' strings."""
    # 'output' must exist before the loop and be returned afterwards;
    # both lines were missing in this copy
    output = {}
    for verinfo,value in verinfo_d.items():
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = verinfo
        output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
    return output
def dump_servermap(self, servermap):
    # debugging helper (python2 print statements); call sites are kept
    # commented out in the tests (see test_basic)
    print "SERVERMAP", servermap
    print "RECOVERABLE", [self.abbrev_verinfo(v)
                          for v in servermap.recoverable_versions()]
    print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
    print "available", self.abbrev_verinfo_dict(servermap.shares_available())
def do_download(self, servermap, version=None):
    """Retrieve the given version (default: best recoverable) and
    return a Deferred firing with the plaintext string."""
    # guard reconstructed from the version=None default; without it the
    # explicit-version argument would always be overwritten
    if version is None:
        version = servermap.best_recoverable_version()
    r = Retrieve(self._fn, self._storage_broker, servermap, version)
    c = consumer.MemoryConsumer()
    d = r.download(consumer=c)
    d.addCallback(lambda mc: "".join(mc.chunks))
    # every caller chains on (or returns) this Deferred
    return d
def test_basic(self):
    """Retrieve must return the published plaintext, and the servermap
    must be reusable with and without re-updating."""
    d = self.make_servermap()
    def _do_retrieve(servermap):
        self._smap = servermap
        #self.dump_servermap(servermap)
        self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
        return self.do_download(servermap)
    d.addCallback(_do_retrieve)
    def _retrieved(new_contents):
        self.failUnlessEqual(new_contents, self.CONTENTS)
    d.addCallback(_retrieved)
    # we should be able to re-use the same servermap, both with and
    # without updating it.
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    # clobbering the pubkey should make the servermap updater re-fetch it
    def _clobber_pubkey(res):
        self._fn._pubkey = None
    d.addCallback(_clobber_pubkey)
    d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    return d
def test_all_shares_vanished(self):
    """If every share disappears between mapupdate and retrieve, the
    download must fail with NotEnoughSharesError."""
    d = self.make_servermap()
    def _remove_shares(servermap):
        # wipe each peer's share dict in place (loop body was lost in
        # this copy; clear() is the in-place wipe the loop implies)
        for shares in self._storage._peers.values():
            shares.clear()
        d1 = self.shouldFail(NotEnoughSharesError,
                             "test_all_shares_vanished",
                             "ran out of servers",
                             self.do_download, servermap)
        return d1
    d.addCallback(_remove_shares)
    return d
def test_no_servers(self):
    # if there are no servers, then a MODE_READ servermap should come
    # back empty, without error
    sb2 = make_storagebroker(num_peers=0)
    d = self.make_servermap(sb=sb2)
    def _check_servermap(servermap):
        self.failUnlessEqual(servermap.best_recoverable_version(), None)
        self.failIf(servermap.recoverable_versions())
        self.failIf(servermap.unrecoverable_versions())
        self.failIf(servermap.all_servers())
    d.addCallback(_check_servermap)
    return d
def test_no_servers_download(self):
    """A download attempted with zero servers must fail, but must not
    poison a later attempt made once servers are back."""
    sb2 = make_storagebroker(num_peers=0)
    self._fn._storage_broker = sb2
    d = self.shouldFail(UnrecoverableFileError,
                        "test_no_servers_download",
                        "no recoverable versions",
                        self._fn.download_best_version)
    def _restore(res):
        # a failed download that occurs while we aren't connected to
        # anybody should not prevent a subsequent download from working.
        # This isn't quite the webapi-driven test that #463 wants, but it
        # should be close enough.
        self._fn._storage_broker = self._storage_broker
        return self._fn.download_best_version()
    def _retrieved(new_contents):
        self.failUnlessEqual(new_contents, self.CONTENTS)
    d.addCallback(_restore)
    d.addCallback(_retrieved)
    return d
def _test_corrupt_all(self, offset, substring,
                      should_succeed=False,
                      corrupt_early=True,
                      failure_checker=None,
                      fetch_privkey=False):
    """Corrupt 'offset' in every share, then download.

    corrupt_early=True corrupts before the servermap update; False
    corrupts between mapupdate and retrieve. With should_succeed the
    download must return self.CONTENTS; otherwise it must fail with
    'substring' in the error (or in the servermap's problem list when
    no version is recoverable at all). failure_checker, if given, gets
    the failure for extra assertions. Returns a Deferred firing with
    the servermap.
    """
    # several control-flow lines were truncated in this copy; the
    # if-corrupt_early / should_succeed branches are reconstructed from
    # the visible 'if not corrupt_early:' and the two 'd1 =' arms
    d = defer.succeed(None)
    if corrupt_early:
        d.addCallback(corrupt, self._storage, offset)
    d.addCallback(lambda res: self.make_servermap())
    if not corrupt_early:
        d.addCallback(corrupt, self._storage, offset)
    def _do_retrieve(servermap):
        ver = servermap.best_recoverable_version()
        if ver is None and not should_succeed:
            # no recoverable versions == not succeeding. The problem
            # should be noted in the servermap's list of problems.
            allproblems = [str(f) for f in servermap.get_problems()]
            self.failUnlessIn(substring, "".join(allproblems))
            return servermap
        if should_succeed:
            d1 = self._fn.download_version(servermap, ver,
                                           fetch_privkey)
            d1.addCallback(lambda new_contents:
                           self.failUnlessEqual(new_contents, self.CONTENTS))
        else:
            d1 = self.shouldFail(NotEnoughSharesError,
                                 "_corrupt_all(offset=%s)" % (offset,),
                                 substring,
                                 self._fn.download_version, servermap,
                                 ver, fetch_privkey)
        if failure_checker:
            d1.addCallback(failure_checker)
        d1.addCallback(lambda res: servermap)
        return d1
    d.addCallback(_do_retrieve)
    return d
def test_corrupt_all_verbyte(self):
    # when the version byte is not 0 or 1, we hit an UnknownVersionError
    # error in unpack_share().
    d = self._test_corrupt_all(0, "UnknownVersionError")
    def _check_servermap(servermap):
        # and the dump should mention the problems
        # 's' is the sink passed to dump(); .getvalue() implies a
        # StringIO (imported at the top of the file) — the assignment
        # was missing in this copy
        s = StringIO()
        dump = servermap.dump(s).getvalue()
        self.failUnless("30 PROBLEMS" in dump, dump)
    d.addCallback(_check_servermap)
    return d
# Each of the following corrupts one fixed field in every share; any
# corruption of signed header fields invalidates the signature check.
def test_corrupt_all_seqnum(self):
    # a corrupt sequence number will trigger a bad signature
    return self._test_corrupt_all(1, "signature is invalid")

def test_corrupt_all_R(self):
    # a corrupt root hash will trigger a bad signature
    return self._test_corrupt_all(9, "signature is invalid")

def test_corrupt_all_IV(self):
    # a corrupt salt/IV will trigger a bad signature
    return self._test_corrupt_all(41, "signature is invalid")

def test_corrupt_all_k(self):
    # a corrupt 'k' will trigger a bad signature
    return self._test_corrupt_all(57, "signature is invalid")

def test_corrupt_all_N(self):
    # a corrupt 'N' will trigger a bad signature
    return self._test_corrupt_all(58, "signature is invalid")

def test_corrupt_all_segsize(self):
    # a corrupt segsize will trigger a bad signature
    return self._test_corrupt_all(59, "signature is invalid")

def test_corrupt_all_datalen(self):
    # a corrupt data length will trigger a bad signature
    return self._test_corrupt_all(67, "signature is invalid")
def test_corrupt_all_pubkey(self):
    # a corrupt pubkey won't match the URI's fingerprint. We need to
    # remove the pubkey from the filenode, or else it won't bother trying
    # to fetch the (corrupted) copy from the shares.
    self._fn._pubkey = None
    return self._test_corrupt_all("pubkey",
                                  "pubkey doesn't match fingerprint")

def test_corrupt_all_sig(self):
    # a corrupt signature is a bad one
    # the signature runs from about [543:799], depending upon the length
    # of the pubkey
    return self._test_corrupt_all("signature", "signature is invalid")
def test_corrupt_all_share_hash_chain_number(self):
    # a corrupt share hash chain entry will show up as a bad hash. If we
    # mangle the first byte, that will look like a bad hash number,
    # causing an IndexError
    return self._test_corrupt_all("share_hash_chain", "corrupt hashes")

def test_corrupt_all_share_hash_chain_hash(self):
    # a corrupt share hash chain entry will show up as a bad hash. If we
    # mangle a few bytes in, that will look like a bad hash.
    return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")

def test_corrupt_all_block_hash_tree(self):
    # every share's block hash tree fails to validate
    return self._test_corrupt_all("block_hash_tree",
                                  "block hash tree failure")

def test_corrupt_all_block(self):
    # corrupt blocks are detected via the block hash tree
    return self._test_corrupt_all("share_data", "block hash tree failure")

def test_corrupt_all_encprivkey(self):
    # a corrupted privkey won't even be noticed by the reader, only by a
    # writer, so a plain download should still succeed
    return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
def test_corrupt_all_encprivkey_late(self):
    """Corrupting the privkey after mapupdate should also be harmless
    to a plain download, even when the privkey must be re-fetched."""
    # this should work for the same reason as above, but we corrupt
    # after the servermap update to exercise the error handling
    # path as well.
    # We need to remove the privkey from the node, or the retrieve
    # process won't know to update it.
    self._fn._privkey = None
    # the call's final argument line was truncated in this copy;
    # fetch_privkey=True reconstructed from the nulled privkey above
    return self._test_corrupt_all("enc_privkey",
                                  None, # this shouldn't fail
                                  should_succeed=True,
                                  corrupt_early=False,
                                  fetch_privkey=True)
# disabled until retrieve tests checkstring on each blockfetch. I didn't
# just use a .todo because the failing-but-ignored test emits about 30kB
# of noise.
def OFF_test_corrupt_all_seqnum_late(self):
    # corrupting the seqnum between mapupdate and retrieve should result
    # in NotEnoughSharesError, since each share will look invalid
    def _check(f):
        self.failUnless(f.check(NotEnoughSharesError))
        self.failUnless("uncoordinated write" in str(f))
    return self._test_corrupt_all(1, "ran out of servers",
                                  corrupt_early=False,
                                  failure_checker=_check)
def test_corrupt_all_block_hash_tree_late(self):
    # corrupt the block hash trees between mapupdate and retrieve
    def _check(f):
        self.failUnless(f.check(NotEnoughSharesError))
    return self._test_corrupt_all("block_hash_tree",
                                  "block hash tree failure",
                                  corrupt_early=False,
                                  failure_checker=_check)
def test_corrupt_all_block_late(self):
    # corrupt the share data between mapupdate and retrieve
    def _check(f):
        self.failUnless(f.check(NotEnoughSharesError))
    return self._test_corrupt_all("share_data", "block hash tree failure",
                                  corrupt_early=False,
                                  failure_checker=_check)
def test_basic_pubkey_at_end(self):
    # we corrupt the pubkey in all but the last 'k' shares, allowing the
    # download to succeed but forcing a bunch of retries first. Note that
    # this is rather pessimistic: our Retrieve process will throw away
    # the whole share if the pubkey is bad, even though the rest of the
    # share might be good.

    self._fn._pubkey = None
    k = self._fn.get_required_shares()
    N = self._fn.get_total_shares()
    d = defer.succeed(None)
    d.addCallback(corrupt, self._storage, "pubkey",
                  shnums_to_corrupt=range(0, N-k))
    d.addCallback(lambda res: self.make_servermap())
    def _do_retrieve(servermap):
        self.failUnless(servermap.get_problems())
        self.failUnless("pubkey doesn't match fingerprint"
                        in str(servermap.get_problems()[0]))
        ver = servermap.best_recoverable_version()
        r = Retrieve(self._fn, self._storage_broker, servermap, ver)
        c = consumer.MemoryConsumer()
        return r.download(c)
    d.addCallback(_do_retrieve)
    d.addCallback(lambda mc: "".join(mc.chunks))
    d.addCallback(lambda new_contents:
                  self.failUnlessEqual(new_contents, self.CONTENTS))
    return d
def _test_corrupt_some(self, offset, mdmf=False):
    """Corrupt 'offset' on the first five shares only; the download
    must still succeed by falling back to the remaining shares.

    With mdmf=True, an MDMF file is published first.
    """
    # the if/else arms were collapsed in this copy (two consecutive
    # 'd =' assignments); reconstructed from the mdmf parameter
    if mdmf:
        d = self.publish_mdmf()
    else:
        d = defer.succeed(None)
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, offset, range(5)))
    d.addCallback(lambda ignored:
        self.make_servermap())
    def _do_retrieve(servermap):
        ver = servermap.best_recoverable_version()
        self.failUnless(ver)
        return self._fn.download_best_version()
    d.addCallback(_do_retrieve)
    d.addCallback(lambda new_contents:
        self.failUnlessEqual(new_contents, self.CONTENTS))
    return d
def test_corrupt_some(self):
    # corrupt the data of first five shares (so the servermap thinks
    # they're good but retrieve marks them as bad), so that the
    # MODE_READ set of 6 will be insufficient, forcing node.download to
    # retry with more servers.
    # (SDMF variant; see test_corrupt_some_mdmf below)
    return self._test_corrupt_some("share_data")
def test_download_fails(self):
    """With every signature corrupted there is no recoverable version,
    so download_best_version must fail outright."""
    d = corrupt(None, self._storage, "signature")
    d.addCallback(lambda ignored:
        self.shouldFail(UnrecoverableFileError, "test_download_anyway",
                        "no recoverable versions",
                        self._fn.download_best_version))
    return d
def test_corrupt_mdmf_block_hash_tree(self):
    """Corrupting an MDMF block hash tree entry must make the download
    fail with a block-hash-tree failure."""
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._test_corrupt_all(("block_hash_tree", 12 * 32),
                               "block hash tree failure",
                               corrupt_early=False,
                               should_succeed=False))
    return d
def test_corrupt_mdmf_block_hash_tree_late(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._test_corrupt_all(("block_hash_tree", 12 * 32),
                               "block hash tree failure",
                               # a truncated argument line was
                               # reconstructed as corrupt_early=False
                               # ('late' == corrupt between mapupdate
                               # and retrieve) — confirm upstream
                               corrupt_early=False,
                               should_succeed=False))
    return d
def test_corrupt_mdmf_share_data(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        # TODO: Find out what the block size is and corrupt a
        # specific block, rather than just guessing.
        self._test_corrupt_all(("share_data", 12 * 40),
                               "block hash tree failure",
                               should_succeed=False))
    return d
def test_corrupt_some_mdmf(self):
    # MDMF variant of test_corrupt_some; the call's truncated final
    # argument is mdmf=True (matches _test_corrupt_some's signature)
    return self._test_corrupt_some(("share_data", 12 * 40),
                                   mdmf=True)
def check_good(self, r, where):
    """Assert checker results r are healthy; return r so further
    callbacks can keep inspecting it (mirrors check_bad)."""
    self.failUnless(r.is_healthy(), where)
    return r
def check_bad(self, r, where):
    """Assert checker results r are NOT healthy; return r so later
    callbacks (check_expected_failure, _check_num_bad) can inspect it."""
    self.failIf(r.is_healthy(), where)
    return r
def check_expected_failure(self, r, expected_exception, substring, where):
    """Assert at least one share problem in r is expected_exception
    carrying 'substring'; otherwise self.fail()."""
    for (peerid, storage_index, shnum, f) in r.get_share_problems():
        if f.check(expected_exception):
            self.failUnless(substring in str(f),
                            "%s: substring '%s' not in '%s'" %
                            (where, substring, str(f)))
            # one matching failure is enough; without this return the
            # method would fall through to self.fail() below
            return
    self.fail("%s: didn't see expected exception %s in problems %s" %
              (where, expected_exception, r.get_share_problems()))
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    """Checker/verifier behavior on healthy and corrupted files."""
    def setUp(self):
        # (the 'def setUp' line was lost in this copy of the file)
        return self.publish_one()
# Each test below builds a Deferred chain and must return it so trial
# waits on the assertions; the 'return d' lines (and some loop bodies)
# were lost in this copy and have been restored.
def test_check_good(self):
    d = self._fn.check(Monitor())
    d.addCallback(self.check_good, "test_check_good")
    return d

def test_check_mdmf_good(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_mdmf_good")
    return d

def test_check_no_shares(self):
    # wipe each peer's share dict in place
    for shares in self._storage._peers.values():
        shares.clear()
    d = self._fn.check(Monitor())
    d.addCallback(self.check_bad, "test_check_no_shares")
    return d

def test_check_mdmf_no_shares(self):
    d = self.publish_mdmf()
    def _then(ignored):
        for share in self._storage._peers.values():
            share.clear()
    d.addCallback(_then)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
    return d

def test_check_not_enough_shares(self):
    # keep only shnum 0 on each peer (reconstructed deletion loop)
    for shares in self._storage._peers.values():
        for shnum in shares.keys():
            if shnum > 0:
                del shares[shnum]
    d = self._fn.check(Monitor())
    d.addCallback(self.check_bad, "test_check_not_enough_shares")
    return d

def test_check_mdmf_not_enough_shares(self):
    d = self.publish_mdmf()
    def _then(ignored):
        for shares in self._storage._peers.values():
            for shnum in shares.keys():
                if shnum > 0:
                    del shares[shnum]
    d.addCallback(_then)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")
    return d

def test_check_all_bad_sig(self):
    d = corrupt(None, self._storage, 1) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_all_bad_sig")
    return d

def test_check_mdmf_all_bad_sig(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, 1))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
    return d
def test_verify_mdmf_all_bad_sharedata(self):
    d = self.publish_mdmf()
    # On 8 of the shares, corrupt the beginning of the share data.
    # The signature check during the servermap update won't catch this.
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "share_data", range(8)))
    # On 2 of the shares, corrupt the end of the share data.
    # The signature check during the servermap update won't catch
    # this either, and the retrieval process will have to process
    # all of the segments before it notices.
    d.addCallback(lambda ignored:
        # the block hash tree comes right after the share data, so if we
        # corrupt a little before the block hash tree, we'll corrupt in the
        # last block of each share.
        corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    # The verifier should flag the file as unhealthy, and should
    # list all 10 shares as bad.
    d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
    def _check_num_bad(r):
        self.failIf(r.is_recoverable())
        smap = r.get_servermap()
        self.failUnlessEqual(len(smap.get_bad_shares()), 10)
    d.addCallback(_check_num_bad)
    return d
# Each test below must return its Deferred so trial waits on the
# assertions; the 'return d' lines were lost in this copy.
def test_check_all_bad_blocks(self):
    d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
    # the Checker won't notice this.. it doesn't look at actual data
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_all_bad_blocks")
    return d

def test_check_mdmf_all_bad_blocks(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "share_data"))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
    return d

def test_verify_good(self):
    d = self._fn.check(Monitor(), verify=True)
    d.addCallback(self.check_good, "test_verify_good")
    return d

def test_verify_all_bad_sig(self):
    d = corrupt(None, self._storage, 1) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_all_bad_sig")
    return d

def test_verify_one_bad_sig(self):
    d = corrupt(None, self._storage, 1, [9]) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_sig")
    return d

def test_verify_one_bad_block(self):
    d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
    # the Verifier *will* notice this, since it examines every byte
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_block")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "block hash tree failure",
                  "test_verify_one_bad_block")
    return d

def test_verify_one_bad_sharehash(self):
    d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "corrupt hashes",
                  "test_verify_one_bad_sharehash")
    return d

def test_verify_one_bad_encprivkey(self):
    d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "invalid privkey",
                  "test_verify_one_bad_encprivkey")
    return d

def test_verify_one_bad_encprivkey_uncheckable(self):
    d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
    readonly_fn = self._fn.get_readonly()
    # a read-only node has no way to validate the privkey
    d.addCallback(lambda ignored:
        readonly_fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good,
                  "test_verify_one_bad_encprivkey_uncheckable")
    return d
1853 def test_verify_mdmf_good(self):
1854 d = self.publish_mdmf()
1855 d.addCallback(lambda ignored:
1856 self._fn.check(Monitor(), verify=True))
1857 d.addCallback(self.check_good, "test_verify_mdmf_good")
1861 def test_verify_mdmf_one_bad_block(self):
1862 d = self.publish_mdmf()
1863 d.addCallback(lambda ignored:
1864 corrupt(None, self._storage, "share_data", [1]))
1865 d.addCallback(lambda ignored:
1866 self._fn.check(Monitor(), verify=True))
1867 # We should find one bad block here
1868 d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1869 d.addCallback(self.check_expected_failure,
1870 CorruptShareError, "block hash tree failure",
1871 "test_verify_mdmf_one_bad_block")
# MDMF variant of the bad-encprivkey test: corrupt share #0's encrypted
# private key and expect a CorruptShareError mentioning "privkey".
1875 def test_verify_mdmf_bad_encprivkey(self):
1876 d = self.publish_mdmf()
1877 d.addCallback(lambda ignored:
1878 corrupt(None, self._storage, "enc_privkey", [0]))
1879 d.addCallback(lambda ignored:
1880 self._fn.check(Monitor(), verify=True))
1881 d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1882 d.addCallback(self.check_expected_failure,
1883 CorruptShareError, "privkey",
1884 "test_verify_mdmf_bad_encprivkey")
# Corrupt byte offset 1 of MDMF share #1 (the integer arg selects the
# corruption target -- presumably the signature region; confirm against
# the corrupt() helper, which is defined outside this extract). The
# verify=True check must report the file as bad.
1888 def test_verify_mdmf_bad_sig(self):
1889 d = self.publish_mdmf()
1890 d.addCallback(lambda ignored:
1891 corrupt(None, self._storage, 1, [1]))
1892 d.addCallback(lambda ignored:
1893 self._fn.check(Monitor(), verify=True))
1894 d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
# MDMF bad-encprivkey checked via a read-only node: the readcap holder
# cannot validate the privkey, so the check must still report good.
1898 def test_verify_mdmf_bad_encprivkey_uncheckable(self):
1899 d = self.publish_mdmf()
1900 d.addCallback(lambda ignored:
1901 corrupt(None, self._storage, "enc_privkey", [1]))
1902 d.addCallback(lambda ignored:
1903 self._fn.get_readonly())
1904 d.addCallback(lambda fn:
1905 fn.check(Monitor(), verify=True))
1906 d.addCallback(self.check_good,
1907 "test_verify_mdmf_bad_encprivkey_uncheckable")
# Tests for mutable-file repair: publish a file, damage or version-skew
# the shares held in the fake storage, run check()/repair(), and assert
# on the CheckResults / RepairResults.
# NOTE(review): this extract is sampled -- several interior lines of the
# methods below (returns, loop bodies, inner "def _check(crr):" headers)
# are missing; the comments here describe only what is visible.
1911 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
# Snapshot every (peerid, shnum) -> share-data entry of the given fake
# storage object. NOTE(review): the "return all_shares" line is not
# visible in this extract -- confirm against the original.
1913 def get_shares(self, s):
1914 all_shares = {} # maps (peerid, shnum) to share data
1915 for peerid in s._peers:
1916 shares = s._peers[peerid]
1917 for shnum in shares:
1918 data = shares[shnum]
1919 all_shares[ (peerid, shnum) ] = data
# Append a snapshot of the current share map to self.old_shares, so
# later assertions can compare before/after repair states.
1922 def copy_shares(self, ignored=None):
1923 self.old_shares.append(self.get_shares(self._storage))
# Repairing a healthy file: repair succeeds, shares stay in the same
# places, and each share moves to seqnum+1 (a new IV changes the
# roothash, so byte-equality is not asserted yet).
1925 def test_repair_nop(self):
1926 self.old_shares = []
1927 d = self.publish_one()
1928 d.addCallback(self.copy_shares)
1929 d.addCallback(lambda res: self._fn.check(Monitor()))
1930 d.addCallback(lambda check_results: self._fn.repair(check_results))
1931 def _check_results(rres):
1932 self.failUnless(IRepairResults.providedBy(rres))
1933 self.failUnless(rres.get_successful())
1934 # TODO: examine results
1938 initial_shares = self.old_shares[0]
1939 new_shares = self.old_shares[1]
1940 # TODO: this really shouldn't change anything. When we implement
1941 # a "minimal-bandwidth" repairer", change this test to assert:
1942 #self.failUnlessEqual(new_shares, initial_shares)
1944 # all shares should be in the same place as before
1945 self.failUnlessEqual(set(initial_shares.keys()),
1946 set(new_shares.keys()))
1947 # but they should all be at a newer seqnum. The IV will be
1948 # different, so the roothash will be too.
1949 for key in initial_shares:
1954 k0, N0, segsize0, datalen0,
1955 o0) = unpack_header(initial_shares[key])
1960 k1, N1, segsize1, datalen1,
1961 o1) = unpack_header(new_shares[key])
1962 self.failUnlessEqual(version0, version1)
1963 self.failUnlessEqual(seqnum0+1, seqnum1)
1964 self.failUnlessEqual(k0, k1)
1965 self.failUnlessEqual(N0, N1)
1966 self.failUnlessEqual(segsize0, segsize1)
1967 self.failUnlessEqual(datalen0, datalen1)
1968 d.addCallback(_check_results)
# Assert that the two most recent snapshots taken by copy_shares() are
# identical, i.e. nothing on disk changed in between.
1971 def failIfSharesChanged(self, ignored=None):
1972 old_shares = self.old_shares[-2]
1973 current_shares = self.old_shares[-1]
1974 self.failUnlessEqual(old_shares, current_shares)
# With every share deleted, repair cannot succeed: get_successful()
# must be False (SDMF).
1977 def test_unrepairable_0shares(self):
1978 d = self.publish_one()
1979 def _delete_all_shares(ign):
1980 shares = self._storage._peers
1981 for peerid in shares:
1983 d.addCallback(_delete_all_shares)
1984 d.addCallback(lambda ign: self._fn.check(Monitor()))
1985 d.addCallback(lambda check_results: self._fn.repair(check_results))
1987 self.failUnlessEqual(crr.get_successful(), False)
1988 d.addCallback(_check)
# Same zero-shares scenario for an MDMF file.
1991 def test_mdmf_unrepairable_0shares(self):
1992 d = self.publish_mdmf()
1993 def _delete_all_shares(ign):
1994 shares = self._storage._peers
1995 for peerid in shares:
1997 d.addCallback(_delete_all_shares)
1998 d.addCallback(lambda ign: self._fn.check(Monitor()))
1999 d.addCallback(lambda check_results: self._fn.repair(check_results))
2000 d.addCallback(lambda crr: self.failIf(crr.get_successful()))
# Delete all but (presumably) one share -- the selection condition line
# is missing from this extract; too few shares remain, so repair fails.
2004 def test_unrepairable_1share(self):
2005 d = self.publish_one()
2006 def _delete_all_shares(ign):
2007 shares = self._storage._peers
2008 for peerid in shares:
2009 for shnum in list(shares[peerid]):
2011 del shares[peerid][shnum]
2012 d.addCallback(_delete_all_shares)
2013 d.addCallback(lambda ign: self._fn.check(Monitor()))
2014 d.addCallback(lambda check_results: self._fn.repair(check_results))
2016 self.failUnlessEqual(crr.get_successful(), False)
2017 d.addCallback(_check)
# MDMF variant of the too-few-shares-to-repair case.
2020 def test_mdmf_unrepairable_1share(self):
2021 d = self.publish_mdmf()
2022 def _delete_all_shares(ign):
2023 shares = self._storage._peers
2024 for peerid in shares:
2025 for shnum in list(shares[peerid]):
2027 del shares[peerid][shnum]
2028 d.addCallback(_delete_all_shares)
2029 d.addCallback(lambda ign: self._fn.check(Monitor()))
2030 d.addCallback(lambda check_results: self._fn.repair(check_results))
2032 self.failUnlessEqual(crr.get_successful(), False)
2033 d.addCallback(_check)
# Enough shares remain (presumably five -- the selection condition line
# is missing here) for repair to succeed.
2036 def test_repairable_5shares(self):
2037 d = self.publish_mdmf()
2038 def _delete_all_shares(ign):
2039 shares = self._storage._peers
2040 for peerid in shares:
2041 for shnum in list(shares[peerid]):
2043 del shares[peerid][shnum]
2044 d.addCallback(_delete_all_shares)
2045 d.addCallback(lambda ign: self._fn.check(Monitor()))
2046 d.addCallback(lambda check_results: self._fn.repair(check_results))
2048 self.failUnlessEqual(crr.get_successful(), True)
2049 d.addCallback(_check)
# MDMF repairable case with an intermediate assertion: the file is
# unhealthy but still recoverable before repair, and repair succeeds.
2052 def test_mdmf_repairable_5shares(self):
2053 d = self.publish_mdmf()
2054 def _delete_some_shares(ign):
2055 shares = self._storage._peers
2056 for peerid in shares:
2057 for shnum in list(shares[peerid]):
2059 del shares[peerid][shnum]
2060 d.addCallback(_delete_some_shares)
2061 d.addCallback(lambda ign: self._fn.check(Monitor()))
2063 self.failIf(cr.is_healthy())
2064 self.failUnless(cr.is_recoverable())
2066 d.addCallback(_check)
2067 d.addCallback(lambda check_results: self._fn.repair(check_results))
2069 self.failUnlessEqual(crr.get_successful(), True)
2070 d.addCallback(_check1)
# Two recoverable versions share the highest seqnum: repair() must
# refuse with MustForceRepairError (leaving shares untouched) unless
# force=True, after which exactly one merged version survives at the
# next seqnum.
2074 def test_merge(self):
2075 self.old_shares = []
2076 d = self.publish_multiple()
2077 # repair will refuse to merge multiple highest seqnums unless you
2079 d.addCallback(lambda res:
2080 self._set_versions({0:3,2:3,4:3,6:3,8:3,
2081 1:4,3:4,5:4,7:4,9:4}))
2082 d.addCallback(self.copy_shares)
2083 d.addCallback(lambda res: self._fn.check(Monitor()))
2084 def _try_repair(check_results):
2085 ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2086 d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2087 self._fn.repair, check_results)
2088 d2.addCallback(self.copy_shares)
2089 d2.addCallback(self.failIfSharesChanged)
2090 d2.addCallback(lambda res: check_results)
2092 d.addCallback(_try_repair)
2093 d.addCallback(lambda check_results:
2094 self._fn.repair(check_results, force=True))
2095 # this should give us 10 shares of the highest roothash
2096 def _check_repair_results(rres):
2097 self.failUnless(rres.get_successful())
2099 d.addCallback(_check_repair_results)
2100 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2101 def _check_smap(smap):
2102 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2103 self.failIf(smap.unrecoverable_versions())
2104 # now, which should have won?
2105 roothash_s4a = self.get_roothash_for(3)
2106 roothash_s4b = self.get_roothash_for(4)
2107 if roothash_s4b > roothash_s4a:
2108 expected_contents = self.CONTENTS[4]
2110 expected_contents = self.CONTENTS[3]
2111 new_versionid = smap.best_recoverable_version()
2112 self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2113 d2 = self._fn.download_version(smap, new_versionid)
2114 d2.addCallback(self.failUnlessEqual, expected_contents)
2116 d.addCallback(_check_smap)
# Mixed seqnums 3 and 4 (no tie at the top): repair proceeds without
# force and promotes the newer version to seqnum 5 on all shares.
2119 def test_non_merge(self):
2120 self.old_shares = []
2121 d = self.publish_multiple()
2122 # repair should not refuse a repair that doesn't need to merge. In
2123 # this case, we combine v2 with v3. The repair should ignore v2 and
2124 # copy v3 into a new v5.
2125 d.addCallback(lambda res:
2126 self._set_versions({0:2,2:2,4:2,6:2,8:2,
2127 1:3,3:3,5:3,7:3,9:3}))
2128 d.addCallback(lambda res: self._fn.check(Monitor()))
2129 d.addCallback(lambda check_results: self._fn.repair(check_results))
2130 # this should give us 10 shares of v3
2131 def _check_repair_results(rres):
2132 self.failUnless(rres.get_successful())
2134 d.addCallback(_check_repair_results)
2135 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2136 def _check_smap(smap):
2137 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2138 self.failIf(smap.unrecoverable_versions())
2139 # now, which should have won?
2140 expected_contents = self.CONTENTS[3]
2141 new_versionid = smap.best_recoverable_version()
2142 self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2143 d2 = self._fn.download_version(smap, new_versionid)
2144 d2.addCallback(self.failUnlessEqual, expected_contents)
2146 d.addCallback(_check_smap)
# Unpack the header of the first share found in saved snapshot `index`
# and (presumably) return its root_hash -- the return line is not
# visible in this extract.
2149 def get_roothash_for(self, index):
2150 # return the roothash for the first share we see in the saved set
2151 shares = self._copied_shares[index]
2152 for peerid in shares:
2153 for shnum in shares[peerid]:
2154 share = shares[peerid][shnum]
2155 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2156 unpack_header(share)
# Ticket #625: a mutable readcap cannot repair. check_and_repair()
# should detect unhealthiness but not attempt (or achieve) a repair.
2159 def test_check_and_repair_readcap(self):
2160 # we can't currently repair from a mutable readcap: #625
2161 self.old_shares = []
2162 d = self.publish_one()
2163 d.addCallback(self.copy_shares)
2164 def _get_readcap(res):
2165 self._fn3 = self._fn.get_readonly()
2166 # also delete some shares
2167 for peerid,shares in self._storage._peers.items():
2169 d.addCallback(_get_readcap)
2170 d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2171 def _check_results(crr):
2172 self.failUnless(ICheckAndRepairResults.providedBy(crr))
2173 # we should detect the unhealthy, but skip over mutable-readcap
2174 # repairs until #625 is fixed
2175 self.failIf(crr.get_pre_repair_results().is_healthy())
2176 self.failIf(crr.get_repair_attempted())
2177 self.failIf(crr.get_post_repair_results().is_healthy())
2178 d.addCallback(_check_results)
# Ticket #1689 regression: repairing an empty SDMF file after one share
# is deleted must succeed (the buggy code used MODE_READ and missed the
# privkey needed for repair).
2181 def test_repair_empty(self):
2182 # bug 1689: delete one share of an empty mutable file, then repair.
2183 # In the buggy version, the check that precedes the retrieve+publish
2184 # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
2185 # privkey that repair needs.
2186 d = self.publish_empty_sdmf()
2187 def _delete_one_share(ign):
2188 shares = self._storage._peers
2189 for peerid in shares:
2190 for shnum in list(shares[peerid]):
2192 del shares[peerid][shnum]
2193 d.addCallback(_delete_one_share)
2194 d.addCallback(lambda ign: self._fn2.check(Monitor()))
2195 d.addCallback(lambda check_results: self._fn2.repair(check_results))
2197 self.failUnlessEqual(crr.get_successful(), True)
2198 d.addCallback(_check)
# A dict that silently discards all writes; used below to disable the
# nodemaker's node cache so multiple node objects can point at the same
# file. NOTE(review): the __setitem__ body line (presumably a bare
# "return") is missing from this sampled extract.
2201 class DevNullDictionary(dict):
2202 def __setitem__(self, key, value):
# Publish the same mutable file under several erasure-coding parameters
# (k-of-N), interleave the resulting shares, and confirm that download
# still recovers a consistent version.
# NOTE(review): sampled extract -- the "def setUp(self):" and inner
# "def _created(n):" / "def _merge(res):" headers and several other
# lines are missing; comments describe only what is visible.
2205 class MultipleEncodings(unittest.TestCase):
# (setUp body) build a fake 20-peer grid and publish initial contents.
2207 self.CONTENTS = "New contents go here"
2208 self.uploadable = MutableData(self.CONTENTS)
2209 self._storage = FakeStorage()
2210 self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2211 self._storage_broker = self._nodemaker.storage_broker
2212 d = self._nodemaker.create_mutable_file(self.uploadable)
2215 d.addCallback(_created)
# Re-publish `data` through a second filenode forced to use k-of-n
# encoding, into a cleared fake storage; yields a peerid->shares dict.
2218 def _encode(self, k, n, data, version=SDMF_VERSION):
2219 # encode 'data' into a peerid->shares dict.
2222 # disable the nodecache, since for these tests we explicitly need
2223 # multiple nodes pointing at the same file
2224 self._nodemaker._node_cache = DevNullDictionary()
2225 fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2226 # then we copy over other fields that are normally fetched from the
2228 fn2._pubkey = fn._pubkey
2229 fn2._privkey = fn._privkey
2230 fn2._encprivkey = fn._encprivkey
2231 # and set the encoding parameters to something completely different
2232 fn2._required_shares = k
2233 fn2._total_shares = n
2236 s._peers = {} # clear existing storage
2237 p2 = Publish(fn2, self._storage_broker, None)
2238 uploadable = MutableData(data)
2239 d = p2.publish(uploadable)
2240 def _published(res):
2244 d.addCallback(_published)
# Build (and presumably update/return) a ServerMap for self._fn.
2247 def make_servermap(self, mode=MODE_READ, oldmap=None):
2249 oldmap = ServerMap()
2250 smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
# Encode three different contents as 3-of-10, 4-of-9 and 4-of-7, place
# a hand-picked mixture of shares per the `places` table, then check
# that download returns the first recoverable version (contents1).
2255 def test_multiple_encodings(self):
2256 # we encode the same file in two different ways (3-of-10 and 4-of-9),
2257 # then mix up the shares, to make sure that download survives seeing
2258 # a variety of encodings. This is actually kind of tricky to set up.
2260 contents1 = "Contents for encoding 1 (3-of-10) go here"
2261 contents2 = "Contents for encoding 2 (4-of-9) go here"
2262 contents3 = "Contents for encoding 3 (4-of-7) go here"
2264 # we make a retrieval object that doesn't know what encoding
2266 fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2268 # now we upload a file through fn1, and grab its shares
2269 d = self._encode(3, 10, contents1)
2270 def _encoded_1(shares):
2271 self._shares1 = shares
2272 d.addCallback(_encoded_1)
2273 d.addCallback(lambda res: self._encode(4, 9, contents2))
2274 def _encoded_2(shares):
2275 self._shares2 = shares
2276 d.addCallback(_encoded_2)
2277 d.addCallback(lambda res: self._encode(4, 7, contents3))
2278 def _encoded_3(shares):
2279 self._shares3 = shares
2280 d.addCallback(_encoded_3)
2283 log.msg("merging sharelists")
2284 # we merge the shares from the two sets, leaving each shnum in
2285 # its original location, but using a share from set1 or set2
2286 # according to the following sequence:
2297 # so that neither form can be recovered until fetch [f], at which
2298 # point version-s1 (the 3-of-10 form) should be recoverable. If
2299 # the implementation latches on to the first version it sees,
2300 # then s2 will be recoverable at fetch [g].
2302 # Later, when we implement code that handles multiple versions,
2303 # we can use this framework to assert that all recoverable
2304 # versions are retrieved, and test that 'epsilon' does its job
2306 places = [2, 2, 3, 2, 1, 1, 1, 2]
2309 sb = self._storage_broker
2311 for peerid in sorted(sb.get_all_serverids()):
2312 for shnum in self._shares1.get(peerid, {}):
2313 if shnum < len(places):
2314 which = places[shnum]
2317 self._storage._peers[peerid] = peers = {}
2318 in_1 = shnum in self._shares1[peerid]
2319 in_2 = shnum in self._shares2.get(peerid, {})
2320 in_3 = shnum in self._shares3.get(peerid, {})
2323 peers[shnum] = self._shares1[peerid][shnum]
2324 sharemap[shnum] = peerid
2327 peers[shnum] = self._shares2[peerid][shnum]
2328 sharemap[shnum] = peerid
2331 peers[shnum] = self._shares3[peerid][shnum]
2332 sharemap[shnum] = peerid
2334 # we don't bother placing any other shares
2335 # now sort the sequence so that share 0 is returned first
2336 new_sequence = [sharemap[shnum]
2337 for shnum in sorted(sharemap.keys())]
2338 self._storage._sequence = new_sequence
2339 log.msg("merge done")
2340 d.addCallback(_merge)
2341 d.addCallback(lambda res: fn3.download_best_version())
2342 def _retrieved(new_contents):
2343 # the current specified behavior is "first version recoverable"
2344 self.failUnlessEqual(new_contents, contents1)
2345 d.addCallback(_retrieved)
# Tests for grids holding a mix of share versions: download_best_version
# must pick the newest recoverable version, the servermap must surface
# unrecoverable newer versions, and modify() must replace outliers.
# NOTE(review): sampled extract -- the "def setUp(self):" header and a
# few interior lines are missing.
2349 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
# (setUp body) publish several versions of the same file.
2352 return self.publish_multiple()
2354 def test_multiple_versions(self):
2355 # if we see a mix of versions in the grid, download_best_version
2356 # should get the latest one
2357 self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2358 d = self._fn.download_best_version()
2359 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2360 # and the checker should report problems
2361 d.addCallback(lambda res: self._fn.check(Monitor()))
2362 d.addCallback(self.check_bad, "test_multiple_versions")
2364 # but if everything is at version 2, that's what we should download
2365 d.addCallback(lambda res:
2366 self._set_versions(dict([(i,2) for i in range(10)])))
2367 d.addCallback(lambda res: self._fn.download_best_version())
2368 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2369 # if exactly one share is at version 3, we should still get v2
2370 d.addCallback(lambda res:
2371 self._set_versions({0:3}))
2372 d.addCallback(lambda res: self._fn.download_best_version())
2373 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2374 # but the servermap should see the unrecoverable version. This
2375 # depends upon the single newer share being queried early.
2376 d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2377 def _check_smap(smap):
2378 self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2379 newer = smap.unrecoverable_newer_versions()
2380 self.failUnlessEqual(len(newer), 1)
2381 verinfo, health = newer.items()[0]
2382 self.failUnlessEqual(verinfo[0], 4)
2383 self.failUnlessEqual(health, (1,3))
2384 self.failIf(smap.needs_merge())
2385 d.addCallback(_check_smap)
2386 # if we have a mix of two parallel versions (s4a and s4b), we could
2388 d.addCallback(lambda res:
2389 self._set_versions({0:3,2:3,4:3,6:3,8:3,
2390 1:4,3:4,5:4,7:4,9:4}))
2391 d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2392 def _check_smap_mixed(smap):
2393 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2394 newer = smap.unrecoverable_newer_versions()
2395 self.failUnlessEqual(len(newer), 0)
2396 self.failUnless(smap.needs_merge())
2397 d.addCallback(_check_smap_mixed)
2398 d.addCallback(lambda res: self._fn.download_best_version())
2399 d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2400 res == self.CONTENTS[4]))
# modify() on a skewed grid must read the best version, apply the
# modification, and leave exactly one recoverable version at seqnum 5.
2403 def test_replace(self):
2404 # if we see a mix of versions in the grid, we should be able to
2405 # replace them all with a newer version
2407 # if exactly one share is at version 3, we should download (and
2408 # replace) v2, and the result should be v4. Note that the index we
2409 # give to _set_versions is different than the sequence number.
2410 target = dict([(i,2) for i in range(10)]) # seqnum3
2411 target[0] = 3 # seqnum4
2412 self._set_versions(target)
2414 def _modify(oldversion, servermap, first_time):
2415 return oldversion + " modified"
2416 d = self._fn.modify(_modify)
2417 d.addCallback(lambda res: self._fn.download_best_version())
2418 expected = self.CONTENTS[2] + " modified"
2419 d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2420 # and the servermap should indicate that the outlier was replaced too
2421 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2422 def _check_smap(smap):
2423 self.failUnlessEqual(smap.highest_seqnum(), 5)
2424 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2425 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2426 d.addCallback(_check_smap)
# Exercises the ResponseCache: reads must return exactly the cached
# spans (or None for cache misses / partially-covered ranges), and
# adjacent fragments must join into one readable span.
# NOTE(review): sampled extract -- the line constructing the cache
# object `c` (and its re-creation before the fragment-join section) is
# missing.
2430 class Utils(unittest.TestCase):
2431 def test_cache(self):
2433 # xdata = base62.b2a(os.urandom(100))[:100]
2434 xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2435 ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
2436 c.add("v1", 1, 0, xdata)
2437 c.add("v1", 1, 2000, ydata)
2438 self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2439 self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2440 self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2441 self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2442 self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2443 self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2444 self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2445 self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2446 self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
2447 self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2448 self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2449 self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2450 self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2451 self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2452 self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2453 self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2454 self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2455 self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
2457 # test joining fragments
2459 c.add("v1", 1, 0, xdata[:10])
2460 c.add("v1", 1, 10, xdata[10:20])
2461 self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
# Sanity-check that the mutable-file exception classes produce useful
# repr() strings containing their class names.
2463 class Exceptions(unittest.TestCase):
2464 def test_repr(self):
2465 nmde = NeedMoreDataError(100, 50, 100)
2466 self.failUnless("NeedMoreDataError" in repr(nmde), repr(nmde))
2467 ucwe = UncoordinatedWriteError()
2468 self.failUnless("UncoordinatedWriteError" in repr(ucwe), repr(ucwe))
# A key "generator" that always hands back the same pre-made RSA
# keypair, letting tests pre-compute a file's storage index before the
# file is created.
2470 class SameKeyGenerator:
2471 def __init__(self, pubkey, privkey):
2472 self.pubkey = pubkey
2473 self.privkey = privkey
2474 def generate(self, keysize=None):
# keysize is accepted for interface compatibility but ignored.
2475 return defer.succeed( (self.pubkey, self.privkey) )
# post_call_notifier that marks a server wrapper broken after (what is
# presumably) its first call, so later queries to it fail.
# NOTE(review): sampled extract -- the class docstring/attributes and
# the guard condition inside notify() are missing.
2477 class FirstServerGetsKilled:
2479 def notify(self, retval, wrapper, methname):
2481 wrapper.broken = True
# post_call_notifier that remembers ("silences") the first server to
# answer, and afterwards makes that server's
# slot_testv_and_readv_and_writev calls act as if the share vanished.
# NOTE(review): sampled extract -- the guard conditions and the
# substituted return value inside notify() are missing.
2485 class FirstServerGetsDeleted:
2488 self.silenced = None
2489 def notify(self, retval, wrapper, methname):
2491 # this query will work, but later queries should think the share
2494 self.silenced = wrapper
2496 if wrapper == self.silenced:
2497 assert methname == "slot_testv_and_readv_and_writev"
2501 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
# Shared body for the publish-surprise tests: publish, stash a
# servermap, overwrite the file behind that map's back, then expect a
# second publish using the stale map to fail with
# UncoordinatedWriteError. NOTE(review): sampled extract -- the
# set_up_grid call, the inner "def _created(n):" header, the stashing
# assignment in _got_smap1, and the "return d" are missing.
2502 def do_publish_surprise(self, version):
2503 self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
2505 nm = self.g.clients[0].nodemaker
2506 d = nm.create_mutable_file(MutableData("contents 1"),
2509 d = defer.succeed(None)
2510 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2511 def _got_smap1(smap):
2512 # stash the old state of the file
2514 d.addCallback(_got_smap1)
2515 # then modify the file, leaving the old map untouched
2516 d.addCallback(lambda res: log.msg("starting winning write"))
2517 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2518 # now attempt to modify the file with the old servermap. This
2519 # will look just like an uncoordinated write, in which every
2520 # single share got updated between our mapupdate and our publish
2521 d.addCallback(lambda res: log.msg("starting doomed write"))
2522 d.addCallback(lambda res:
2523 self.shouldFail(UncoordinatedWriteError,
2524 "test_publish_surprise", None,
2526 MutableData("contents 2a"), self.old_map))
2528 d.addCallback(_created)
# Run the publish-surprise scenario against the SDMF format.
2531 def test_publish_surprise_sdmf(self):
2532 return self.do_publish_surprise(SDMF_VERSION)
# Run the publish-surprise scenario against the MDMF format.
2534 def test_publish_surprise_mdmf(self):
2535 return self.do_publish_surprise(MDMF_VERSION)
# Retrieving an old version via a stale servermap after the file has
# been overwritten must fail with NotEnoughSharesError ("ran out of
# servers"). NOTE(review): sampled extract -- set_up_grid, the inner
# "def _created(n):" header, the servermap stash, the download call
# inside shouldFail, and the "return d" are missing.
2537 def test_retrieve_surprise(self):
2538 self.basedir = "mutable/Problems/test_retrieve_surprise"
2540 nm = self.g.clients[0].nodemaker
2541 d = nm.create_mutable_file(MutableData("contents 1"))
2543 d = defer.succeed(None)
2544 d.addCallback(lambda res: n.get_servermap(MODE_READ))
2545 def _got_smap1(smap):
2546 # stash the old state of the file
2548 d.addCallback(_got_smap1)
2549 # then modify the file, leaving the old map untouched
2550 d.addCallback(lambda res: log.msg("starting winning write"))
2551 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2552 # now attempt to retrieve the old version with the old servermap.
2553 # This will look like someone has changed the file since we
2554 # updated the servermap.
2555 d.addCallback(lambda res: n._cache._clear())
2556 d.addCallback(lambda res: log.msg("starting doomed read"))
2557 d.addCallback(lambda res:
2558 self.shouldFail(NotEnoughSharesError,
2559 "test_retrieve_surprise",
2560 "ran out of servers: have 0 of 1",
2563 self.old_map.best_recoverable_version(),
2566 d.addCallback(_created)
# Upload, take a servermap, remove the server holding share 0, then
# overwrite (placing shares on a new server). Publishing again with the
# stale map must fail with UncoordinatedWriteError.
# NOTE(review): sampled extract -- set_up_grid, the "def _created(n):"
# header, the servermap stash, the overwrite call inside shouldFail,
# and the "return d" are missing.
2570 def test_unexpected_shares(self):
2571 # upload the file, take a servermap, shut down one of the servers,
2572 # upload it again (causing shares to appear on a new server), then
2573 # upload using the old servermap. The last upload should fail with an
2574 # UncoordinatedWriteError, because of the shares that didn't appear
2576 self.basedir = "mutable/Problems/test_unexpected_shares"
2578 nm = self.g.clients[0].nodemaker
2579 d = nm.create_mutable_file(MutableData("contents 1"))
2581 d = defer.succeed(None)
2582 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2583 def _got_smap1(smap):
2584 # stash the old state of the file
2586 # now shut down one of the servers
2587 peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
2588 self.g.remove_server(peer0)
2589 # then modify the file, leaving the old map untouched
2590 log.msg("starting winning write")
2591 return n.overwrite(MutableData("contents 2"))
2592 d.addCallback(_got_smap1)
2593 # now attempt to modify the file with the old servermap. This
2594 # will look just like an uncoordinated write, in which every
2595 # single share got updated between our mapupdate and our publish
2596 d.addCallback(lambda res: log.msg("starting doomed write"))
2597 d.addCallback(lambda res:
2598 self.shouldFail(UncoordinatedWriteError,
2599 "test_surprise", None,
2601 MutableData("contents 2a"), self.old_map))
2603 d.addCallback(_created)
# Remove a server, overwrite (so its shares get re-placed), restore the
# server, overwrite again: the final servermap must show exactly one
# recoverable and zero unrecoverable versions, i.e. duplicated share
# placements were all updated. NOTE(review): sampled extract --
# set_up_grid, the "def _created(n):" header and node stash, and the
# "return d" are missing.
2606 def test_multiply_placed_shares(self):
2607 self.basedir = "mutable/Problems/test_multiply_placed_shares"
2609 nm = self.g.clients[0].nodemaker
2610 d = nm.create_mutable_file(MutableData("contents 1"))
2611 # remove one of the servers and reupload the file.
2615 servers = self.g.get_all_serverids()
2616 self.ss = self.g.remove_server(servers[len(servers)-1])
2618 new_server = self.g.make_server(len(servers)-1)
2619 self.g.add_server(len(servers)-1, new_server)
2621 return self._node.download_best_version()
2622 d.addCallback(_created)
2623 d.addCallback(lambda data: MutableData(data))
2624 d.addCallback(lambda data: self._node.overwrite(data))
2626 # restore the server we removed earlier, then download+upload
2628 def _overwritten(ign):
2629 self.g.add_server(len(self.g.servers_by_number), self.ss)
2630 return self._node.download_best_version()
2631 d.addCallback(_overwritten)
2632 d.addCallback(lambda data: MutableData(data))
2633 d.addCallback(lambda data: self._node.overwrite(data))
2634 d.addCallback(lambda ignored:
2635 self._node.get_servermap(MODE_CHECK))
2636 def _overwritten_again(smap):
2637 # Make sure that all shares were updated by making sure that
2638 # there aren't any other versions in the sharemap.
2639 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2640 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2641 d.addCallback(_overwritten_again)
# Break the server that the permuted ring would pick for share 0
# (located by pre-computing the storage index from a fixed RSA key),
# create the file, then break a second server and overwrite: both
# operations must still succeed via alternate servers.
# NOTE(review): sampled extract -- set_up_grid, the "def _got_node(n):"
# header, parts of _explain_error, and the "return d" are missing.
2644 def test_bad_server(self):
2645 # Break one server, then create the file: the initial publish should
2646 # complete with an alternate server. Breaking a second server should
2647 # not prevent an update from succeeding either.
2648 self.basedir = "mutable/Problems/test_bad_server"
2650 nm = self.g.clients[0].nodemaker
2652 # to make sure that one of the initial peers is broken, we have to
2653 # get creative. We create an RSA key and compute its storage-index.
2654 # Then we make a KeyGenerator that always returns that one key, and
2655 # use it to create the mutable file. This will get easier when we can
2656 # use #467 static-server-selection to disable permutation and force
2657 # the choice of server for share[0].
2659 d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2660 def _got_key( (pubkey, privkey) ):
2661 nm.key_generator = SameKeyGenerator(pubkey, privkey)
2662 pubkey_s = pubkey.serialize()
2663 privkey_s = privkey.serialize()
2664 u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2665 ssk_pubkey_fingerprint_hash(pubkey_s))
2666 self._storage_index = u.get_storage_index()
2667 d.addCallback(_got_key)
2668 def _break_peer0(res):
2669 si = self._storage_index
2670 servers = nm.storage_broker.get_servers_for_psi(si)
2671 self.g.break_server(servers[0].get_serverid())
2672 self.server1 = servers[1]
2673 d.addCallback(_break_peer0)
2674 # now "create" the file, using the pre-established key, and let the
2675 # initial publish finally happen
2676 d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2677 # that ought to work
2679 d = n.download_best_version()
2680 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2681 # now break the second peer
2682 def _break_peer1(res):
2683 self.g.break_server(self.server1.get_serverid())
2684 d.addCallback(_break_peer1)
2685 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2686 # that ought to work too
2687 d.addCallback(lambda res: n.download_best_version())
2688 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2689 def _explain_error(f):
2691 if f.check(NotEnoughServersError):
2692 print "first_error:", f.value.first_error
2694 d.addErrback(_explain_error)
2696 d.addCallback(_got_node)
# Like test_bad_server but with no spare servers: publish must re-use a
# server it already gave a share to, and must not misread that as an
# UncoordinatedWriteError. NOTE(review): sampled extract -- the
# set_up_grid call (small grid, presumably ~10 servers), the
# "def _created(n):" header, and the "return d" are missing.
2699 def test_bad_server_overlap(self):
2700 # like test_bad_server, but with no extra unused servers to fall back
2701 # upon. This means that we must re-use a server which we've already
2702 # used. If we don't remember the fact that we sent them one share
2703 # already, we'll mistakenly think we're experiencing an
2704 # UncoordinatedWriteError.
2706 # Break one server, then create the file: the initial publish should
2707 # complete with an alternate server. Breaking a second server should
2708 # not prevent an update from succeeding either.
2709 self.basedir = "mutable/Problems/test_bad_server_overlap"
2711 nm = self.g.clients[0].nodemaker
2712 sb = nm.storage_broker
2714 peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2715 self.g.break_server(peerids[0])
2717 d = nm.create_mutable_file(MutableData("contents 1"))
2719 d = n.download_best_version()
2720 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2721 # now break one of the remaining servers
2722 def _break_second_server(res):
2723 self.g.break_server(peerids[1])
2724 d.addCallback(_break_second_server)
2725 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2726 # that ought to work too
2727 d.addCallback(lambda res: n.download_best_version())
2728 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2730 d.addCallback(_created)
# With every connected server marked broken, create_mutable_file must
# fail with NotEnoughServersError ("ran out of good servers").
# NOTE(review): sampled extract -- the set_up_grid call and the
# "return d" are missing.
2733 def test_publish_all_servers_bad(self):
2734 # Break all servers: the publish should fail
2735 self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2737 nm = self.g.clients[0].nodemaker
2738 for s in nm.storage_broker.get_connected_servers():
2739 s.get_rref().broken = True
2741 d = self.shouldFail(NotEnoughServersError,
2742 "test_publish_all_servers_bad",
2743 "ran out of good servers",
2744 nm.create_mutable_file, MutableData("contents"))
# With an empty grid (num_servers=0), create_mutable_file must fail
# with NotEnoughServersError ("Ran out of non-bad servers").
# NOTE(review): the trailing "return d" is not visible in this extract.
2747 def test_publish_no_servers(self):
2748 # no servers at all: the publish should fail
2749 self.basedir = "mutable/Problems/test_publish_no_servers"
2750 self.set_up_grid(num_servers=0)
2751 nm = self.g.clients[0].nodemaker
2753 d = self.shouldFail(NotEnoughServersError,
2754 "test_publish_no_servers",
2755 "Ran out of non-bad servers",
2756 nm.create_mutable_file, MutableData("contents"))
# Exercise _privkey_query_failed: make the file large enough that the
# encprivkey is not in the initial short read, then have the first
# server that answers start throwing errors; a MODE_WRITE mapupdate
# from a fresh node must still succeed using another server's copy.
# NOTE(review): sampled extract -- the trailing "return d" is missing.
2760 def test_privkey_query_error(self):
2761 # when a servermap is updated with MODE_WRITE, it tries to get the
2762 # privkey. Something might go wrong during this query attempt.
2763 # Exercise the code in _privkey_query_failed which tries to handle
2765 self.basedir = "mutable/Problems/test_privkey_query_error"
2766 self.set_up_grid(num_servers=20)
2767 nm = self.g.clients[0].nodemaker
2768 nm._node_cache = DevNullDictionary() # disable the nodecache
2770 # we need some contents that are large enough to push the privkey out
2771 # of the early part of the file
2772 LARGE = "These are Larger contents" * 2000 # about 50KB
2773 LARGE_uploadable = MutableData(LARGE)
2774 d = nm.create_mutable_file(LARGE_uploadable)
2776 self.uri = n.get_uri()
2777 self.n2 = nm.create_from_cap(self.uri)
2779 # When a mapupdate is performed on a node that doesn't yet know
2780 # the privkey, a short read is sent to a batch of servers, to get
2781 # the verinfo and (hopefully, if the file is short enough) the
2782 # encprivkey. Our file is too large to let this first read
2783 # contain the encprivkey. Each non-encprivkey-bearing response
2784 # that arrives (until the node gets the encprivkey) will trigger
2785 # a second read to specifically read the encprivkey.
2787 # So, to exercise this case:
2788 # 1. notice which server gets a read() call first
2789 # 2. tell that server to start throwing errors
2790 killer = FirstServerGetsKilled()
2791 for s in nm.storage_broker.get_connected_servers():
2792 s.get_rref().post_call_notifier = killer.notify
2793 d.addCallback(_created)
2795 # now we update a servermap from a new node (which doesn't have the
2796 # privkey yet, forcing it to use a separate privkey query). Note that
2797 # the map-update will succeed, since we'll just get a copy from one
2798 # of the other shares.
2799 d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
    def test_privkey_query_missing(self):
        """Like test_privkey_query_error, but the first server's shares
        disappear before the follow-up privkey query; the MODE_WRITE map
        update must still succeed."""
        # like test_privkey_query_error, but the shares are deleted by the
        # second query, instead of raising an exception.
        self.basedir = "mutable/Problems/test_privkey_query_missing"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        LARGE = "These are Larger contents" * 2000 # about 50KiB
        LARGE_uploadable = MutableData(LARGE)
        nm._node_cache = DevNullDictionary() # disable the nodecache

        d = nm.create_mutable_file(LARGE_uploadable)
        # NOTE(review): a 'def _created(n):' line appears to be elided from
        # this excerpt; the following lines read like that callback's body.
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        deleter = FirstServerGetsDeleted()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = deleter.notify
        d.addCallback(_created)
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_block_and_hash_query_error(self):
        """A server that dies immediately after returning a valid prefix
        must be survived by Retrieve; the download must still return the
        original contents."""
        # This tests for what happens when a query to a remote server
        # fails in either the hash validation step or the block getting
        # step (because of batching, this is the same actual query).
        # We need to have the storage server persist up until the point
        # that its prefix is validated, then suddenly die. This
        # exercises some exception handling code in Retrieve.
        self.basedir = "mutable/Problems/test_block_and_hash_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        CONTENTS = "contents" * 2000
        CONTENTS_uploadable = MutableData(CONTENTS)
        d = nm.create_mutable_file(CONTENTS_uploadable)
        # NOTE(review): the 'def _created(n):' callback (which presumably
        # saves self._node) appears to be elided from this excerpt.
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self._node.get_servermap(MODE_READ))
        def _then(servermap):
            # we have our servermap. Now we set up the servers like the
            # tests above -- the first one that gets a read call should
            # start throwing errors, but only after returning its prefix
            # for validation. Since we'll download without fetching the
            # private key, the next query to the remote server will be
            # for either a block and salt or for hashes, either of which
            # will exercise the error handling code.
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
            ver = servermap.best_recoverable_version()
            return self._node.download_version(servermap, ver)
        d.addCallback(_then)
        d.addCallback(lambda data:
            self.failUnlessEqual(data, CONTENTS))
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_1654(self):
        """Regression test for ticket #1654: Retrieve must verify the block
        hash tree root, so the crafted shares below always raise
        CorruptShareError (surfacing as NotEnoughSharesError)."""
        # test that the Retrieve object unconditionally verifies the block
        # hash tree root for mutable shares. The failure mode is that
        # carefully crafted shares can cause undetected corruption (the
        # retrieve appears to finish successfully, but the result is
        # corrupted). When fixed, these shares always cause a
        # CorruptShareError, which results in NotEnoughSharesError in this
        # test.
        self.basedir = "mutable/Problems/test_1654"
        self.set_up_grid(num_servers=2)
        cap = uri.from_string(TEST_1654_CAP)
        si = cap.get_storage_index()

        # Plant the two pre-built (corrupted) shares directly into the
        # servers' storage directories.
        for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
            sharedata = base64.b64decode(share)
            storedir = self.get_serverdir(shnum)
            storage_path = os.path.join(storedir, "shares",
                                        storage_index_to_dir(si))
            fileutil.make_dirs(storage_path)
            fileutil.write(os.path.join(storage_path, "%d" % shnum),
            # NOTE(review): the continuation line passing 'sharedata)' to
            # fileutil.write appears to be elided from this excerpt.

        nm = self.g.clients[0].nodemaker
        n = nm.create_from_cap(TEST_1654_CAP)
        # to exercise the problem correctly, we must ensure that sh0 is
        # processed first, and sh1 second. NoNetworkGrid has facilities to
        # stall the first request from a single server, but it's not
        # currently easy to extend that to stall the second request (mutable
        # retrievals will see two: first the mapupdate, then the fetch).
        # However, repeated executions of this run without the #1654 fix
        # suggests that we're failing reliably even without explicit stalls,
        # probably because the servers are queried in a fixed order. So I'm
        # ok with relying upon that.
        d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
                            "ran out of servers",
                            n.download_best_version)
        # NOTE(review): the trailing 'return d' appears to be elided here.
# Fixture data for test_1654 (ticket #1654): a writecap plus two
# deliberately crafted, corrupted MDMF shares (base64-encoded) whose block
# hash tree roots must be rejected by Retrieve.
TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"

# Share 0 for the #1654 regression test.
TEST_1654_SH0 = """\
VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
+QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
72mXGlqyLyWYuAAAAAA="""

# Share 1 for the #1654 regression test.
TEST_1654_SH1 = """\
VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
+QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
72mXGlqyLyWYuAAAAAA="""
class FileHandle(unittest.TestCase):
    """Tests for MutableFileHandle, the uploadable wrapper around an open
    file-like object."""
    # NOTE(review): the 'def setUp(self):' line appears to be elided from
    # this excerpt; the next three lines read like its body.
    self.test_data = "Test Data" * 50000
    self.sio = StringIO(self.test_data)
    self.uploadable = MutableFileHandle(self.sio)

    def test_filehandle_read(self):
        # Reading in fixed-size chunks must return exactly the matching
        # slices of the underlying data.
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        # NOTE(review): the 'chunk_size = ...' and 'start = i' assignments
        # appear to be elided from this excerpt.
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            # read() returns a list of strings; join before comparing.
            data = "".join(data)
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])

    def test_filehandle_get_size(self):
        # get_size() must report the full length of the wrapped data.
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)

    def test_filehandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        # NOTE(review): the 'chunk_size = ...' assignment appears to be
        # elided from this excerpt.
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        # NOTE(review): a 'start = chunk_size' assignment appears to be
        # elided here.
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])

    def test_filehandle_file(self):
        # Make sure that the MutableFileHandle works on a file as well
        # as a StringIO object, since in some cases it will be asked to
        # (NOTE(review): the rest of this comment appears to be elided.)
        self.basedir = self.mktemp()
        # necessary? What am I doing wrong here?
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        # NOTE(review): an 'f.close()' appears to be elided here before the
        # file is reopened for reading.
        f = open(f_path, "r")
        uploadable = MutableFileHandle(f)

        data = uploadable.read(len(self.test_data))
        self.failUnlessEqual("".join(data), self.test_data)
        size = uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

    def test_close(self):
        # Make sure that the MutableFileHandle closes its handle when
        # asked; the underlying StringIO must report closed afterwards.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
class DataHandle(unittest.TestCase):
    """Tests for MutableData, the uploadable wrapper around an in-memory
    string."""
    # NOTE(review): the 'def setUp(self):' line appears to be elided from
    # this excerpt; the next two lines read like its body.
    self.test_data = "Test Data" * 50000
    self.uploadable = MutableData(self.test_data)

    def test_datahandle_read(self):
        # Reading in fixed-size chunks must return exactly the matching
        # slices of the wrapped string.
        # NOTE(review): the 'chunk_size = ...' and 'start = i' assignments
        # appear to be elided from this excerpt.
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            # read() returns a list of strings; join before comparing.
            data = "".join(data)
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])

    def test_datahandle_get_size(self):
        # get_size() must report the full length of the wrapped string.
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)

    def test_datahandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        # NOTE(review): the 'chunk_size = ...' assignment appears to be
        # elided from this excerpt.
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        # NOTE(review): a 'start = chunk_size' assignment appears to be
        # elided here.
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
# NOTE(review): the continuation of this class statement (remaining base
# classes and the ':'), plus the 'def setUp(self):' header and its
# set_up_grid() call, appear to be elided from this excerpt.
class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
    GridTestMixin.setUp(self)
    self.basedir = self.mktemp()
    self.c = self.g.clients[0]
    self.nm = self.c.nodemaker
    self.data = "test data" * 100000 # about 900 KiB; MDMF
    self.small_data = "test data" * 10 # about 90 B; SDMF
    def do_upload_mdmf(self):
        """Create an MDMF mutable file from self.data; the (elided)
        callback presumably saves the node for later tests."""
        d = self.nm.create_mutable_file(MutableData(self.data),
                                        version=MDMF_VERSION)
        # NOTE(review): a 'def _then(n):' line plus the lines saving and
        # returning the node appear to be elided from this excerpt.
        assert isinstance(n, MutableFileNode)
        assert n._protocol_version == MDMF_VERSION
        d.addCallback(_then)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def do_upload_sdmf(self):
        """Create an SDMF mutable file from self.small_data; the (elided)
        callback presumably saves the node for later tests."""
        d = self.nm.create_mutable_file(MutableData(self.small_data))
        # NOTE(review): a 'def _then(n):' line plus the lines saving and
        # returning the node appear to be elided from this excerpt.
        assert isinstance(n, MutableFileNode)
        assert n._protocol_version == SDMF_VERSION
        d.addCallback(_then)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def do_upload_empty_sdmf(self):
        """Create a zero-length SDMF mutable file and remember its node
        as self.sdmf_zero_length_node."""
        d = self.nm.create_mutable_file(MutableData(""))
        # NOTE(review): a 'def _then(n):' line appears to be elided from
        # this excerpt; the following lines read like its body.
        assert isinstance(n, MutableFileNode)
        self.sdmf_zero_length_node = n
        assert n._protocol_version == SDMF_VERSION
        d.addCallback(_then)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def do_upload(self):
        """Upload both the MDMF and the SDMF test files."""
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.do_upload_sdmf())
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_debug(self):
        """Exercise the CLI debug tools (find-shares, dump-share,
        catalog-shares) against a freshly uploaded MDMF file."""
        d = self.do_upload_mdmf()
        # NOTE(review): a 'def _debug(n):' line appears to be elided from
        # this excerpt; the following lines read like its body.
        fso = debug.FindSharesOptions()
        storage_index = base32.b2a(n.get_storage_index())
        fso.si_s = storage_index
        # NOTE(review): the 'for (i, ss, storedir)' line of this list
        # comprehension appears to be elided.
        fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
                        in self.iterate_servers()]
        fso.stdout = StringIO()
        fso.stderr = StringIO()
        debug.find_shares(fso)
        sharefiles = fso.stdout.getvalue().splitlines()
        # one share per server is expected ('n' encoding parameter)
        expected = self.nm.default_encoding_parameters["n"]
        self.failUnlessEqual(len(sharefiles), expected)

        do = debug.DumpOptions()
        do["filename"] = sharefiles[0]
        do.stdout = StringIO()
        debug.dump_share(do)
        output = do.stdout.getvalue()
        lines = set(output.splitlines())
        self.failUnless("Mutable slot found:" in lines, output)
        self.failUnless(" share_type: MDMF" in lines, output)
        self.failUnless(" num_extra_leases: 0" in lines, output)
        self.failUnless(" MDMF contents:" in lines, output)
        self.failUnless(" seqnum: 1" in lines, output)
        self.failUnless(" required_shares: 3" in lines, output)
        self.failUnless(" total_shares: 10" in lines, output)
        self.failUnless(" segsize: 131073" in lines, output)
        self.failUnless(" datalen: %d" % len(self.data) in lines, output)
        vcap = n.get_verify_cap().to_string()
        self.failUnless(" verify-cap: %s" % vcap in lines, output)

        cso = debug.CatalogSharesOptions()
        cso.nodedirs = fso.nodedirs
        cso.stdout = StringIO()
        cso.stderr = StringIO()
        debug.catalog_shares(cso)
        shares = cso.stdout.getvalue().splitlines()
        oneshare = shares[0] # all shares should be MDMF
        self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
        self.failUnless(oneshare.startswith("MDMF"), oneshare)
        fields = oneshare.split()
        self.failUnlessEqual(fields[0], "MDMF")
        self.failUnlessEqual(fields[1], storage_index)
        self.failUnlessEqual(fields[2], "3/10")
        self.failUnlessEqual(fields[3], "%d" % len(self.data))
        # NOTE(review): the failure-message argument below is fields[3];
        # fields[4] was probably intended -- verify upstream.
        self.failUnless(fields[4].startswith("#1:"), fields[3])
        # the rest of fields[4] is the roothash, which depends upon
        # encryption salts and is not constant. fields[5] is the
        # remaining time on the longest lease, which is timing dependent.
        # The rest of the line is the quoted pathname to the share.
        d.addCallback(_debug)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_get_sequence_number(self):
        """Fresh files report sequence number 1; a single overwrite bumps
        both the MDMF and SDMF files to 2."""
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 1))
        d.addCallback(lambda ignored:
            self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 1))
        # Now update. The sequence number in both cases should be 1 in
        # (NOTE(review): the rest of this comment appears to be elided.)
        def _do_update(ignored):
            new_data = MutableData("foo bar baz" * 100000)
            new_small_data = MutableData("foo bar baz" * 10)
            d1 = self.mdmf_node.overwrite(new_data)
            d2 = self.sdmf_node.overwrite(new_small_data)
            dl = gatherResults([d1, d2])
            # NOTE(review): a 'return dl' appears to be elided here.
        d.addCallback(_do_update)
        d.addCallback(lambda ignored:
            self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 2))
        d.addCallback(lambda ignored:
            self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 2))
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_cap_after_upload(self):
        # If we create a new mutable file and upload things to it, and
        # it's an MDMF file, we should get an MDMF cap back from that
        # file and should be able to use that.
        # That's essentially what MDMF node is, so just check that.
        d = self.do_upload_mdmf()
        # NOTE(review): a 'def _then(ign):' line appears to be elided from
        # this excerpt; the following lines read like its body.
        mdmf_uri = self.mdmf_node.get_uri()
        cap = uri.from_string(mdmf_uri)
        self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
        readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
        cap = uri.from_string(readonly_mdmf_uri)
        self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
        d.addCallback(_then)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_mutable_version(self):
        # assert that getting parameters from the IMutableVersion object
        # gives us the same data as getting them from the filenode itself
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
        def _check_mdmf(bv):
            # NOTE(review): an 'n = self.mdmf_node' assignment appears to
            # be elided here.
            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
            self.failIf(bv.is_readonly())
        d.addCallback(_check_mdmf)
        d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
        def _check_sdmf(bv):
            # NOTE(review): an 'n = self.sdmf_node' assignment appears to
            # be elided here.
            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
            self.failIf(bv.is_readonly())
        d.addCallback(_check_sdmf)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_get_readonly_version(self):
        """Best-readable versions, and mutable versions obtained through a
        readonly filenode, must all report is_readonly()."""
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))

        # Attempting to get a mutable version of a mutable file from a
        # filenode initialized with a readcap should return a readonly
        # version of that same node.
        d.addCallback(lambda ign: self.mdmf_node.get_readonly())
        d.addCallback(lambda ro: ro.get_best_mutable_version())
        d.addCallback(lambda v: self.failUnless(v.is_readonly()))

        d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))

        d.addCallback(lambda ign: self.sdmf_node.get_readonly())
        d.addCallback(lambda ro: ro.get_best_mutable_version())
        d.addCallback(lambda v: self.failUnless(v.is_readonly()))
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_toplevel_overwrite(self):
        """node.overwrite() must fully replace the contents of both the
        MDMF and the SDMF files."""
        new_data = MutableData("foo bar baz" * 100000)
        new_small_data = MutableData("foo bar baz" * 10)
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "foo bar baz" * 100000))
        d.addCallback(lambda ignored:
            self.sdmf_node.overwrite(new_small_data))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "foo bar baz" * 10))
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_toplevel_modify(self):
        """node.modify() must apply the modifier to both files; the
        downloaded result must contain the appended marker."""
        d = self.do_upload()
        def modifier(old_contents, servermap, first_time):
            return old_contents + "modified"
        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        d.addCallback(lambda ignored:
            self.sdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_version_modify(self):
        # TODO: When we can publish multiple versions, alter this test
        # to modify a version other than the best usable version, then
        # test to see that the best recoverable version is that.
        d = self.do_upload()
        def modifier(old_contents, servermap, first_time):
            return old_contents + "modified"
        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        d.addCallback(lambda ignored:
            self.sdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_download_version(self):
        """With two recoverable versions on the grid, download_version()
        must return the plaintext matching each requested verinfo."""
        d = self.publish_multiple()
        # We want to have two recoverable versions on the grid.
        d.addCallback(lambda res:
            self._set_versions({0:0,2:0,4:0,6:0,8:0,
                                1:1,3:1,5:1,7:1,9:1}))
        # Now try to download each version. We should get the plaintext
        # associated with that version.
        d.addCallback(lambda ignored:
            self._fn.get_servermap(mode=MODE_READ))
        def _got_servermap(smap):
            versions = smap.recoverable_versions()
            assert len(versions) == 2

            self.servermap = smap
            self.version1, self.version2 = versions
            assert self.version1 != self.version2

            # verinfo tuples start with the sequence number
            self.version1_seqnum = self.version1[0]
            self.version2_seqnum = self.version2[0]
            self.version1_index = self.version1_seqnum - 1
            self.version2_index = self.version2_seqnum - 1
        d.addCallback(_got_servermap)
        d.addCallback(lambda ignored:
            self._fn.download_version(self.servermap, self.version1))
        d.addCallback(lambda results:
            self.failUnlessEqual(self.CONTENTS[self.version1_index],
        # NOTE(review): the 'results)' continuation line appears to be
        # elided from this excerpt.
        d.addCallback(lambda ignored:
            self._fn.download_version(self.servermap, self.version2))
        d.addCallback(lambda results:
            self.failUnlessEqual(self.CONTENTS[self.version2_index],
        # NOTE(review): the 'results)' continuation line and the trailing
        # 'return d' appear to be elided from this excerpt.
    def test_download_nonexistent_version(self):
        """Requesting a verinfo that does not exist on the grid must fail
        with UnrecoverableFileError."""
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
        def _set_servermap(servermap):
            self.servermap = servermap
        d.addCallback(_set_servermap)
        d.addCallback(lambda ignored:
            self.shouldFail(UnrecoverableFileError, "nonexistent version",
                            # NOTE(review): a substring argument appears to
                            # be elided from this excerpt.
                            self.mdmf_node.download_version, self.servermap,
        # NOTE(review): the final argument (a bogus verinfo) and the
        # trailing 'return d' appear to be elided from this excerpt.
    def test_partial_read(self):
        """Drive version.read() at several fencepost-prone (offset, length)
        combinations against the MDMF file, then re-read the whole file in
        small pieces."""
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        # (name, offset, length) triples; offsets are placed around the
        # 128KiB segment boundary via mathutil.next_multiple.
        modes = [("start_on_segment_boundary",
                  mathutil.next_multiple(128 * 1024, 3), 50),
                 ("ending_one_byte_after_segment_boundary",
                  mathutil.next_multiple(128 * 1024, 3)-50, 51),
                 ("zero_length_at_start", 0, 0),
                 ("zero_length_in_middle", 50, 0),
                 ("zero_length_at_segment_boundary",
                  mathutil.next_multiple(128 * 1024, 3), 0),
                 # NOTE(review): the closing ']' of this list appears to be
                 # elided from this excerpt.
        for (name, offset, length) in modes:
            d.addCallback(self._do_partial_read, name, offset, length)
        # then read only a few bytes at a time, and see that the results are
        # what we expect.
        def _read_data(version):
            c = consumer.MemoryConsumer()
            d2 = defer.succeed(None)
            for i in xrange(0, len(self.data), 10000):
                d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(self.data, "".join(c.chunks)))
            # NOTE(review): a 'return d2' appears to be elided here.
        d.addCallback(_read_data)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def _do_partial_read(self, version, name, offset, length):
        # Read [offset:offset+length] via the consumer interface and
        # compare it against the same slice of the original data.
        c = consumer.MemoryConsumer()
        d = version.read(c, offset, length)
        expected = self.data[offset:offset+length]
        d.addCallback(lambda ignored: "".join(c.chunks))
        def _check(results):
            if results != expected:
                # NOTE(review): a line printing the lengths appears to be
                # elided from this excerpt.
                print "got: %s ... %s" % (results[:20], results[-20:])
                print "exp: %s ... %s" % (expected[:20], expected[-20:])
                self.fail("results[%s] != expected" % name)
            return version # daisy-chained to next call
        d.addCallback(_check)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def _test_read_and_download(self, node, expected):
        # Both the streaming read() and the one-shot
        # download_best_version() must return exactly 'expected'.
        d = node.get_best_readable_version()
        def _read_data(version):
            c = consumer.MemoryConsumer()
            d2 = defer.succeed(None)
            d2.addCallback(lambda ignored: version.read(c))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(expected, "".join(c.chunks)))
            # NOTE(review): a 'return d2' appears to be elided here.
        d.addCallback(_read_data)
        d.addCallback(lambda ignored: node.download_best_version())
        d.addCallback(lambda data: self.failUnlessEqual(expected, data))
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_read_and_download_mdmf(self):
        # Full read/download round-trip for the large MDMF file.
        d = self.do_upload_mdmf()
        d.addCallback(self._test_read_and_download, self.data)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_read_and_download_sdmf(self):
        # Full read/download round-trip for the small SDMF file.
        d = self.do_upload_sdmf()
        d.addCallback(self._test_read_and_download, self.small_data)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def test_read_and_download_sdmf_zero_length(self):
        # Full read/download round-trip for the zero-length SDMF file.
        d = self.do_upload_empty_sdmf()
        d.addCallback(self._test_read_and_download, "")
        # NOTE(review): the trailing 'return d' appears to be elided here.
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    """Tests for in-place update() of MDMF mutable files."""
    timeout = 400 # these tests are too big, 120s is not enough on slow
    # NOTE(review): the rest of the comment above, plus the
    # 'def setUp(self):' header and its set_up_grid() call, appear to be
    # elided from this excerpt; the following lines read like setUp's body.
    GridTestMixin.setUp(self)
    self.basedir = self.mktemp()
    self.c = self.g.clients[0]
    self.nm = self.c.nodemaker
    self.data = "testdata " * 100000 # about 900 KiB; MDMF
    self.small_data = "test data" * 10 # about 90 B; SDMF
    def do_upload_sdmf(self):
        """Create two SDMF files: one with default encoding and one with
        the maximum 255 shares (k=127)."""
        d = self.nm.create_mutable_file(MutableData(self.small_data))
        # NOTE(review): a 'def _then(n):' line and a 'self.sdmf_node = n'
        # assignment appear to be elided from this excerpt.
        assert isinstance(n, MutableFileNode)

        # Make SDMF node that has 255 shares.
        self.nm.default_encoding_parameters['n'] = 255
        self.nm.default_encoding_parameters['k'] = 127
        return self.nm.create_mutable_file(MutableData(self.small_data))
        d.addCallback(_then)
        # NOTE(review): a 'def _then2(n):' line appears to be elided here.
        assert isinstance(n, MutableFileNode)
        self.sdmf_max_shares_node = n
        d.addCallback(_then2)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def do_upload_mdmf(self):
        """Create two MDMF files: one with default encoding and one with
        the maximum 255 shares (k=127)."""
        d = self.nm.create_mutable_file(MutableData(self.data),
                                        version=MDMF_VERSION)
        # NOTE(review): a 'def _then(n):' line and a 'self.mdmf_node = n'
        # assignment appear to be elided from this excerpt.
        assert isinstance(n, MutableFileNode)

        # Make MDMF node that has 255 shares.
        self.nm.default_encoding_parameters['n'] = 255
        self.nm.default_encoding_parameters['k'] = 127
        return self.nm.create_mutable_file(MutableData(self.data),
                                           version=MDMF_VERSION)
        d.addCallback(_then)
        # NOTE(review): a 'def _then2(n):' line appears to be elided here.
        assert isinstance(n, MutableFileNode)
        self.mdmf_max_shares_node = n
        d.addCallback(_then2)
        # NOTE(review): the trailing 'return d' appears to be elided here.
    def _test_replace(self, offset, new_data):
        """Upload the MDMF files, call update(new_data, offset) on each,
        and check that the downloaded contents equal the spliced string
        self.data[:offset] + new_data + self.data[offset+len(new_data):]."""
        expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
        d0 = self.do_upload_mdmf()
        # NOTE(review): a 'def _run(ign):' line appears to be elided from
        # this excerpt; the following lines read like its body.
        d = defer.succeed(None)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData(new_data), offset))
            d.addCallback(lambda ign, node=node:
                          node.download_best_version())
            def _check(results):
                if results != expected:
                    # NOTE(review): a line printing the lengths appears to
                    # be elided from this excerpt.
                    print "got: %s ... %s" % (results[:20], results[-20:])
                    print "exp: %s ... %s" % (expected[:20], expected[-20:])
                    self.fail("results != expected")
            d.addCallback(_check)
            # NOTE(review): a 'return d' (ending _run) appears elided here.
        d0.addCallback(_run)
        # NOTE(review): the trailing 'return d0' appears to be elided here.
3566 def test_append(self):
3567 # We should be able to append data to a mutable file and get
3569 return self._test_replace(len(self.data), "appended")
3571 def test_replace_middle(self):
3572 # We should be able to replace data in the middle of a mutable
3573 # file and get what we expect back.
3574 return self._test_replace(100, "replaced")
3576 def test_replace_beginning(self):
3577 # We should be able to replace data at the beginning of the file
3578 # without truncating the file
3579 return self._test_replace(0, "beginning")
3581 def test_replace_segstart1(self):
3582 return self._test_replace(128*1024+1, "NNNN")
3584 def test_replace_zero_length_beginning(self):
3585 return self._test_replace(0, "")
3587 def test_replace_zero_length_middle(self):
3588 return self._test_replace(50, "")
3590 def test_replace_zero_length_segstart1(self):
3591 return self._test_replace(128*1024+1, "")
def test_replace_and_extend(self):
    # An update that starts inside the file but runs past its end must
    # both replace the overlapped bytes and grow the file.
    big_write = "modified " * 100000
    return self._test_replace(100, big_write)
3599 def _check_differences(self, got, expected):
3600 # displaying arbitrary file corruption is tricky for a
3601 # 1MB file of repeating data,, so look for likely places
3602 # with problems and display them separately
3603 gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3604 expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3605 gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3606 for (start,end) in gotmods]
3607 expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3608 for (start,end) in expmods]
3609 #print "expecting: %s" % expspans
3613 print "differences:"
3614 for segnum in range(len(expected)//SEGSIZE):
3615 start = segnum * SEGSIZE
3616 end = (segnum+1) * SEGSIZE
3617 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3618 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3619 if got_ends != exp_ends:
3620 print "expected[%d]: %s" % (start, exp_ends)
3621 print "got [%d]: %s" % (start, got_ends)
3622 if expspans != gotspans:
3623 print "expected: %s" % expspans
3624 print "got : %s" % gotspans
3625 open("EXPECTED","wb").write(expected)
3626 open("GOT","wb").write(got)
3627 print "wrote data to EXPECTED and GOT"
3628 self.fail("didn't get expected data")
def test_replace_locations(self):
    # exercise fencepost conditions
    SEGSIZE = 128*1024
    # Offsets straddling the first and second segment boundaries.
    suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
    letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    d0 = self.do_upload_mdmf()
    def _run(ign):
        expected = self.data
        d = defer.succeed(None)
        for offset in suspects:
            new_data = letters.next()*2 # "AA", then "BB", etc
            expected = expected[:offset]+new_data+expected[offset+2:]
            d.addCallback(lambda ign:
                          self.mdmf_node.get_best_mutable_version())
            def _modify(mv, offset=offset, new_data=new_data):
                # close over 'offset','new_data'
                md = MutableData(new_data)
                return mv.update(md, offset)
            d.addCallback(_modify)
            d.addCallback(lambda ignored:
                          self.mdmf_node.download_best_version())
            d.addCallback(self._check_differences, expected)
        return d
    d0.addCallback(_run)
    return d0
def test_replace_locations_max_shares(self):
    # exercise fencepost conditions
    # Same as test_replace_locations, but against the node that was
    # uploaded with the maximum number of shares.
    SEGSIZE = 128*1024
    suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
    letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    d0 = self.do_upload_mdmf()
    def _run(ign):
        expected = self.data
        d = defer.succeed(None)
        for offset in suspects:
            new_data = letters.next()*2 # "AA", then "BB", etc
            expected = expected[:offset]+new_data+expected[offset+2:]
            d.addCallback(lambda ign:
                          self.mdmf_max_shares_node.get_best_mutable_version())
            def _modify(mv, offset=offset, new_data=new_data):
                # close over 'offset','new_data'
                md = MutableData(new_data)
                return mv.update(md, offset)
            d.addCallback(_modify)
            d.addCallback(lambda ignored:
                          self.mdmf_max_shares_node.download_best_version())
            d.addCallback(self._check_differences, expected)
        return d
    d0.addCallback(_run)
    return d0
def test_append_power_of_two(self):
    # If we attempt to extend a mutable file so that its segment
    # count crosses a power-of-two boundary, the update operation
    # should know how to reencode the file.

    # Note that the data populating self.mdmf_node is about 900 KiB
    # long -- this is 7 segments in the default segment size. So we
    # need to add 2 segments worth of data to push it over a
    # power-of-two boundary.
    segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
    new_data = self.data + (segment * 2)
    d0 = self.do_upload_mdmf()
    def _run(ign):
        d = defer.succeed(None)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData(segment * 2), len(self.data)))
            d.addCallback(lambda ign, node=node:
                          node.download_best_version())
            d.addCallback(lambda results:
                          self.failUnlessEqual(results, new_data))
        return d
    d0.addCallback(_run)
    return d0
def test_update_sdmf(self):
    # Running update on a single-segment file should still work.
    new_data = self.small_data + "appended"
    d0 = self.do_upload_sdmf()
    def _run(ign):
        d = defer.succeed(None)
        for node in (self.sdmf_node, self.sdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData("appended"),
                                    len(self.small_data)))
            d.addCallback(lambda ign, node=node:
                          node.download_best_version())
            d.addCallback(lambda results:
                          self.failUnlessEqual(results, new_data))
        return d
    d0.addCallback(_run)
    return d0
def test_replace_in_last_segment(self):
    # The wrapper should know how to handle the tail segment
    replace_offset = len(self.data) - 100
    new_data = self.data[:replace_offset] + "replaced"
    rest_offset = replace_offset + len("replaced")
    new_data += self.data[rest_offset:]
    d0 = self.do_upload_mdmf()
    def _run(ign):
        d = defer.succeed(None)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData("replaced"), replace_offset))
            d.addCallback(lambda ign, node=node:
                          node.download_best_version())
            d.addCallback(lambda results:
                          self.failUnlessEqual(results, new_data))
        return d
    d0.addCallback(_run)
    return d0
def test_multiple_segment_replace(self):
    # Replace a span that covers two whole segments plus part of the
    # next one, and check that the whole file round-trips.
    replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
    new_data = self.data[:replace_offset]
    new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
    new_data += 2 * new_segment
    new_data += "replaced"
    rest_offset = len(new_data)
    new_data += self.data[rest_offset:]
    d0 = self.do_upload_mdmf()
    def _run(ign):
        d = defer.succeed(None)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData((2 * new_segment) + "replaced"),
                                    replace_offset))
            d.addCallback(lambda ignored, node=node:
                          node.download_best_version())
            d.addCallback(lambda results:
                          self.failUnlessEqual(results, new_data))
        return d
    d0.addCallback(_run)
    return d0
3782 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3783 sdmf_old_shares = {}
3784 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3785 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3786 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3787 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3788 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3789 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3790 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3791 sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3792 sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3793 sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3794 sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3795 sdmf_old_contents = "This is a test file.\n"
def copy_sdmf_shares(self):
    """Write the canned SDMF shares directly into the grid's server
    storage directories, short-circuiting the normal upload process.

    Assigns each of the 10 canned shares (keyed by share number in
    self.sdmf_old_shares) to one of the 10 storage servers, decodes the
    base64 share data, and writes it under the storage-index directory
    for self.sdmf_old_cap. Verifies afterwards that all 10 shares are
    findable on disk.
    """
    servernums = self.g.servers_by_number.keys()
    assert len(servernums) == 10
    # Pair each canned share number with a server number.
    assignments = zip(self.sdmf_old_shares.keys(), servernums)
    # Get the storage index from the old cap; it determines the
    # on-disk directory for the shares.
    cap = uri.from_string(self.sdmf_old_cap)
    si = cap.get_storage_index()
    # Now execute each assignment by writing the share data to storage.
    for (share, servernum) in assignments:
        sharedata = base64.b64decode(self.sdmf_old_shares[share])
        storedir = self.get_serverdir(servernum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        # BUG FIX: the write call was missing its data argument; the
        # decoded share bytes must actually be written to the file.
        fileutil.write(os.path.join(storage_path, "%d" % share),
                       sharedata)
    # ...and verify that the shares are there.
    shares = self.find_uri_shares(self.sdmf_old_cap)
    assert len(shares) == 10
def test_new_downloader_can_read_old_shares(self):
    """Verify that the current downloader can read SDMF shares written
    by an old version of the code, by planting canned shares and
    downloading through a fresh filenode.
    """
    self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
    # BUG FIX: the grid must be created before copy_sdmf_shares() can
    # reference self.g and the server storage directories.
    self.set_up_grid()
    self.copy_sdmf_shares()
    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(self.sdmf_old_cap)
    d = n.download_best_version()
    d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
    # BUG FIX: the Deferred must be returned so that Twisted trial
    # waits for the asynchronous download to finish; otherwise the
    # test would pass vacuously before the callback fires.
    return d
3829 class DifferentEncoding(unittest.TestCase):
3831 self._storage = s = FakeStorage()
3832 self.nodemaker = make_nodemaker(s)
3834 def test_filenode(self):
3835 # create a file with 3-of-20, then modify it with a client configured
3836 # to do 3-of-10. #1510 tracks a failure here
3837 self.nodemaker.default_encoding_parameters["n"] = 20
3838 d = self.nodemaker.create_mutable_file("old contents")
3840 filecap = n.get_cap().to_string()
3841 del n # we want a new object, not the cached one
3842 self.nodemaker.default_encoding_parameters["n"] = 10
3843 n2 = self.nodemaker.create_from_cap(filecap)
3845 d.addCallback(_created)
3846 def modifier(old_contents, servermap, first_time):
3847 return "new contents"
3848 d.addCallback(lambda n: n.modify(modifier))