2 from cStringIO import StringIO
3 from twisted.trial import unittest
4 from twisted.internet import defer, reactor
5 from allmydata import uri, client
6 from allmydata.nodemaker import NodeMaker
7 from allmydata.util import base32, consumer, fileutil, mathutil
8 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
9 ssk_pubkey_fingerprint_hash
10 from allmydata.util.consumer import MemoryConsumer
11 from allmydata.util.deferredutil import gatherResults
12 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
13 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
14 from allmydata.monitor import Monitor
15 from allmydata.test.common import ShouldFailMixin
16 from allmydata.test.no_network import GridTestMixin
17 from foolscap.api import eventually, fireEventually
18 from foolscap.logging import log
19 from allmydata.storage_client import StorageFarmBroker
20 from allmydata.storage.common import storage_index_to_dir
21 from allmydata.scripts import debug
23 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
24 from allmydata.mutable.common import ResponseCache, \
25 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
26 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
27 NotEnoughServersError, CorruptShareError
28 from allmydata.mutable.retrieve import Retrieve
29 from allmydata.mutable.publish import Publish, MutableFileHandle, \
31 DEFAULT_MAX_SEGMENT_SIZE
32 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
33 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
34 from allmydata.mutable.repairer import MustForceRepairError
36 import allmydata.test.common_util as testutil
37 from allmydata.test.common import TEST_RSA_KEY_SIZE
38 from allmydata.test.test_download import PausingConsumer, \
39 PausingAndStoppingConsumer, StoppingConsumer, \
40 ImmediatelyStoppingConsumer
43 # this "FakeStorage" exists to put the share data in RAM and avoid using real
44 # network connections, both to speed up the tests and to reduce the amount of
45 # non-mutable.py code being exercised.
48 # this class replaces the collection of storage servers, allowing the
49 # tests to examine and manipulate the published shares. It also lets us
50 # control the order in which read queries are answered, to exercise more
# of the error-handling code in Retrieve.
53 # Note that we ignore the storage index: this FakeStorage instance can
54 # only be used for a single storage index.
59 # _sequence is used to cause the responses to occur in a specific
60 # order. If it is in use, then we will defer queries instead of
61 # answering them right away, accumulating the Deferreds in a dict. We
62 # don't know exactly how many queries we'll get, so exactly one
63 # second after the first query arrives, we will release them all (in
67 self._pending_timer = None
    def read(self, peerid, storage_index):
        # Return the {shnum: data} dict for this peer. When self._sequence
        # is set, the answer is queued in self._pending instead, to be
        # released later by _fire_readers in a controlled order.
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            # no ordering requested: answer immediately
            return defer.succeed(shares)
        # NOTE(review): `d` below is not defined in this excerpt --
        # presumably `d = defer.Deferred()` plus a guard around the
        # callLater are elided here; confirm against full source.
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        if peerid not in self._pending:
            self._pending[peerid] = []
        self._pending[peerid].append( (d, shares) )
    def _fire_readers(self):
        # Release all queued read responses: first the peers named in
        # self._sequence (in that order), then any remaining peers.
        self._pending_timer = None
        pending = self._pending
        for peerid in self._sequence:
            # NOTE(review): an `if peerid in pending:` guard appears to be
            # elided here -- pop() would raise for an absent peerid.
                for (d, shares) in pending.pop(peerid):
                    eventually(d.callback, shares)
        for peerid in pending:
            for (d, shares) in pending[peerid]:
                eventually(d.callback, shares)
    def write(self, peerid, storage_index, shnum, offset, data):
        # Overlay `data` at `offset` within the stored share `shnum` for
        # this peer, extending it if necessary.
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        # NOTE(review): `f` is not defined in this excerpt -- presumably
        # `f = StringIO()` plus seek/write lines are elided; confirm.
        f.write(shares.get(shnum, ""))
        shares[shnum] = f.getvalue()
class FakeStorageServer:
    # In-RAM stand-in for a remote storage server rref; delegates share
    # data to a shared FakeStorage instance.
    def __init__(self, peerid, storage):
        self.storage = storage
    def callRemote(self, methname, *args, **kwargs):
        # dispatch by method name, one event-loop turn later (mimics the
        # asynchrony of a real remote call)
            meth = getattr(self, methname)
            return meth(*args, **kwargs)
        d.addCallback(lambda res: _call())

    def callRemoteOnly(self, methname, *args, **kwargs):
        # fire-and-forget variant: result and errors are discarded
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):

    def slot_readv(self, storage_index, shnums, readv):
        # read the requested byte ranges from each requested share
        d = self.storage.read(self.peerid, storage_index)
                if shnums and shnum not in shnums:
                vector = response[shnum] = []
                for (offset, length) in readv:
                    assert isinstance(offset, (int, long)), offset
                    assert isinstance(length, (int, long)), length
                    vector.append(shares[shnum][offset:offset+length])

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
        answer = (True, readv)
        return fireEventually(answer)
def flip_bit(original, byte_offset):
    """Return a copy of `original` with the low bit (0x01) of the byte
    at `byte_offset` inverted. Used to corrupt share data in tests."""
    head = original[:byte_offset]
    tail = original[byte_offset+1:]
    flipped = chr(ord(original[byte_offset]) ^ 0x01)
    return head + flipped + tail
def add_two(original, byte_offset):
    """Return a copy of `original` with bit 0x02 of the byte at
    `byte_offset` toggled.

    Flipping only the low bit is not enough to corrupt a version
    number, because 1 is also a valid version number; toggling 0x02
    guarantees the byte changes to a different value.
    """
    corrupted = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + corrupted + original[byte_offset+1:]
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    # NOTE(review): several lines appear elided in this excerpt (the
    # `ds = []` accumulator, the per-share loop, the else-branches of the
    # offset computation, and the `f = add_two/flip_bit` selection);
    # confirm against full source.
    for peerid in s._peers:
        shares = s._peers[peerid]
            if (shnums_to_corrupt is not None
                and shnum not in shnums_to_corrupt):
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum, shares):
                 k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                if offset1 == "pubkey" and IV:
                    real_offset = o[offset1]
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum, shares)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
def make_storagebroker(s=None, num_peers=10):
    # Build a StorageFarmBroker populated with FakeStorageServers that
    # all share the single FakeStorage instance `s`.
    # NOTE(review): handling for the s=None default (presumably
    # `s = FakeStorage()`) is not visible in this excerpt; confirm.
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        storage_broker.test_add_rref(peerid, fss)
    return storage_broker
def make_nodemaker(s=None, num_peers=10):
    # Build a NodeMaker wired to the fake grid, using the small test RSA
    # key size and default 3-of-10 encoding.
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    # NOTE(review): the middle of the NodeMaker(...) argument list and the
    # trailing `return nodemaker` are not visible in this excerpt; confirm.
    nodemaker = NodeMaker(storage_broker, sh, None,
                          {"k": 3, "n": 10}, SDMF_VERSION, keygen)
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    # Tests of MutableFileNode behavior (create/upload/download/modify)
    # run against the in-RAM FakeStorage grid defined above.
    #
    # this used to be in Publish, but we removed the limit. Some of
    # these tests test whether the new code correctly allows files
    # larger than the limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
        # NOTE(review): the `def setUp(self):` header appears elided in
        # this excerpt; these two lines are its body.
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)

    def test_create(self):
        # creating a default (SDMF) file should place exactly one share
        # on the first server
        d = self.nodemaker.create_mutable_file()
            # NOTE(review): enclosing `def _created(n):` is elided here.
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)

    def test_create_mdmf(self):
        # same checks as test_create, for an MDMF-format file
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            d.addCallback(lambda ignored, v=v:
                          self.nodemaker.create_mutable_file(version=v))
                self.failUnless(isinstance(n, MutableFileNode))
            d.addCallback(_created)
            d.addCallback(lambda n:
                          n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                          self._node.download_best_version())
            d.addCallback(lambda contents:
                          self.failUnlessEqual(contents, "Contents" * 50000))

    def test_max_shares(self):
        # publish with the maximum share count (n=255) and verify that
        # all 255 shares land somewhere in the grid
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
                      n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
                      self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
                      self.failUnlessEqual("contents" * 50000, contents))

    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
        d.addCallback(_created)
        d.addCallback(lambda n:
                      n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
                      self._node.download_best_version())
        d.addCallback(lambda contents:
                      self.failUnlessEqual(contents, "contents" * 50000))
    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)

    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)

    def test_create_from_mdmf_readcap(self):
        # a node built from an MDMF readcap must report is_readonly()
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)

    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
    def test_serialize(self):
        # _do_serialized must run operations one at a time, passing
        # through args/kwargs, results, and failures
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)
            # NOTE(review): the enclosing `def _errback():` appears
            # elided in this excerpt.
            raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
    def test_upload_and_download(self):
        # exercise the full overwrite/upload/download cycle on an SDMF
        # file, including servermap-driven download of a specific version
        d = self.nodemaker.create_mutable_file()
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
        d.addCallback(_created)

    def test_upload_and_download_mdmf(self):
        # same cycle for MDMF, with contents big enough to force
        # multi-segment upload and download
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                          n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                          n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            bigger_contents = "contents2" * 1000000 # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                          n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.failUnlessEqual(data, bigger_contents))
        d.addCallback(_created)
    def test_retrieve_producer_mdmf(self):
        # We should make sure that the retriever is able to pause and stop
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=MDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "MDMF", data)

    # note: SDMF has only one big segment, so we can't use the usual
    # after-the-first-write() trick to pause or stop the download.
    # Disabled until we find a better approach.
    def OFF_test_retrieve_producer_sdmf(self):
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=SDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "SDMF", data)

    def _test_retrieve_producer(self, version, kind, data):
        # Shared driver: read `version` (a mutable-version node) into a
        # series of consumers that pause and/or stop the producer, and
        # check the expected DownloadStopped failures.
        # NOTE(review): several continuation lines of the shouldFail
        # calls appear elided in this excerpt; confirm against full
        # source before editing.
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))

        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_pause_stop",
                                      "our Consumer called stopProducing()",
        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop",
                                      "our Consumer called stopProducing()",
        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop_imm",
                                      "our Consumer called stopProducing()",
            c5 = MemoryConsumer()
            d1 = version.read(c5)
            c5.producer.stopProducing()
            return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                                   "our Consumer called stopProducing()",
    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.uri = node.get_uri()
            # also confirm that the cap has no extension fields
            pieces = self.uri.split(":")
            self.failUnlessEqual(len(pieces), 4)
            return node.overwrite(MutableData("contents1" * 100000))
            # build a fresh node from the saved cap and download with it
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_downloaded)

    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
    def test_create_with_initial_contents(self):
        # create_mutable_file should accept an initial-contents uploadable
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(_created)

    def test_create_mdmf_with_initial_contents(self):
        # same as above, MDMF and big enough for multiple segments
        initial_contents = "foobarbaz" * 131072 # 900KiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
            d = n.download_best_version()
            d.addCallback(lambda data:
                          self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                          n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.failUnlessEqual(data, initial_contents +
        d.addCallback(_created)

    def test_response_cache_memory_leak(self):
        # a second download must not grow the response cache
        d = self.nodemaker.create_mutable_file("contents")
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))

            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
        d.addCallback(_created)
    def test_create_with_initial_contents_function(self):
        # create_mutable_file may be given a callable that receives the
        # new node (with its writekey already set) and returns the
        # initial contents
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))

    def test_create_mdmf_with_initial_contents_function(self):
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
                      n.download_best_version())
        d.addCallback(lambda data2:
                      self.failUnlessEqual(data2, data))

    def test_create_with_too_large_contents(self):
        # contents larger than the old segment-size limit must now work
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
        d.addCallback(_created)
    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        # helper: assert that the best recoverable version of node `n`
        # currently carries `expected_seqnum` (verinfo[0]); `which` labels
        # the failure message
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        # NOTE(review): no `return d` is visible in this excerpt, yet
        # callers chain on the result -- confirm against full source.
    def test_modify(self):
        # Exercise n.modify() with a series of modifier callbacks: a real
        # modification, no-ops, a raising modifier, a too-big modifier,
        # and modifiers that simulate UncoordinatedWriteError. Seqnum
        # should only advance when contents actually change.
        # NOTE(review): several modifier bodies and the enclosing
        # `def _created(n):` appear elided in this excerpt.
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
        def _non_modifier(old_contents, servermap, first_time):
        def _none_modifier(old_contents, servermap, first_time):
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
                raise UncoordinatedWriteError("simulated")

        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))

            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))

            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))

            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))

            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))

            def _reset_ucw_error_modifier(res):
            d.addCallback(_reset_ucw_error_modifier)

            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
        d.addCallback(_created)
    def test_modify_backoffer(self):
        # Exercise n.modify() with custom backoffers: one that stops
        # immediately, one that pauses, and a BackoffAgent configured to
        # give up quickly.
        # NOTE(review): several def bodies and shouldFail continuation
        # lines appear elided in this excerpt.
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
        def _backoff_pauser(node, f):
            reactor.callLater(0.5, d.callback, None)

        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1

        d = self.nodemaker.create_mutable_file(MutableData("line1"))
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))

            def _reset_ucw_error_modifier(res):
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))

            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          n.modify, _always_ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))

        d.addCallback(_created)
    def test_upload_and_download_full_size_keys(self):
        # same flow as test_upload_and_download, but with a fresh
        # KeyGenerator (i.e. without the small TEST_RSA_KEY_SIZE default)
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
        d.addCallback(_created)
    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
            # NOTE(review): enclosing `def _created(n):` (which evidently
            # also stores self.n) is elided in this excerpt.
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
                      self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
                      self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 9))
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
            # a second filenode on the same cap, for comparison tests
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # self.CONTENTS should have more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
def publish_multiple(self, version=0):
    # Publish five successive versions of one file (seqnums 1..4b) and
    # snapshot the shares after each, so tests can mix and match them.
    # NOTE(review): the remaining CONTENTS entries and the '_created'
    # callback's 'def' line are elided in this excerpt.
    self.CONTENTS = ["Contents 0",
    self.uploadables = [MutableData(d) for d in self.CONTENTS]
    self._copied_shares = {}
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage)
    d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
    # now create multiple versions of the same file, and accumulate
    # their shares, so we can mix and match them later.
    d = defer.succeed(None)
    d.addCallback(self._copy_shares, 0)
    d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
    d.addCallback(self._copy_shares, 1)
    d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
    d.addCallback(self._copy_shares, 2)
    d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
    d.addCallback(self._copy_shares, 3)
    # now we replace all the shares with version s3, and upload a new
    # version to get s4b.
    rollback = dict([(i,2) for i in range(10)])
    d.addCallback(lambda res: self._set_versions(rollback))
    d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
    d.addCallback(self._copy_shares, 4)
    # we leave the storage in state 4
    d.addCallback(_created)
994 def _copy_shares(self, ignored, index):
995 shares = self._storage._peers
996 # we need a deep copy
998 for peerid in shares:
999 new_shares[peerid] = {}
1000 for shnum in shares[peerid]:
1001 new_shares[peerid][shnum] = shares[peerid][shnum]
1002 self._copied_shares[index] = new_shares
1004 def _set_versions(self, versionmap):
1005 # versionmap maps shnums to which version (0,1,2,3,4) we want the
1006 # share to be at. Any shnum which is left out of the map will stay at
1007 # its current version.
1008 shares = self._storage._peers
1009 oldshares = self._copied_shares
1010 for peerid in shares:
1011 for shnum in shares[peerid]:
1012 if shnum in versionmap:
1013 index = versionmap[shnum]
1014 shares[peerid][shnum] = oldshares[index][peerid][shnum]
class Servermap(unittest.TestCase, PublishMixin):
    # NOTE(review): several preamble lines of this class are elided in
    # this excerpt: setUp's 'def' line, make_servermap's remaining
    # default arguments and None-guards, and the trailing
    # update/return lines of both helpers.
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
        # build a fresh ServerMap via a ServermapUpdater in the given mode
        sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        # refresh an existing map in place, in the given mode
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
def failUnlessOneRecoverable(self, sm, num_shares):
    # Assert the map shows exactly one recoverable version with
    # 'num_shares' shares found (the publish encoding is 3-of-10).
    self.failUnlessEqual(len(sm.recoverable_versions()), 1)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
    best = sm.best_recoverable_version()
    self.failIfEqual(best, None)
    self.failUnlessEqual(sm.recoverable_versions(), set([best]))
    self.failUnlessEqual(len(sm.shares_available()), 1)
    self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
    # spot-check one (shnum, servers) entry of the sharemap
    shnum, servers = sm.make_sharemap().items()[0]
    server = list(servers)[0]
    self.failUnlessEqual(sm.version_on_server(server, shnum), best)
    self.failUnlessEqual(sm.version_on_server(server, 666), None)
    # NOTE(review): callers chain on this callback's result (see
    # test_basic), so the full version presumably ends with
    # 'return sm' -- that line is elided in this excerpt.
def test_basic(self):
    # exercise every servermap mode against a freshly-published file,
    # then again while re-using (updating) the same map.
    d = defer.succeed(None)
    ms = self.make_servermap
    us = self.update_servermap
    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    # this mode stops at k+epsilon, and epsilon=k, so 6 shares
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    # this mode stops at 'k' shares
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
    # and can we re-use the same servermap? Note that these are sorted in
    # increasing order of number of servers queried, since once a server
    # gets into the servermap, we'll always ask it for an update.
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
    d.addCallback(lambda sm: us(sm, mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
def test_fetch_privkey(self):
    d = defer.succeed(None)
    # use the sibling filenode (which hasn't been used yet), and make
    # sure it can fetch the privkey. The file is small, so the privkey
    # will be fetched on the first (query) pass.
    d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    # create a new file, which is large enough to knock the privkey out
    # of the early part of the file
    LARGE = "These are Larger contents" * 200 # about 5KB
    LARGE_uploadable = MutableData(LARGE)
    d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
    def _created(large_fn):
        # read the large file through a second filenode, which must go
        # fetch the privkey from a later part of the share
        large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
        return self.make_servermap(MODE_WRITE, large_fn2)
    d.addCallback(_created)
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
def test_mark_bad(self):
    d = defer.succeed(None)
    ms = self.make_servermap
    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    # NOTE(review): the '_made_map' and '_check_map' callback 'def'
    # lines, plus the guard selecting which shares to mark, are elided
    # in this excerpt.
    v = sm.best_recoverable_version()
    vm = sm.make_versionmap()
    shares = list(vm[v])
    self.failUnlessEqual(len(shares), 6)
    self._corrupted = set()
    # mark the first 5 shares as corrupt, then update the servermap.
    # The map should not have the marked shares it in any more, and
    # new shares should be found to replace the missing ones.
    for (shnum, server, timestamp) in shares:
        self._corrupted.add( (server, shnum) )
        sm.mark_bad_share(server, shnum, "")
    return self.update_servermap(sm, MODE_WRITE)
    d.addCallback(_made_map)
    # this should find all 5 shares that weren't marked bad
    v = sm.best_recoverable_version()
    vm = sm.make_versionmap()
    shares = list(vm[v])
    for (server, shnum) in self._corrupted:
        server_shares = sm.debug_shares_on_server(server)
        self.failIf(shnum in server_shares,
                    "%d was in %s" % (shnum, server_shares))
    self.failUnlessEqual(len(shares), 5)
    d.addCallback(_check_map)
def failUnlessNoneRecoverable(self, sm):
    """Assert that the servermap 'sm' found no versions at all."""
    recoverable = sm.recoverable_versions()
    unrecoverable = sm.unrecoverable_versions()
    self.failUnlessEqual(len(recoverable), 0)
    self.failUnlessEqual(len(unrecoverable), 0)
    self.failUnlessEqual(sm.best_recoverable_version(), None)
    self.failUnlessEqual(len(sm.shares_available()), 0)
def test_no_shares(self):
    self._storage._peers = {} # delete all shares
    ms = self.make_servermap
    d = defer.succeed(None)
    # with no shares anywhere, every mode should report nothing
    # recoverable and nothing unrecoverable
    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
def failUnlessNotQuiteEnough(self, sm):
    """Assert 'sm' found exactly one version, unrecoverable with only 2
    of the 3 required shares (3-of-10 encoding), and hand the map back
    so further callbacks can keep inspecting it.
    """
    self.failUnlessEqual(len(sm.recoverable_versions()), 0)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
    best = sm.best_recoverable_version()
    self.failUnlessEqual(best, None)
    self.failUnlessEqual(len(sm.shares_available()), 1)
    # list(...) keeps this working whether .values() returns a list
    # (py2) or a non-indexable view (py3)
    self.failUnlessEqual(list(sm.shares_available().values())[0], (2,3,10) )
    # fix: callers chain further assertions on the map (see
    # test_not_quite_enough_shares, which inspects sm.make_sharemap()
    # in the next callback), so the map must be returned
    return sm
def test_not_quite_enough_shares(self):
    # NOTE(review): the line binding 's' (evidently self._storage) and
    # the code that puts exactly two shares back are elided in this
    # excerpt.
    ms = self.make_servermap
    num_shares = len(s._peers)
    for peerid in s._peers:
        s._peers[peerid] = {}
    # now there ought to be only two shares left
    assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
    d = defer.succeed(None)
    # k=3, so two shares is one short of recoverable in every mode
    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda sm:
                  self.failUnlessEqual(len(sm.make_sharemap()), 2))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
def test_servermapupdater_finds_mdmf_files(self):
    # setUp already published an MDMF file for us. We just need to
    # make sure that when we run the ServermapUpdater, the file is
    # reported to have one recoverable version.
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
                  self.publish_mdmf())
    d.addCallback(lambda ignored:
                  self.make_servermap(mode=MODE_CHECK))
    # Calling make_servermap also updates the servermap in the mode
    # that we specify, so we just need to see what it says.
    def _check_servermap(sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
    d.addCallback(_check_servermap)

def test_fetch_update(self):
    # updating a restricted byte range (1, 2) should record one piece
    # of per-share update data for each of the 10 shares
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
                  self.publish_mdmf())
    d.addCallback(lambda ignored:
                  self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
    def _check_servermap(sm):
        # NOTE(review): one line of this check is elided in this excerpt.
        self.failUnlessEqual(len(sm.update_data), 10)
        for data in sm.update_data.itervalues():
            self.failUnlessEqual(len(data), 1)
    d.addCallback(_check_servermap)

def test_servermapupdater_finds_sdmf_files(self):
    # same as the MDMF variant above, for an SDMF file
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
                  self.publish_sdmf())
    d.addCallback(lambda ignored:
                  self.make_servermap(mode=MODE_CHECK))
    d.addCallback(lambda servermap:
                  self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
    # NOTE(review): several preamble lines are elided in this excerpt:
    # setUp's 'def' line, the None-guards of make_servermap and
    # abbrev_verinfo, and the helpers' trailing update/return lines.
        return self.publish_one()

    def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
        # build (or refresh) a servermap for self._fn in the given mode
        oldmap = ServerMap()
        sb = self._storage_broker
        smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)

    def abbrev_verinfo(self, verinfo):
        # compact "seqnum-roothash" label for a verinfo tuple (debug aid)
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = verinfo
        return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
def abbrev_verinfo_dict(self, verinfo_d):
    """Return a copy of 'verinfo_d' whose verinfo-tuple keys are
    replaced by short "seqnum-roothash" labels (for debug dumps).

    Fix: as this code stood, 'output' was assigned into without being
    initialized (NameError) and nothing was returned, even though
    dump_servermap prints this method's return value.
    """
    output = {}
    for verinfo,value in verinfo_d.items():
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = verinfo
        output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
    return output
1272 def dump_servermap(self, servermap):
1273 print "SERVERMAP", servermap
1274 print "RECOVERABLE", [self.abbrev_verinfo(v)
1275 for v in servermap.recoverable_versions()]
1276 print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1277 print "available", self.abbrev_verinfo_dict(servermap.shares_available())
def do_download(self, servermap, version=None):
    # retrieve 'version' of the file into memory and return the
    # assembled bytes; defaults to the best recoverable version.
    # NOTE(review): the 'version is None' guard line is elided in this
    # excerpt.
    version = servermap.best_recoverable_version()
    r = Retrieve(self._fn, self._storage_broker, servermap, version)
    c = consumer.MemoryConsumer()
    d = r.download(consumer=c)
    d.addCallback(lambda mc: "".join(mc.chunks))
def test_basic(self):
    # map, download, and compare against the published CONTENTS;
    # then verify the same servermap can be re-used and re-updated.
    d = self.make_servermap()
    def _do_retrieve(servermap):
        self._smap = servermap
        #self.dump_servermap(servermap)
        self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
        return self.do_download(servermap)
    d.addCallback(_do_retrieve)
    def _retrieved(new_contents):
        self.failUnlessEqual(new_contents, self.CONTENTS)
    d.addCallback(_retrieved)
    # we should be able to re-use the same servermap, both with and
    # without updating it.
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    # clobbering the pubkey should make the servermap updater re-fetch it
    def _clobber_pubkey(res):
        self._fn._pubkey = None
    d.addCallback(_clobber_pubkey)
    d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
def test_all_shares_vanished(self):
    d = self.make_servermap()
    def _remove_shares(servermap):
        # NOTE(review): the line that empties each share dict is elided
        # in this excerpt, as is the trailing return of d1.
        for shares in self._storage._peers.values():
        d1 = self.shouldFail(NotEnoughSharesError,
                             "test_all_shares_vanished",
                             "ran out of servers",
                             self.do_download, servermap)
    d.addCallback(_remove_shares)

def test_no_servers(self):
    sb2 = make_storagebroker(num_peers=0)
    # if there are no servers, then a MODE_READ servermap should come
    # back empty
    d = self.make_servermap(sb=sb2)
    def _check_servermap(servermap):
        self.failUnlessEqual(servermap.best_recoverable_version(), None)
        self.failIf(servermap.recoverable_versions())
        self.failIf(servermap.unrecoverable_versions())
        self.failIf(servermap.all_servers())
    d.addCallback(_check_servermap)
def test_no_servers_download(self):
    sb2 = make_storagebroker(num_peers=0)
    self._fn._storage_broker = sb2
    d = self.shouldFail(UnrecoverableFileError,
                        "test_no_servers_download",
                        "no recoverable versions",
                        self._fn.download_best_version)
    # NOTE(review): the '_restore' callback's 'def' line is elided in
    # this excerpt; the next five lines are evidently its body.
    # a failed download that occurs while we aren't connected to
    # anybody should not prevent a subsequent download from working.
    # This isn't quite the webapi-driven test that #463 wants, but it
    # should be close enough.
    self._fn._storage_broker = self._storage_broker
    return self._fn.download_best_version()
    def _retrieved(new_contents):
        self.failUnlessEqual(new_contents, self.CONTENTS)
    d.addCallback(_restore)
    d.addCallback(_retrieved)
def _test_corrupt_all(self, offset, substring,
                      should_succeed=False,
                      failure_checker=None,
                      fetch_privkey=False):
    # Corrupt the given offset/field in every share, then either expect
    # the download to still succeed (should_succeed) or expect failure
    # with 'substring' recorded among the servermap's problems.
    # NOTE(review): the 'corrupt_early' keyword line, several guard
    # lines, argument continuations, and the trailing 'return d' are
    # elided in this excerpt.
    d = defer.succeed(None)
    d.addCallback(corrupt, self._storage, offset)
    d.addCallback(lambda res: self.make_servermap())
    if not corrupt_early:
        # corrupt between mapupdate and retrieve, to exercise the
        # retrieve-time error handling
        d.addCallback(corrupt, self._storage, offset)
    def _do_retrieve(servermap):
        ver = servermap.best_recoverable_version()
        if ver is None and not should_succeed:
            # no recoverable versions == not succeeding. The problem
            # should be noted in the servermap's list of problems.
            allproblems = [str(f) for f in servermap.get_problems()]
            self.failUnlessIn(substring, "".join(allproblems))
        d1 = self._fn.download_version(servermap, ver,
        d1.addCallback(lambda new_contents:
                       self.failUnlessEqual(new_contents, self.CONTENTS))
        d1 = self.shouldFail(NotEnoughSharesError,
                             "_corrupt_all(offset=%s)" % (offset,),
                             self._fn.download_version, servermap,
        d1.addCallback(failure_checker)
        d1.addCallback(lambda res: servermap)
    d.addCallback(_do_retrieve)
def test_corrupt_all_verbyte(self):
    # when the version byte is not 0 or 1, we hit an UnknownVersionError
    # error in unpack_share().
    d = self._test_corrupt_all(0, "UnknownVersionError")
    def _check_servermap(servermap):
        # and the dump should mention the problems
        # NOTE(review): the line creating 's' (the dump target,
        # presumably a StringIO) is elided in this excerpt.
        dump = servermap.dump(s).getvalue()
        self.failUnless("30 PROBLEMS" in dump, dump)
    d.addCallback(_check_servermap)

def test_corrupt_all_seqnum(self):
    # a corrupt sequence number will trigger a bad signature
    return self._test_corrupt_all(1, "signature is invalid")

def test_corrupt_all_R(self):
    # a corrupt root hash will trigger a bad signature
    return self._test_corrupt_all(9, "signature is invalid")

def test_corrupt_all_IV(self):
    # a corrupt salt/IV will trigger a bad signature
    return self._test_corrupt_all(41, "signature is invalid")

def test_corrupt_all_k(self):
    # a corrupt 'k' will trigger a bad signature
    return self._test_corrupt_all(57, "signature is invalid")

def test_corrupt_all_N(self):
    # a corrupt 'N' will trigger a bad signature
    return self._test_corrupt_all(58, "signature is invalid")

def test_corrupt_all_segsize(self):
    # a corrupt segsize will trigger a bad signature
    return self._test_corrupt_all(59, "signature is invalid")

def test_corrupt_all_datalen(self):
    # a corrupt data length will trigger a bad signature
    return self._test_corrupt_all(67, "signature is invalid")
def test_corrupt_all_pubkey(self):
    # a corrupt pubkey won't match the URI's fingerprint. We need to
    # remove the pubkey from the filenode, or else it won't bother trying
    # (a continuation line of this comment is elided in this excerpt)
    self._fn._pubkey = None
    return self._test_corrupt_all("pubkey",
                                  "pubkey doesn't match fingerprint")

def test_corrupt_all_sig(self):
    # a corrupt signature is a bad one
    # the signature runs from about [543:799], depending upon the length
    # (a continuation line of this comment is elided in this excerpt)
    return self._test_corrupt_all("signature", "signature is invalid")

def test_corrupt_all_share_hash_chain_number(self):
    # a corrupt share hash chain entry will show up as a bad hash. If we
    # mangle the first byte, that will look like a bad hash number,
    # causing an IndexError
    return self._test_corrupt_all("share_hash_chain", "corrupt hashes")

def test_corrupt_all_share_hash_chain_hash(self):
    # a corrupt share hash chain entry will show up as a bad hash. If we
    # mangle a few bytes in, that will look like a bad hash.
    return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")

def test_corrupt_all_block_hash_tree(self):
    # every block hash tree corrupted -> block validation must fail
    return self._test_corrupt_all("block_hash_tree",
                                  "block hash tree failure")

def test_corrupt_all_block(self):
    # corrupted blocks no longer match their (intact) hash tree
    return self._test_corrupt_all("share_data", "block hash tree failure")

def test_corrupt_all_encprivkey(self):
    # a corrupted privkey won't even be noticed by the reader, only by a
    # (a continuation line of this comment is elided in this excerpt)
    return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
def test_corrupt_all_encprivkey_late(self):
    # this should work for the same reason as above, but we corrupt
    # after the servermap update to exercise the error handling
    # NOTE(review): the closing keyword argument(s) of this call --
    # presumably fetch_privkey=True -- are elided in this excerpt.
    # We need to remove the privkey from the node, or the retrieve
    # process won't know to update it.
    self._fn._privkey = None
    return self._test_corrupt_all("enc_privkey",
                                  None, # this shouldn't fail
                                  should_succeed=True,
                                  corrupt_early=False,
# disabled until retrieve tests checkstring on each blockfetch. I didn't
# just use a .todo because the failing-but-ignored test emits about 30kB
def OFF_test_corrupt_all_seqnum_late(self):
    # corrupting the seqnum between mapupdate and retrieve should result
    # in NotEnoughSharesError, since each share will look invalid
    # NOTE(review): the '_check' callback's 'def' line and the line
    # unwrapping the failure 'f' are elided in this excerpt.
        self.failUnless(f.check(NotEnoughSharesError))
        self.failUnless("uncoordinated write" in str(f))
    return self._test_corrupt_all(1, "ran out of servers",
                                  corrupt_early=False,
                                  failure_checker=_check)

def test_corrupt_all_block_hash_tree_late(self):
    # NOTE(review): the '_check' callback's 'def' line is elided in
    # this excerpt.
        self.failUnless(f.check(NotEnoughSharesError))
    return self._test_corrupt_all("block_hash_tree",
                                  "block hash tree failure",
                                  corrupt_early=False,
                                  failure_checker=_check)

def test_corrupt_all_block_late(self):
    # NOTE(review): the '_check' callback's 'def' line is elided in
    # this excerpt.
        self.failUnless(f.check(NotEnoughSharesError))
    return self._test_corrupt_all("share_data", "block hash tree failure",
                                  corrupt_early=False,
                                  failure_checker=_check)
def test_basic_pubkey_at_end(self):
    # we corrupt the pubkey in all but the last 'k' shares, allowing the
    # download to succeed but forcing a bunch of retries first. Note that
    # this is rather pessimistic: our Retrieve process will throw away
    # the whole share if the pubkey is bad, even though the rest of the
    # share might be good.
    self._fn._pubkey = None
    k = self._fn.get_required_shares()
    N = self._fn.get_total_shares()
    d = defer.succeed(None)
    d.addCallback(corrupt, self._storage, "pubkey",
                  shnums_to_corrupt=range(0, N-k))
    d.addCallback(lambda res: self.make_servermap())
    def _do_retrieve(servermap):
        # the bad pubkeys must have been noticed during mapupdate...
        self.failUnless(servermap.get_problems())
        self.failUnless("pubkey doesn't match fingerprint"
                        in str(servermap.get_problems()[0]))
        # ...but the file is still downloadable from the last k shares
        ver = servermap.best_recoverable_version()
        r = Retrieve(self._fn, self._storage_broker, servermap, ver)
        c = consumer.MemoryConsumer()
        return r.download(c)
    d.addCallback(_do_retrieve)
    d.addCallback(lambda mc: "".join(mc.chunks))
    d.addCallback(lambda new_contents:
                  self.failUnlessEqual(new_contents, self.CONTENTS))
def _test_corrupt_some(self, offset, mdmf=False):
    # corrupt the given field in the first five shares only; the file
    # must remain downloadable from the rest.
    # NOTE(review): the 'if mdmf:'/'else:' guard lines around the two
    # publish calls are elided in this excerpt.
    d = self.publish_mdmf()
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
                  corrupt(None, self._storage, offset, range(5)))
    d.addCallback(lambda ignored:
                  self.make_servermap())
    def _do_retrieve(servermap):
        ver = servermap.best_recoverable_version()
        self.failUnless(ver)
        return self._fn.download_best_version()
    d.addCallback(_do_retrieve)
    d.addCallback(lambda new_contents:
                  self.failUnlessEqual(new_contents, self.CONTENTS))
def test_corrupt_some(self):
    """Mangle share_data on the first five shares: the servermap still
    counts them as good, retrieve rejects them, and the MODE_READ set of
    6 becomes insufficient -- node.download must retry with more
    servers."""
    return self._test_corrupt_some("share_data")
def test_download_fails(self):
    # with every signature corrupted, no version is recoverable, so
    # download_best_version must fail outright
    d = corrupt(None, self._storage, "signature")
    d.addCallback(lambda ignored:
                  self.shouldFail(UnrecoverableFileError, "test_download_anyway",
                                  "no recoverable versions",
                                  self._fn.download_best_version))
def test_corrupt_mdmf_block_hash_tree(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  self._test_corrupt_all(("block_hash_tree", 12 * 32),
                                         "block hash tree failure",
                                         corrupt_early=False,
                                         should_succeed=False))

def test_corrupt_mdmf_block_hash_tree_late(self):
    # NOTE(review): one keyword-argument line of this call is elided in
    # this excerpt.
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  self._test_corrupt_all(("block_hash_tree", 12 * 32),
                                         "block hash tree failure",
                                         should_succeed=False))

def test_corrupt_mdmf_share_data(self):
    # NOTE(review): one keyword-argument line of this call is elided in
    # this excerpt.
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  # TODO: Find out what the block size is and corrupt a
                  # specific block, rather than just guessing.
                  self._test_corrupt_all(("share_data", 12 * 40),
                                         "block hash tree failure",
                                         should_succeed=False))

def test_corrupt_some_mdmf(self):
    # NOTE(review): the closing argument line -- presumably mdmf=True --
    # is elided in this excerpt.
    return self._test_corrupt_some(("share_data", 12 * 40),
def check_good(self, r, where):
    # 'r' is a check result; 'where' labels any failure message.
    # NOTE(review): a trailing 'return r' may be elided from both of
    # these helpers in this excerpt -- confirm against the full file.
    self.failUnless(r.is_healthy(), where)

def check_bad(self, r, where):
    self.failIf(r.is_healthy(), where)
def check_expected_failure(self, r, expected_exception, substring, where):
    """Scan the problems recorded in check results 'r' for a failure of
    type 'expected_exception' whose text contains 'substring'; fail the
    test (labelled by 'where') if no such problem is found.
    """
    for (peerid, storage_index, shnum, f) in r.problems:
        if f.check(expected_exception):
            self.failUnless(substring in str(f),
                            "%s: substring '%s' not in '%s'" %
                            (where, substring, str(f)))
            # fix: stop once the expected failure is found -- without
            # this return, control always fell through to self.fail()
            # below, so the check could never pass
            return
    self.fail("%s: didn't see expected exception %s in problems %s" %
              (where, expected_exception, r.problems))
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    # NOTE(review): setUp's 'def' line, the '_then' callback 'def'
    # lines, the share-deletion loop bodies, and trailing 'return d'
    # lines are elided throughout this excerpt.
        return self.publish_one()

    def test_check_good(self):
        d = self._fn.check(Monitor())
        d.addCallback(self.check_good, "test_check_good")

    def test_check_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_good")

    def test_check_no_shares(self):
        # delete every share, then check: must be reported unhealthy
        for shares in self._storage._peers.values():
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_no_shares")

    def test_check_mdmf_no_shares(self):
        d = self.publish_mdmf()
        for share in self._storage._peers.values():
        d.addCallback(_then)
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")

    def test_check_not_enough_shares(self):
        # leave fewer than k shares, then check: must be unhealthy
        for shares in self._storage._peers.values():
            for shnum in shares.keys():
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_not_enough_shares")

    def test_check_mdmf_not_enough_shares(self):
        d = self.publish_mdmf()
        for shares in self._storage._peers.values():
            for shnum in shares.keys():
        d.addCallback(_then)
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        # NOTE(review): typo in the diagnostic label below ("enougH");
        # harmless at runtime but worth fixing upstream.
        d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")

    def test_check_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_all_bad_sig")

    def test_check_mdmf_all_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, 1))
        d.addCallback(lambda ignored:
                      self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
def test_verify_mdmf_all_bad_sharedata(self):
    d = self.publish_mdmf()
    # On 8 of the shares, corrupt the beginning of the share data.
    # The signature check during the servermap update won't catch this.
    d.addCallback(lambda ignored:
                  corrupt(None, self._storage, "share_data", range(8)))
    # On 2 of the shares, corrupt the end of the share data.
    # The signature check during the servermap update won't catch
    # this either, and the retrieval process will have to process
    # all of the segments before it notices.
    d.addCallback(lambda ignored:
                  # the block hash tree comes right after the share data, so if we
                  # corrupt a little before the block hash tree, we'll corrupt in the
                  # last block of each share.
                  corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    # The verifier should flag the file as unhealthy, and should
    # list all 10 shares as bad.
    d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
    def _check_num_bad(r):
        self.failIf(r.is_recoverable())
        smap = r.get_servermap()
        self.failUnlessEqual(len(smap.get_bad_shares()), 10)
    d.addCallback(_check_num_bad)
def test_check_all_bad_blocks(self):
    # NOTE(review): trailing 'return d' lines are elided throughout
    # this run of tests in this excerpt.
    d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
    # the Checker won't notice this.. it doesn't look at actual data
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_all_bad_blocks")

def test_check_mdmf_all_bad_blocks(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  corrupt(None, self._storage, "share_data"))
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")

def test_verify_good(self):
    d = self._fn.check(Monitor(), verify=True)
    d.addCallback(self.check_good, "test_verify_good")

def test_verify_all_bad_sig(self):
    d = corrupt(None, self._storage, 1) # bad sig
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_all_bad_sig")

def test_verify_one_bad_sig(self):
    # even a single bad signature makes the verifier unhappy
    d = corrupt(None, self._storage, 1, [9]) # bad sig
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_sig")

def test_verify_one_bad_block(self):
    d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
    # the Verifier *will* notice this, since it examines every byte
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_block")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "block hash tree failure",
                  "test_verify_one_bad_block")

def test_verify_one_bad_sharehash(self):
    d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "corrupt hashes",
                  "test_verify_one_bad_sharehash")

def test_verify_one_bad_encprivkey(self):
    d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "invalid privkey",
                  "test_verify_one_bad_encprivkey")

def test_verify_one_bad_encprivkey_uncheckable(self):
    d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
    readonly_fn = self._fn.get_readonly()
    # a read-only node has no way to validate the privkey
    d.addCallback(lambda ignored:
                  readonly_fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good,
                  "test_verify_one_bad_encprivkey_uncheckable")
def test_verify_mdmf_good(self):
    # NOTE(review): trailing 'return d' lines are elided throughout
    # this run of tests in this excerpt.
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good, "test_verify_mdmf_good")

def test_verify_mdmf_one_bad_block(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  corrupt(None, self._storage, "share_data", [1]))
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    # We should find one bad block here
    d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "block hash tree failure",
                  "test_verify_mdmf_one_bad_block")

def test_verify_mdmf_bad_encprivkey(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  corrupt(None, self._storage, "enc_privkey", [0]))
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "privkey",
                  "test_verify_mdmf_bad_encprivkey")

def test_verify_mdmf_bad_sig(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  corrupt(None, self._storage, 1, [1]))
    d.addCallback(lambda ignored:
                  self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")

def test_verify_mdmf_bad_encprivkey_uncheckable(self):
    # a read-only node cannot validate the privkey, so the bad key
    # goes unnoticed and the file still verifies as healthy
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  corrupt(None, self._storage, "enc_privkey", [1]))
    d.addCallback(lambda ignored:
                  self._fn.get_readonly())
    d.addCallback(lambda fn:
                  fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good,
                  "test_verify_mdmf_bad_encprivkey_uncheckable")
1888 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
# Flatten FakeStorage state 's' into a single dict keyed by
# (peerid, shnum), for easy before/after snapshot comparison.
1890 def get_shares(self, s):
1891 all_shares = {} # maps (peerid, shnum) to share data
1892 for peerid in s._peers:
1893 shares = s._peers[peerid]
1894 for shnum in shares:
1895 data = shares[shnum]
1896 all_shares[ (peerid, shnum) ] = data
# NOTE(review): numbering jumps 1896->1899; the "return all_shares"
# line is elided from this view.
def copy_shares(self, ignored=None):
    """Record a snapshot of the current share state for later comparison.

    'ignored' exists so this can sit directly in a Deferred callback chain.
    """
    snapshot = self.get_shares(self._storage)
    self.old_shares.append(snapshot)
# Repairing an already-healthy file must succeed, keep every share at the
# same (peerid, shnum) location, and bump the sequence number by one.
1902 def test_repair_nop(self):
1903 self.old_shares = []
1904 d = self.publish_one()
1905 d.addCallback(self.copy_shares)
1906 d.addCallback(lambda res: self._fn.check(Monitor()))
1907 d.addCallback(lambda check_results: self._fn.repair(check_results))
1908 def _check_results(rres):
1909 self.failUnless(IRepairResults.providedBy(rres))
1910 self.failUnless(rres.get_successful())
1911 # TODO: examine results
# NOTE(review): numbering jumps 1911->1915; a second copy_shares snapshot
# presumably happens in the elided lines.
1915 initial_shares = self.old_shares[0]
1916 new_shares = self.old_shares[1]
1917 # TODO: this really shouldn't change anything. When we implement
1918 # a "minimal-bandwidth" repairer, change this test to assert:
1919 #self.failUnlessEqual(new_shares, initial_shares)
1921 # all shares should be in the same place as before
1922 self.failUnlessEqual(set(initial_shares.keys()),
1923 set(new_shares.keys()))
1924 # but they should all be at a newer seqnum. The IV will be
1925 # different, so the roothash will be too.
1926 for key in initial_shares:
# NOTE(review): the opening halves of the two unpack_header() tuple
# assignments (orig 1927-1930 and 1933-1936) are elided from this view.
1931 k0, N0, segsize0, datalen0,
1932 o0) = unpack_header(initial_shares[key])
1937 k1, N1, segsize1, datalen1,
1938 o1) = unpack_header(new_shares[key])
1939 self.failUnlessEqual(version0, version1)
1940 self.failUnlessEqual(seqnum0+1, seqnum1)
1941 self.failUnlessEqual(k0, k1)
1942 self.failUnlessEqual(N0, N1)
1943 self.failUnlessEqual(segsize0, segsize1)
1944 self.failUnlessEqual(datalen0, datalen1)
1945 d.addCallback(_check_results)
def failIfSharesChanged(self, ignored=None):
    """Assert that the two most recent share snapshots are identical."""
    previous = self.old_shares[-2]
    latest = self.old_shares[-1]
    self.failUnlessEqual(previous, latest)
# With every share deleted, the file is unrecoverable: repair must report
# failure (get_successful() == False) rather than raise.
1954 def test_unrepairable_0shares(self):
1955 d = self.publish_one()
1956 def _delete_all_shares(ign):
1957 shares = self._storage._peers
1958 for peerid in shares:
# NOTE(review): the deletion statement (orig 1959) is elided here.
1960 d.addCallback(_delete_all_shares)
1961 d.addCallback(lambda ign: self._fn.check(Monitor()))
1962 d.addCallback(lambda check_results: self._fn.repair(check_results))
# NOTE(review): the "def _check(crr):" header (orig 1963) is elided.
1964 self.failUnlessEqual(crr.get_successful(), False)
1965 d.addCallback(_check)
# MDMF variant: with every share deleted, repair must report failure.
1968 def test_mdmf_unrepairable_0shares(self):
1969 d = self.publish_mdmf()
1970 def _delete_all_shares(ign):
1971 shares = self._storage._peers
1972 for peerid in shares:
# NOTE(review): the deletion statement (orig 1973) is elided here.
1974 d.addCallback(_delete_all_shares)
1975 d.addCallback(lambda ign: self._fn.check(Monitor()))
1976 d.addCallback(lambda check_results: self._fn.repair(check_results))
1977 d.addCallback(lambda crr: self.failIf(crr.get_successful()))
# Leave too few shares to reach the recovery threshold; repair must fail.
1981 def test_unrepairable_1share(self):
1982 d = self.publish_one()
1983 def _delete_all_shares(ign):
1984 shares = self._storage._peers
1985 for peerid in shares:
1986 for shnum in list(shares[peerid]):
# NOTE(review): the guard that spares one share (orig 1987) is elided.
1988 del shares[peerid][shnum]
1989 d.addCallback(_delete_all_shares)
1990 d.addCallback(lambda ign: self._fn.check(Monitor()))
1991 d.addCallback(lambda check_results: self._fn.repair(check_results))
# NOTE(review): the "def _check(crr):" header (orig 1992) is elided.
1993 self.failUnlessEqual(crr.get_successful(), False)
1994 d.addCallback(_check)
# MDMF variant of the too-few-shares case; repair must report failure.
1997 def test_mdmf_unrepairable_1share(self):
1998 d = self.publish_mdmf()
1999 def _delete_all_shares(ign):
2000 shares = self._storage._peers
2001 for peerid in shares:
2002 for shnum in list(shares[peerid]):
# NOTE(review): the guard that spares one share (orig 2003) is elided.
2004 del shares[peerid][shnum]
2005 d.addCallback(_delete_all_shares)
2006 d.addCallback(lambda ign: self._fn.check(Monitor()))
2007 d.addCallback(lambda check_results: self._fn.repair(check_results))
# NOTE(review): the "def _check(crr):" header (orig 2008) is elided.
2009 self.failUnlessEqual(crr.get_successful(), False)
2010 d.addCallback(_check)
# Delete shares down to (presumably) five survivors — enough to recover —
# then repair; the repair must succeed. Note: despite the generic name,
# this variant publishes via publish_mdmf().
2013 def test_repairable_5shares(self):
2014 d = self.publish_mdmf()
2015 def _delete_all_shares(ign):
2016 shares = self._storage._peers
2017 for peerid in shares:
2018 for shnum in list(shares[peerid]):
# NOTE(review): the guard choosing which shares survive (orig 2019) is elided.
2020 del shares[peerid][shnum]
2021 d.addCallback(_delete_all_shares)
2022 d.addCallback(lambda ign: self._fn.check(Monitor()))
2023 d.addCallback(lambda check_results: self._fn.repair(check_results))
# NOTE(review): the "def _check(crr):" header (orig 2024) is elided.
2025 self.failUnlessEqual(crr.get_successful(), True)
2026 d.addCallback(_check)
# Delete some shares of an MDMF file, confirm the checker sees it as
# unhealthy-but-recoverable, then repair and confirm success.
2029 def test_mdmf_repairable_5shares(self):
2030 d = self.publish_mdmf()
2031 def _delete_some_shares(ign):
2032 shares = self._storage._peers
2033 for peerid in shares:
2034 for shnum in list(shares[peerid]):
# NOTE(review): the guard choosing which shares survive (orig 2035) is elided.
2036 del shares[peerid][shnum]
2037 d.addCallback(_delete_some_shares)
2038 d.addCallback(lambda ign: self._fn.check(Monitor()))
# NOTE(review): the "def _check(cr):" header (orig 2039) is elided.
2040 self.failIf(cr.is_healthy())
2041 self.failUnless(cr.is_recoverable())
2043 d.addCallback(_check)
2044 d.addCallback(lambda check_results: self._fn.repair(check_results))
# NOTE(review): the "def _check1(crr):" header (orig 2045) is elided.
2046 self.failUnlessEqual(crr.get_successful(), True)
2047 d.addCallback(_check1)
# Two recoverable versions share the same highest seqnum: repair() must
# refuse with MustForceRepairError (leaving shares untouched) until the
# caller passes force=True, after which exactly one version survives at
# seqnum 5 — whichever of the two contenders has the larger roothash.
2051 def test_merge(self):
2052 self.old_shares = []
2053 d = self.publish_multiple()
2054 # repair will refuse to merge multiple highest seqnums unless you
2056 d.addCallback(lambda res:
2057 self._set_versions({0:3,2:3,4:3,6:3,8:3,
2058 1:4,3:4,5:4,7:4,9:4}))
2059 d.addCallback(self.copy_shares)
2060 d.addCallback(lambda res: self._fn.check(Monitor()))
2061 def _try_repair(check_results):
2062 ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2063 d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2064 self._fn.repair, check_results)
2065 d2.addCallback(self.copy_shares)
2066 d2.addCallback(self.failIfSharesChanged)
2067 d2.addCallback(lambda res: check_results)
# NOTE(review): the "return d2" line (orig 2068) is elided from this view.
2069 d.addCallback(_try_repair)
2070 d.addCallback(lambda check_results:
2071 self._fn.repair(check_results, force=True))
2072 # this should give us 10 shares of the highest roothash
2073 def _check_repair_results(rres):
2074 self.failUnless(rres.get_successful())
2076 d.addCallback(_check_repair_results)
2077 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2078 def _check_smap(smap):
2079 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2080 self.failIf(smap.unrecoverable_versions())
2081 # now, which should have won?
2082 roothash_s4a = self.get_roothash_for(3)
2083 roothash_s4b = self.get_roothash_for(4)
2084 if roothash_s4b > roothash_s4a:
2085 expected_contents = self.CONTENTS[4]
# NOTE(review): the "else:" line (orig 2086) is elided from this view.
2087 expected_contents = self.CONTENTS[3]
2088 new_versionid = smap.best_recoverable_version()
2089 self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2090 d2 = self._fn.download_version(smap, new_versionid)
2091 d2.addCallback(self.failUnlessEqual, expected_contents)
2093 d.addCallback(_check_smap)
# Mixed v2/v3 shares do NOT require force=True: repair ignores the older
# v2 and republishes v3's contents as a single recoverable v5.
2096 def test_non_merge(self):
2097 self.old_shares = []
2098 d = self.publish_multiple()
2099 # repair should not refuse a repair that doesn't need to merge. In
2100 # this case, we combine v2 with v3. The repair should ignore v2 and
2101 # copy v3 into a new v5.
2102 d.addCallback(lambda res:
2103 self._set_versions({0:2,2:2,4:2,6:2,8:2,
2104 1:3,3:3,5:3,7:3,9:3}))
2105 d.addCallback(lambda res: self._fn.check(Monitor()))
2106 d.addCallback(lambda check_results: self._fn.repair(check_results))
2107 # this should give us 10 shares of v3
2108 def _check_repair_results(rres):
2109 self.failUnless(rres.get_successful())
2111 d.addCallback(_check_repair_results)
2112 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2113 def _check_smap(smap):
2114 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2115 self.failIf(smap.unrecoverable_versions())
2116 # now, which should have won?
2117 expected_contents = self.CONTENTS[3]
2118 new_versionid = smap.best_recoverable_version()
2119 self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2120 d2 = self._fn.download_version(smap, new_versionid)
2121 d2.addCallback(self.failUnlessEqual, expected_contents)
2123 d.addCallback(_check_smap)
# Unpack the header of the first share found in saved snapshot 'index'
# and (per the comment) yield its roothash.
2126 def get_roothash_for(self, index):
2127 # return the roothash for the first share we see in the saved set
2128 shares = self._copied_shares[index]
2129 for peerid in shares:
2130 for shnum in shares[peerid]:
2131 share = shares[peerid][shnum]
2132 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2133 unpack_header(share)
# NOTE(review): the "return root_hash" line (orig 2134) is elided here.
# Ticket #625: repairing from a readcap is not yet supported, so
# check_and_repair on a read-only node must detect unhealthiness but
# skip the repair attempt entirely.
2136 def test_check_and_repair_readcap(self):
2137 # we can't currently repair from a mutable readcap: #625
2138 self.old_shares = []
2139 d = self.publish_one()
2140 d.addCallback(self.copy_shares)
2141 def _get_readcap(res):
2142 self._fn3 = self._fn.get_readonly()
2143 # also delete some shares
2144 for peerid,shares in self._storage._peers.items():
# NOTE(review): the share-deletion statement (orig 2145) is elided here.
2146 d.addCallback(_get_readcap)
2147 d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2148 def _check_results(crr):
2149 self.failUnless(ICheckAndRepairResults.providedBy(crr))
2150 # we should detect the unhealthy, but skip over mutable-readcap
2151 # repairs until #625 is fixed
2152 self.failIf(crr.get_pre_repair_results().is_healthy())
2153 self.failIf(crr.get_repair_attempted())
2154 self.failIf(crr.get_post_repair_results().is_healthy())
2155 d.addCallback(_check_results)
# A dict subclass that silently discards every write; assigned over the
# nodemaker's node cache elsewhere in this file to disable caching.
2158 class DevNullDictionary(dict):
2159 def __setitem__(self, key, value):
# NOTE(review): the method body (orig 2160, presumably a bare "return")
# is elided from this view.
2162 class MultipleEncodings(unittest.TestCase):
# NOTE(review): the "def setUp(self):" header (orig 2163) is elided; the
# statements below are its body: publish one mutable file into a
# 20-peer FakeStorage grid and stash the node/broker on self.
2164 self.CONTENTS = "New contents go here"
2165 self.uploadable = MutableData(self.CONTENTS)
2166 self._storage = FakeStorage()
2167 self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2168 self._storage_broker = self._nodemaker.storage_broker
2169 d = self._nodemaker.create_mutable_file(self.uploadable)
# NOTE(review): the "_created" callback definition (orig 2170-2171) is
# elided from this view.
2172 d.addCallback(_created)
# Publish 'data' with encoding parameters (k-of-n) through a clone of
# self._fn that shares its keys, into a cleared FakeStorage, so the test
# can capture a complete peerid->shares dict for that encoding.
2175 def _encode(self, k, n, data, version=SDMF_VERSION):
2176 # encode 'data' into a peerid->shares dict.
# NOTE(review): lines orig 2177-2178 (presumably "fn = self._fn") are elided.
2179 # disable the nodecache, since for these tests we explicitly need
2180 # multiple nodes pointing at the same file
2181 self._nodemaker._node_cache = DevNullDictionary()
2182 fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2183 # then we copy over other fields that are normally fetched from the
# NOTE(review): the rest of this comment (orig 2184, "# servermap") is elided.
2185 fn2._pubkey = fn._pubkey
2186 fn2._privkey = fn._privkey
2187 fn2._encprivkey = fn._encprivkey
2188 # and set the encoding parameters to something completely different
2189 fn2._required_shares = k
2190 fn2._total_shares = n
# NOTE(review): lines orig 2191-2192 (presumably "s = self._storage") are elided.
2193 s._peers = {} # clear existing storage
2194 p2 = Publish(fn2, self._storage_broker, None)
2195 uploadable = MutableData(data)
2196 d = p2.publish(uploadable)
2197 def _published(res):
# NOTE(review): the body of _published and the final "return d"
# (orig 2198-2202) are largely elided from this view.
2201 d.addCallback(_published)
# Build (or refresh) a ServerMap for self._fn in the given mode.
2204 def make_servermap(self, mode=MODE_READ, oldmap=None):
# NOTE(review): the "if oldmap is None:" guard (orig 2205) is elided.
2206 oldmap = ServerMap()
2207 smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
# NOTE(review): the remaining constructor args, update() call, and
# return (orig 2208-2210) are elided from this view.
# Encode the same file three ways (3-of-10, 4-of-9, 4-of-7), interleave
# the resulting shares per the 'places' schedule, and confirm download
# latches onto the first recoverable version (contents1).
2212 def test_multiple_encodings(self):
2213 # we encode the same file in two different ways (3-of-10 and 4-of-9),
2214 # then mix up the shares, to make sure that download survives seeing
2215 # a variety of encodings. This is actually kind of tricky to set up.
2217 contents1 = "Contents for encoding 1 (3-of-10) go here"
2218 contents2 = "Contents for encoding 2 (4-of-9) go here"
2219 contents3 = "Contents for encoding 3 (4-of-7) go here"
2221 # we make a retrieval object that doesn't know what encoding
# NOTE(review): continuation of this comment (orig 2222) is elided.
2223 fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2225 # now we upload a file through fn1, and grab its shares
2226 d = self._encode(3, 10, contents1)
2227 def _encoded_1(shares):
2228 self._shares1 = shares
2229 d.addCallback(_encoded_1)
2230 d.addCallback(lambda res: self._encode(4, 9, contents2))
2231 def _encoded_2(shares):
2232 self._shares2 = shares
2233 d.addCallback(_encoded_2)
2234 d.addCallback(lambda res: self._encode(4, 7, contents3))
2235 def _encoded_3(shares):
2236 self._shares3 = shares
2237 d.addCallback(_encoded_3)
# NOTE(review): the "def _merge(res):" header (orig 2239) is elided.
2240 log.msg("merging sharelists")
2241 # we merge the shares from the two sets, leaving each shnum in
2242 # its original location, but using a share from set1 or set2
2243 # according to the following sequence:
# NOTE(review): the share-placement table comment (orig 2244-2253) is elided.
2254 # so that neither form can be recovered until fetch [f], at which
2255 # point version-s1 (the 3-of-10 form) should be recoverable. If
2256 # the implementation latches on to the first version it sees,
2257 # then s2 will be recoverable at fetch [g].
2259 # Later, when we implement code that handles multiple versions,
2260 # we can use this framework to assert that all recoverable
2261 # versions are retrieved, and test that 'epsilon' does its job
2263 places = [2, 2, 3, 2, 1, 1, 1, 2]
# NOTE(review): "sharemap = {}" initialization (orig 2264-2265) is elided.
2266 sb = self._storage_broker
2268 for peerid in sorted(sb.get_all_serverids()):
2269 for shnum in self._shares1.get(peerid, {}):
2270 if shnum < len(places):
2271 which = places[shnum]
# NOTE(review): the "else:" fallback for which (orig 2272-2273) is elided.
2274 self._storage._peers[peerid] = peers = {}
2275 in_1 = shnum in self._shares1[peerid]
2276 in_2 = shnum in self._shares2.get(peerid, {})
2277 in_3 = shnum in self._shares3.get(peerid, {})
# NOTE(review): the "if which == 1:" / "elif which == 2:" /
# "elif which == 3:" guards (orig 2278-2279, 2282-2283, 2286-2287)
# are elided between the assignments below.
2280 peers[shnum] = self._shares1[peerid][shnum]
2281 sharemap[shnum] = peerid
2284 peers[shnum] = self._shares2[peerid][shnum]
2285 sharemap[shnum] = peerid
2288 peers[shnum] = self._shares3[peerid][shnum]
2289 sharemap[shnum] = peerid
2291 # we don't bother placing any other shares
2292 # now sort the sequence so that share 0 is returned first
2293 new_sequence = [sharemap[shnum]
2294 for shnum in sorted(sharemap.keys())]
2295 self._storage._sequence = new_sequence
2296 log.msg("merge done")
2297 d.addCallback(_merge)
2298 d.addCallback(lambda res: fn3.download_best_version())
2299 def _retrieved(new_contents):
2300 # the current specified behavior is "first version recoverable"
2301 self.failUnlessEqual(new_contents, contents1)
2302 d.addCallback(_retrieved)
2306 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
# NOTE(review): the "def setUp(self):" header (orig 2308) is elided; the
# line below is its body, delegating to PublishMixin.publish_multiple().
2309 return self.publish_multiple()
# With mixed versions on the grid, download_best_version must return the
# newest recoverable contents; the servermap must report unrecoverable
# newer versions and flag needs_merge() for parallel same-seqnum writes.
2311 def test_multiple_versions(self):
2312 # if we see a mix of versions in the grid, download_best_version
2313 # should get the latest one
2314 self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2315 d = self._fn.download_best_version()
2316 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2317 # and the checker should report problems
2318 d.addCallback(lambda res: self._fn.check(Monitor()))
2319 d.addCallback(self.check_bad, "test_multiple_versions")
2321 # but if everything is at version 2, that's what we should download
2322 d.addCallback(lambda res:
2323 self._set_versions(dict([(i,2) for i in range(10)])))
2324 d.addCallback(lambda res: self._fn.download_best_version())
2325 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2326 # if exactly one share is at version 3, we should still get v2
2327 d.addCallback(lambda res:
2328 self._set_versions({0:3}))
2329 d.addCallback(lambda res: self._fn.download_best_version())
2330 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2331 # but the servermap should see the unrecoverable version. This
2332 # depends upon the single newer share being queried early.
2333 d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2334 def _check_smap(smap):
2335 self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2336 newer = smap.unrecoverable_newer_versions()
2337 self.failUnlessEqual(len(newer), 1)
2338 verinfo, health = newer.items()[0]
2339 self.failUnlessEqual(verinfo[0], 4)
2340 self.failUnlessEqual(health, (1,3))
2341 self.failIf(smap.needs_merge())
2342 d.addCallback(_check_smap)
2343 # if we have a mix of two parallel versions (s4a and s4b), we could
# NOTE(review): continuation of this comment (orig 2344) is elided.
2345 d.addCallback(lambda res:
2346 self._set_versions({0:3,2:3,4:3,6:3,8:3,
2347 1:4,3:4,5:4,7:4,9:4}))
2348 d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2349 def _check_smap_mixed(smap):
2350 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2351 newer = smap.unrecoverable_newer_versions()
2352 self.failUnlessEqual(len(newer), 0)
2353 self.failUnless(smap.needs_merge())
2354 d.addCallback(_check_smap_mixed)
2355 d.addCallback(lambda res: self._fn.download_best_version())
2356 d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2357 res == self.CONTENTS[4]))
# modify() on a mixed-version grid must replace every share — including
# the single newer outlier — leaving one recoverable version at seqnum 5.
2360 def test_replace(self):
2361 # if we see a mix of versions in the grid, we should be able to
2362 # replace them all with a newer version
2364 # if exactly one share is at version 3, we should download (and
2365 # replace) v2, and the result should be v4. Note that the index we
2366 # give to _set_versions is different than the sequence number.
2367 target = dict([(i,2) for i in range(10)]) # seqnum3
2368 target[0] = 3 # seqnum4
2369 self._set_versions(target)
2371 def _modify(oldversion, servermap, first_time):
2372 return oldversion + " modified"
2373 d = self._fn.modify(_modify)
2374 d.addCallback(lambda res: self._fn.download_best_version())
2375 expected = self.CONTENTS[2] + " modified"
2376 d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2377 # and the servermap should indicate that the outlier was replaced too
2378 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2379 def _check_smap(smap):
2380 self.failUnlessEqual(smap.highest_seqnum(), 5)
2381 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2382 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2383 d.addCallback(_check_smap)
2387 class Utils(unittest.TestCase):
# Exercise the read cache ('c' — presumably a ResponseCache; its
# construction line is elided below): exact-version/shnum matching,
# range reads within and across cached spans, and misses past the end.
2388 def test_cache(self):
# NOTE(review): the cache construction (orig 2389) is elided here.
2390 # xdata = base62.b2a(os.urandom(100))[:100]
2391 xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2392 ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
2393 c.add("v1", 1, 0, xdata)
2394 c.add("v1", 1, 2000, ydata)
2395 self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2396 self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2397 self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2398 self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2399 self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2400 self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2401 self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2402 self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2403 self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
2404 self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2405 self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2406 self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2407 self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2408 self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2409 self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2410 self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2411 self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2412 self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
2414 # test joining fragments
# NOTE(review): a fresh cache construction (orig 2415) is elided here.
2416 c.add("v1", 1, 0, xdata[:10])
2417 c.add("v1", 1, 10, xdata[10:20])
2418 self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
class Exceptions(unittest.TestCase):
    """Sanity checks on the repr() of mutable-file exception types."""
    def test_repr(self):
        # Each exception's repr must mention its class name, so that log
        # output stays self-describing.
        cases = [
            (NeedMoreDataError(100, 50, 100), "NeedMoreDataError"),
            (UncoordinatedWriteError(), "UncoordinatedWriteError"),
        ]
        for exc, name in cases:
            rendered = repr(exc)
            self.failUnless(name in rendered, rendered)
class SameKeyGenerator:
    """A stand-in key generator that always hands back one fixed keypair.

    Used by the Problems tests to pin a mutable file to a predetermined
    storage index.
    """
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # 'keysize' matches the real generator's signature but is ignored.
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
# Foolscap post-call notifier: marks a server wrapper broken (presumably
# the first one called — the guard lines are elided from this view).
2434 class FirstServerGetsKilled:
# NOTE(review): a class attribute (orig 2435, likely "done = False") is elided.
2436 def notify(self, retval, wrapper, methname):
# NOTE(review): the guard around this assignment (orig 2437) is elided.
2438 wrapper.broken = True
# NOTE(review): the remainder of notify (orig 2439-2440) is elided.
# Foolscap post-call notifier: lets the first server's first query succeed,
# then silences that server for a specific remote method so later queries
# behave as if its share vanished.
2442 class FirstServerGetsDeleted:
# NOTE(review): the "def __init__(self):" header (orig 2443-2444) is elided.
2445 self.silenced = None
2446 def notify(self, retval, wrapper, methname):
# NOTE(review): the guard selecting the first wrapper (orig 2447) is elided.
2448 # this query will work, but later queries should think the share
# NOTE(review): continuation of this comment (orig 2449-2450) is elided.
2451 self.silenced = wrapper
# NOTE(review): an intervening guard (orig 2452) is elided.
2453 if wrapper == self.silenced:
2454 assert methname == "slot_testv_and_readv_and_writev"
# NOTE(review): the silenced return value (orig 2455-2456) is elided.
2458 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
# Publish, stash a servermap, publish again behind its back, then attempt
# a write with the stale map: it must fail with UncoordinatedWriteError.
2459 def do_publish_surprise(self, version):
2460 self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
# NOTE(review): the set_up_grid() call (orig 2461) is elided here.
2462 nm = self.g.clients[0].nodemaker
2463 d = nm.create_mutable_file(MutableData("contents 1"),
# NOTE(review): the remaining create args and the "def _created(n):"
# header (orig 2464-2465) are elided; the lines below are its body.
2466 d = defer.succeed(None)
2467 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2468 def _got_smap1(smap):
2469 # stash the old state of the file
# NOTE(review): the "self.old_map = smap" assignment (orig 2470) is elided.
2471 d.addCallback(_got_smap1)
2472 # then modify the file, leaving the old map untouched
2473 d.addCallback(lambda res: log.msg("starting winning write"))
2474 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2475 # now attempt to modify the file with the old servermap. This
2476 # will look just like an uncoordinated write, in which every
2477 # single share got updated between our mapupdate and our publish
2478 d.addCallback(lambda res: log.msg("starting doomed write"))
2479 d.addCallback(lambda res:
2480 self.shouldFail(UncoordinatedWriteError,
2481 "test_publish_surprise", None,
# NOTE(review): the method under test (orig 2482, likely n.upload) is elided.
2483 MutableData("contents 2a"), self.old_map))
# NOTE(review): the inner "return d" (orig 2484) is elided.
2485 d.addCallback(_created)
def test_publish_surprise_sdmf(self):
    """Run the stale-servermap publish scenario against an SDMF file."""
    version = SDMF_VERSION
    return self.do_publish_surprise(version)
def test_publish_surprise_mdmf(self):
    """Run the stale-servermap publish scenario against an MDMF file."""
    version = MDMF_VERSION
    return self.do_publish_surprise(version)
# Retrieving an old version via a stale servermap (after the file changed
# and the cache was cleared) must fail with NotEnoughSharesError.
2494 def test_retrieve_surprise(self):
2495 self.basedir = "mutable/Problems/test_retrieve_surprise"
# NOTE(review): the set_up_grid() call (orig 2496) is elided here.
2497 nm = self.g.clients[0].nodemaker
2498 d = nm.create_mutable_file(MutableData("contents 1"))
# NOTE(review): the "def _created(n):" header (orig 2499) is elided; the
# lines below are its body.
2500 d = defer.succeed(None)
2501 d.addCallback(lambda res: n.get_servermap(MODE_READ))
2502 def _got_smap1(smap):
2503 # stash the old state of the file
# NOTE(review): the "self.old_map = smap" assignment (orig 2504) is elided.
2505 d.addCallback(_got_smap1)
2506 # then modify the file, leaving the old map untouched
2507 d.addCallback(lambda res: log.msg("starting winning write"))
2508 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2509 # now attempt to retrieve the old version with the old servermap.
2510 # This will look like someone has changed the file since we
2511 # updated the servermap.
2512 d.addCallback(lambda res: n._cache._clear())
2513 d.addCallback(lambda res: log.msg("starting doomed read"))
2514 d.addCallback(lambda res:
2515 self.shouldFail(NotEnoughSharesError,
2516 "test_retrieve_surprise",
2517 "ran out of servers: have 0 of 1",
# NOTE(review): the download call and its remaining args (orig 2518-2519,
# 2521-2522) are elided around the line below.
2520 self.old_map.best_recoverable_version(),
2523 d.addCallback(_created)
# Shares that appear on a new server between mapupdate and publish must
# make the stale-map publish fail with UncoordinatedWriteError.
2527 def test_unexpected_shares(self):
2528 # upload the file, take a servermap, shut down one of the servers,
2529 # upload it again (causing shares to appear on a new server), then
2530 # upload using the old servermap. The last upload should fail with an
2531 # UncoordinatedWriteError, because of the shares that didn't appear
# NOTE(review): continuation of this comment (orig 2532) is elided.
2533 self.basedir = "mutable/Problems/test_unexpected_shares"
# NOTE(review): the set_up_grid() call (orig 2534) is elided here.
2535 nm = self.g.clients[0].nodemaker
2536 d = nm.create_mutable_file(MutableData("contents 1"))
# NOTE(review): the "def _created(n):" header (orig 2537) is elided; the
# lines below are its body.
2538 d = defer.succeed(None)
2539 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2540 def _got_smap1(smap):
2541 # stash the old state of the file
# NOTE(review): the "self.old_map = smap" assignment (orig 2542) is elided.
2543 # now shut down one of the servers
2544 peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
2545 self.g.remove_server(peer0)
2546 # then modify the file, leaving the old map untouched
2547 log.msg("starting winning write")
2548 return n.overwrite(MutableData("contents 2"))
2549 d.addCallback(_got_smap1)
2550 # now attempt to modify the file with the old servermap. This
2551 # will look just like an uncoordinated write, in which every
2552 # single share got updated between our mapupdate and our publish
2553 d.addCallback(lambda res: log.msg("starting doomed write"))
2554 d.addCallback(lambda res:
2555 self.shouldFail(UncoordinatedWriteError,
2556 "test_surprise", None,
# NOTE(review): the method under test (orig 2557, likely n.upload) is elided.
2558 MutableData("contents 2a"), self.old_map))
# NOTE(review): the inner "return d" (orig 2559) is elided.
2560 d.addCallback(_created)
# Remove a server, overwrite (pushing extra copies elsewhere), restore the
# server, overwrite again: the final servermap must show exactly one
# recoverable version, i.e. all duplicate placements were updated.
2563 def test_multiply_placed_shares(self):
2564 self.basedir = "mutable/Problems/test_multiply_placed_shares"
# NOTE(review): the set_up_grid() call (orig 2565) is elided here.
2566 nm = self.g.clients[0].nodemaker
2567 d = nm.create_mutable_file(MutableData("contents 1"))
2568 # remove one of the servers and reupload the file.
# NOTE(review): the "def _created(n):" header and the "self._node = n"
# assignment (orig 2569-2571) are elided; the lines below are its body.
2572 servers = self.g.get_all_serverids()
2573 self.ss = self.g.remove_server(servers[len(servers)-1])
2575 new_server = self.g.make_server(len(servers)-1)
2576 self.g.add_server(len(servers)-1, new_server)
2578 return self._node.download_best_version()
2579 d.addCallback(_created)
2580 d.addCallback(lambda data: MutableData(data))
2581 d.addCallback(lambda data: self._node.overwrite(data))
2583 # restore the server we removed earlier, then download+upload
# NOTE(review): continuation of this comment (orig 2584) is elided.
2585 def _overwritten(ign):
2586 self.g.add_server(len(self.g.servers_by_number), self.ss)
2587 return self._node.download_best_version()
2588 d.addCallback(_overwritten)
2589 d.addCallback(lambda data: MutableData(data))
2590 d.addCallback(lambda data: self._node.overwrite(data))
2591 d.addCallback(lambda ignored:
2592 self._node.get_servermap(MODE_CHECK))
2593 def _overwritten_again(smap):
2594 # Make sure that all shares were updated by making sure that
2595 # there aren't any other versions in the sharemap.
2596 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2597 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2598 d.addCallback(_overwritten_again)
# Break the server that permutes first for this storage index before the
# initial publish, then break a second server before an update: both the
# publish and the overwrite must still succeed via alternate servers.
2601 def test_bad_server(self):
2602 # Break one server, then create the file: the initial publish should
2603 # complete with an alternate server. Breaking a second server should
2604 # not prevent an update from succeeding either.
2605 self.basedir = "mutable/Problems/test_bad_server"
# NOTE(review): the set_up_grid() call (orig 2606) is elided here.
2607 nm = self.g.clients[0].nodemaker
2609 # to make sure that one of the initial peers is broken, we have to
2610 # get creative. We create an RSA key and compute its storage-index.
2611 # Then we make a KeyGenerator that always returns that one key, and
2612 # use it to create the mutable file. This will get easier when we can
2613 # use #467 static-server-selection to disable permutation and force
2614 # the choice of server for share[0].
2616 d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2617 def _got_key( (pubkey, privkey) ):
2618 nm.key_generator = SameKeyGenerator(pubkey, privkey)
2619 pubkey_s = pubkey.serialize()
2620 privkey_s = privkey.serialize()
2621 u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2622 ssk_pubkey_fingerprint_hash(pubkey_s))
2623 self._storage_index = u.get_storage_index()
2624 d.addCallback(_got_key)
2625 def _break_peer0(res):
2626 si = self._storage_index
2627 servers = nm.storage_broker.get_servers_for_psi(si)
2628 self.g.break_server(servers[0].get_serverid())
2629 self.server1 = servers[1]
2630 d.addCallback(_break_peer0)
2631 # now "create" the file, using the pre-established key, and let the
2632 # initial publish finally happen
2633 d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2634 # that ought to work
# NOTE(review): the "def _got_node(n):" header (orig 2635) is elided; the
# lines below are its body.
2636 d = n.download_best_version()
2637 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2638 # now break the second peer
2639 def _break_peer1(res):
2640 self.g.break_server(self.server1.get_serverid())
2641 d.addCallback(_break_peer1)
2642 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2643 # that ought to work too
2644 d.addCallback(lambda res: n.download_best_version())
2645 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2646 def _explain_error(f):
# NOTE(review): a print of the failure (orig 2647) is elided here.
2648 if f.check(NotEnoughServersError):
2649 print "first_error:", f.value.first_error
# NOTE(review): the "return f" re-raise (orig 2650) is elided here.
2651 d.addErrback(_explain_error)
# NOTE(review): the inner "return d" (orig 2652) is elided here.
2653 d.addCallback(_got_node)
# Like test_bad_server but on a small grid with no spare servers: the
# publisher must reuse a server it already placed a share on, without
# misreading that as an uncoordinated write.
2656 def test_bad_server_overlap(self):
2657 # like test_bad_server, but with no extra unused servers to fall back
2658 # upon. This means that we must re-use a server which we've already
2659 # used. If we don't remember the fact that we sent them one share
2660 # already, we'll mistakenly think we're experiencing an
2661 # UncoordinatedWriteError.
2663 # Break one server, then create the file: the initial publish should
2664 # complete with an alternate server. Breaking a second server should
2665 # not prevent an update from succeeding either.
2666 self.basedir = "mutable/Problems/test_bad_server_overlap"
# NOTE(review): the set_up_grid() call (orig 2667) is elided here.
2668 nm = self.g.clients[0].nodemaker
2669 sb = nm.storage_broker
2671 peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2672 self.g.break_server(peerids[0])
2674 d = nm.create_mutable_file(MutableData("contents 1"))
# NOTE(review): the "def _created(n):" header (orig 2675) is elided; the
# lines below are its body.
2676 d = n.download_best_version()
2677 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2678 # now break one of the remaining servers
2679 def _break_second_server(res):
2680 self.g.break_server(peerids[1])
2681 d.addCallback(_break_second_server)
2682 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2683 # that ought to work too
2684 d.addCallback(lambda res: n.download_best_version())
2685 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
# NOTE(review): the inner "return d" (orig 2686) is elided here.
2687 d.addCallback(_created)
# With every connected server marked broken, create_mutable_file must
# fail with NotEnoughServersError ("ran out of good servers").
2690 def test_publish_all_servers_bad(self):
2691 # Break all servers: the publish should fail
2692 self.basedir = "mutable/Problems/test_publish_all_servers_bad"
# NOTE(review): the set_up_grid() call (orig 2693) is elided here.
2694 nm = self.g.clients[0].nodemaker
2695 for s in nm.storage_broker.get_connected_servers():
2696 s.get_rref().broken = True
2698 d = self.shouldFail(NotEnoughServersError,
2699 "test_publish_all_servers_bad",
2700 "ran out of good servers",
2701 nm.create_mutable_file, MutableData("contents"))
# NOTE(review): the trailing "return d" (orig 2702) is elided here.
# With zero servers on the grid, create_mutable_file must fail with
# NotEnoughServersError ("Ran out of non-bad servers").
2704 def test_publish_no_servers(self):
2705 # no servers at all: the publish should fail
2706 self.basedir = "mutable/Problems/test_publish_no_servers"
2707 self.set_up_grid(num_servers=0)
2708 nm = self.g.clients[0].nodemaker
2710 d = self.shouldFail(NotEnoughServersError,
2711 "test_publish_no_servers",
2712 "Ran out of non-bad servers",
2713 nm.create_mutable_file, MutableData("contents"))
# NOTE(review): the trailing "return d" (orig 2714) is elided here.
# Make the first-queried server start erroring after its first response:
# a MODE_WRITE mapupdate from a privkey-less node must still succeed by
# fetching the encprivkey from another share (_privkey_query_failed path).
2717 def test_privkey_query_error(self):
2718 # when a servermap is updated with MODE_WRITE, it tries to get the
2719 # privkey. Something might go wrong during this query attempt.
2720 # Exercise the code in _privkey_query_failed which tries to handle
# NOTE(review): continuation of this comment (orig 2721) is elided.
2722 self.basedir = "mutable/Problems/test_privkey_query_error"
2723 self.set_up_grid(num_servers=20)
2724 nm = self.g.clients[0].nodemaker
2725 nm._node_cache = DevNullDictionary() # disable the nodecache
2727 # we need some contents that are large enough to push the privkey out
2728 # of the early part of the file
2729 LARGE = "These are Larger contents" * 2000 # about 50KB
2730 LARGE_uploadable = MutableData(LARGE)
2731 d = nm.create_mutable_file(LARGE_uploadable)
# NOTE(review): the "def _created(n):" header (orig 2732) is elided; the
# lines below are its body.
2733 self.uri = n.get_uri()
2734 self.n2 = nm.create_from_cap(self.uri)
2736 # When a mapupdate is performed on a node that doesn't yet know
2737 # the privkey, a short read is sent to a batch of servers, to get
2738 # the verinfo and (hopefully, if the file is short enough) the
2739 # encprivkey. Our file is too large to let this first read
2740 # contain the encprivkey. Each non-encprivkey-bearing response
2741 # that arrives (until the node gets the encprivkey) will trigger
2742 # a second read to specifically read the encprivkey.
2744 # So, to exercise this case:
2745 # 1. notice which server gets a read() call first
2746 # 2. tell that server to start throwing errors
2747 killer = FirstServerGetsKilled()
2748 for s in nm.storage_broker.get_connected_servers():
2749 s.get_rref().post_call_notifier = killer.notify
2750 d.addCallback(_created)
2752 # now we update a servermap from a new node (which doesn't have the
2753 # privkey yet, forcing it to use a separate privkey query). Note that
2754 # the map-update will succeed, since we'll just get a copy from one
2755 # of the other shares.
2756 d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
# NOTE(review): the trailing "return d" (orig 2757-2758) is elided here.
# Like test_privkey_query_error, but the first server's shares vanish
# (FirstServerGetsDeleted) instead of erroring; the MODE_WRITE mapupdate
# must still succeed from the remaining shares.
2760 def test_privkey_query_missing(self):
2761 # like test_privkey_query_error, but the shares are deleted by the
2762 # second query, instead of raising an exception.
2763 self.basedir = "mutable/Problems/test_privkey_query_missing"
2764 self.set_up_grid(num_servers=20)
2765 nm = self.g.clients[0].nodemaker
2766 LARGE = "These are Larger contents" * 2000 # about 50KiB
2767 LARGE_uploadable = MutableData(LARGE)
2768 nm._node_cache = DevNullDictionary() # disable the nodecache
2770 d = nm.create_mutable_file(LARGE_uploadable)
# NOTE(review): the "def _created(n):" header (orig 2771) is elided; the
# lines below are its body.
2772 self.uri = n.get_uri()
2773 self.n2 = nm.create_from_cap(self.uri)
2774 deleter = FirstServerGetsDeleted()
2775 for s in nm.storage_broker.get_connected_servers():
2776 s.get_rref().post_call_notifier = deleter.notify
2777 d.addCallback(_created)
2778 d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
# NOTE(review): the trailing "return d" (orig 2779) is elided here.
# NOTE(review): sampled excerpt -- lines 2795-2796 (presumably
# "def _created(n): self._node = n"), 2812, and 2817+ (presumably
# "return d") are missing from view; confirm against the full source.
2782 def test_block_and_hash_query_error(self):
2783 # This tests for what happens when a query to a remote server
2784 # fails in either the hash validation step or the block getting
2785 # step (because of batching, this is the same actual query).
2786 # We need to have the storage server persist up until the point
2787 # that its prefix is validated, then suddenly die. This
2788 # exercises some exception handling code in Retrieve.
2789 self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2790 self.set_up_grid(num_servers=20)
2791 nm = self.g.clients[0].nodemaker
2792 CONTENTS = "contents" * 2000
2793 CONTENTS_uploadable = MutableData(CONTENTS)
2794 d = nm.create_mutable_file(CONTENTS_uploadable)
2797 d.addCallback(_created)
2798 d.addCallback(lambda ignored:
2799 self._node.get_servermap(MODE_READ))
2800 def _then(servermap):
2801 # we have our servermap. Now we set up the servers like the
2802 # tests above -- the first one that gets a read call should
2803 # start throwing errors, but only after returning its prefix
2804 # for validation. Since we'll download without fetching the
2805 # private key, the next query to the remote server will be
2806 # for either a block and salt or for hashes, either of which
2807 # will exercise the error handling code.
2808 killer = FirstServerGetsKilled()
2809 for s in nm.storage_broker.get_connected_servers():
2810 s.get_rref().post_call_notifier = killer.notify
2811 ver = servermap.best_recoverable_version()
2813 return self._node.download_version(servermap, ver)
2814 d.addCallback(_then)
# the download must still succeed (and match) despite the killed server.
2815 d.addCallback(lambda data:
2816 self.failUnlessEqual(data, CONTENTS))
# NOTE(review): sampled excerpt -- lines 2826, 2831, 2839-2840 (presumably
# the data argument closing the fileutil.write() call) and 2855+ (presumably
# "return d") are missing from view; confirm against the full source.
2819 def test_1654(self):
2820 # test that the Retrieve object unconditionally verifies the block
2821 # hash tree root for mutable shares. The failure mode is that
2822 # carefully crafted shares can cause undetected corruption (the
2823 # retrieve appears to finish successfully, but the result is
2824 # corrupted). When fixed, these shares always cause a
2825 # CorruptShareError, which results in NotEnoughSharesError in this
2827 self.basedir = "mutable/Problems/test_1654"
2828 self.set_up_grid(num_servers=2)
2829 cap = uri.from_string(TEST_1654_CAP)
2830 si = cap.get_storage_index()
# plant the two pre-built (deliberately corrupted) shares directly into
# the servers' share directories, bypassing the upload path.
2832 for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
2833 sharedata = base64.b64decode(share)
2834 storedir = self.get_serverdir(shnum)
2835 storage_path = os.path.join(storedir, "shares",
2836 storage_index_to_dir(si))
2837 fileutil.make_dirs(storage_path)
2838 fileutil.write(os.path.join(storage_path, "%d" % shnum),
2841 nm = self.g.clients[0].nodemaker
2842 n = nm.create_from_cap(TEST_1654_CAP)
2843 # to exercise the problem correctly, we must ensure that sh0 is
2844 # processed first, and sh1 second. NoNetworkGrid has facilities to
2845 # stall the first request from a single server, but it's not
2846 # currently easy to extend that to stall the second request (mutable
2847 # retrievals will see two: first the mapupdate, then the fetch).
2848 # However, repeated executions of this run without the #1654 fix
2849 # suggests that we're failing reliably even without explicit stalls,
2850 # probably because the servers are queried in a fixed order. So I'm
2851 # ok with relying upon that.
2852 d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
2853 "ran out of servers",
2854 n.download_best_version)
# Fixture data for test_1654: a write cap plus two base64-encoded,
# deliberately crafted mutable shares (sh0 and sh1) that reproduce the
# undetected-corruption failure mode of ticket #1654. The payloads must
# stay byte-for-byte identical -- any edit invalidates the test.
2858 TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"
2860 TEST_1654_SH0 = """\
2861 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
2862 AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
2863 AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
2864 UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
2865 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2866 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2867 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2868 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2869 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2870 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2871 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2872 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2873 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2874 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2875 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2876 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2877 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2878 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2879 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2880 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2881 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2882 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2883 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2884 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2885 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
2886 bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
2887 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2888 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2889 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2890 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2891 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2892 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2893 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2894 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2895 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2896 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2897 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2898 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2899 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2900 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2901 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2902 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2903 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2904 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2905 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2906 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2907 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2908 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2909 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2910 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2911 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2912 72mXGlqyLyWYuAAAAAA="""
2914 TEST_1654_SH1 = """\
2915 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
2916 9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
2917 AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
2918 MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
2919 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2920 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2921 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2922 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2923 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2924 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2925 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2926 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2927 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2928 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2929 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2930 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2931 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2932 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2933 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2934 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2935 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2936 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2937 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2938 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2939 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
2940 98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
2941 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2942 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2943 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2944 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2945 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2946 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2947 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2948 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2949 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2950 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2951 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2952 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2953 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2954 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2955 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2956 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2957 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2958 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2959 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2960 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2961 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2962 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2963 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2964 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2965 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2966 72mXGlqyLyWYuAAAAAA="""
# Tests for MutableFileHandle: an uploadable wrapper around a file-like
# object (StringIO here). NOTE(review): sampled excerpt -- the setUp def
# line (2970) and several interior lines (e.g. 2978 "chunk_size = 10",
# 2982 "start = i", 3022/3024 f.close()/seek lines) are missing from view.
2969 class FileHandle(unittest.TestCase):
2971 self.test_data = "Test Data" * 50000
2972 self.sio = StringIO(self.test_data)
2973 self.uploadable = MutableFileHandle(self.sio)
# reading in fixed-size chunks must return exactly the corresponding
# slice of the underlying data.
2976 def test_filehandle_read(self):
2977 self.basedir = "mutable/FileHandle/test_filehandle_read"
2979 for i in xrange(0, len(self.test_data), chunk_size):
2980 data = self.uploadable.read(chunk_size)
2981 data = "".join(data)
2983 end = i + chunk_size
2984 self.failUnlessEqual(data, self.test_data[start:end])
2987 def test_filehandle_get_size(self):
2988 self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2989 actual_size = len(self.test_data)
2990 size = self.uploadable.get_size()
2991 self.failUnlessEqual(size, actual_size)
2994 def test_filehandle_get_size_out_of_order(self):
2995 # We should be able to call get_size whenever we want without
2996 # disturbing the location of the seek pointer.
2998 data = self.uploadable.read(chunk_size)
2999 self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
3002 size = self.uploadable.get_size()
3003 self.failUnlessEqual(size, len(self.test_data))
3005 # Now get more data. We should be right where we left off.
3006 more_data = self.uploadable.read(chunk_size)
3008 end = chunk_size * 2
3009 self.failUnlessEqual("".join(more_data), self.test_data[start:end])
3012 def test_filehandle_file(self):
3013 # Make sure that the MutableFileHandle works on a file as well
3014 # as a StringIO object, since in some cases it will be asked to
3016 self.basedir = self.mktemp()
3017 # necessary? What am I doing wrong here?
3018 os.mkdir(self.basedir)
3019 f_path = os.path.join(self.basedir, "test_file")
3020 f = open(f_path, "w")
3021 f.write(self.test_data)
3023 f = open(f_path, "r")
3025 uploadable = MutableFileHandle(f)
3027 data = uploadable.read(len(self.test_data))
3028 self.failUnlessEqual("".join(data), self.test_data)
3029 size = uploadable.get_size()
3030 self.failUnlessEqual(size, len(self.test_data))
# closing the uploadable must close the wrapped handle too.
3033 def test_close(self):
3034 # Make sure that the MutableFileHandle closes its handle when
3036 self.uploadable.close()
3037 self.failUnless(self.sio.closed)
# Tests for MutableData: an uploadable wrapper around an in-memory string;
# mirrors the FileHandle tests above. NOTE(review): sampled excerpt -- the
# setUp def line (3041) and interior lines such as "chunk_size = 10" and
# "start = i" are missing from view.
3040 class DataHandle(unittest.TestCase):
3042 self.test_data = "Test Data" * 50000
3043 self.uploadable = MutableData(self.test_data)
3046 def test_datahandle_read(self):
3048 for i in xrange(0, len(self.test_data), chunk_size):
3049 data = self.uploadable.read(chunk_size)
3050 data = "".join(data)
3052 end = i + chunk_size
3053 self.failUnlessEqual(data, self.test_data[start:end])
3056 def test_datahandle_get_size(self):
3057 actual_size = len(self.test_data)
3058 size = self.uploadable.get_size()
3059 self.failUnlessEqual(size, actual_size)
3062 def test_datahandle_get_size_out_of_order(self):
3063 # We should be able to call get_size whenever we want without
3064 # disturbing the location of the seek pointer.
3066 data = self.uploadable.read(chunk_size)
3067 self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
3070 size = self.uploadable.get_size()
3071 self.failUnlessEqual(size, len(self.test_data))
3073 # Now get more data. We should be right where we left off.
3074 more_data = self.uploadable.read(chunk_size)
3076 end = chunk_size * 2
3077 self.failUnlessEqual("".join(more_data), self.test_data[start:end])
# Grid tests exercising the IMutableFileVersion API for both SDMF and MDMF
# files: upload helpers, version/sequence-number checks, cap round-trips,
# overwrite/modify, per-version download, partial reads, and the debug CLI.
# NOTE(review): sampled excerpt -- the class-header continuation (3081-3082,
# presumably listing the remaining mixin base classes), several "def _then"
# / "return d" lines, and other interior lines are missing from view;
# confirm against the full source before editing.
3080 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
3083 GridTestMixin.setUp(self)
3084 self.basedir = self.mktemp()
3086 self.c = self.g.clients[0]
3087 self.nm = self.c.nodemaker
3088 self.data = "test data" * 100000 # about 900 KiB; MDMF
3089 self.small_data = "test data" * 10 # about 90 B; SDMF
# upload helpers: each stores the created node on self (mdmf_node /
# sdmf_node, per the attribute reads elsewhere in this class).
3092 def do_upload_mdmf(self):
3093 d = self.nm.create_mutable_file(MutableData(self.data),
3094 version=MDMF_VERSION)
3096 assert isinstance(n, MutableFileNode)
3097 assert n._protocol_version == MDMF_VERSION
3100 d.addCallback(_then)
3103 def do_upload_sdmf(self):
3104 d = self.nm.create_mutable_file(MutableData(self.small_data))
3106 assert isinstance(n, MutableFileNode)
3107 assert n._protocol_version == SDMF_VERSION
3110 d.addCallback(_then)
3113 def do_upload_empty_sdmf(self):
3114 d = self.nm.create_mutable_file(MutableData(""))
3116 assert isinstance(n, MutableFileNode)
3117 self.sdmf_zero_length_node = n
3118 assert n._protocol_version == SDMF_VERSION
3120 d.addCallback(_then)
3123 def do_upload(self):
3124 d = self.do_upload_mdmf()
3125 d.addCallback(lambda ign: self.do_upload_sdmf())
# exercise the "tahoe debug" CLI tools (find-shares, dump-share,
# catalog-shares) against a freshly uploaded MDMF file.
3128 def test_debug(self):
3129 d = self.do_upload_mdmf()
3131 fso = debug.FindSharesOptions()
3132 storage_index = base32.b2a(n.get_storage_index())
3133 fso.si_s = storage_index
3134 fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
3136 in self.iterate_servers()]
3137 fso.stdout = StringIO()
3138 fso.stderr = StringIO()
3139 debug.find_shares(fso)
3140 sharefiles = fso.stdout.getvalue().splitlines()
3141 expected = self.nm.default_encoding_parameters["n"]
3142 self.failUnlessEqual(len(sharefiles), expected)
3144 do = debug.DumpOptions()
3145 do["filename"] = sharefiles[0]
3146 do.stdout = StringIO()
3147 debug.dump_share(do)
3148 output = do.stdout.getvalue()
3149 lines = set(output.splitlines())
3150 self.failUnless("Mutable slot found:" in lines, output)
3151 self.failUnless(" share_type: MDMF" in lines, output)
3152 self.failUnless(" num_extra_leases: 0" in lines, output)
3153 self.failUnless(" MDMF contents:" in lines, output)
3154 self.failUnless(" seqnum: 1" in lines, output)
3155 self.failUnless(" required_shares: 3" in lines, output)
3156 self.failUnless(" total_shares: 10" in lines, output)
3157 self.failUnless(" segsize: 131073" in lines, output)
3158 self.failUnless(" datalen: %d" % len(self.data) in lines, output)
3159 vcap = n.get_verify_cap().to_string()
3160 self.failUnless(" verify-cap: %s" % vcap in lines, output)
3162 cso = debug.CatalogSharesOptions()
3163 cso.nodedirs = fso.nodedirs
3164 cso.stdout = StringIO()
3165 cso.stderr = StringIO()
3166 debug.catalog_shares(cso)
3167 shares = cso.stdout.getvalue().splitlines()
3168 oneshare = shares[0] # all shares should be MDMF
3169 self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3170 self.failUnless(oneshare.startswith("MDMF"), oneshare)
3171 fields = oneshare.split()
3172 self.failUnlessEqual(fields[0], "MDMF")
3173 self.failUnlessEqual(fields[1], storage_index)
3174 self.failUnlessEqual(fields[2], "3/10")
3175 self.failUnlessEqual(fields[3], "%d" % len(self.data))
3176 self.failUnless(fields[4].startswith("#1:"), fields[3])
3177 # the rest of fields[4] is the roothash, which depends upon
3178 # encryption salts and is not constant. fields[5] is the
3179 # remaining time on the longest lease, which is timing dependent.
3180 # The rest of the line is the quoted pathname to the share.
3181 d.addCallback(_debug)
# sequence number starts at 1 and bumps to 2 after one overwrite,
# for both MDMF and SDMF.
3184 def test_get_sequence_number(self):
3185 d = self.do_upload()
3186 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3187 d.addCallback(lambda bv:
3188 self.failUnlessEqual(bv.get_sequence_number(), 1))
3189 d.addCallback(lambda ignored:
3190 self.sdmf_node.get_best_readable_version())
3191 d.addCallback(lambda bv:
3192 self.failUnlessEqual(bv.get_sequence_number(), 1))
3193 # Now update. The sequence number in both cases should be 1 in
3195 def _do_update(ignored):
3196 new_data = MutableData("foo bar baz" * 100000)
3197 new_small_data = MutableData("foo bar baz" * 10)
3198 d1 = self.mdmf_node.overwrite(new_data)
3199 d2 = self.sdmf_node.overwrite(new_small_data)
3200 dl = gatherResults([d1, d2])
3202 d.addCallback(_do_update)
3203 d.addCallback(lambda ignored:
3204 self.mdmf_node.get_best_readable_version())
3205 d.addCallback(lambda bv:
3206 self.failUnlessEqual(bv.get_sequence_number(), 2))
3207 d.addCallback(lambda ignored:
3208 self.sdmf_node.get_best_readable_version())
3209 d.addCallback(lambda bv:
3210 self.failUnlessEqual(bv.get_sequence_number(), 2))
3214 def test_cap_after_upload(self):
3215 # If we create a new mutable file and upload things to it, and
3216 # it's an MDMF file, we should get an MDMF cap back from that
3217 # file and should be able to use that.
3218 # That's essentially what MDMF node is, so just check that.
3219 d = self.do_upload_mdmf()
3221 mdmf_uri = self.mdmf_node.get_uri()
3222 cap = uri.from_string(mdmf_uri)
3223 self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3224 readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3225 cap = uri.from_string(readonly_mdmf_uri)
3226 self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3227 d.addCallback(_then)
3230 def test_mutable_version(self):
3231 # assert that getting parameters from the IMutableVersion object
3232 # gives us the same data as getting them from the filenode itself
3233 d = self.do_upload()
3234 d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3235 def _check_mdmf(bv):
3237 self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3238 self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3239 self.failIf(bv.is_readonly())
3240 d.addCallback(_check_mdmf)
3241 d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3242 def _check_sdmf(bv):
3244 self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3245 self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3246 self.failIf(bv.is_readonly())
3247 d.addCallback(_check_sdmf)
3251 def test_get_readonly_version(self):
3252 d = self.do_upload()
3253 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3254 d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3256 # Attempting to get a mutable version of a mutable file from a
3257 # filenode initialized with a readcap should return a readonly
3258 # version of that same node.
3259 d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3260 d.addCallback(lambda ro: ro.get_best_mutable_version())
3261 d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3263 d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3264 d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3266 d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3267 d.addCallback(lambda ro: ro.get_best_mutable_version())
3268 d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3272 def test_toplevel_overwrite(self):
3273 new_data = MutableData("foo bar baz" * 100000)
3274 new_small_data = MutableData("foo bar baz" * 10)
3275 d = self.do_upload()
3276 d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3277 d.addCallback(lambda ignored:
3278 self.mdmf_node.download_best_version())
3279 d.addCallback(lambda data:
3280 self.failUnlessEqual(data, "foo bar baz" * 100000))
3281 d.addCallback(lambda ignored:
3282 self.sdmf_node.overwrite(new_small_data))
3283 d.addCallback(lambda ignored:
3284 self.sdmf_node.download_best_version())
3285 d.addCallback(lambda data:
3286 self.failUnlessEqual(data, "foo bar baz" * 10))
3290 def test_toplevel_modify(self):
3291 d = self.do_upload()
3292 def modifier(old_contents, servermap, first_time):
3293 return old_contents + "modified"
3294 d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3295 d.addCallback(lambda ignored:
3296 self.mdmf_node.download_best_version())
3297 d.addCallback(lambda data:
3298 self.failUnlessIn("modified", data))
3299 d.addCallback(lambda ignored:
3300 self.sdmf_node.modify(modifier))
3301 d.addCallback(lambda ignored:
3302 self.sdmf_node.download_best_version())
3303 d.addCallback(lambda data:
3304 self.failUnlessIn("modified", data))
# NOTE(review): test_version_modify is currently identical in body to
# test_toplevel_modify (see its own TODO); kept as-is.
3308 def test_version_modify(self):
3309 # TODO: When we can publish multiple versions, alter this test
3310 # to modify a version other than the best usable version, then
3311 # test to see that the best recoverable version is that.
3312 d = self.do_upload()
3313 def modifier(old_contents, servermap, first_time):
3314 return old_contents + "modified"
3315 d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3316 d.addCallback(lambda ignored:
3317 self.mdmf_node.download_best_version())
3318 d.addCallback(lambda data:
3319 self.failUnlessIn("modified", data))
3320 d.addCallback(lambda ignored:
3321 self.sdmf_node.modify(modifier))
3322 d.addCallback(lambda ignored:
3323 self.sdmf_node.download_best_version())
3324 d.addCallback(lambda data:
3325 self.failUnlessIn("modified", data))
3329 def test_download_version(self):
3330 d = self.publish_multiple()
3331 # We want to have two recoverable versions on the grid.
3332 d.addCallback(lambda res:
3333 self._set_versions({0:0,2:0,4:0,6:0,8:0,
3334 1:1,3:1,5:1,7:1,9:1}))
3335 # Now try to download each version. We should get the plaintext
3336 # associated with that version.
3337 d.addCallback(lambda ignored:
3338 self._fn.get_servermap(mode=MODE_READ))
3339 def _got_servermap(smap):
3340 versions = smap.recoverable_versions()
3341 assert len(versions) == 2
3343 self.servermap = smap
3344 self.version1, self.version2 = versions
3345 assert self.version1 != self.version2
3347 self.version1_seqnum = self.version1[0]
3348 self.version2_seqnum = self.version2[0]
3349 self.version1_index = self.version1_seqnum - 1
3350 self.version2_index = self.version2_seqnum - 1
3352 d.addCallback(_got_servermap)
3353 d.addCallback(lambda ignored:
3354 self._fn.download_version(self.servermap, self.version1))
3355 d.addCallback(lambda results:
3356 self.failUnlessEqual(self.CONTENTS[self.version1_index],
3358 d.addCallback(lambda ignored:
3359 self._fn.download_version(self.servermap, self.version2))
3360 d.addCallback(lambda results:
3361 self.failUnlessEqual(self.CONTENTS[self.version2_index],
3366 def test_download_nonexistent_version(self):
3367 d = self.do_upload_mdmf()
3368 d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3369 def _set_servermap(servermap):
3370 self.servermap = servermap
3371 d.addCallback(_set_servermap)
3372 d.addCallback(lambda ignored:
3373 self.shouldFail(UnrecoverableFileError, "nonexistent version",
3375 self.mdmf_node.download_version, self.servermap,
# partial reads at/around segment boundaries of an MDMF file (the
# 128 KiB segment size appears in the offset arithmetic below).
3380 def test_partial_read(self):
3381 d = self.do_upload_mdmf()
3382 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3383 modes = [("start_on_segment_boundary",
3384 mathutil.next_multiple(128 * 1024, 3), 50),
3385 ("ending_one_byte_after_segment_boundary",
3386 mathutil.next_multiple(128 * 1024, 3)-50, 51),
3387 ("zero_length_at_start", 0, 0),
3388 ("zero_length_in_middle", 50, 0),
3389 ("zero_length_at_segment_boundary",
3390 mathutil.next_multiple(128 * 1024, 3), 0),
3392 for (name, offset, length) in modes:
3393 d.addCallback(self._do_partial_read, name, offset, length)
3394 # then read only a few bytes at a time, and see that the results are
3396 def _read_data(version):
3397 c = consumer.MemoryConsumer()
3398 d2 = defer.succeed(None)
3399 for i in xrange(0, len(self.data), 10000):
3400 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3401 d2.addCallback(lambda ignored:
3402 self.failUnlessEqual(self.data, "".join(c.chunks)))
3404 d.addCallback(_read_data)
# helper for test_partial_read: reads [offset, offset+length) and
# compares against the expected slice, printing a diff hint on failure.
3406 def _do_partial_read(self, version, name, offset, length):
3407 c = consumer.MemoryConsumer()
3408 d = version.read(c, offset, length)
3409 expected = self.data[offset:offset+length]
3410 d.addCallback(lambda ignored: "".join(c.chunks))
3411 def _check(results):
3412 if results != expected:
3414 print "got: %s ... %s" % (results[:20], results[-20:])
3415 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3416 self.fail("results[%s] != expected" % name)
3417 return version # daisy-chained to next call
3418 d.addCallback(_check)
# download the same contents two ways (version.read() and
# download_best_version()) and check both match 'expected'.
3422 def _test_read_and_download(self, node, expected):
3423 d = node.get_best_readable_version()
3424 def _read_data(version):
3425 c = consumer.MemoryConsumer()
3426 d2 = defer.succeed(None)
3427 d2.addCallback(lambda ignored: version.read(c))
3428 d2.addCallback(lambda ignored:
3429 self.failUnlessEqual(expected, "".join(c.chunks)))
3431 d.addCallback(_read_data)
3432 d.addCallback(lambda ignored: node.download_best_version())
3433 d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3436 def test_read_and_download_mdmf(self):
3437 d = self.do_upload_mdmf()
3438 d.addCallback(self._test_read_and_download, self.data)
3441 def test_read_and_download_sdmf(self):
3442 d = self.do_upload_sdmf()
3443 d.addCallback(self._test_read_and_download, self.small_data)
3446 def test_read_and_download_sdmf_zero_length(self):
3447 d = self.do_upload_empty_sdmf()
3448 d.addCallback(self._test_read_and_download, "")
3452 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3453 timeout = 400 # these tests are too big, 120s is not enough on slow
3456 GridTestMixin.setUp(self)
3457 self.basedir = self.mktemp()
3459 self.c = self.g.clients[0]
3460 self.nm = self.c.nodemaker
3461 self.data = "testdata " * 100000 # about 900 KiB; MDMF
3462 self.small_data = "test data" * 10 # about 90 B; SDMF
3465 def do_upload_sdmf(self):
3466 d = self.nm.create_mutable_file(MutableData(self.small_data))
3468 assert isinstance(n, MutableFileNode)
3470 # Make SDMF node that has 255 shares.
3471 self.nm.default_encoding_parameters['n'] = 255
3472 self.nm.default_encoding_parameters['k'] = 127
3473 return self.nm.create_mutable_file(MutableData(self.small_data))
3474 d.addCallback(_then)
3476 assert isinstance(n, MutableFileNode)
3477 self.sdmf_max_shares_node = n
3478 d.addCallback(_then2)
3481 def do_upload_mdmf(self):
3482 d = self.nm.create_mutable_file(MutableData(self.data),
3483 version=MDMF_VERSION)
3485 assert isinstance(n, MutableFileNode)
3487 # Make MDMF node that has 255 shares.
3488 self.nm.default_encoding_parameters['n'] = 255
3489 self.nm.default_encoding_parameters['k'] = 127
3490 return self.nm.create_mutable_file(MutableData(self.data),
3491 version=MDMF_VERSION)
3492 d.addCallback(_then)
3494 assert isinstance(n, MutableFileNode)
3495 self.mdmf_max_shares_node = n
3496 d.addCallback(_then2)
3499 def _test_replace(self, offset, new_data):
3500 expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3501 d0 = self.do_upload_mdmf()
3503 d = defer.succeed(None)
3504 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3505 # close over 'node'.
3506 d.addCallback(lambda ign, node=node:
3507 node.get_best_mutable_version())
3508 d.addCallback(lambda mv:
3509 mv.update(MutableData(new_data), offset))
3510 d.addCallback(lambda ign, node=node:
3511 node.download_best_version())
3512 def _check(results):
3513 if results != expected:
3515 print "got: %s ... %s" % (results[:20], results[-20:])
3516 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3517 self.fail("results != expected")
3518 d.addCallback(_check)
3520 d0.addCallback(_run)
3523 def test_append(self):
3524 # We should be able to append data to a mutable file and get
3526 return self._test_replace(len(self.data), "appended")
3528 def test_replace_middle(self):
3529 # We should be able to replace data in the middle of a mutable
3530 # file and get what we expect back.
3531 return self._test_replace(100, "replaced")
3533 def test_replace_beginning(self):
3534 # We should be able to replace data at the beginning of the file
3535 # without truncating the file
3536 return self._test_replace(0, "beginning")
3538 def test_replace_segstart1(self):
3539 return self._test_replace(128*1024+1, "NNNN")
3541 def test_replace_zero_length_beginning(self):
3542 return self._test_replace(0, "")
3544 def test_replace_zero_length_middle(self):
3545 return self._test_replace(50, "")
3547 def test_replace_zero_length_segstart1(self):
3548 return self._test_replace(128*1024+1, "")
3550 def test_replace_and_extend(self):
3551 # We should be able to replace data in the middle of a mutable
3552 # file and extend that mutable file and get what we expect.
3553 return self._test_replace(100, "modified " * 100000)
3556 def _check_differences(self, got, expected):
3557 # displaying arbitrary file corruption is tricky for a
3558 # 1MB file of repeating data,, so look for likely places
3559 # with problems and display them separately
3560 gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3561 expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3562 gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3563 for (start,end) in gotmods]
3564 expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3565 for (start,end) in expmods]
3566 #print "expecting: %s" % expspans
3570 print "differences:"
3571 for segnum in range(len(expected)//SEGSIZE):
3572 start = segnum * SEGSIZE
3573 end = (segnum+1) * SEGSIZE
3574 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3575 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3576 if got_ends != exp_ends:
3577 print "expected[%d]: %s" % (start, exp_ends)
3578 print "got [%d]: %s" % (start, got_ends)
3579 if expspans != gotspans:
3580 print "expected: %s" % expspans
3581 print "got : %s" % gotspans
3582 open("EXPECTED","wb").write(expected)
3583 open("GOT","wb").write(got)
3584 print "wrote data to EXPECTED and GOT"
3585 self.fail("didn't get expected data")
3588 def test_replace_locations(self):
3589 # exercise fencepost conditions
3591 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3592 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3593 d0 = self.do_upload_mdmf()
3595 expected = self.data
3596 d = defer.succeed(None)
3597 for offset in suspects:
3598 new_data = letters.next()*2 # "AA", then "BB", etc
3599 expected = expected[:offset]+new_data+expected[offset+2:]
3600 d.addCallback(lambda ign:
3601 self.mdmf_node.get_best_mutable_version())
3602 def _modify(mv, offset=offset, new_data=new_data):
3603 # close over 'offset','new_data'
3604 md = MutableData(new_data)
3605 return mv.update(md, offset)
3606 d.addCallback(_modify)
3607 d.addCallback(lambda ignored:
3608 self.mdmf_node.download_best_version())
3609 d.addCallback(self._check_differences, expected)
3611 d0.addCallback(_run)
3614 def test_replace_locations_max_shares(self):
3615 # exercise fencepost conditions
3617 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3618 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3619 d0 = self.do_upload_mdmf()
3621 expected = self.data
3622 d = defer.succeed(None)
3623 for offset in suspects:
3624 new_data = letters.next()*2 # "AA", then "BB", etc
3625 expected = expected[:offset]+new_data+expected[offset+2:]
3626 d.addCallback(lambda ign:
3627 self.mdmf_max_shares_node.get_best_mutable_version())
3628 def _modify(mv, offset=offset, new_data=new_data):
3629 # close over 'offset','new_data'
3630 md = MutableData(new_data)
3631 return mv.update(md, offset)
3632 d.addCallback(_modify)
3633 d.addCallback(lambda ignored:
3634 self.mdmf_max_shares_node.download_best_version())
3635 d.addCallback(self._check_differences, expected)
3637 d0.addCallback(_run)
3641 def test_append_power_of_two(self):
3642 # If we attempt to extend a mutable file so that its segment
3643 # count crosses a power-of-two boundary, the update operation
3644 # should know how to reencode the file.
3646 # Note that the data populating self.mdmf_node is about 900 KiB
3647 # long -- this is 7 segments in the default segment size. So we
3648 # need to add 2 segments worth of data to push it over a
3649 # power-of-two boundary.
3650 segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3651 new_data = self.data + (segment * 2)
3652 d0 = self.do_upload_mdmf()
3654 d = defer.succeed(None)
3655 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3656 # close over 'node'.
3657 d.addCallback(lambda ign, node=node:
3658 node.get_best_mutable_version())
3659 d.addCallback(lambda mv:
3660 mv.update(MutableData(segment * 2), len(self.data)))
3661 d.addCallback(lambda ign, node=node:
3662 node.download_best_version())
3663 d.addCallback(lambda results:
3664 self.failUnlessEqual(results, new_data))
3666 d0.addCallback(_run)
3669 def test_update_sdmf(self):
3670 # Running update on a single-segment file should still work.
3671 new_data = self.small_data + "appended"
3672 d0 = self.do_upload_sdmf()
3674 d = defer.succeed(None)
3675 for node in (self.sdmf_node, self.sdmf_max_shares_node):
3676 # close over 'node'.
3677 d.addCallback(lambda ign, node=node:
3678 node.get_best_mutable_version())
3679 d.addCallback(lambda mv:
3680 mv.update(MutableData("appended"), len(self.small_data)))
3681 d.addCallback(lambda ign, node=node:
3682 node.download_best_version())
3683 d.addCallback(lambda results:
3684 self.failUnlessEqual(results, new_data))
3686 d0.addCallback(_run)
3689 def test_replace_in_last_segment(self):
3690 # The wrapper should know how to handle the tail segment
3692 replace_offset = len(self.data) - 100
3693 new_data = self.data[:replace_offset] + "replaced"
3694 rest_offset = replace_offset + len("replaced")
3695 new_data += self.data[rest_offset:]
3696 d0 = self.do_upload_mdmf()
3698 d = defer.succeed(None)
3699 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3700 # close over 'node'.
3701 d.addCallback(lambda ign, node=node:
3702 node.get_best_mutable_version())
3703 d.addCallback(lambda mv:
3704 mv.update(MutableData("replaced"), replace_offset))
3705 d.addCallback(lambda ign, node=node:
3706 node.download_best_version())
3707 d.addCallback(lambda results:
3708 self.failUnlessEqual(results, new_data))
3710 d0.addCallback(_run)
3713 def test_multiple_segment_replace(self):
3714 replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3715 new_data = self.data[:replace_offset]
3716 new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3717 new_data += 2 * new_segment
3718 new_data += "replaced"
3719 rest_offset = len(new_data)
3720 new_data += self.data[rest_offset:]
3721 d0 = self.do_upload_mdmf()
3723 d = defer.succeed(None)
3724 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3725 # close over 'node'.
3726 d.addCallback(lambda ign, node=node:
3727 node.get_best_mutable_version())
3728 d.addCallback(lambda mv:
3729 mv.update(MutableData((2 * new_segment) + "replaced"),
3731 d.addCallback(lambda ignored, node=node:
3732 node.download_best_version())
3733 d.addCallback(lambda results:
3734 self.failUnlessEqual(results, new_data))
3736 d0.addCallback(_run)
3739 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3740 sdmf_old_shares = {}
3741 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3742 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3743 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3744 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3745 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3746 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3747 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
# Canned SDMF (pre-MDMF) share fixtures, captured from shares written by an
# older Tahoe version.  Shares 0-6 are assigned above (outside this chunk);
# copy_sdmf_shares() below base64-decodes these and plants them directly in
# the storage servers, bypassing the publish path, so that
# test_new_downloader_can_read_old_shares can prove the current downloader
# still understands the old on-disk format.
sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
# The SSK writecap matching the canned shares above.
sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
# The plaintext the old shares encode; downloads are checked against this.
sdmf_old_contents = "This is a test file.\n"
def copy_sdmf_shares(self):
    """
    Plant the canned old-format SDMF shares directly into the grid's
    storage servers, short-circuiting the normal upload/publish path.
    One share goes to each of the ten servers, keyed by share number.
    """
    server_ids = self.g.servers_by_number.keys()
    assert len(server_ids) == 10

    # Derive the storage index from the canned writecap; it determines
    # the on-disk directory the shares must live in.
    storage_index = uri.from_string(self.sdmf_old_cap).get_storage_index()
    share_dir_suffix = storage_index_to_dir(storage_index)

    # Pair each canned share number with a server and write it out.
    for share_num, server_num in zip(self.sdmf_old_shares.keys(),
                                     server_ids):
        raw_share = base64.b64decode(self.sdmf_old_shares[share_num])
        share_dir = os.path.join(self.get_serverdir(server_num),
                                 "shares", share_dir_suffix)
        fileutil.make_dirs(share_dir)
        fileutil.write(os.path.join(share_dir, "%d" % share_num),
                       raw_share)

    # ...and verify that the shares are there.
    assert len(self.find_uri_shares(self.sdmf_old_cap)) == 10
def test_new_downloader_can_read_old_shares(self):
    """
    Verify that the current downloader can read shares written in the
    old SDMF on-disk format: plant the canned shares, then download the
    best version and compare against the known plaintext.
    """
    self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
    # The no-network grid must exist before shares can be planted;
    # copy_sdmf_shares() reads self.g and the server directories.
    self.set_up_grid()
    self.copy_sdmf_shares()
    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(self.sdmf_old_cap)
    d = n.download_best_version()
    d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
    # Return the Deferred so trial waits for the download to complete;
    # without this the assertion would never be checked.
    return d
3786 class DifferentEncoding(unittest.TestCase):
3788 self._storage = s = FakeStorage()
3789 self.nodemaker = make_nodemaker(s)
3791 def test_filenode(self):
3792 # create a file with 3-of-20, then modify it with a client configured
3793 # to do 3-of-10. #1510 tracks a failure here
3794 self.nodemaker.default_encoding_parameters["n"] = 20
3795 d = self.nodemaker.create_mutable_file("old contents")
3797 filecap = n.get_cap().to_string()
3798 del n # we want a new object, not the cached one
3799 self.nodemaker.default_encoding_parameters["n"] = 10
3800 n2 = self.nodemaker.create_from_cap(filecap)
3802 d.addCallback(_created)
3803 def modifier(old_contents, servermap, first_time):
3804 return "new contents"
3805 d.addCallback(lambda n: n.modify(modifier))