2 from cStringIO import StringIO
3 from twisted.trial import unittest
4 from twisted.internet import defer, reactor
5 from allmydata import uri, client
6 from allmydata.nodemaker import NodeMaker
7 from allmydata.util import base32, consumer, fileutil, mathutil
8 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
9 ssk_pubkey_fingerprint_hash
10 from allmydata.util.consumer import MemoryConsumer
11 from allmydata.util.deferredutil import gatherResults
12 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
13 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
14 from allmydata.monitor import Monitor
15 from allmydata.test.common import ShouldFailMixin
16 from allmydata.test.no_network import GridTestMixin
17 from foolscap.api import eventually, fireEventually
18 from foolscap.logging import log
19 from allmydata.storage_client import StorageFarmBroker
20 from allmydata.storage.common import storage_index_to_dir
21 from allmydata.scripts import debug
23 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
24 from allmydata.mutable.common import \
25 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
26 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
27 NotEnoughServersError, CorruptShareError
28 from allmydata.mutable.retrieve import Retrieve
29 from allmydata.mutable.publish import Publish, MutableFileHandle, \
31 DEFAULT_MAX_SEGMENT_SIZE
32 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
33 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
34 from allmydata.mutable.repairer import MustForceRepairError
36 import allmydata.test.common_util as testutil
37 from allmydata.test.common import TEST_RSA_KEY_SIZE
38 from allmydata.test.test_download import PausingConsumer, \
39 PausingAndStoppingConsumer, StoppingConsumer, \
40 ImmediatelyStoppingConsumer
def eventuaaaaaly(res=None):
    """Fire *res* after three turns of foolscap's eventual-send queue.

    Several layers of eventual-send scheduling are flushed so that test
    callbacks run after any pending foolscap events.  Returns a Deferred
    that fires with *res*.
    """
    d = fireEventually(res)
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    # bug fix: the Deferred must be returned -- callers such as
    # FakeStorage.read() use the return value directly.
    return d
49 # this "FakeStorage" exists to put the share data in RAM and avoid using real
50 # network connections, both to speed up the tests and to reduce the amount of
51 # non-mutable.py code being exercised.
54 # this class replaces the collection of storage servers, allowing the
55 # tests to examine and manipulate the published shares. It also lets us
56 # control the order in which read queries are answered, to exercise more
# of the error-handling code in Retrieve.
59 # Note that we ignore the storage index: this FakeStorage instance can
60 # only be used for a single storage index.
        # _sequence is used to cause the responses to occur in a specific
        # order. If it is in use, then we will defer queries instead of
        # answering them right away, accumulating the Deferreds in a dict. We
        # don't know exactly how many queries we'll get, so exactly one
        # second after the first query arrives, we will release them all (in
        # NOTE(review): the rest of this comment and the surrounding
        # __init__ lines are not visible in this excerpt.
        self._pending_timer = None  # DelayedCall that releases queued reads
    def read(self, peerid, storage_index):
        # Return (eventually) the {shnum: data} dict for peerid.  The
        # storage_index argument is ignored: this fake serves exactly one
        # storage index.
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            # no ordering requested: answer after a few reactor turns
            return eventuaaaaaly(shares)
        # sequenced mode: queue the answer and start the release timer.
        # NOTE(review): the Deferred creation and the timer guard lines
        # are not visible in this excerpt.
        self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        if peerid not in self._pending:
            self._pending[peerid] = []
        self._pending[peerid].append( (d, shares) )
    def _fire_readers(self):
        # Release all queued read Deferreds: first the peers named in
        # self._sequence (in that order), then any remaining peers.
        # NOTE(review): the lines resetting self._pending and guarding the
        # pop() are not visible in this excerpt.
        self._pending_timer = None
        pending = self._pending
        for peerid in self._sequence:
            for (d, shares) in pending.pop(peerid):
                eventually(d.callback, shares)
        for peerid in pending:
            for (d, shares) in pending[peerid]:
                eventually(d.callback, shares)
    def write(self, peerid, storage_index, shnum, offset, data):
        # Apply one write vector to the in-RAM copy of a share.
        # NOTE(review): the StringIO construction / seek / write lines are
        # not visible in this excerpt.
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        f.write(shares.get(shnum, ""))
        shares[shnum] = f.getvalue()
class FakeStorageServer:
    """RemoteReference look-alike that proxies slot operations to FakeStorage.

    NOTE(review): several interior lines (parts of __init__, the nested
    _call helper of callRemote, the body of advise_corrupt_share, and the
    response-building helper of slot_readv) are not visible in this excerpt.
    """
    def __init__(self, peerid, storage):
        self.storage = storage

    def callRemote(self, methname, *args, **kwargs):
        # dispatch to the local method of the same name, answering
        # eventually (like a real remote call would)
        meth = getattr(self, methname)
        return meth(*args, **kwargs)
        d.addCallback(lambda res: _call())

    def callRemoteOnly(self, methname, *args, **kwargs):
        # fire-and-forget variant: swallow both results and failures
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        # no-op in the fake (body not visible in this excerpt)

    def slot_readv(self, storage_index, shnums, readv):
        # read the requested byte ranges out of each requested share
        d = self.storage.read(self.peerid, storage_index)
        if shnums and shnum not in shnums:
        vector = response[shnum] = []
        for (offset, length) in readv:
            assert isinstance(offset, (int, long)), offset
            assert isinstance(length, (int, long)), length
            vector.append(shares[shnum][offset:offset+length])

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
        answer = (True, readv)
        return fireEventually(answer)
def flip_bit(original, byte_offset):
    """Return a copy of *original* with the low bit of one byte inverted."""
    head = original[:byte_offset]
    tail = original[byte_offset + 1:]
    flipped = chr(ord(original[byte_offset]) ^ 0x01)
    return head + flipped + tail
def add_two(original, byte_offset):
    """Corrupt one byte so a valid version number becomes invalid.

    Flipping the low bit is not enough for the version byte, because 1 is
    also a valid version number, so bit 1 is toggled instead (0 -> 2,
    1 -> 3).  NOTE(review): despite the name this is an XOR with 0x02,
    not arithmetic addition.
    """
    before = original[:byte_offset]
    after = original[byte_offset + 1:]
    tweaked = chr(ord(original[byte_offset]) ^ 0x02)
    return before + tweaked + after
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    # `offset` may be a field name (looked up in the share's offset table),
    # a raw integer, or a (name, delta) tuple.  Returns a Deferred firing
    # with `res` once every selected share has been corrupted in place.
    # NOTE(review): several interior lines (the `ds` accumulator, the
    # per-share loop/continue, the flip_bit/add_two selection) are not
    # visible in this excerpt.
    for peerid in s._peers:
        shares = s._peers[peerid]
        if (shnums_to_corrupt is not None
            and shnum not in shnums_to_corrupt):
        # We're feeding the reader all of the share data, so it
        # won't need to use the rref that we didn't provide, nor the
        # storage index that we didn't provide. We do this because
        # the reader will work for both MDMF and SDMF.
        reader = MDMFSlotReadProxy(None, None, shnum, data)
        # We need to get the offsets for the next part.
        d = reader.get_verinfo()
        def _do_corruption(verinfo, data, shnum, shares):
            # unpack the offset table and compute the real byte offset
             k, n, prefix, o) = verinfo
            if isinstance(offset, tuple):
                offset1, offset2 = offset
            if offset1 == "pubkey" and IV:
            real_offset = o[offset1]
            real_offset = offset1
            real_offset = int(real_offset) + offset2 + offset_offset
            assert isinstance(real_offset, int), offset
            if offset1 == 0: # verbyte
            shares[shnum] = f(data, real_offset)
        d.addCallback(_do_corruption, data, shnum, shares)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
def make_storagebroker(s=None, num_peers=10):
    # Build a StorageFarmBroker whose servers are FakeStorageServers that
    # all write into the single FakeStorage `s`.
    # NOTE(review): the branch creating a default FakeStorage when s is
    # None is not visible in this excerpt.
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
               "permutation-seed-base32": base32.b2a(peerid) }
        storage_broker.test_add_rref(peerid, fss, ann)
    return storage_broker
def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
    # Construct a NodeMaker wired to the fake storage broker, defaulting
    # to 3-of-10 SDMF encoding.
    # NOTE(review): the guard around set_default_keysize, some NodeMaker
    # constructor arguments, and the `return nodemaker` line are not
    # visible in this excerpt.
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(keysize)
    nodemaker = NodeMaker(storage_broker, sh, None,
                          {"k": 3, "n": 10}, SDMF_VERSION, keygen)
253 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
254 # this used to be in Publish, but we removed the limit. Some of
255 # these tests test whether the new code correctly allows files
256 # larger than the limit.
257 OLD_MAX_SEGMENT_SIZE = 3500000
259 self._storage = s = FakeStorage()
260 self.nodemaker = make_nodemaker(s)
    def test_create(self):
        # Creating a mutable file should publish exactly one share on the
        # first server (n=10 servers, one share each).
        d = self.nodemaker.create_mutable_file()
        # NOTE(review): the `def _created(n):` line is not visible here
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
    def test_create_mdmf(self):
        # Same as test_create, but for an MDMF-format mutable file.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # NOTE(review): the `def _created(n):` line is not visible here
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            d.addCallback(lambda ignored, v=v:
                          self.nodemaker.create_mutable_file(version=v))
            # NOTE(review): the `def _created(n):` line (which stores
            # self._node) is not visible here
                self.failUnless(isinstance(n, MutableFileNode))
            d.addCallback(_created)
            d.addCallback(lambda n:
                          n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                          self._node.download_best_version())
            d.addCallback(lambda contents:
                          self.failUnlessEqual(contents, "Contents" * 50000))
    def test_max_shares(self):
        # Publishing with n=255 (the protocol maximum) should produce 255
        # shares across the ten servers.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        # NOTE(review): the `def _created(n):` line is not visible here
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
                      n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
                      self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
                      self.failUnlessEqual("contents" * 50000, contents))
    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # NOTE(review): the `def _created(n):` line is not visible here
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
        d.addCallback(_created)
        d.addCallback(lambda n:
                      n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
                      self._node.download_best_version())
        d.addCallback(lambda contents:
                      self.failUnlessEqual(contents, "contents" * 50000))
    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # NOTE(review): the `def _created(n):` and `cap = n.get_cap()`
        # lines are not visible here
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # NOTE(review): the `def _created(n):` and `s = n.get_uri()` lines
        # are not visible here
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
    def test_create_from_mdmf_readcap(self):
        # A node built from an MDMF readcap must come back read-only.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # NOTE(review): the `def _created(n):` line is not visible here
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)
    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # NOTE(review): the `def _created(n):` line is not visible here
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
    def test_serialize(self):
        # _do_serialized must run callbacks one at a time, in order, and
        # propagate both results and failures.
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
        # NOTE(review): the lines defining the `calls` list and the
        # callback's return value are not visible here
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)
        # NOTE(review): the `def _errback():` line is not visible here
            raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
    def test_upload_and_download(self):
        # End-to-end: create, overwrite several times, upload against a
        # servermap, and download each version back.
        d = self.nodemaker.create_mutable_file()
        # NOTE(review): the `def _created(n):` line is not visible here
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                          self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                          self.failUnlessEqual(res, "large size file" * 1000))
        d.addCallback(_created)
    def test_upload_and_download_mdmf(self):
        # Like test_upload_and_download, but with multi-segment MDMF
        # contents to exercise segmented upload/download paths.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # NOTE(review): the `def _created(n):` line is not visible here
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                          n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                          n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            bigger_contents = "contents2" * 1000000 # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                          n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.failUnlessEqual(data, bigger_contents))
        d.addCallback(_created)
    def test_retrieve_producer_mdmf(self):
        # We should make sure that the retriever is able to pause and stop
        # correctly, using a multi-segment MDMF file.
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=MDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "MDMF", data)
    # note: SDMF has only one big segment, so we can't use the usual
    # after-the-first-write() trick to pause or stop the download.
    # Disabled until we find a better approach.
    def OFF_test_retrieve_producer_sdmf(self):
        # Disabled (the OFF_ prefix keeps trial from collecting it).
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=SDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "SDMF", data)
    def _test_retrieve_producer(self, version, kind, data):
        # Drive version.read() through consumers that pause and/or stop
        # producing; stopping must raise DownloadStopped.
        # NOTE(review): the version.read(...) calls wired into several of
        # the lambdas below are not visible in this excerpt.
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_pause_stop",
                                      "our Consumer called stopProducing()",
        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop",
                                      "our Consumer called stopProducing()",
        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop_imm",
                                      "our Consumer called stopProducing()",
        c5 = MemoryConsumer()
        d1 = version.read(c5)
        c5.producer.stopProducing()
        return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                               "our Consumer called stopProducing()",
    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # NOTE(review): the `def _created(node):` and `def _then(ignored):`
        # lines are not visible here
            self.uri = node.get_uri()
            # also confirm that the cap has no extension fields
            pieces = self.uri.split(":")
            self.failUnlessEqual(len(pieces), 4)
            return node.overwrite(MutableData("contents1" * 100000))
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_downloaded)
    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
    def test_create_with_initial_contents(self):
        # Contents passed at creation time must be downloadable, and
        # overwriting afterwards must still work.
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
        # NOTE(review): the `def _created(n):` line is not visible here
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(_created)
    def test_create_mdmf_with_initial_contents(self):
        # Multi-segment initial contents for an MDMF file must round-trip.
        initial_contents = "foobarbaz" * 131072 # 900KiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        # NOTE(review): the `def _created(n):` line is not visible here
            d = n.download_best_version()
            d.addCallback(lambda data:
                          self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                          n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                          n.download_best_version())
            d.addCallback(lambda data:
                          self.failUnlessEqual(data, initial_contents +
        d.addCallback(_created)
    def test_create_with_initial_contents_function(self):
        # The contents argument may be a callable that receives the node
        # (with its writekey already set) and returns the uploadable.
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
        # NOTE(review): the `def _created(n):` line is not visible here
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
    def test_create_mdmf_with_initial_contents_function(self):
        # Same as the SDMF variant, but with multi-segment MDMF contents.
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
                      n.download_best_version())
        d.addCallback(lambda data2:
                      self.failUnlessEqual(data2, data))
    def test_create_with_too_large_contents(self):
        # Files larger than the old SDMF segment-size limit must now be
        # accepted (the limit was removed).
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
        # NOTE(review): the `def _created(n):` line is not visible here
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
        d.addCallback(_created)
    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        # Assert that node n's best recoverable version carries
        # expected_seqnum; `which` labels the failure message.
        # NOTE(review): the `return d` line is not visible in this excerpt.
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
    def test_modify(self):
        # n.modify() must re-run the modifier on UCWE, skip publishing
        # when the modifier returns None/unchanged, and propagate modifier
        # exceptions without bumping the sequence number.
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
        def _non_modifier(old_contents, servermap, first_time):
        def _none_modifier(old_contents, servermap, first_time):
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
                raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
                raise UncoordinatedWriteError("simulated")
        # NOTE(review): the bodies of several modifiers above and the
        # `calls` accumulator are not visible in this excerpt.
        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        # NOTE(review): the `def _created(n):` line is not visible here
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
            d.addCallback(lambda res:
                          self.shouldFail(ValueError, "error_modifier", None,
                                          n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
            def _reset_ucw_error_modifier(res):
            d.addCallback(_reset_ucw_error_modifier)
            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
        d.addCallback(_created)
    def test_modify_backoffer(self):
        # Exercise n.modify() with custom backoffer policies: one that
        # stops immediately, one that pauses then retries, and a
        # BackoffAgent that gives up after its retry budget.
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
                raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
        def _backoff_pauser(node, f):
            reactor.callLater(0.5, d.callback, None)
        # NOTE(review): the bodies of the backoffer helpers above are only
        # partially visible in this excerpt.
        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        # NOTE(review): the `def _created(n):` line is not visible here
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "_backoff_stopper", None,
                                          n.modify, _ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
            def _reset_ucw_error_modifier(res):
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          n.modify, _always_ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
        d.addCallback(_created)
    def test_upload_and_download_full_size_keys(self):
        # Same round-trip as test_upload_and_download, but with the real
        # (full-size) RSA key generator instead of the small test keys.
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        # NOTE(review): the `def _created(n):` line is not visible here
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                          self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                          n.download_version(smap,
                                             smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
        d.addCallback(_created)
    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        # NOTE(review): the `def _created(n):` line (which stores self.n)
        # is not visible here
            return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
                      self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
                      self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 9))
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # later.  Stores the node in self._fn (not visible here) and a
        # second node for the same cap in self._fn2.
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        # NOTE(review): the `def _created(node):` line is not visible here
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
    def publish_mdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an MDMF file.
        # self.CONTENTS should have more than one segment.
        self.CONTENTS = "This is an MDMF file" * 100000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        # NOTE(review): the `def _created(node):` line is not visible here
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
    def publish_sdmf(self):
        # like publish_one, except that the result is guaranteed to be
        # an SDMF file.
        self.CONTENTS = "This is an SDMF file" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        # NOTE(review): the `def _created(node):` line is not visible here
            self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
948 def publish_empty_sdmf(self):
950 self.uploadable = MutableData(self.CONTENTS)
951 self._storage = FakeStorage()
952 self._nodemaker = make_nodemaker(self._storage, keysize=None)
953 self._storage_broker = self._nodemaker.storage_broker
954 d = self._nodemaker.create_mutable_file(self.uploadable,
955 version=SDMF_VERSION)
958 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
959 d.addCallback(_created)
def publish_multiple(self, version=0):
    """Publish five successive versions of one mutable file.

    After each upload the share state is snapshotted into
    self._copied_shares[0..4]; the storage is left holding version 4.
    """
    self.CONTENTS = ["Contents 0",
                     "Contents 1",
                     "Contents 2",
                     "Contents 3a",
                     "Contents 3b"]
    self.uploadables = [MutableData(d) for d in self.CONTENTS]
    self._copied_shares = {}
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage)
    d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
    def _created(node):
        self._fn = node
        # now create multiple versions of the same file, and accumulate
        # their shares, so we can mix and match them later.
        d = defer.succeed(None)
        d.addCallback(self._copy_shares, 0)
        d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
        d.addCallback(self._copy_shares, 1)
        d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
        d.addCallback(self._copy_shares, 2)
        d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
        d.addCallback(self._copy_shares, 3)
        # now we replace all the shares with version s3, and upload a new
        # version to get s4b.
        rollback = dict([(i,2) for i in range(10)])
        d.addCallback(lambda res: self._set_versions(rollback))
        d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
        d.addCallback(self._copy_shares, 4)
        # we leave the storage in state 4
        return d
    d.addCallback(_created)
    return d
def _copy_shares(self, ignored, index):
    """Snapshot the current shares into self._copied_shares[index].

    Copies the two dict levels (peerid -> shnum -> data); the share
    data strings themselves are immutable, so sharing them is safe.
    """
    shares = self._storage._peers
    # we need a deep copy
    new_shares = {}
    for peerid in shares:
        new_shares[peerid] = {}
        for shnum in shares[peerid]:
            new_shares[peerid][shnum] = shares[peerid][shnum]
    self._copied_shares[index] = new_shares

def _set_versions(self, versionmap):
    # versionmap maps shnums to which version (0,1,2,3,4) we want the
    # share to be at. Any shnum which is left out of the map will stay at
    # its current version.
    shares = self._storage._peers
    oldshares = self._copied_shares
    for peerid in shares:
        for shnum in shares[peerid]:
            if shnum in versionmap:
                index = versionmap[shnum]
                shares[peerid][shnum] = oldshares[index][peerid][shnum]
class Servermap(unittest.TestCase, PublishMixin):
    def setUp(self):
        # every test starts with one freshly-published SDMF file
        return self.publish_one()
def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
                   update_range=None):
    """Build and update a fresh ServerMap for fn (default self._fn);
    fires with the updated map."""
    if fn is None:
        fn = self._fn
    if sb is None:
        sb = self._storage_broker
    smu = ServermapUpdater(fn, sb, Monitor(),
                           ServerMap(), mode, update_range=update_range)
    d = smu.update()
    return d

def update_servermap(self, oldmap, mode=MODE_CHECK):
    """Re-run the updater against an existing map; fires with the map."""
    smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
                           oldmap, mode)
    d = smu.update()
    return d
def failUnlessOneRecoverable(self, sm, num_shares):
    """Assert that sm shows exactly one recoverable version holding
    num_shares shares (k=3, N=10); returns sm so callbacks can chain."""
    self.failUnlessEqual(len(sm.recoverable_versions()), 1)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
    best = sm.best_recoverable_version()
    self.failIfEqual(best, None)
    self.failUnlessEqual(sm.recoverable_versions(), set([best]))
    self.failUnlessEqual(len(sm.shares_available()), 1)
    self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
    shnum, servers = sm.make_sharemap().items()[0]
    server = list(servers)[0]
    self.failUnlessEqual(sm.version_on_server(server, shnum), best)
    self.failUnlessEqual(sm.version_on_server(server, 666), None)
    # pass the map through so chained addCallbacks can keep using it
    return sm
def test_basic(self):
    """Each update mode finds the expected number of shares, and an
    existing servermap can be re-used and updated in any mode."""
    d = defer.succeed(None)
    ms = self.make_servermap
    us = self.update_servermap

    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    # this mode stops at k+epsilon, and epsilon=k, so 6 shares
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    # this mode stops at 'k' shares
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

    # and can we re-use the same servermap? Note that these are sorted in
    # increasing order of number of servers queried, since once a server
    # gets into the servermap, we'll always ask it for an update.
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
    d.addCallback(lambda sm: us(sm, mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    # trial requires the Deferred back so it can wait on the chain
    return d
def test_fetch_privkey(self):
    """A MODE_WRITE mapupdate must fetch the privkey even when the file
    is large enough to push it past the initial read."""
    d = defer.succeed(None)
    # use the sibling filenode (which hasn't been used yet), and make
    # sure it can fetch the privkey. The file is small, so the privkey
    # will be fetched on the first (query) pass.
    d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

    # create a new file, which is large enough to knock the privkey out
    # of the early part of the file
    LARGE = "These are Larger contents" * 200 # about 5KB
    LARGE_uploadable = MutableData(LARGE)
    d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
    def _created(large_fn):
        large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
        return self.make_servermap(MODE_WRITE, large_fn2)
    d.addCallback(_created)
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
    return d
def test_mark_bad(self):
    """Shares marked bad must drop out of the servermap, and a
    MODE_WRITE update must find replacement shares."""
    d = defer.succeed(None)
    ms = self.make_servermap

    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
    def _made_map(sm):
        v = sm.best_recoverable_version()
        vm = sm.make_versionmap()
        shares = list(vm[v])
        self.failUnlessEqual(len(shares), 6)
        self._corrupted = set()
        # mark the first 5 shares as corrupt, then update the servermap.
        # The map should not have the marked shares in it any more, and
        # new shares should be found to replace the missing ones.
        for (shnum, server, timestamp) in shares:
            if shnum < 5:
                self._corrupted.add( (server, shnum) )
                sm.mark_bad_share(server, shnum, "")
        return self.update_servermap(sm, MODE_WRITE)
    d.addCallback(_made_map)
    def _check_map(sm):
        # this should find all 5 shares that weren't marked bad
        v = sm.best_recoverable_version()
        vm = sm.make_versionmap()
        shares = list(vm[v])
        for (server, shnum) in self._corrupted:
            server_shares = sm.debug_shares_on_server(server)
            self.failIf(shnum in server_shares,
                        "%d was in %s" % (shnum, server_shares))
        self.failUnlessEqual(len(shares), 5)
    d.addCallback(_check_map)
    return d
def failUnlessNoneRecoverable(self, sm):
    # The map should know of no versions at all, recoverable or not,
    # and should therefore offer no "best" version and no shares.
    recoverable = sm.recoverable_versions()
    unrecoverable = sm.unrecoverable_versions()
    self.failUnlessEqual(len(recoverable), 0)
    self.failUnlessEqual(len(unrecoverable), 0)
    best = sm.best_recoverable_version()
    self.failUnlessEqual(best, None)
    self.failUnlessEqual(len(sm.shares_available()), 0)
def test_no_shares(self):
    """With every share deleted, every mode finds nothing recoverable."""
    self._storage._peers = {} # delete all shares
    ms = self.make_servermap
    d = defer.succeed(None)

    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

    # trial requires the Deferred back so it can wait on the chain
    return d
def failUnlessNotQuiteEnough(self, sm):
    """Assert that sm sees exactly one (unrecoverable) version with only
    2 of the k=3 required shares; returns sm for chaining."""
    self.failUnlessEqual(len(sm.recoverable_versions()), 0)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
    best = sm.best_recoverable_version()
    self.failUnlessEqual(best, None)
    self.failUnlessEqual(len(sm.shares_available()), 1)
    self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
    # pass the map through so chained addCallbacks can keep using it
    return sm
def test_not_quite_enough_shares(self):
    """With only 2 shares left (fewer than k=3), every mode should
    report the version as present but unrecoverable."""
    # NOTE(review): `s` is presumably bound to self._storage by an
    # earlier statement -- confirm.
    ms = self.make_servermap
    num_shares = len(s._peers)
    for peerid in s._peers:
        s._peers[peerid] = {}
    # NOTE(review): exactly two peers are expected to still hold shares
    # at this point (see the assertion below) -- confirm how they are
    # re-installed.
    # now there ought to be only two shares left
    assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
    d = defer.succeed(None)
    d.addCallback(lambda res: ms(mode=MODE_CHECK))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda sm:
                  self.failUnlessEqual(len(sm.make_sharemap()), 2))
    d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda res: ms(mode=MODE_WRITE))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
    d.addCallback(lambda res: ms(mode=MODE_READ))
    d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
def test_servermapupdater_finds_mdmf_files(self):
    # publish an MDMF file, then make sure that when we run the
    # ServermapUpdater, the file is reported to have one recoverable
    # version.
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_mdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_CHECK))
    # Calling make_servermap also updates the servermap in the mode
    # that we specify, so we just need to see what it says.
    def _check_servermap(sm):
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
    d.addCallback(_check_servermap)
    return d

def test_fetch_update(self):
    """A MODE_WRITE mapupdate with an update_range must collect update
    data for every share."""
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_mdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
    def _check_servermap(sm):
        # all 10 shares should carry update data
        self.failUnlessEqual(len(sm.update_data), 10)
        # and each share should know about exactly one version
        for data in sm.update_data.itervalues():
            self.failUnlessEqual(len(data), 1)
    d.addCallback(_check_servermap)
    return d

def test_servermapupdater_finds_sdmf_files(self):
    """The updater reports one recoverable version for an SDMF file."""
    d = defer.succeed(None)
    d.addCallback(lambda ignored:
        self.publish_sdmf())
    d.addCallback(lambda ignored:
        self.make_servermap(mode=MODE_CHECK))
    d.addCallback(lambda servermap:
        self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
    return d
class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
    def setUp(self):
        # every test starts with one freshly-published SDMF file
        return self.publish_one()
def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
    """Build (or refresh) a ServerMap for self._fn; fires with the map."""
    if oldmap is None:
        oldmap = ServerMap()
    if sb is None:
        sb = self._storage_broker
    smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
    d = smu.update()
    return d
def abbrev_verinfo(self, verinfo):
    """Abbreviate a verinfo tuple as 'seqnum-roothash4' for debugging."""
    if verinfo is None:
        return None
    (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
     offsets_tuple) = verinfo
    return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])

def abbrev_verinfo_dict(self, verinfo_d):
    """Return a copy of a verinfo-keyed dict with abbreviated keys."""
    output = {}
    for verinfo,value in verinfo_d.items():
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = verinfo
        output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
    return output
1276 def dump_servermap(self, servermap):
1277 print "SERVERMAP", servermap
1278 print "RECOVERABLE", [self.abbrev_verinfo(v)
1279 for v in servermap.recoverable_versions()]
1280 print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1281 print "available", self.abbrev_verinfo_dict(servermap.shares_available())
def do_download(self, servermap, version=None):
    """Retrieve the given (or best recoverable) version; fires with the
    plaintext contents as a string."""
    if version is None:
        version = servermap.best_recoverable_version()
    r = Retrieve(self._fn, self._storage_broker, servermap, version)
    c = consumer.MemoryConsumer()
    d = r.download(consumer=c)
    d.addCallback(lambda mc: "".join(mc.chunks))
    return d
def test_basic(self):
    """Round-trip: map, download, re-use the map with and without
    updating, and re-fetch a clobbered pubkey."""
    d = self.make_servermap()
    def _do_retrieve(servermap):
        self._smap = servermap
        #self.dump_servermap(servermap)
        self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
        return self.do_download(servermap)
    d.addCallback(_do_retrieve)
    def _retrieved(new_contents):
        self.failUnlessEqual(new_contents, self.CONTENTS)
    d.addCallback(_retrieved)
    # we should be able to re-use the same servermap, both with and
    # without updating it.
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    # clobbering the pubkey should make the servermap updater re-fetch it
    def _clobber_pubkey(res):
        self._fn._pubkey = None
    d.addCallback(_clobber_pubkey)
    d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
    d.addCallback(lambda res: self.do_download(self._smap))
    d.addCallback(_retrieved)
    # trial requires the Deferred back so it can wait on the chain
    return d
def test_all_shares_vanished(self):
    """Deleting every share after the mapupdate makes the download fail
    with NotEnoughSharesError."""
    d = self.make_servermap()
    def _remove_shares(servermap):
        for shares in self._storage._peers.values():
            shares.clear()
        d1 = self.shouldFail(NotEnoughSharesError,
                             "test_all_shares_vanished",
                             "ran out of servers",
                             self.do_download, servermap)
        return d1
    d.addCallback(_remove_shares)
    return d

def test_all_shares_vanished_new_servermap(self):
    """Same, but the shares (on all but two servers) vanish before a
    fresh servermap is built for the download."""
    d = self.make_servermap()
    def _remove_shares(servermap):
        self._version = servermap.best_recoverable_version()
        for shares in self._storage._peers.values()[2:]:
            shares.clear()
        return self.make_servermap()
    d.addCallback(_remove_shares)
    def _check(new_servermap):
        d1 = self.shouldFail(NotEnoughSharesError,
                             "test_all_shares_vanished",
                             "ran out of servers",
                             self.do_download, new_servermap, version=self._version)
        return d1
    d.addCallback(_check)
    return d
def test_no_servers(self):
    sb2 = make_storagebroker(num_peers=0)
    # if there are no servers, then a MODE_READ servermap should come
    # back completely empty
    d = self.make_servermap(sb=sb2)
    def _check_servermap(servermap):
        self.failUnlessEqual(servermap.best_recoverable_version(), None)
        self.failIf(servermap.recoverable_versions())
        self.failIf(servermap.unrecoverable_versions())
        self.failIf(servermap.all_servers())
    d.addCallback(_check_servermap)
    return d

def test_no_servers_download(self):
    """A download with no servers fails, and must not poison a later
    download made once the servers are back (#463)."""
    sb2 = make_storagebroker(num_peers=0)
    self._fn._storage_broker = sb2
    d = self.shouldFail(UnrecoverableFileError,
                        "test_no_servers_download",
                        "no recoverable versions",
                        self._fn.download_best_version)
    def _restore(res):
        # a failed download that occurs while we aren't connected to
        # anybody should not prevent a subsequent download from working.
        # This isn't quite the webapi-driven test that #463 wants, but it
        # should be close enough.
        self._fn._storage_broker = self._storage_broker
        return self._fn.download_best_version()
    def _retrieved(new_contents):
        self.failUnlessEqual(new_contents, self.CONTENTS)
    d.addCallback(_restore)
    d.addCallback(_retrieved)
    return d
def _test_corrupt_all(self, offset, substring,
                      should_succeed=False,
                      corrupt_early=True,
                      failure_checker=None,
                      fetch_privkey=False):
    """Corrupt every share at `offset` (before or after the mapupdate,
    per corrupt_early), then confirm the download either succeeds or
    fails with `substring` in the recorded problems/failure."""
    d = defer.succeed(None)
    if corrupt_early:
        d.addCallback(corrupt, self._storage, offset)
    d.addCallback(lambda res: self.make_servermap())
    if not corrupt_early:
        d.addCallback(corrupt, self._storage, offset)
    def _do_retrieve(servermap):
        ver = servermap.best_recoverable_version()
        if ver is None and not should_succeed:
            # no recoverable versions == not succeeding. The problem
            # should be noted in the servermap's list of problems.
            if substring:
                allproblems = [str(f) for f in servermap.get_problems()]
                self.failUnlessIn(substring, "".join(allproblems))
            return servermap
        if should_succeed:
            d1 = self._fn.download_version(servermap, ver,
                                           fetch_privkey)
            d1.addCallback(lambda new_contents:
                self.failUnlessEqual(new_contents, self.CONTENTS))
        else:
            d1 = self.shouldFail(NotEnoughSharesError,
                                 "_corrupt_all(offset=%s)" % (offset,),
                                 substring,
                                 self._fn.download_version, servermap,
                                 ver,
                                 fetch_privkey)
        if failure_checker:
            d1.addCallback(failure_checker)
        d1.addCallback(lambda res: servermap)
        return d1
    d.addCallback(_do_retrieve)
    return d
def test_corrupt_all_verbyte(self):
    # when the version byte is not 0 or 1, we hit an UnknownVersionError
    # error in unpack_share().
    d = self._test_corrupt_all(0, "UnknownVersionError")
    def _check_servermap(servermap):
        # and the dump should mention the problems
        s = StringIO()
        dump = servermap.dump(s).getvalue()
        self.failUnless("30 PROBLEMS" in dump, dump)
    d.addCallback(_check_servermap)
    return d
def test_corrupt_all_seqnum(self):
    """A corrupted sequence number shows up as an invalid signature."""
    d = self._test_corrupt_all(1, "signature is invalid")
    return d

def test_corrupt_all_R(self):
    """A corrupted root hash shows up as an invalid signature."""
    d = self._test_corrupt_all(9, "signature is invalid")
    return d

def test_corrupt_all_IV(self):
    """A corrupted salt/IV shows up as an invalid signature."""
    d = self._test_corrupt_all(41, "signature is invalid")
    return d

def test_corrupt_all_k(self):
    """A corrupted 'k' shows up as an invalid signature."""
    d = self._test_corrupt_all(57, "signature is invalid")
    return d

def test_corrupt_all_N(self):
    """A corrupted 'N' shows up as an invalid signature."""
    d = self._test_corrupt_all(58, "signature is invalid")
    return d

def test_corrupt_all_segsize(self):
    """A corrupted segment size shows up as an invalid signature."""
    d = self._test_corrupt_all(59, "signature is invalid")
    return d

def test_corrupt_all_datalen(self):
    """A corrupted data length shows up as an invalid signature."""
    d = self._test_corrupt_all(67, "signature is invalid")
    return d
def test_corrupt_all_pubkey(self):
    # a corrupt pubkey won't match the URI's fingerprint. We need to
    # remove the pubkey from the filenode, or else it won't bother trying
    # to fetch (and verify) it from the shares at all.
    self._fn._pubkey = None
    d = self._test_corrupt_all("pubkey",
                               "pubkey doesn't match fingerprint")
    return d

def test_corrupt_all_sig(self):
    # a corrupt signature is a bad one
    # the signature runs from about [543:799], depending upon the length
    # of the pubkey
    d = self._test_corrupt_all("signature", "signature is invalid")
    return d
def test_corrupt_all_share_hash_chain_number(self):
    # a corrupt share hash chain entry will show up as a bad hash. If we
    # mangle the first byte, that will look like a bad hash number,
    # causing an IndexError
    d = self._test_corrupt_all("share_hash_chain", "corrupt hashes")
    return d

def test_corrupt_all_share_hash_chain_hash(self):
    # a corrupt share hash chain entry will show up as a bad hash. If we
    # mangle a few bytes in, that will look like a bad hash.
    d = self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
    return d

def test_corrupt_all_block_hash_tree(self):
    """A corrupted block hash tree fails the block-hash-tree check."""
    d = self._test_corrupt_all("block_hash_tree",
                               "block hash tree failure")
    return d
def test_corrupt_all_block(self):
    """Corrupted share data fails the block hash tree check."""
    d = self._test_corrupt_all("share_data", "block hash tree failure")
    return d

def test_corrupt_all_encprivkey(self):
    # a corrupted privkey won't even be noticed by the reader, only by a
    # writer, so the plain download still succeeds
    d = self._test_corrupt_all("enc_privkey", None, should_succeed=True)
    return d
def test_corrupt_all_encprivkey_late(self):
    # this should work for the same reason as above, but we corrupt
    # after the servermap update to exercise the error handling
    # code.
    # We need to remove the privkey from the node, or the retrieve
    # process won't know to update it.
    self._fn._privkey = None
    return self._test_corrupt_all("enc_privkey",
                                  None, # this shouldn't fail
                                  should_succeed=True,
                                  corrupt_early=False,
                                  fetch_privkey=True)


# disabled until retrieve tests checkstring on each blockfetch. I didn't
# just use a .todo because the failing-but-ignored test emits about 30kB
# of noise.
def OFF_test_corrupt_all_seqnum_late(self):
    # corrupting the seqnum between mapupdate and retrieve should result
    # in NotEnoughSharesError, since each share will look invalid
    def _check(f):
        self.failUnless(f.check(NotEnoughSharesError))
        self.failUnless("uncoordinated write" in str(f))
    return self._test_corrupt_all(1, "ran out of servers",
                                  corrupt_early=False,
                                  failure_checker=_check)


def test_corrupt_all_block_late(self):
    """Corrupting blocks after the mapupdate still fails the block hash
    tree check during retrieve."""
    def _check(f):
        self.failUnless(f.check(NotEnoughSharesError))
    return self._test_corrupt_all("share_data", "block hash tree failure",
                                  corrupt_early=False,
                                  failure_checker=_check)
def test_basic_pubkey_at_end(self):
    # we corrupt the pubkey in all but the last 'k' shares, allowing the
    # download to succeed but forcing a bunch of retries first. Note that
    # this is rather pessimistic: our Retrieve process will throw away
    # the whole share if the pubkey is bad, even though the rest of the
    # share might be good.

    self._fn._pubkey = None
    k = self._fn.get_required_shares()
    N = self._fn.get_total_shares()
    d = defer.succeed(None)
    d.addCallback(corrupt, self._storage, "pubkey",
                  shnums_to_corrupt=range(0, N-k))
    d.addCallback(lambda res: self.make_servermap())
    def _do_retrieve(servermap):
        self.failUnless(servermap.get_problems())
        self.failUnless("pubkey doesn't match fingerprint"
                        in str(servermap.get_problems()[0]))
        ver = servermap.best_recoverable_version()
        r = Retrieve(self._fn, self._storage_broker, servermap, ver)
        c = consumer.MemoryConsumer()
        return r.download(c)
    d.addCallback(_do_retrieve)
    d.addCallback(lambda mc: "".join(mc.chunks))
    d.addCallback(lambda new_contents:
        self.failUnlessEqual(new_contents, self.CONTENTS))
    # trial requires the Deferred back so it can wait on the chain
    return d
def _test_corrupt_some(self, offset, mdmf=False):
    """Corrupt only the first five shares; the download must still
    succeed by falling back to the remaining good shares."""
    if mdmf:
        d = self.publish_mdmf()
    else:
        d = defer.succeed(None)
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, offset, range(5)))
    d.addCallback(lambda ignored:
        self.make_servermap())
    def _do_retrieve(servermap):
        ver = servermap.best_recoverable_version()
        self.failUnless(ver)
        return self._fn.download_best_version()
    d.addCallback(_do_retrieve)
    d.addCallback(lambda new_contents:
        self.failUnlessEqual(new_contents, self.CONTENTS))
    return d
def test_corrupt_some(self):
    # corrupt the data of first five shares (so the servermap thinks
    # they're good but retrieve marks them as bad), so that the
    # MODE_READ set of 6 will be insufficient, forcing node.download to
    # retry with more servers.
    return self._test_corrupt_some("share_data")


def test_download_fails(self):
    """With every signature corrupted there are no recoverable versions,
    so download_best_version must fail."""
    d = corrupt(None, self._storage, "signature")
    d.addCallback(lambda ignored:
        self.shouldFail(UnrecoverableFileError, "test_download_anyway",
                        "no recoverable versions",
                        self._fn.download_best_version))
    return d
def test_corrupt_mdmf_block_hash_tree(self):
    """An MDMF block-hash-tree corruption applied before the mapupdate
    is detected as a block hash tree failure."""
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._test_corrupt_all(("block_hash_tree", 12 * 32),
                               "block hash tree failure",
                               corrupt_early=True,
                               should_succeed=False))
    return d


def test_corrupt_mdmf_block_hash_tree_late(self):
    # Note - there is no SDMF counterpart to this test, as the SDMF
    # files are guaranteed to have exactly one block, and therefore
    # the block hash tree fits within the initial read (#1240).
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._test_corrupt_all(("block_hash_tree", 12 * 32),
                               "block hash tree failure",
                               corrupt_early=False,
                               should_succeed=False))
    return d
def test_corrupt_mdmf_share_data(self):
    """Corrupted MDMF share data fails the block hash tree check."""
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        # TODO: Find out what the block size is and corrupt a
        # specific block, rather than just guessing.
        self._test_corrupt_all(("share_data", 12 * 40),
                               "block hash tree failure",
                               corrupt_early=True,
                               should_succeed=False))
    return d


def test_corrupt_some_mdmf(self):
    """Corrupting only five shares of an MDMF file still lets the
    download succeed via the remaining good shares."""
    return self._test_corrupt_some(("share_data", 12 * 40),
                                   mdmf=True)
def check_good(self, r, where):
    """Assert the check results are healthy; returns r so further
    checks can be chained on the same results."""
    self.failUnless(r.is_healthy(), where)
    return r

def check_bad(self, r, where):
    """Assert the check results are NOT healthy; returns r for
    chaining."""
    self.failIf(r.is_healthy(), where)
    return r
def check_expected_failure(self, r, expected_exception, substring, where):
    """Assert that at least one share problem in r is the expected
    exception and carries the given substring."""
    for (peerid, storage_index, shnum, f) in r.get_share_problems():
        if f.check(expected_exception):
            self.failUnless(substring in str(f),
                            "%s: substring '%s' not in '%s'" %
                            (where, substring, str(f)))
            # stop after the first matching problem
            return
    self.fail("%s: didn't see expected exception %s in problems %s" %
              (where, expected_exception, r.get_share_problems()))
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    def setUp(self):
        # every test starts with one freshly-published SDMF file
        return self.publish_one()
def test_check_good(self):
    """A freshly-published file checks as healthy."""
    d = self._fn.check(Monitor())
    d.addCallback(self.check_good, "test_check_good")
    return d

def test_check_mdmf_good(self):
    """A freshly-published MDMF file checks as healthy."""
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_mdmf_good")
    return d

def test_check_no_shares(self):
    """With every share deleted, the file checks as unhealthy."""
    for shares in self._storage._peers.values():
        shares.clear()
    d = self._fn.check(Monitor())
    d.addCallback(self.check_bad, "test_check_no_shares")
    return d
def test_check_mdmf_no_shares(self):
    """MDMF counterpart of test_check_no_shares."""
    d = self.publish_mdmf()
    def _then(ignored):
        for share in self._storage._peers.values():
            share.clear()
    d.addCallback(_then)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
    return d

def test_check_not_enough_shares(self):
    """Keeping only share 0 (fewer than k) makes the check unhealthy."""
    for shares in self._storage._peers.values():
        for shnum in shares.keys():
            if shnum > 0:
                del shares[shnum]
    d = self._fn.check(Monitor())
    d.addCallback(self.check_bad, "test_check_not_enough_shares")
    return d

def test_check_mdmf_not_enough_shares(self):
    """MDMF counterpart of test_check_not_enough_shares."""
    d = self.publish_mdmf()
    def _then(ignored):
        for shares in self._storage._peers.values():
            for shnum in shares.keys():
                if shnum > 0:
                    del shares[shnum]
    d.addCallback(_then)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    # (fixed the "not_enougH" typo in the original label)
    d.addCallback(self.check_bad, "test_check_mdmf_not_enough_shares")
    return d
def test_check_all_bad_sig(self):
    """Corrupted signatures on every share make the check unhealthy."""
    d = corrupt(None, self._storage, 1) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_all_bad_sig")
    return d

def test_check_mdmf_all_bad_sig(self):
    """MDMF counterpart of test_check_all_bad_sig."""
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, 1))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
    return d
def test_verify_mdmf_all_bad_sharedata(self):
    """Deep verification must catch share-data corruption in both the
    first and last segments, flagging all 10 shares as bad."""
    d = self.publish_mdmf()
    # On 8 of the shares, corrupt the beginning of the share data.
    # The signature check during the servermap update won't catch this.
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "share_data", range(8)))
    # On 2 of the shares, corrupt the end of the share data.
    # The signature check during the servermap update won't catch
    # this either, and the retrieval process will have to process
    # all of the segments before it notices.
    d.addCallback(lambda ignored:
        # the block hash tree comes right after the share data, so if we
        # corrupt a little before the block hash tree, we'll corrupt in the
        # last block of each share.
        corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    # The verifier should flag the file as unhealthy, and should
    # list all 10 shares as bad.
    d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
    def _check_num_bad(r):
        self.failIf(r.is_recoverable())
        smap = r.get_servermap()
        self.failUnlessEqual(len(smap.get_bad_shares()), 10)
    d.addCallback(_check_num_bad)
    return d
def test_check_all_bad_blocks(self):
    d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
    # the Checker won't notice this.. it doesn't look at actual data
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_all_bad_blocks")
    return d


def test_check_mdmf_all_bad_blocks(self):
    # as above: the plain checker doesn't examine block data, so the
    # corrupted blocks still check as healthy
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "share_data"))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor()))
    d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
    return d
def test_verify_good(self):
    """A pristine file passes deep verification."""
    d = self._fn.check(Monitor(), verify=True)
    d.addCallback(self.check_good, "test_verify_good")
    return d

def test_verify_all_bad_sig(self):
    d = corrupt(None, self._storage, 1) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_all_bad_sig")
    return d

def test_verify_one_bad_sig(self):
    d = corrupt(None, self._storage, 1, [9]) # bad sig
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_sig")
    return d
def test_verify_one_bad_block(self):
    d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
    # the Verifier *will* notice this, since it examines every byte
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_block")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "block hash tree failure",
                  "test_verify_one_bad_block")
    return d

def test_verify_one_bad_sharehash(self):
    d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "corrupt hashes",
                  "test_verify_one_bad_sharehash")
    return d
def test_verify_one_bad_encprivkey(self):
    d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "invalid privkey",
                  "test_verify_one_bad_encprivkey")
    return d

def test_verify_one_bad_encprivkey_uncheckable(self):
    d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
    readonly_fn = self._fn.get_readonly()
    # a read-only node has no way to validate the privkey
    d.addCallback(lambda ignored:
        readonly_fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good,
                  "test_verify_one_bad_encprivkey_uncheckable")
    return d
def test_verify_mdmf_good(self):
    """A pristine MDMF file passes deep verification."""
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good, "test_verify_mdmf_good")
    return d


def test_verify_mdmf_one_bad_block(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "share_data", [1]))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    # We should find one bad block here
    d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "block hash tree failure",
                  "test_verify_mdmf_one_bad_block")
    return d
def test_verify_mdmf_bad_encprivkey(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "enc_privkey", [0]))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
    d.addCallback(self.check_expected_failure,
                  CorruptShareError, "privkey",
                  "test_verify_mdmf_bad_encprivkey")
    return d


def test_verify_mdmf_bad_sig(self):
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, 1, [1]))
    d.addCallback(lambda ignored:
        self._fn.check(Monitor(), verify=True))
    d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
    return d
    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        """Like the SDMF variant: a readcap holder cannot validate the
        privkey, so the corrupted-privkey file still checks as good."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, "enc_privkey", [1]))
        d.addCallback(lambda ignored:
                      self._fn.get_readonly())
        d.addCallback(lambda fn:
                      fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
    """Tests of mutable-file repair: no-op repairs of healthy files,
    unrepairable share counts, forced merges of divergent versions,
    readcap-based repair (#625), and repair of an empty file (#1689).

    NOTE(review): this excerpt is missing a number of interior lines
    (e.g. 'return' statements, some 'def'/'else' lines); the comments
    below describe only the code that is visible here.
    """

    def get_shares(self, s):
        # Snapshot every share held by fake-storage 's'.
        all_shares = {} # maps (peerid, shnum) to share data
        for peerid in s._peers:
            shares = s._peers[peerid]
            for shnum in shares:
                data = shares[shnum]
                all_shares[ (peerid, shnum) ] = data
        # NOTE(review): a 'return all_shares' appears to be missing here.

    def copy_shares(self, ignored=None):
        # Record a snapshot so later callbacks can diff share states.
        self.old_shares.append(self.get_shares(self._storage))

    def test_repair_nop(self):
        # Repairing a healthy file should succeed, leaving every share
        # where it was but republished at seqnum+1.
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results
            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)
            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                # NOTE(review): the opening halves of these two
                # unpack_header() tuple assignments (version/seqnum/
                # root_hash/IV names) are missing from this excerpt.
                k0, N0, segsize0, datalen0,
                o0) = unpack_header(initial_shares[key])
                k1, N1, segsize1, datalen1,
                o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)

    def failIfSharesChanged(self, ignored=None):
        # Assert that the last two recorded snapshots are identical.
        old_shares = self.old_shares[-2]
        current_shares = self.old_shares[-1]
        self.failUnlessEqual(old_shares, current_shares)

    def _test_whether_repairable(self, publisher, nshares, expected_result):
        # Publish via 'publisher', delete all shares numbered >= nshares,
        # then check and repair; recoverability and repair success must
        # both equal 'expected_result'.
        # NOTE(review): the initial 'd = publisher()' and the
        # 'def _check(cr):' lines appear to be missing from this excerpt.
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum >= nshares:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        self.failIf(cr.is_healthy())
        self.failUnlessEqual(cr.is_recoverable(), expected_result)
        d.addCallback(_check)
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        d.addCallback(lambda crr: self.failUnlessEqual(crr.get_successful(), expected_result))

    def test_unrepairable_0shares(self):
        # zero surviving shares: unrecoverable, repair must fail
        return self._test_whether_repairable(self.publish_one, 0, False)

    def test_mdmf_unrepairable_0shares(self):
        return self._test_whether_repairable(self.publish_mdmf, 0, False)

    def test_unrepairable_1share(self):
        # one share is below k: still unrecoverable
        return self._test_whether_repairable(self.publish_one, 1, False)

    def test_mdmf_unrepairable_1share(self):
        return self._test_whether_repairable(self.publish_mdmf, 1, False)

    def test_repairable_5shares(self):
        # five shares suffice for recovery, so repair must succeed
        return self._test_whether_repairable(self.publish_one, 5, True)

    def test_mdmf_repairable_5shares(self):
        return self._test_whether_repairable(self.publish_mdmf, 5, True)

    def _test_whether_checkandrepairable(self, publisher, nshares, expected_result):
        """
        Like the _test_whether_repairable tests, but invoking check_and_repair
        instead of invoking check and then invoking repair.
        """
        # NOTE(review): the initial 'd = publisher()' line appears to be
        # missing from this excerpt.
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum >= nshares:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check_and_repair(Monitor()))
        d.addCallback(lambda crr: self.failUnlessEqual(crr.get_repair_successful(), expected_result))

    def test_unrepairable_0shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 0, False)

    def test_mdmf_unrepairable_0shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 0, False)

    def test_unrepairable_1share_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 1, False)

    def test_mdmf_unrepairable_1share_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 1, False)

    def test_repairable_5shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 5, True)

    def test_mdmf_repairable_5shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 5, True)

    def test_merge(self):
        # Two parallel versions at the same (highest) seqnum: repair must
        # refuse unless force=True, after which all shares converge on a
        # single recoverable version at seqnum 5.
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            # NOTE(review): a 'return d2' appears to be missing here.
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
                # NOTE(review): an 'else:' line appears to be missing
                # between these two assignments.
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
        d.addCallback(_check_smap)

    def test_non_merge(self):
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res:
                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                          1:3,3:3,5:3,7:3,9:3}))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
        d.addCallback(_check_smap)

    def get_roothash_for(self, index):
        # return the roothash for the first share we see in the saved set
        shares = self._copied_shares[index]
        for peerid in shares:
            for shnum in shares[peerid]:
                share = shares[peerid][shnum]
                (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
                          unpack_header(share)
                # NOTE(review): a 'return root_hash' appears to be
                # missing here.

    def test_check_and_repair_readcap(self):
        # we can't currently repair from a mutable readcap: #625
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        def _get_readcap(res):
            self._fn3 = self._fn.get_readonly()
            # also delete some shares
            for peerid,shares in self._storage._peers.items():
                # NOTE(review): the loop body (presumably a share
                # deletion) is missing from this excerpt.
        d.addCallback(_get_readcap)
        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
        def _check_results(crr):
            self.failUnless(ICheckAndRepairResults.providedBy(crr))
            # we should detect the unhealthy, but skip over mutable-readcap
            # repairs until #625 is fixed
            self.failIf(crr.get_pre_repair_results().is_healthy())
            self.failIf(crr.get_repair_attempted())
            self.failIf(crr.get_post_repair_results().is_healthy())
        d.addCallback(_check_results)

    def test_repair_empty(self):
        # bug 1689: delete one share of an empty mutable file, then repair.
        # In the buggy version, the check that precedes the retrieve+publish
        # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
        # privkey that repair needs.
        d = self.publish_empty_sdmf()
        def _delete_one_share(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): the condition selecting which share to
                    # delete is missing from this excerpt.
                    del shares[peerid][shnum]
        d.addCallback(_delete_one_share)
        d.addCallback(lambda ign: self._fn2.check(Monitor()))
        d.addCallback(lambda check_results: self._fn2.repair(check_results))
        # NOTE(review): the 'def _check(crr):' line appears to be missing.
        self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check)
class DevNullDictionary(dict):
    """A dict subclass that silently discards every write.

    Installed as a node cache to force the nodemaker to create distinct
    node objects for the same cap.
    """
    def __setitem__(self, key, value):
        """Ignore the assignment entirely."""
class MultipleEncodings(unittest.TestCase):
    """Verify that download copes with shares of the same file produced
    under different (k, N) encodings mixed together in storage.

    NOTE(review): this excerpt is missing several interior lines (the
    'setUp'/'_merge'/'_created' def lines, some returns); comments
    describe only what is visible here.
    """
        # NOTE(review): the enclosing 'def setUp(self):' line is missing
        # from this excerpt.
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        d.addCallback(_created)

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # NOTE(review): the body of _published is missing from this
            # excerpt (presumably it snapshots the freshly written shares).
        d.addCallback(_published)

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        # Build (or refresh) a servermap for self._fn in the given mode.
        oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),

    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.
        contents1 = "Contents for encoding 1 (3-of-10) go here"*1000
        contents2 = "Contents for encoding 2 (4-of-9) go here"*1000
        contents3 = "Contents for encoding 3 (4-of-7) go here"*1000
        # we make a retrieval object that doesn't know what encoding
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)
        # NOTE(review): the 'def _merge(res):' line is missing from this
        # excerpt.
        log.msg("merging sharelists")
        # we merge the shares from the two sets, leaving each shnum in
        # its original location, but using a share from set1 or set2
        # according to the following sequence:
        # so that neither form can be recovered until fetch [f], at which
        # point version-s1 (the 3-of-10 form) should be recoverable. If
        # the implementation latches on to the first version it sees,
        # then s2 will be recoverable at fetch [g].
        # Later, when we implement code that handles multiple versions,
        # we can use this framework to assert that all recoverable
        # versions are retrieved, and test that 'epsilon' does its job
        places = [2, 2, 3, 2, 1, 1, 1, 2]
        sb = self._storage_broker
        for peerid in sorted(sb.get_all_serverids()):
            for shnum in self._shares1.get(peerid, {}):
                if shnum < len(places):
                    which = places[shnum]
                self._storage._peers[peerid] = peers = {}
                in_1 = shnum in self._shares1[peerid]
                in_2 = shnum in self._shares2.get(peerid, {})
                in_3 = shnum in self._shares3.get(peerid, {})
                # NOTE(review): the 'if which == ...' branch headers are
                # missing; only the branch bodies are visible below.
                peers[shnum] = self._shares1[peerid][shnum]
                sharemap[shnum] = peerid
                peers[shnum] = self._shares2[peerid][shnum]
                sharemap[shnum] = peerid
                peers[shnum] = self._shares3[peerid][shnum]
                sharemap[shnum] = peerid
        # we don't bother placing any other shares
        # now sort the sequence so that share 0 is returned first
        new_sequence = [sharemap[shnum]
                        for shnum in sorted(sharemap.keys())]
        self._storage._sequence = new_sequence
        log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    """Behavior when the grid holds a mixture of share versions:
    download_best_version picks the newest recoverable one, the checker
    reports problems, and modify() replaces the stragglers.

    NOTE(review): this excerpt is missing a few interior lines (e.g. the
    'setUp' def line); comments describe only what is visible.
    """
        # NOTE(review): the enclosing 'def setUp(self):' line is missing.
        return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")
        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))

    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version
        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)
        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
class Exceptions(unittest.TestCase):
    """Sanity checks on the repr() of mutable-file exception classes."""
    def test_repr(self):
        # Each exception's repr() must contain its class name so logged
        # failures remain identifiable.
        samples = [("NeedMoreDataError", NeedMoreDataError(100, 50, 100)),
                   ("UncoordinatedWriteError", UncoordinatedWriteError())]
        for cls_name, exc in samples:
            self.failUnless(cls_name in repr(exc), repr(exc))
class SameKeyGenerator:
    """A key 'generator' that always hands back one fixed RSA keypair.

    Tests install this on a nodemaker to force a newly created mutable
    file onto a predetermined storage index.
    """
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # keysize is accepted for interface compatibility but ignored:
        # the same pair is returned every time.
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
class FirstServerGetsKilled:
    """post_call_notifier that marks a server wrapper as broken.

    NOTE(review): the guard logic (a 'done' flag ensuring only the first
    server is affected) appears to be missing from this excerpt.
    """
    def notify(self, retval, wrapper, methname):
        # Break the wrapped server so its later calls fail.
        wrapper.broken = True
class FirstServerGetsDeleted:
    """post_call_notifier that makes one server act as if its shares
    vanished after its first successful query.

    NOTE(review): this excerpt is missing the '__init__' def line, the
    first-call guard, and the code that rewrites 'retval'; comments
    describe only what is visible.
    """
        self.silenced = None
    def notify(self, retval, wrapper, methname):
        # this query will work, but later queries should think the share
        self.silenced = wrapper
        if wrapper == self.silenced:
            # silencing only applies to the share-modify RPC
            assert methname == "slot_testv_and_readv_and_writev"
class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    """Failure-injection tests: broken or vanishing servers, stale
    servermaps, and privkey-fetch errors during mutable-file operations."""
    def do_publish_surprise(self, version):
        """Publish with a stale servermap after a newer write has landed;
        the stale publish must raise UncoordinatedWriteError.

        NOTE(review): the 'def _created(n):' wrapper, the servermap stash
        in _got_smap1, and the trailing 'return d' appear to be missing
        from this excerpt.
        """
        self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"),
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        def _got_smap1(smap):
            # stash the old state of the file
        d.addCallback(_got_smap1)
        # then modify the file, leaving the old map untouched
        d.addCallback(lambda res: log.msg("starting winning write"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # now attempt to modify the file with the old servermap. This
        # will look just like an uncoordinated write, in which every
        # single share got updated between our mapupdate and our publish
        d.addCallback(lambda res: log.msg("starting doomed write"))
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "test_publish_surprise", None,
                                      MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)
2467 def test_publish_surprise_sdmf(self):
2468 return self.do_publish_surprise(SDMF_VERSION)
2470 def test_publish_surprise_mdmf(self):
2471 return self.do_publish_surprise(MDMF_VERSION)
    def test_retrieve_surprise(self):
        """Download via a stale servermap after the file changed; the
        retrieve must fail with NotEnoughSharesError.

        NOTE(review): the 'def _created(n):' wrapper, the set_up_grid
        call, the servermap stash, the download_version invocation and
        the trailing 'return d' appear to be missing from this excerpt.
        """
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"*4000))
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_READ))
        def _got_smap1(smap):
            # stash the old state of the file
        d.addCallback(_got_smap1)
        # then modify the file, leaving the old map untouched
        d.addCallback(lambda res: log.msg("starting winning write"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # now attempt to retrieve the old version with the old servermap.
        # This will look like someone has changed the file since we
        # updated the servermap.
        d.addCallback(lambda res: log.msg("starting doomed read"))
        d.addCallback(lambda res:
                      self.shouldFail(NotEnoughSharesError,
                                      "test_retrieve_surprise",
                                      "ran out of servers: have 0 of 1",
                                      self.old_map.best_recoverable_version(),
        d.addCallback(_created)
    def test_unexpected_shares(self):
        # upload the file, take a servermap, shut down one of the servers,
        # upload it again (causing shares to appear on a new server), then
        # upload using the old servermap. The last upload should fail with an
        # UncoordinatedWriteError, because of the shares that didn't appear
        # NOTE(review): the 'def _created(n):' wrapper, the servermap
        # stash, and the trailing 'return d' appear to be missing from
        # this excerpt.
        self.basedir = "mutable/Problems/test_unexpected_shares"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        def _got_smap1(smap):
            # stash the old state of the file
            # now shut down one of the servers
            peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
            self.g.remove_server(peer0)
            # then modify the file, leaving the old map untouched
            log.msg("starting winning write")
            return n.overwrite(MutableData("contents 2"))
        d.addCallback(_got_smap1)
        # now attempt to modify the file with the old servermap. This
        # will look just like an uncoordinated write, in which every
        # single share got updated between our mapupdate and our publish
        d.addCallback(lambda res: log.msg("starting doomed write"))
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "test_surprise", None,
                                      MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)
    def test_multiply_placed_shares(self):
        """After removing/replacing servers and overwriting twice, every
        share copy must end up at the same single version.

        NOTE(review): the 'def _created(n):' wrapper and the trailing
        'return d' appear to be missing from this excerpt.
        """
        self.basedir = "mutable/Problems/test_multiply_placed_shares"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # remove one of the servers and reupload the file.
        servers = self.g.get_all_serverids()
        self.ss = self.g.remove_server(servers[len(servers)-1])
        new_server = self.g.make_server(len(servers)-1)
        self.g.add_server(len(servers)-1, new_server)
        return self._node.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data: MutableData(data))
        d.addCallback(lambda data: self._node.overwrite(data))
        # restore the server we removed earlier, then download+upload
        def _overwritten(ign):
            self.g.add_server(len(self.g.servers_by_number), self.ss)
            return self._node.download_best_version()
        d.addCallback(_overwritten)
        d.addCallback(lambda data: MutableData(data))
        d.addCallback(lambda data: self._node.overwrite(data))
        d.addCallback(lambda ignored:
                      self._node.get_servermap(MODE_CHECK))
        def _overwritten_again(smap):
            # Make sure that all shares were updated by making sure that
            # there aren't any other versions in the sharemap.
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
        d.addCallback(_overwritten_again)
    def test_bad_server(self):
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        # NOTE(review): the set_up_grid call, the 'def _got_node(n):'
        # wrapper, and the trailing 'return d' appear to be missing from
        # this excerpt.
        self.basedir = "mutable/Problems/test_bad_server"
        nm = self.g.clients[0].nodemaker
        # to make sure that one of the initial peers is broken, we have to
        # get creative. We create an RSA key and compute its storage-index.
        # Then we make a KeyGenerator that always returns that one key, and
        # use it to create the mutable file. This will get easier when we can
        # use #467 static-server-selection to disable permutation and force
        # the choice of server for share[0].
        d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
        def _got_key( (pubkey, privkey) ):
            # Pin the keypair so the storage index is known in advance.
            nm.key_generator = SameKeyGenerator(pubkey, privkey)
            pubkey_s = pubkey.serialize()
            privkey_s = privkey.serialize()
            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                        ssk_pubkey_fingerprint_hash(pubkey_s))
            self._storage_index = u.get_storage_index()
        d.addCallback(_got_key)
        def _break_peer0(res):
            # Break the server that permutes first for this storage index.
            si = self._storage_index
            servers = nm.storage_broker.get_servers_for_psi(si)
            self.g.break_server(servers[0].get_serverid())
            self.server1 = servers[1]
        d.addCallback(_break_peer0)
        # now "create" the file, using the pre-established key, and let the
        # initial publish finally happen
        d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
        # that ought to work
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break the second peer
        def _break_peer1(res):
            self.g.break_server(self.server1.get_serverid())
        d.addCallback(_break_peer1)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        def _explain_error(f):
            # Surface the underlying first_error for easier debugging.
            if f.check(NotEnoughServersError):
                print "first_error:", f.value.first_error
        d.addErrback(_explain_error)
        d.addCallback(_got_node)
    def test_bad_server_overlap(self):
        # like test_bad_server, but with no extra unused servers to fall back
        # upon. This means that we must re-use a server which we've already
        # used. If we don't remember the fact that we sent them one share
        # already, we'll mistakenly think we're experiencing an
        # UncoordinatedWriteError.
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        # NOTE(review): the set_up_grid call and the trailing 'return d'
        # appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_bad_server_overlap"
        nm = self.g.clients[0].nodemaker
        sb = nm.storage_broker
        peerids = [s.get_serverid() for s in sb.get_connected_servers()]
        self.g.break_server(peerids[0])
        d = nm.create_mutable_file(MutableData("contents 1"))
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break one of the remaining servers
        def _break_second_server(res):
            self.g.break_server(peerids[1])
        d.addCallback(_break_second_server)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(_created)
    def test_publish_all_servers_bad(self):
        # Break all servers: the publish should fail
        # NOTE(review): the set_up_grid call and the trailing 'return d'
        # appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_publish_all_servers_bad"
        nm = self.g.clients[0].nodemaker
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().broken = True
        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_all_servers_bad",
                            "ran out of good servers",
                            nm.create_mutable_file, MutableData("contents"))
    def test_publish_no_servers(self):
        # no servers at all: the publish should fail
        # NOTE(review): a trailing 'return d' appears to be missing from
        # this excerpt.
        self.basedir = "mutable/Problems/test_publish_no_servers"
        self.set_up_grid(num_servers=0)
        nm = self.g.clients[0].nodemaker
        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_no_servers",
                            "Ran out of non-bad servers",
                            nm.create_mutable_file, MutableData("contents"))
    def test_privkey_query_error(self):
        # when a servermap is updated with MODE_WRITE, it tries to get the
        # privkey. Something might go wrong during this query attempt.
        # Exercise the code in _privkey_query_failed which tries to handle
        # NOTE(review): the 'def _created(n):' line and the trailing
        # 'return d' appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_privkey_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        nm._node_cache = DevNullDictionary() # disable the nodecache
        # we need some contents that are large enough to push the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 2000 # about 50KB
        LARGE_uploadable = MutableData(LARGE)
        d = nm.create_mutable_file(LARGE_uploadable)
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        # When a mapupdate is performed on a node that doesn't yet know
        # the privkey, a short read is sent to a batch of servers, to get
        # the verinfo and (hopefully, if the file is short enough) the
        # encprivkey. Our file is too large to let this first read
        # contain the encprivkey. Each non-encprivkey-bearing response
        # that arrives (until the node gets the encprivkey) will trigger
        # a second read to specifically read the encprivkey.
        # So, to exercise this case:
        # 1. notice which server gets a read() call first
        # 2. tell that server to start throwing errors
        killer = FirstServerGetsKilled()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = killer.notify
        d.addCallback(_created)
        # now we update a servermap from a new node (which doesn't have the
        # privkey yet, forcing it to use a separate privkey query). Note that
        # the map-update will succeed, since we'll just get a copy from one
        # of the other shares.
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
    def test_privkey_query_missing(self):
        # like test_privkey_query_error, but the shares are deleted by the
        # second query, instead of raising an exception.
        # NOTE(review): the 'def _created(n):' line and the trailing
        # 'return d' appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_privkey_query_missing"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        LARGE = "These are Larger contents" * 2000 # about 50KiB
        LARGE_uploadable = MutableData(LARGE)
        nm._node_cache = DevNullDictionary() # disable the nodecache
        d = nm.create_mutable_file(LARGE_uploadable)
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        deleter = FirstServerGetsDeleted()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = deleter.notify
        d.addCallback(_created)
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
    def test_block_and_hash_query_error(self):
        # This tests for what happens when a query to a remote server
        # fails in either the hash validation step or the block getting
        # step (because of batching, this is the same actual query).
        # We need to have the storage server persist up until the point
        # that its prefix is validated, then suddenly die. This
        # exercises some exception handling code in Retrieve.
        # NOTE(review): the 'def _created(node):' body and the trailing
        # 'return d' appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_block_and_hash_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        CONTENTS = "contents" * 2000
        CONTENTS_uploadable = MutableData(CONTENTS)
        d = nm.create_mutable_file(CONTENTS_uploadable)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self._node.get_servermap(MODE_READ))
        def _then(servermap):
            # we have our servermap. Now we set up the servers like the
            # tests above -- the first one that gets a read call should
            # start throwing errors, but only after returning its prefix
            # for validation. Since we'll download without fetching the
            # private key, the next query to the remote server will be
            # for either a block and salt or for hashes, either of which
            # will exercise the error handling code.
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
            ver = servermap.best_recoverable_version()
            return self._node.download_version(servermap, ver)
        d.addCallback(_then)
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, CONTENTS))
    def test_1654(self):
        # test that the Retrieve object unconditionally verifies the block
        # hash tree root for mutable shares. The failure mode is that
        # carefully crafted shares can cause undetected corruption (the
        # retrieve appears to finish successfully, but the result is
        # corrupted). When fixed, these shares always cause a
        # CorruptShareError, which results in NotEnoughSharesError in this
        # two-server case.
        self.basedir = "mutable/Problems/test_1654"
        self.set_up_grid(num_servers=2)
        cap = uri.from_string(TEST_1654_CAP)
        si = cap.get_storage_index()
        # install the two canned shares directly into the servers' storage
        # NOTE(review): the second argument line of the fileutil.write()
        # call below (the decoded sharedata) is elided from this excerpt.
        for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
            sharedata = base64.b64decode(share)
            storedir = self.get_serverdir(shnum)
            storage_path = os.path.join(storedir, "shares",
                                        storage_index_to_dir(si))
            fileutil.make_dirs(storage_path)
            fileutil.write(os.path.join(storage_path, "%d" % shnum),
        nm = self.g.clients[0].nodemaker
        n = nm.create_from_cap(TEST_1654_CAP)
        # to exercise the problem correctly, we must ensure that sh0 is
        # processed first, and sh1 second. NoNetworkGrid has facilities to
        # stall the first request from a single server, but it's not
        # currently easy to extend that to stall the second request (mutable
        # retrievals will see two: first the mapupdate, then the fetch).
        # However, repeated executions of this run without the #1654 fix
        # suggests that we're failing reliably even without explicit stalls,
        # probably because the servers are queried in a fixed order. So I'm
        # ok with relying upon that.
        d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
                            "ran out of servers",
                            n.download_best_version)
# Write-cap matching the two canned shares below (ticket #1654 regression data).
TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"

# base64-encoded mutable-share container (shnum 0) used by test_1654; the
# share data is deliberately crafted to trigger the #1654 failure mode.
TEST_1654_SH0 = """\
VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
+QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
72mXGlqyLyWYuAAAAAA="""

# companion canned share (shnum 1) for test_1654
TEST_1654_SH1 = """\
VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
/KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
+QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
72mXGlqyLyWYuAAAAAA="""
class FileHandle(unittest.TestCase):
    """Unit tests for MutableFileHandle (file-like uploadable wrapper)."""
    # NOTE(review): the 'def setUp(self):' line is elided from this excerpt;
    # the three assignments below are that method's body.
    self.test_data = "Test Data" * 50000
    self.sio = StringIO(self.test_data)
    self.uploadable = MutableFileHandle(self.sio)
    def test_filehandle_read(self):
        # reading the handle chunk-by-chunk must reproduce the source data
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        # NOTE(review): the chunk_size assignment is elided from this excerpt.
        for i in xrange(0, len(self.test_data), chunk_size):
            # read() returns a list of strings; join before comparing
            data = self.uploadable.read(chunk_size)
            data = "".join(data)
            # NOTE(review): the 'start = i' line is elided here.
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])
2965 def test_filehandle_get_size(self):
2966 self.basedir = "mutable/FileHandle/test_filehandle_get_size"
2967 actual_size = len(self.test_data)
2968 size = self.uploadable.get_size()
2969 self.failUnlessEqual(size, actual_size)
    def test_filehandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        # NOTE(review): the chunk_size assignment is elided from this excerpt.
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
        # querying the size must not move the read cursor
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))
        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        # NOTE(review): the 'start = chunk_size' line is elided here.
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
    def test_filehandle_file(self):
        # Make sure that the MutableFileHandle works on a file as well
        # as a StringIO object, since in some cases it will be asked to
        # deal with a real file object.
        self.basedir = self.mktemp()
        # necessary? What am I doing wrong here?
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        # NOTE(review): the f.close() between the write and the re-open is
        # elided from this excerpt.
        f = open(f_path, "r")
        uploadable = MutableFileHandle(f)
        data = uploadable.read(len(self.test_data))
        self.failUnlessEqual("".join(data), self.test_data)
        size = uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))
    def test_close(self):
        # Make sure that the MutableFileHandle closes its handle when
        # asked to: closing the uploadable must close the wrapped StringIO.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
class DataHandle(unittest.TestCase):
    """Unit tests for MutableData (in-memory string uploadable)."""
    # NOTE(review): the 'def setUp(self):' line is elided from this excerpt;
    # the two assignments below are that method's body.
    self.test_data = "Test Data" * 50000
    self.uploadable = MutableData(self.test_data)
    def test_datahandle_read(self):
        # reading the handle chunk-by-chunk must reproduce the source data
        # NOTE(review): the chunk_size assignment is elided from this excerpt.
        for i in xrange(0, len(self.test_data), chunk_size):
            # read() returns a list of strings; join before comparing
            data = self.uploadable.read(chunk_size)
            data = "".join(data)
            # NOTE(review): the 'start = i' line is elided here.
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])
3034 def test_datahandle_get_size(self):
3035 actual_size = len(self.test_data)
3036 size = self.uploadable.get_size()
3037 self.failUnlessEqual(size, actual_size)
    def test_datahandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        # NOTE(review): the chunk_size assignment is elided from this excerpt.
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
        # querying the size must not move the read cursor
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))
        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        # NOTE(review): the 'start = chunk_size' line is elided here.
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
    # NOTE(review): the continuation of the class statement (the remaining
    # base classes) and the 'def setUp(self):' line are elided from this
    # excerpt; the assignments below are setUp's body.
    GridTestMixin.setUp(self)
    self.basedir = self.mktemp()
    self.c = self.g.clients[0]
    self.nm = self.c.nodemaker
    self.data = "test data" * 100000 # about 900 KiB; MDMF
    self.small_data = "test data" * 10 # about 90 B; SDMF
    def do_upload_mdmf(self):
        # upload self.data as an MDMF file and sanity-check the new node
        d = self.nm.create_mutable_file(MutableData(self.data),
                                        version=MDMF_VERSION)
        # NOTE(review): the 'def _then(n):' header and the lines saving the
        # node (presumably self.mdmf_node = n) are elided from this excerpt.
        assert isinstance(n, MutableFileNode)
        assert n._protocol_version == MDMF_VERSION
        d.addCallback(_then)
    def do_upload_sdmf(self):
        # upload self.small_data as a (default) SDMF file and sanity-check it
        d = self.nm.create_mutable_file(MutableData(self.small_data))
        # NOTE(review): the 'def _then(n):' header and the lines saving the
        # node (presumably self.sdmf_node = n) are elided from this excerpt.
        assert isinstance(n, MutableFileNode)
        assert n._protocol_version == SDMF_VERSION
        d.addCallback(_then)
    def do_upload_empty_sdmf(self):
        # upload a zero-length SDMF file; saved as self.sdmf_zero_length_node
        d = self.nm.create_mutable_file(MutableData(""))
        # NOTE(review): the 'def _then(n):' header is elided from this excerpt.
        assert isinstance(n, MutableFileNode)
        self.sdmf_zero_length_node = n
        assert n._protocol_version == SDMF_VERSION
        d.addCallback(_then)
    def do_upload(self):
        # convenience: upload both the MDMF and the SDMF test files
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.do_upload_sdmf())
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def test_debug(self):
        # exercise the CLI debug helpers (find-shares, dump-share,
        # catalog-shares) against a freshly-uploaded MDMF file
        d = self.do_upload_mdmf()
        # NOTE(review): the 'def _debug(n):' header is elided from this
        # excerpt; the lines below are that callback's body.
        fso = debug.FindSharesOptions()
        storage_index = base32.b2a(n.get_storage_index())
        fso.si_s = storage_index
        # NOTE(review): the 'for ...' clause of this list comprehension is
        # elided from this excerpt.
        fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
                        in self.iterate_servers()]
        fso.stdout = StringIO()
        fso.stderr = StringIO()
        debug.find_shares(fso)
        sharefiles = fso.stdout.getvalue().splitlines()
        # find-shares must locate one file per share, i.e. "n" of them
        expected = self.nm.default_encoding_parameters["n"]
        self.failUnlessEqual(len(sharefiles), expected)

        # dump the first share and check the human-readable fields
        do = debug.DumpOptions()
        do["filename"] = sharefiles[0]
        do.stdout = StringIO()
        debug.dump_share(do)
        output = do.stdout.getvalue()
        lines = set(output.splitlines())
        self.failUnless("Mutable slot found:" in lines, output)
        self.failUnless(" share_type: MDMF" in lines, output)
        self.failUnless(" num_extra_leases: 0" in lines, output)
        self.failUnless(" MDMF contents:" in lines, output)
        self.failUnless(" seqnum: 1" in lines, output)
        self.failUnless(" required_shares: 3" in lines, output)
        self.failUnless(" total_shares: 10" in lines, output)
        self.failUnless(" segsize: 131073" in lines, output)
        self.failUnless(" datalen: %d" % len(self.data) in lines, output)
        vcap = n.get_verify_cap().to_string()
        self.failUnless(" verify-cap: %s" % vcap in lines, output)

        # catalog-shares must recognize every share as MDMF
        cso = debug.CatalogSharesOptions()
        cso.nodedirs = fso.nodedirs
        cso.stdout = StringIO()
        cso.stderr = StringIO()
        debug.catalog_shares(cso)
        shares = cso.stdout.getvalue().splitlines()
        oneshare = shares[0] # all shares should be MDMF
        self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
        self.failUnless(oneshare.startswith("MDMF"), oneshare)
        fields = oneshare.split()
        self.failUnlessEqual(fields[0], "MDMF")
        self.failUnlessEqual(fields[1], storage_index)
        self.failUnlessEqual(fields[2], "3/10")
        self.failUnlessEqual(fields[3], "%d" % len(self.data))
        # NOTE(review): 'fields[3]' as the failure-message argument below
        # looks like it should be 'fields[4]' — confirm before changing.
        self.failUnless(fields[4].startswith("#1:"), fields[3])
        # the rest of fields[4] is the roothash, which depends upon
        # encryption salts and is not constant. fields[5] is the
        # remaining time on the longest lease, which is timing dependent.
        # The rest of the line is the quoted pathname to the share.
        d.addCallback(_debug)
    def test_get_sequence_number(self):
        # freshly-created mutable files must start at sequence number 1,
        # and an overwrite must bump it to 2, for both MDMF and SDMF
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 1))
        d.addCallback(lambda ignored:
            self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 1))
        # Now update. The sequence number in both cases should be 1 in
        # excess of what it was before.
        def _do_update(ignored):
            new_data = MutableData("foo bar baz" * 100000)
            new_small_data = MutableData("foo bar baz" * 10)
            d1 = self.mdmf_node.overwrite(new_data)
            d2 = self.sdmf_node.overwrite(new_small_data)
            dl = gatherResults([d1, d2])
            # NOTE(review): the 'return dl' line is elided from this excerpt.
        d.addCallback(_do_update)
        d.addCallback(lambda ignored:
            self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 2))
        d.addCallback(lambda ignored:
            self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 2))
    def test_cap_after_upload(self):
        # If we create a new mutable file and upload things to it, and
        # it's an MDMF file, we should get an MDMF cap back from that
        # file and should be able to use that.
        # That's essentially what MDMF node is, so just check that.
        d = self.do_upload_mdmf()
        # NOTE(review): the 'def _then(ign):' header is elided from this
        # excerpt; the lines below are that callback's body.
        mdmf_uri = self.mdmf_node.get_uri()
        cap = uri.from_string(mdmf_uri)
        self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
        # the read-only URI must likewise parse as an MDMF readcap
        readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
        cap = uri.from_string(readonly_mdmf_uri)
        self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
        d.addCallback(_then)
    def test_mutable_version(self):
        # assert that getting parameters from the IMutableVersion object
        # gives us the same data as getting them from the filenode itself
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
        def _check_mdmf(bv):
            # NOTE(review): the line binding 'n' (presumably
            # n = self.mdmf_node) is elided from this excerpt.
            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
            self.failIf(bv.is_readonly())
        d.addCallback(_check_mdmf)
        d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
        def _check_sdmf(bv):
            # NOTE(review): the line binding 'n' (presumably
            # n = self.sdmf_node) is elided from this excerpt.
            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
            self.failIf(bv.is_readonly())
        d.addCallback(_check_sdmf)
    def test_get_readonly_version(self):
        # best-readable versions, and mutable versions obtained through a
        # read-only node, must all report is_readonly() == True
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))

        # Attempting to get a mutable version of a mutable file from a
        # filenode initialized with a readcap should return a readonly
        # version of that same node.
        d.addCallback(lambda ign: self.mdmf_node.get_readonly())
        d.addCallback(lambda ro: ro.get_best_mutable_version())
        d.addCallback(lambda v: self.failUnless(v.is_readonly()))

        d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))

        d.addCallback(lambda ign: self.sdmf_node.get_readonly())
        d.addCallback(lambda ro: ro.get_best_mutable_version())
        d.addCallback(lambda v: self.failUnless(v.is_readonly()))
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def test_toplevel_overwrite(self):
        # overwrite() on the node itself must fully replace the contents,
        # for both the MDMF and the SDMF file
        new_data = MutableData("foo bar baz" * 100000)
        new_small_data = MutableData("foo bar baz" * 10)
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "foo bar baz" * 100000))
        d.addCallback(lambda ignored:
            self.sdmf_node.overwrite(new_small_data))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "foo bar baz" * 10))
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def test_toplevel_modify(self):
        # modify() on the node itself must apply the modifier function to
        # the existing contents, for both the MDMF and the SDMF file
        d = self.do_upload()
        def modifier(old_contents, servermap, first_time):
            return old_contents + "modified"
        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        d.addCallback(lambda ignored:
            self.sdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def test_version_modify(self):
        # TODO: When we can publish multiple versions, alter this test
        # to modify a version other than the best usable version, then
        # test to see that the best recoverable version is that.
        d = self.do_upload()
        def modifier(old_contents, servermap, first_time):
            return old_contents + "modified"
        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        d.addCallback(lambda ignored:
            self.sdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def test_download_version(self):
        d = self.publish_multiple()
        # We want to have two recoverable versions on the grid.
        d.addCallback(lambda res:
            self._set_versions({0:0,2:0,4:0,6:0,8:0,
                                1:1,3:1,5:1,7:1,9:1}))
        # Now try to download each version. We should get the plaintext
        # associated with that version.
        d.addCallback(lambda ignored:
            self._fn.get_servermap(mode=MODE_READ))
        def _got_servermap(smap):
            versions = smap.recoverable_versions()
            assert len(versions) == 2
            self.servermap = smap
            self.version1, self.version2 = versions
            assert self.version1 != self.version2
            # the verinfo tuple starts with the seqnum; CONTENTS is indexed
            # from 0, so subtract 1
            self.version1_seqnum = self.version1[0]
            self.version2_seqnum = self.version2[0]
            self.version1_index = self.version1_seqnum - 1
            self.version2_index = self.version2_seqnum - 1
        d.addCallback(_got_servermap)
        d.addCallback(lambda ignored:
            self._fn.download_version(self.servermap, self.version1))
        # NOTE(review): the closing 'results))' argument lines of the two
        # failUnlessEqual calls below are elided from this excerpt.
        d.addCallback(lambda results:
            self.failUnlessEqual(self.CONTENTS[self.version1_index],
        d.addCallback(lambda ignored:
            self._fn.download_version(self.servermap, self.version2))
        d.addCallback(lambda results:
            self.failUnlessEqual(self.CONTENTS[self.version2_index],
    def test_download_nonexistent_version(self):
        # asking for a version that is not on the grid must raise
        # UnrecoverableFileError
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
        def _set_servermap(servermap):
            self.servermap = servermap
        d.addCallback(_set_servermap)
        # NOTE(review): the substring argument of shouldFail and the bogus
        # version argument of download_version are elided from this excerpt.
        d.addCallback(lambda ignored:
            self.shouldFail(UnrecoverableFileError, "nonexistent version",
                            self.mdmf_node.download_version, self.servermap,
    def test_partial_read(self):
        # exercise version.read(consumer, offset, length) across several
        # fencepost cases around the segment boundary
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        # (name, offset, length) triples; next_multiple() lands the offset
        # exactly on a 3-share-aligned segment boundary
        # NOTE(review): the list-closing ']' line is elided from this excerpt.
        modes = [("start_on_segment_boundary",
                  mathutil.next_multiple(128 * 1024, 3), 50),
                 ("ending_one_byte_after_segment_boundary",
                  mathutil.next_multiple(128 * 1024, 3)-50, 51),
                 ("zero_length_at_start", 0, 0),
                 ("zero_length_in_middle", 50, 0),
                 ("zero_length_at_segment_boundary",
                  mathutil.next_multiple(128 * 1024, 3), 0),
        for (name, offset, length) in modes:
            d.addCallback(self._do_partial_read, name, offset, length)
        # then read only a few bytes at a time, and see that the results are
        # what we expect.
        def _read_data(version):
            c = consumer.MemoryConsumer()
            d2 = defer.succeed(None)
            # bind i as a default arg so each lambda sees its own offset
            for i in xrange(0, len(self.data), 10000):
                d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(self.data, "".join(c.chunks)))
            # NOTE(review): the 'return d2' line is elided from this excerpt.
        d.addCallback(_read_data)
    def _do_partial_read(self, version, name, offset, length):
        # read [offset:offset+length] through a MemoryConsumer and compare
        # against the same slice of self.data; 'name' labels the failure
        c = consumer.MemoryConsumer()
        d = version.read(c, offset, length)
        expected = self.data[offset:offset+length]
        d.addCallback(lambda ignored: "".join(c.chunks))
        def _check(results):
            if results != expected:
                # NOTE(review): an additional diagnostic print line is
                # elided from this excerpt.
                print "got: %s ... %s" % (results[:20], results[-20:])
                print "exp: %s ... %s" % (expected[:20], expected[-20:])
                self.fail("results[%s] != expected" % name)
            return version # daisy-chained to next call
        d.addCallback(_check)
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def _test_read_and_download(self, node, expected):
        # verify both access paths return 'expected': a streaming read()
        # through a consumer, and download_best_version()
        d = node.get_best_readable_version()
        def _read_data(version):
            c = consumer.MemoryConsumer()
            d2 = defer.succeed(None)
            d2.addCallback(lambda ignored: version.read(c))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(expected, "".join(c.chunks)))
            # NOTE(review): the 'return d2' line is elided from this excerpt.
        d.addCallback(_read_data)
        d.addCallback(lambda ignored: node.download_best_version())
        d.addCallback(lambda data: self.failUnlessEqual(expected, data))
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def test_read_and_download_mdmf(self):
        # read/download round-trip for the large MDMF file
        d = self.do_upload_mdmf()
        d.addCallback(self._test_read_and_download, self.data)
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def test_read_and_download_sdmf(self):
        # read/download round-trip for the small SDMF file
        d = self.do_upload_sdmf()
        d.addCallback(self._test_read_and_download, self.small_data)
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
    def test_read_and_download_sdmf_zero_length(self):
        # read/download round-trip for the empty SDMF file
        d = self.do_upload_empty_sdmf()
        d.addCallback(self._test_read_and_download, "")
        # NOTE(review): the trailing 'return d' is elided from this excerpt.
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    """Tests for in-place update() of mutable files."""
    timeout = 400 # these tests are too big, 120s is not enough on slow
                  # platforms
    # NOTE(review): the 'def setUp(self):' line is elided from this excerpt;
    # the assignments below are that method's body.
    GridTestMixin.setUp(self)
    self.basedir = self.mktemp()
    self.c = self.g.clients[0]
    self.nm = self.c.nodemaker
    self.data = "testdata " * 100000 # about 900 KiB; MDMF
    self.small_data = "test data" * 10 # about 90 B; SDMF
    def do_upload_sdmf(self):
        # upload two SDMF files: one with default encoding, one with the
        # maximum 255 shares (saved as self.sdmf_max_shares_node)
        d = self.nm.create_mutable_file(MutableData(self.small_data))
        # NOTE(review): the 'def _then(n):' header and the line saving the
        # first node (presumably self.sdmf_node = n) are elided here.
        assert isinstance(n, MutableFileNode)
        # Make SDMF node that has 255 shares.
        self.nm.default_encoding_parameters['n'] = 255
        self.nm.default_encoding_parameters['k'] = 127
        return self.nm.create_mutable_file(MutableData(self.small_data))
        d.addCallback(_then)
        # NOTE(review): the 'def _then2(n):' header is elided here.
        assert isinstance(n, MutableFileNode)
        self.sdmf_max_shares_node = n
        d.addCallback(_then2)
    def do_upload_mdmf(self):
        # upload two MDMF files: one with default encoding, one with the
        # maximum 255 shares (saved as self.mdmf_max_shares_node)
        d = self.nm.create_mutable_file(MutableData(self.data),
                                        version=MDMF_VERSION)
        # NOTE(review): the 'def _then(n):' header and the line saving the
        # first node (presumably self.mdmf_node = n) are elided here.
        assert isinstance(n, MutableFileNode)
        # Make MDMF node that has 255 shares.
        self.nm.default_encoding_parameters['n'] = 255
        self.nm.default_encoding_parameters['k'] = 127
        return self.nm.create_mutable_file(MutableData(self.data),
                                           version=MDMF_VERSION)
        d.addCallback(_then)
        # NOTE(review): the 'def _then2(n):' header is elided here.
        assert isinstance(n, MutableFileNode)
        self.mdmf_max_shares_node = n
        d.addCallback(_then2)
    def _test_replace(self, offset, new_data):
        # splice new_data into self.data at 'offset', run update() on both
        # the normal and the 255-share MDMF node, and verify that the
        # downloaded contents match the expected spliced result
        expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
        d0 = self.do_upload_mdmf()
        # NOTE(review): the 'def _run(ign):' header is elided from this
        # excerpt; the lines below are that callback's body.
        d = defer.succeed(None)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                node.get_best_mutable_version())
            d.addCallback(lambda mv:
                mv.update(MutableData(new_data), offset))
            d.addCallback(lambda ign, node=node:
                node.download_best_version())
            def _check(results):
                if results != expected:
                    # NOTE(review): an additional diagnostic print line is
                    # elided from this excerpt.
                    print "got: %s ... %s" % (results[:20], results[-20:])
                    print "exp: %s ... %s" % (expected[:20], expected[-20:])
                    self.fail("results != expected")
            d.addCallback(_check)
        # NOTE(review): the 'return d' line is elided from this excerpt.
        d0.addCallback(_run)
3501 def test_append(self):
3502 # We should be able to append data to a mutable file and get
3504 return self._test_replace(len(self.data), "appended")
3506 def test_replace_middle(self):
3507 # We should be able to replace data in the middle of a mutable
3508 # file and get what we expect back.
3509 return self._test_replace(100, "replaced")
3511 def test_replace_beginning(self):
3512 # We should be able to replace data at the beginning of the file
3513 # without truncating the file
3514 return self._test_replace(0, "beginning")
3516 def test_replace_segstart1(self):
3517 return self._test_replace(128*1024+1, "NNNN")
3519 def test_replace_zero_length_beginning(self):
3520 return self._test_replace(0, "")
3522 def test_replace_zero_length_middle(self):
3523 return self._test_replace(50, "")
3525 def test_replace_zero_length_segstart1(self):
3526 return self._test_replace(128*1024+1, "")
3528 def test_replace_and_extend(self):
3529 # We should be able to replace data in the middle of a mutable
3530 # file and extend that mutable file and get what we expect.
3531 return self._test_replace(100, "modified " * 100000)
    def _check_differences(self, got, expected):
        # displaying arbitrary file corruption is tricky for a
        # 1MB file of repeating data, so look for likely places
        # with problems and display them separately
        # (the replaced spans are uppercase letters, so [A-Z]+ finds them)
        gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
        expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
        gotspans = ["%d:%d=%s" % (start,end,got[start:end])
                    for (start,end) in gotmods]
        expspans = ["%d:%d=%s" % (start,end,expected[start:end])
                    for (start,end) in expmods]
        #print "expecting: %s" % expspans
        # NOTE(review): the SEGSIZE definition and the enclosing
        # 'if got != expected:' guard are elided from this excerpt; the
        # diagnostics below only run when the data differs.
        print "differences:"
        for segnum in range(len(expected)//SEGSIZE):
            start = segnum * SEGSIZE
            end = (segnum+1) * SEGSIZE
            got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
            exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
            if got_ends != exp_ends:
                print "expected[%d]: %s" % (start, exp_ends)
                print "got [%d]: %s" % (start, got_ends)
        if expspans != gotspans:
            print "expected: %s" % expspans
            print "got : %s" % gotspans
        # dump both images to disk for offline comparison, then fail
        open("EXPECTED","wb").write(expected)
        open("GOT","wb").write(got)
        print "wrote data to EXPECTED and GOT"
        self.fail("didn't get expected data")
    def test_replace_locations(self):
        # exercise fencepost conditions: two-byte updates at offsets just
        # around the first and second segment boundaries
        # NOTE(review): the SEGSIZE assignment (used below) is elided from
        # this excerpt.
        suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
        letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        d0 = self.do_upload_mdmf()
        # NOTE(review): the 'def _run(ign):' header is elided from this
        # excerpt; the lines below are that callback's body.
        expected = self.data
        d = defer.succeed(None)
        for offset in suspects:
            new_data = letters.next()*2 # "AA", then "BB", etc
            expected = expected[:offset]+new_data+expected[offset+2:]
            d.addCallback(lambda ign:
                self.mdmf_node.get_best_mutable_version())
            def _modify(mv, offset=offset, new_data=new_data):
                # close over 'offset','new_data'
                md = MutableData(new_data)
                return mv.update(md, offset)
            d.addCallback(_modify)
            d.addCallback(lambda ignored:
                self.mdmf_node.download_best_version())
            d.addCallback(self._check_differences, expected)
        # NOTE(review): the 'return d' line is elided from this excerpt.
        d0.addCallback(_run)
# Same fencepost-boundary replacement test as test_replace_locations,
# but run against self.mdmf_max_shares_node (an MDMF file uploaded with
# the maximum share count) instead of the default-encoded node.
3592 def test_replace_locations_max_shares(self):
3593 # exercise fencepost conditions
# NOTE(review): original line 3594 is not visible in this chunk.
# suspects: offsets just before/at the first two segment boundaries
3595 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3596 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3597 d0 = self.do_upload_mdmf()
# NOTE(review): original line 3598 (presumably 'def _run(ign):') is not
# visible in this chunk; the statements below appear to belong to it.
3599 expected = self.data
3600 d = defer.succeed(None)
3601 for offset in suspects:
# distinct two-letter marker per modification, as in the sibling test
3602 new_data = letters.next()*2 # "AA", then "BB", etc
3603 expected = expected[:offset]+new_data+expected[offset+2:]
3604 d.addCallback(lambda ign:
3605 self.mdmf_max_shares_node.get_best_mutable_version())
3606 def _modify(mv, offset=offset, new_data=new_data):
3607 # close over 'offset','new_data'
3608 md = MutableData(new_data)
3609 return mv.update(md, offset)
3610 d.addCallback(_modify)
3611 d.addCallback(lambda ignored:
3612 self.mdmf_max_shares_node.download_best_version())
3613 d.addCallback(self._check_differences, expected)
# NOTE(review): original lines 3614 and 3616-3618 (returning d / d0)
# are not visible in this chunk.
3615 d0.addCallback(_run)
# Append enough data to an MDMF file to push its segment count across a
# power-of-two boundary (7 -> 9 segments), forcing the update path to
# re-encode, then verify a full download returns the extended contents.
3619 def test_append_power_of_two(self):
3620 # If we attempt to extend a mutable file so that its segment
3621 # count crosses a power-of-two boundary, the update operation
3622 # should know how to reencode the file.
# NOTE(review): original line 3623 is not visible in this chunk.
3624 # Note that the data populating self.mdmf_node is about 900 KiB
3625 # long -- this is 7 segments in the default segment size. So we
3626 # need to add 2 segments worth of data to push it over a
3627 # power-of-two boundary.
3628 segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3629 new_data = self.data + (segment * 2)
3630 d0 = self.do_upload_mdmf()
# NOTE(review): original line 3631 (presumably 'def _run(ign):') is not
# visible in this chunk; the statements below appear to belong to it.
3632 d = defer.succeed(None)
# run the same append-then-verify sequence against both the default
# node and the max-shares node
3633 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3634 # close over 'node'.
3635 d.addCallback(lambda ign, node=node:
3636 node.get_best_mutable_version())
# append two full segments at the current end of the file
3637 d.addCallback(lambda mv:
3638 mv.update(MutableData(segment * 2), len(self.data)))
3639 d.addCallback(lambda ign, node=node:
3640 node.download_best_version())
3641 d.addCallback(lambda results:
3642 self.failUnlessEqual(results, new_data))
# NOTE(review): original lines 3643 and 3645-3646 (returning d / d0)
# are not visible in this chunk.
3644 d0.addCallback(_run)
# Verify that the update() API also works on a single-segment SDMF
# file: append a short string and confirm the downloaded contents.
3647 def test_update_sdmf(self):
3648 # Running update on a single-segment file should still work.
3649 new_data = self.small_data + "appended"
3650 d0 = self.do_upload_sdmf()
# NOTE(review): original line 3651 (presumably 'def _run(ign):') is not
# visible in this chunk; the statements below appear to belong to it.
3652 d = defer.succeed(None)
# exercise both the default SDMF node and the max-shares SDMF node
3653 for node in (self.sdmf_node, self.sdmf_max_shares_node):
3654 # close over 'node'.
3655 d.addCallback(lambda ign, node=node:
3656 node.get_best_mutable_version())
# append at the exact end of the existing (small) contents
3657 d.addCallback(lambda mv:
3658 mv.update(MutableData("appended"), len(self.small_data)))
3659 d.addCallback(lambda ign, node=node:
3660 node.download_best_version())
3661 d.addCallback(lambda results:
3662 self.failUnlessEqual(results, new_data))
# NOTE(review): original lines 3663 and 3665-3666 (returning d / d0)
# are not visible in this chunk.
3664 d0.addCallback(_run)
# Overwrite a small region inside the tail (short, final) segment of an
# MDMF file and verify the download matches; the update wrapper must
# handle the tail segment's non-standard length correctly.
3667 def test_replace_in_last_segment(self):
3668 # The wrapper should know how to handle the tail segment
# NOTE(review): original line 3669 is not visible in this chunk.
# build the expected contents: "replaced" written 100 bytes from EOF
3670 replace_offset = len(self.data) - 100
3671 new_data = self.data[:replace_offset] + "replaced"
3672 rest_offset = replace_offset + len("replaced")
3673 new_data += self.data[rest_offset:]
3674 d0 = self.do_upload_mdmf()
# NOTE(review): original line 3675 (presumably 'def _run(ign):') is not
# visible in this chunk; the statements below appear to belong to it.
3676 d = defer.succeed(None)
# exercise both the default and max-shares MDMF nodes
3677 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3678 # close over 'node'.
3679 d.addCallback(lambda ign, node=node:
3680 node.get_best_mutable_version())
3681 d.addCallback(lambda mv:
3682 mv.update(MutableData("replaced"), replace_offset))
3683 d.addCallback(lambda ign, node=node:
3684 node.download_best_version())
3685 d.addCallback(lambda results:
3686 self.failUnlessEqual(results, new_data))
# NOTE(review): original lines 3687 and 3689-3690 (returning d / d0)
# are not visible in this chunk.
3688 d0.addCallback(_run)
# Replace a region spanning multiple whole segments (two full segments
# of "a" plus "replaced") starting at the second segment boundary of an
# MDMF file, then verify the downloaded contents.
3691 def test_multiple_segment_replace(self):
3692 replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
# expected contents: original data up to the boundary, two fresh
# segments, the literal "replaced", then the untouched remainder
3693 new_data = self.data[:replace_offset]
3694 new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3695 new_data += 2 * new_segment
3696 new_data += "replaced"
3697 rest_offset = len(new_data)
3698 new_data += self.data[rest_offset:]
3699 d0 = self.do_upload_mdmf()
# NOTE(review): original line 3700 (presumably 'def _run(ign):') is not
# visible in this chunk; the statements below appear to belong to it.
3701 d = defer.succeed(None)
# exercise both the default and max-shares MDMF nodes
3702 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3703 # close over 'node'.
3704 d.addCallback(lambda ign, node=node:
3705 node.get_best_mutable_version())
3706 d.addCallback(lambda mv:
3707 mv.update(MutableData((2 * new_segment) + "replaced"),
# NOTE(review): original line 3708 -- presumably the closing
# 'replace_offset))' argument of mv.update -- is not visible here.
3709 d.addCallback(lambda ignored, node=node:
3710 node.download_best_version())
3711 d.addCallback(lambda results:
3712 self.failUnlessEqual(results, new_data))
# NOTE(review): original lines 3713 and 3715-3716 (returning d / d0)
# are not visible in this chunk.
3714 d0.addCallback(_run)
3717 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3718 sdmf_old_shares = {}
3719 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3720 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3721 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3722 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3723 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3724 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3725 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3726 sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
# Pre-generated SDMF shares (base64-encoded), continued: entries 8 and 9 of
# the sdmf_old_shares dict.  These shares were written by an old version of
# the mutable-file code and are injected verbatim into storage servers to
# verify that the current downloader remains backward-compatible.
# NOTE(review): in this excerpt each share literal is wrapped across two
# physical lines and prefixed with stray original line numbers -- extraction
# artifacts, deliberately left byte-identical below.
3727 sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3728 sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
# The SSK write-cap that matches the shares above, and the plaintext the
# downloader is expected to recover from them.
3729 sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3730 sdmf_old_contents = "This is a test file.\n"
# Inject the canned SDMF shares straight into the storage servers' share
# directories, bypassing the normal upload path, then sanity-check that all
# ten shares are discoverable via the cap.
3731 def copy_sdmf_shares(self):
3732 # We'll basically be short-circuiting the upload process.
3733 servernums = self.g.servers_by_number.keys()
3734 assert len(servernums) == 10
# Pair each canned share number with a distinct storage-server number.
3736 assignments = zip(self.sdmf_old_shares.keys(), servernums)
3737 # Get the storage index.
3738 cap = uri.from_string(self.sdmf_old_cap)
3739 si = cap.get_storage_index()
3741 # Now execute each assignment by writing the storage.
3742 for (share, servernum) in assignments:
3743 sharedata = base64.b64decode(self.sdmf_old_shares[share])
3744 storedir = self.get_serverdir(servernum)
3745 storage_path = os.path.join(storedir, "shares",
3746 storage_index_to_dir(si))
3747 fileutil.make_dirs(storage_path)
# NOTE(review): the write() call below appears truncated in this excerpt --
# the decoded `sharedata` argument (original line 3749) is missing here;
# confirm against the full source before relying on this text.
3748 fileutil.write(os.path.join(storage_path, "%d" % share),
3750 # ...and verify that the shares are there.
3751 shares = self.find_uri_shares(self.sdmf_old_cap)
3752 assert len(shares) == 10
# End-to-end interoperability check: shares written by an old SDMF writer
# must be readable by the current downloader, yielding the known plaintext.
3754 def test_new_downloader_can_read_old_shares(self):
3755 self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
# NOTE(review): a grid-setup call (original line 3756) appears to be missing
# from this excerpt; copy_sdmf_shares() relies on self.g being initialized.
3757 self.copy_sdmf_shares()
3758 nm = self.g.clients[0].nodemaker
3759 n = nm.create_from_cap(self.sdmf_old_cap)
3760 d = n.download_best_version()
# The downloaded plaintext must equal the canned original contents.
3761 d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
# NOTE(review): the trailing `return d` (original line 3762) is absent in
# this excerpt; trial needs the Deferred returned so it can wait on it.
3764 class DifferentEncoding(unittest.TestCase):
3766 self._storage = s = FakeStorage()
3767 self.nodemaker = make_nodemaker(s)
3769 def test_filenode(self):
3770 # create a file with 3-of-20, then modify it with a client configured
3771 # to do 3-of-10. #1510 tracks a failure here
3772 self.nodemaker.default_encoding_parameters["n"] = 20
3773 d = self.nodemaker.create_mutable_file("old contents")
3775 filecap = n.get_cap().to_string()
3776 del n # we want a new object, not the cached one
3777 self.nodemaker.default_encoding_parameters["n"] = 10
3778 n2 = self.nodemaker.create_from_cap(filecap)
3780 d.addCallback(_created)
3781 def modifier(old_contents, servermap, first_time):
3782 return "new contents"
3783 d.addCallback(lambda n: n.modify(modifier))