2 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
7 from allmydata import uri, client
8 from allmydata.nodemaker import NodeMaker
9 from allmydata.util import base32, consumer, fileutil, mathutil
10 from allmydata.util.fileutil import abspath_expanduser_unicode
11 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
12 ssk_pubkey_fingerprint_hash
13 from allmydata.util.consumer import MemoryConsumer
14 from allmydata.util.deferredutil import gatherResults
15 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
16 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
17 from allmydata.monitor import Monitor
18 from allmydata.test.common import ShouldFailMixin
19 from allmydata.test.no_network import GridTestMixin
20 from foolscap.api import eventually, fireEventually, flushEventualQueue
21 from foolscap.logging import log
22 from allmydata.storage_client import StorageFarmBroker
23 from allmydata.storage.common import storage_index_to_dir
24 from allmydata.scripts import debug
26 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
27 from allmydata.mutable.common import \
28 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
29 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
30 NotEnoughServersError, CorruptShareError
31 from allmydata.mutable.retrieve import Retrieve
32 from allmydata.mutable.publish import Publish, MutableFileHandle, \
34 DEFAULT_MAX_SEGMENT_SIZE
35 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
36 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
37 from allmydata.mutable.repairer import MustForceRepairError
39 import allmydata.test.common_util as testutil
40 from allmydata.test.common import TEST_RSA_KEY_SIZE
41 from allmydata.test.test_download import PausingConsumer, \
42 PausingAndStoppingConsumer, StoppingConsumer, \
43 ImmediatelyStoppingConsumer
def eventuaaaaaly(res=None):
    """Return a Deferred that fires with *res* only after three trips
    through foolscap's eventual-send queue, giving previously queued
    events a chance to run first."""
    d = fireEventually(res)
    d.addCallback(fireEventually)
    d.addCallback(fireEventually)
    # bug fix: the Deferred was constructed but never returned, so callers
    # (e.g. FakeStorage.read, which does `return eventuaaaaaly(shares)`)
    # would receive None instead of a Deferred.
    return d
52 # this "FakeStorage" exists to put the share data in RAM and avoid using real
53 # network connections, both to speed up the tests and to reduce the amount of
54 # non-mutable.py code being exercised.
57 # this class replaces the collection of storage servers, allowing the
58 # tests to examine and manipulate the published shares. It also lets us
59 # control the order in which read queries are answered, to exercise more
60 # of the error-handling code in Retrieve .
62 # Note that we ignore the storage index: this FakeStorage instance can
63 # only be used for a single storage index.
68 # _sequence is used to cause the responses to occur in a specific
69 # order. If it is in use, then we will defer queries instead of
70 # answering them right away, accumulating the Deferreds in a dict. We
71 # don't know exactly how many queries we'll get, so exactly one
72 # second after the first query arrives, we will release them all (in
76 self._pending_timer = None
    def read(self, peerid, storage_index):
        # Return (eventually) the share dict for `peerid`.  Note that
        # storage_index is ignored: a FakeStorage instance serves only a
        # single storage index.
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            # No forced ordering: answer right away (via the eventual queue).
            return eventuaaaaaly(shares)
        # Ordered mode: queue the answer; _fire_readers releases all queued
        # reads one second after the first query arrives.
        # [dump gap: src 82-83 missing — presumably create the pending
        # Deferred `d` (and guard the callLater) — confirm against full source]
        self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        if peerid not in self._pending:
            self._pending[peerid] = []
        self._pending[peerid].append( (d, shares) )
        # [dump gap: src 88 missing — likely `return d`]
    def _fire_readers(self):
        # Release every read queued while self._sequence was set: first in
        # the order given by self._sequence, then everything else in
        # arbitrary dict order.
        self._pending_timer = None
        pending = self._pending
        # [dump gap: src 93 missing — likely resets self._pending]
        for peerid in self._sequence:
            # [dump gap: src 95 missing — likely `if peerid in pending:`
            # guard; without it pop() would raise KeyError — confirm]
            for (d, shares) in pending.pop(peerid):
                eventually(d.callback, shares)
        for peerid in pending:
            for (d, shares) in pending[peerid]:
                eventually(d.callback, shares)
    def write(self, peerid, storage_index, shnum, offset, data):
        # Overwrite `data` at `offset` within this peer's share `shnum`,
        # creating the peer entry if needed.  storage_index is ignored
        # (single-storage-index fake).
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        # [dump gap: src 106 missing — presumably `f = StringIO()`]
        # Seed the buffer with the existing share contents (empty if new).
        f.write(shares.get(shnum, ""))
        # [dump gap: src 108-109 missing — presumably seek(offset) then
        # write(data) — confirm against full source]
        shares[shnum] = f.getvalue()
class FakeStorageServer:
    """In-RAM stand-in for a remote storage server.  Share data lives in a
    shared FakeStorage instance; "remote" calls dispatch to local methods."""

    def __init__(self, peerid, storage):
        # [dump gap: src 115 missing — presumably `self.peerid = peerid`
        # (self.peerid is read below) — confirm]
        self.storage = storage

    def callRemote(self, methname, *args, **kwargs):
        # Dispatch a "remote" call to the local method of the same name.
        # [dump gap: src 119-120 missing — presumably the Deferred plumbing
        # (`d = fireEventually()` and a `def _call():` wrapper)]
        meth = getattr(self, methname)
        return meth(*args, **kwargs)
        d.addCallback(lambda res: _call())
        # [dump gap: src 125-126 missing — likely `return d`]

    def callRemoteOnly(self, methname, *args, **kwargs):
        # Fire-and-forget variant: result and any failure are swallowed.
        # [dump gap: src 128 missing]
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)
        # [dump gap: src 131-132 missing — likely `return None`]

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
        # [dump gap: src 134-135 missing — body not shown; presumably a
        # no-op in the fake — confirm]

    def slot_readv(self, storage_index, shnums, readv):
        # Read the requested (offset, length) ranges from each share this
        # peer holds, restricted to `shnums` when non-empty.
        d = self.storage.read(self.peerid, storage_index)
        # [dump gap: src 138-140 missing — presumably a `def _read(shares):`
        # callback with `response = {}` and a `for shnum in shares:` loop]
        if shnums and shnum not in shnums:
            # [dump gap: src 142 missing — likely `continue`]
        vector = response[shnum] = []
        for (offset, length) in readv:
            assert isinstance(offset, (int, long)), offset
            assert isinstance(length, (int, long)), length
            vector.append(shares[shnum][offset:offset+length])
        # [dump gap: src 148-151 missing — likely `return response`,
        # d.addCallback(_read), `return d`]

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        # [dump gap: src 155 missing — presumably `readv = {}`]
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            # [dump gap: src 160 missing — comment continues]
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
                             # [dump gap: src 163 missing — likely `in testv ]`]
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
                                   # [dump gap: src 166 missing — likely
                                   # `offset, data)` continuation]
        answer = (True, readv)
        return fireEventually(answer)
def flip_bit(original, byte_offset):
    """Return a copy of `original` with the low bit (0x01) of the byte at
    `byte_offset` inverted."""
    head = original[:byte_offset]
    tail = original[byte_offset+1:]
    toggled = chr(ord(original[byte_offset]) ^ 0x01)
    return head + toggled + tail
def add_two(original, byte_offset):
    """Return a copy of `original` with bit 0x02 of the byte at
    `byte_offset` inverted.

    It isn't enough to simply flip the low bit for the version-number
    byte, because 1 is a valid version number; toggling 0x02 moves the
    byte out of the valid set either way.  (Despite the name, this XORs
    with 2 — it does not arithmetically add 2.)
    """
    prefix = original[:byte_offset]
    suffix = original[byte_offset+1:]
    bumped = chr(ord(original[byte_offset]) ^ 0x02)
    return prefix + bumped + suffix
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # Corrupt one byte at `offset` within shares held by FakeStorage `s`,
    # then pass `res` through once all corruptions are applied.
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    # [dump gap: src 186 missing — presumably `ds = []` (used below)]
    for peerid in s._peers:
        shares = s._peers[peerid]
        # [dump gap: src 189 missing — presumably `for shnum in shares:`]
        if (shnums_to_corrupt is not None
            and shnum not in shnums_to_corrupt):
            # [dump gap: src 192-193 missing — likely `continue` and
            # `data = shares[shnum]`]
        # We're feeding the reader all of the share data, so it
        # won't need to use the rref that we didn't provide, nor the
        # storage index that we didn't provide. We do this because
        # the reader will work for both MDMF and SDMF.
        reader = MDMFSlotReadProxy(None, None, shnum, data)
        # We need to get the offsets for the next part.
        d = reader.get_verinfo()
        def _do_corruption(verinfo, data, shnum, shares):
            # [dump gap: src 202-206 missing — opening of the verinfo
            # tuple unpack]
            k, n, prefix, o) = verinfo
            if isinstance(offset, tuple):
                offset1, offset2 = offset
            # [dump gap: src 210-212 missing — else branch setting
            # offset1/offset2 for a non-tuple offset]
            if offset1 == "pubkey" and IV:
                # [dump gap: src 214-215 missing — fixed real_offset for
                # the SDMF pubkey case]
            real_offset = o[offset1]
            # [dump gap: src 217 missing — branch structure around the two
            # real_offset assignments (named offset vs numeric)]
            real_offset = offset1
            real_offset = int(real_offset) + offset2 + offset_offset
            assert isinstance(real_offset, int), offset
            if offset1 == 0: # verbyte
                # [dump gap: src 222-224 missing — choice of corruption
                # function `f` (presumably add_two for the version byte,
                # flip_bit otherwise) — confirm]
            shares[shnum] = f(data, real_offset)
        d.addCallback(_do_corruption, data, shnum, shares)
        # [dump gap: src 227 missing — likely `ds.append(d)`]
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
    # [dump gap: src 230 missing — likely `return dl`]
def make_storagebroker(s=None, num_peers=10):
    """Return a StorageFarmBroker populated with `num_peers`
    FakeStorageServers, all backed by the single FakeStorage `s`.

    If `s` is None a fresh FakeStorage is created — without this guard
    (absent in the dumped code) every FakeStorageServer would be handed
    storage=None and reads/writes would fail.
    """
    if not s:
        s = FakeStorage()
    # deterministic 20-byte peerids derived from the index
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        ann = {"anonymous-storage-FURL": "pb://%s@nowhere/fake" % base32.b2a(peerid),
               "permutation-seed-base32": base32.b2a(peerid) }
        storage_broker.test_add_rref(peerid, fss, ann)
    return storage_broker
def make_nodemaker(s=None, num_peers=10, keysize=TEST_RSA_KEY_SIZE):
    # Build a NodeMaker wired to a fake storage broker, with 3-of-10
    # default encoding parameters and SDMF as the default mutable version.
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    # [dump gap: src 249 missing — likely an `if keysize:` guard around the
    # next line — confirm]
    keygen.set_default_keysize(keysize)
    nodemaker = NodeMaker(storage_broker, sh, None,
                          # [dump gap: src 252 missing — additional
                          # positional arguments to NodeMaker]
                          {"k": 3, "n": 10}, SDMF_VERSION, keygen)
    # [dump gap: src 254 missing — likely `return nodemaker`]
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    """Tests for mutable-file node creation, upload/download, and modify,
    running against in-RAM fake storage."""
    # this used to be in Publish, but we removed the limit. Some of
    # these tests test whether the new code correctly allows files
    # larger than the limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
    # [dump gap: src 261 missing — the `def setUp(self):` header; the two
    # lines below are its body]
    self._storage = s = FakeStorage()
    self.nodemaker = make_nodemaker(s)
    def test_create(self):
        # A freshly created (default SDMF) mutable file places exactly one
        # share per server (n=10 across 10 peers).
        d = self.nodemaker.create_mutable_file()
        # [dump gap: src 267 missing — likely `def _created(n):`; the next
        # six lines are its body]
        self.failUnless(isinstance(n, MutableFileNode))
        self.failUnlessEqual(n.get_storage_index(), n._storage_index)
        sb = self.nodemaker.storage_broker
        peer0 = sorted(sb.get_all_serverids())[0]
        shnums = self._storage._peers[peer0].keys()
        self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        # [dump gap: src 275-277 missing — likely `return d`]
    def test_create_mdmf(self):
        # Same one-share-per-server check for an MDMF file.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # [dump gap: src 280 missing — likely `def _created(n):`; the next
        # six lines are its body]
        self.failUnless(isinstance(n, MutableFileNode))
        self.failUnlessEqual(n.get_storage_index(), n._storage_index)
        sb = self.nodemaker.storage_broker
        peer0 = sorted(sb.get_all_serverids())[0]
        shnums = self._storage._peers[peer0].keys()
        self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
        # [dump gap: src 288-289 missing — likely `return d`]
    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            d.addCallback(lambda ignored, v=v:
                          self.nodemaker.create_mutable_file(version=v))
            # [dump gap: src 299 missing — likely `def _created(n):`]
            self.failUnless(isinstance(n, MutableFileNode))
            # [dump gap: src 301-302 missing — presumably stores the node
            # as self._node (read below) and returns it]
            d.addCallback(_created)
            d.addCallback(lambda n:
                          n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                          self._node.download_best_version())
            d.addCallback(lambda contents:
                          self.failUnlessEqual(contents, "Contents" * 50000))
        # [dump gap: src 310-311 missing — likely `return d`]
    def test_max_shares(self):
        # With n=255, an SDMF publish should place 255 shares in total.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
        # [dump gap: src 315 missing — likely `def _created(n):`]
        self.failUnless(isinstance(n, MutableFileNode))
        self.failUnlessEqual(n.get_storage_index(), n._storage_index)
        sb = self.nodemaker.storage_broker
        num_shares = sum([len(self._storage._peers[x].keys()) for x \
                          in sb.get_all_serverids()])
        self.failUnlessEqual(num_shares, 255)
        # [dump gap: src 322-323 missing — presumably stores/returns the
        # node as self._node (read below)]
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
                      n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
                      self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
                      self.failUnlessEqual("contents" * 50000, contents))
        # [dump gap: src 334-335 missing — likely `return d`]
    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # [dump gap: src 340 missing — likely `def _created(n):`]
        self.failUnless(isinstance(n, MutableFileNode))
        self.failUnlessEqual(n.get_storage_index(), n._storage_index)
        sb = self.nodemaker.storage_broker
        num_shares = sum([len(self._storage._peers[x].keys()) for x \
                          in sb.get_all_serverids()])
        self.failUnlessEqual(num_shares, 255)
        # [dump gap: src 347-348 missing — presumably stores/returns the
        # node as self._node (read below)]
        d.addCallback(_created)
        d.addCallback(lambda n:
                      n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
                      self._node.download_best_version())
        d.addCallback(lambda contents:
                      self.failUnlessEqual(contents, "contents" * 50000))
        # [dump gap: src 356-357 missing — likely `return d`]
    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # [dump gap: src 361 missing — likely `def _created(n):`]
        self.failUnless(isinstance(n, MutableFileNode))
        # [dump gap: src 363 missing — presumably assigns `cap` from the
        # node (read below)]
        self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
        rcap = n.get_readcap()
        self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
        vcap = n.get_verify_cap()
        self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
        # [dump gap: src 370-372 missing — likely `return d`]
    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # [dump gap: src 377 missing — likely `def _created(n):`]
        self.failUnless(isinstance(n, MutableFileNode))
        # [dump gap: src 379 missing — presumably assigns `s` from the
        # node's URI (read below)]
        self.failUnless(s.startswith("URI:MDMF"))
        n2 = self.nodemaker.create_from_cap(s)
        self.failUnless(isinstance(n2, MutableFileNode))
        self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
        self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
        # [dump gap: src 386-388 missing — likely `return d`]
    def test_create_from_mdmf_readcap(self):
        # A node created from an MDMF read-cap must be a read-only
        # MutableFileNode.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # [dump gap: src 391 missing — likely `def _created(n):`]
        self.failUnless(isinstance(n, MutableFileNode))
        s = n.get_readonly_uri()
        n2 = self.nodemaker.create_from_cap(s)
        self.failUnless(isinstance(n2, MutableFileNode))
        # [dump gap: src 396 missing]
        # Check that it's a readonly node
        self.failUnless(n2.is_readonly())
        d.addCallback(_created)
        # [dump gap: src 400-402 missing — likely `return d`]
    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # [dump gap: src 409 missing — likely `def _created(n):`]
        self.uri = n.get_uri()
        self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
        # [dump gap: src 412 missing]
        n2 = self.nodemaker.create_from_cap(self.uri)
        self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
        # [dump gap: src 416-418 missing — likely `return d`]
    def test_serialize(self):
        # _do_serialized must run callbacks in order and propagate
        # results and errors.
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        # [dump gap: src 421 missing — presumably `calls = []` (asserted
        # against below)]
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
            # [dump gap: src 425-426 missing — presumably records the call
            # in `calls` and returns 6 (asserted below)]
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)
        # [dump gap: src 432-433 missing — likely a `def _errback():`
        # header for the raise below]
        raise ValueError("heya")
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "_check_errback", "heya",
                                      n._do_serialized, _errback))
        # [dump gap: src 438-439 missing — likely `return d`]
    def test_upload_and_download(self):
        # Full round-trip for a default SDMF file: servermap dump,
        # overwrite, download, size check, upload-with-servermap, and
        # download of a specific recoverable version.
        d = self.nodemaker.create_mutable_file()
        # [dump gap: src 442 missing — likely `def _created(n):`; the rest
        # of the method is its body]
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_READ))
        d.addCallback(lambda smap: smap.dump(StringIO()))
        d.addCallback(lambda sio:
                      self.failUnless("3-of-10" in sio.getvalue()))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
        d.addCallback(lambda res: self.failUnlessIdentical(res, None))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        d.addCallback(lambda res: n.get_size_of_best_version())
        d.addCallback(lambda size:
                      self.failUnlessEqual(size, len("contents 1")))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
        d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
        d.addCallback(lambda smap:
                      n.download_version(smap,
                                         smap.best_recoverable_version()))
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
        # test a file that is large enough to overcome the
        # mapupdate-to-retrieve data caching (i.e. make the shares larger
        # than the default readsize, which is 2000 bytes). A 15kB file
        # will have 5kB shares.
        d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res:
                      self.failUnlessEqual(res, "large size file" * 1000))
        # [dump gap: src 475 missing — likely inner `return d`]
        d.addCallback(_created)
        # [dump gap: src 477-479 missing — likely `return d`]
    def test_upload_and_download_mdmf(self):
        # Same round-trip for an MDMF file, with contents big enough to
        # force multi-segment upload and download.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # [dump gap: src 482 missing — likely `def _created(n):`; the rest
        # of the method is its body]
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
                      n.get_servermap(MODE_READ))
        def _then(servermap):
            dumped = servermap.dump(StringIO())
            self.failUnlessIn("3-of-10", dumped.getvalue())
        # [dump gap: src 489 missing — likely `d.addCallback(_then)`]
        # Now overwrite the contents with some new contents. We want
        # to make them big enough to force the file to be uploaded
        # in more than one segment.
        big_contents = "contents1" * 100000 # about 900 KiB
        big_contents_uploadable = MutableData(big_contents)
        d.addCallback(lambda ignored:
                      n.overwrite(big_contents_uploadable))
        d.addCallback(lambda ignored:
                      n.download_best_version())
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, big_contents))
        # Overwrite the contents again with some new contents. As
        # before, they need to be big enough to force multiple
        # segments, so that we make the downloader deal with
        # [dump gap: src 504 missing — comment continues]
        bigger_contents = "contents2" * 1000000 # about 9MiB
        bigger_contents_uploadable = MutableData(bigger_contents)
        d.addCallback(lambda ignored:
                      n.overwrite(bigger_contents_uploadable))
        d.addCallback(lambda ignored:
                      n.download_best_version())
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, bigger_contents))
        # [dump gap: src 513 missing — likely inner `return d`]
        d.addCallback(_created)
        # [dump gap: src 515-517 missing — likely `return d`]
    def test_retrieve_producer_mdmf(self):
        # We should make sure that the retriever is able to pause and stop
        # [dump gap: src 520 missing — comment continues]
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=MDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "MDMF", data)
        # [dump gap: src 526-527 missing — likely `return d`]
    # note: SDMF has only one big segment, so we can't use the usual
    # after-the-first-write() trick to pause or stop the download.
    # Disabled until we find a better approach.
    def OFF_test_retrieve_producer_sdmf(self):
        # Disabled (OFF_ prefix): pause/stop for single-segment SDMF.
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=SDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "SDMF", data)
        # [dump gap: src 537-538 missing — likely `return d`]
    def _test_retrieve_producer(self, version, kind, data):
        # Shared driver: read the given mutable file `version` into
        # pausing/stopping consumers and check the DownloadStopped paths.
        # `kind` labels the shouldFail cases ("MDMF"/"SDMF").
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        # [dump gap: src 542 missing — presumably starts the read into `c`
        # and binds `d`]
        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
        # [dump gap: src 544 missing]
        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_pause_stop",
                                      "our Consumer called stopProducing()",
                                      # [dump gap: src 549-550 missing —
                                      # presumably the read call for c2]
        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop",
                                      "our Consumer called stopProducing()",
                                      # [dump gap: src 555-556 missing —
                                      # presumably the read call for c3]
        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
                      self.shouldFail(DownloadStopped, kind+"_stop_imm",
                                      "our Consumer called stopProducing()",
                                      # [dump gap: src 561-563 missing —
                                      # presumably the read call for c4 and
                                      # a `def _then(ign):` header]
        c5 = MemoryConsumer()
        d1 = version.read(c5)
        c5.producer.stopProducing()
        return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                               "our Consumer called stopProducing()",
                               # [dump gap: src 569-572 missing — the
                               # remaining arguments and `return d`]
    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
        # [dump gap: src 576 missing — likely `def _created(node):`]
        self.uri = node.get_uri()
        # also confirm that the cap has no extension fields
        pieces = self.uri.split(":")
        self.failUnlessEqual(len(pieces), 4)
        return node.overwrite(MutableData("contents1" * 100000))
        # [dump gap: src 583 missing — likely a `def _then(ignored):`
        # header for the next two lines]
        node = self.nodemaker.create_from_cap(self.uri)
        return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        # [dump gap: src 589 missing — likely `d.addCallback(_then)`]
        d.addCallback(_downloaded)
        # [dump gap: src 591-593 missing — likely `return d`]
    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            # every fake server should have seen exactly one query
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
        # [dump gap: src 606-608 missing — likely `return d`]
    def test_create_with_initial_contents(self):
        # A file created with initial contents serves them back and can
        # then be overwritten.
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
        # [dump gap: src 612 missing — likely `def _created(n):`; the rest
        # is its body]
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        upload2 = MutableData("contents 2")
        d.addCallback(lambda res: n.overwrite(upload2))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        # [dump gap: src 619 missing — likely inner `return d`]
        d.addCallback(_created)
        # [dump gap: src 621-623 missing — likely `return d`]
    def test_create_mdmf_with_initial_contents(self):
        # MDMF variant, with ~900KiB of initial contents to force
        # multiple segments.
        initial_contents = "foobarbaz" * 131072 # 900KiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
        # [dump gap: src 629 missing — likely `def _created(n):`; the rest
        # is its body]
        d = n.download_best_version()
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, initial_contents))
        uploadable2 = MutableData(initial_contents + "foobarbaz")
        d.addCallback(lambda ignored:
                      n.overwrite(uploadable2))
        d.addCallback(lambda ignored:
                      n.download_best_version())
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, initial_contents +
                                           # [dump gap: src 640-641 missing
                                           # — continuation and inner
                                           # `return d`]
        d.addCallback(_created)
        # [dump gap: src 643-644 missing — likely `return d`]
    def test_create_with_initial_contents_function(self):
        # Initial contents may be supplied by a callable that receives
        # the new node (so it can inspect the writekey).
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
        # [dump gap: src 654 missing — likely `def _created(n):`]
        return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
        # [dump gap: src 658-660 missing — likely `return d`]
    def test_create_mdmf_with_initial_contents_function(self):
        # MDMF variant of the contents-from-callable test.
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
                      n.download_best_version())
        d.addCallback(lambda data2:
                      self.failUnlessEqual(data2, data))
        # [dump gap: src 675-677 missing — likely `return d`]
    def test_create_with_too_large_contents(self):
        # Contents larger than the old SDMF segment-size limit are now
        # accepted, both at create time and on overwrite.
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
        # [dump gap: src 682 missing — likely `def _created(n):`]
        other_BIG_uploadable = MutableData(BIG)
        d = n.overwrite(other_BIG_uploadable)
        # [dump gap: src 685 missing — likely inner `return d`]
        d.addCallback(_created)
        # [dump gap: src 687-688 missing — likely `return d`]
    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        # Assert that node `n`'s best recoverable version carries the
        # given sequence number; `which` labels the failure message.
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        # verinfo[0] is the sequence number of the recoverable version
        d.addCallback(lambda verinfo:
                      self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        # [dump gap: src 694-695 missing — likely `return d`]
    def test_modify(self):
        # Exercise n.modify() with a spectrum of modifier callbacks:
        # plain append, no-op, None-returning, raising, oversized, and
        # UncoordinatedWriteError-simulating modifiers, checking contents
        # and seqnum after each.
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
            # [dump gap: src 699 missing — likely `return new_contents`]
        def _non_modifier(old_contents, servermap, first_time):
            # [dump gap: src 701 missing — likely `return old_contents`]
        def _none_modifier(old_contents, servermap, first_time):
            # [dump gap: src 703 missing — likely `return None`]
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
            # [dump gap: src 708-709 missing — likely `return new_content`]
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            # [dump gap: src 712-713 missing — presumably the `calls`
            # bookkeeping guarding the raise]
            raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
            # [dump gap: src 716 missing — likely `return new_contents`]
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            # [dump gap: src 720-721 missing — presumably `calls`
            # bookkeeping guarding the raise]
            raise UncoordinatedWriteError("simulated")
            # [dump gap: src 723-724 missing — likely `return old_contents`]
        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
        # [dump gap: src 727 missing — likely `def _created(n):`; the rest
        # is its body]
        d = n.modify(_modifier)
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
        # [dump gap: src 732 missing]
        d.addCallback(lambda res: n.modify(_non_modifier))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
        # [dump gap: src 737 missing]
        d.addCallback(lambda res: n.modify(_none_modifier))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
        # [dump gap: src 742 missing]
        d.addCallback(lambda res:
                      self.shouldFail(ValueError, "error_modifier", None,
                                      n.modify, _error_modifier))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
        # [dump gap: src 749-750 missing]
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
        # [dump gap: src 754 missing]
        d.addCallback(lambda res: n.modify(_ucw_error_modifier))
        d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res,
                                                       # [dump gap: src 759
                                                       # missing —
                                                       # continuation]
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
        # [dump gap: src 761 missing]
        def _reset_ucw_error_modifier(res):
            # [dump gap: src 763-764 missing — presumably resets `calls`
            # and returns res]
        d.addCallback(_reset_ucw_error_modifier)
        # [dump gap: src 766 missing]
        # in practice, this n.modify call should publish twice: the first
        # one gets a UCWE, the second does not. But our test jig (in
        # which the modifier raises the UCWE) skips over the first one,
        # so in this test there will be only one publish, and the seqnum
        # will only be one larger than the previous test, not two (i.e. 4
        # [dump gap: src 772 missing — comment continues]
        d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
        d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res,
                                                       # [dump gap: src 777
                                                       # missing —
                                                       # continuation]
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
        d.addCallback(lambda res: n.modify(_toobig_modifier))
        # [dump gap: src 780 missing — likely inner `return d`]
        d.addCallback(_created)
        # [dump gap: src 782-784 missing — likely `return d`]
    def test_modify_backoffer(self):
        # Like test_modify, but exercising custom backoff hooks: one that
        # stops on the first failure, one that pauses, and a BackoffAgent
        # that gives up after a few quick retries.
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        # [dump gap: src 788 missing]
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            # [dump gap: src 791-792 missing — presumably `calls`
            # bookkeeping guarding the raise]
            raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
            # [dump gap: src 798 missing — likely `return f` (re-raise)]
        def _backoff_pauser(node, f):
            # [dump gap: src 800 missing — presumably creates Deferred `d`]
            reactor.callLater(0.5, d.callback, None)
            # [dump gap: src 802-803 missing — likely `return d`]
        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        # [dump gap: src 807-808 missing]
        d = self.nodemaker.create_mutable_file(MutableData("line1"))
        # [dump gap: src 810 missing — likely `def _created(n):`; the rest
        # is its body]
        d = n.modify(_modifier)
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
        # [dump gap: src 815 missing]
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "_backoff_stopper", None,
                                      n.modify, _ucw_error_modifier,
                                      # [dump gap: src 820 missing — likely
                                      # `_backoff_stopper))`]
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
        # [dump gap: src 824 missing]
        def _reset_ucw_error_modifier(res):
            # [dump gap: src 826-827 missing — presumably resets `calls`
            # and returns res]
        d.addCallback(_reset_ucw_error_modifier)
        d.addCallback(lambda res: n.modify(_ucw_error_modifier,
                                           # [dump gap: src 830 missing —
                                           # likely `_backoff_pauser))`]
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res,
                                                       # [dump gap: src 833
                                                       # missing —
                                                       # continuation]
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
        # [dump gap: src 835 missing]
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      # [dump gap: src 838 missing —
                                      # label/message arguments]
                                      n.modify, _always_ucw_error_modifier,
                                      # [dump gap: src 840 missing — likely
                                      # the giveuper hook argument]
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res,
                                                       # [dump gap: src 843
                                                       # missing —
                                                       # continuation]
        d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
        # [dump gap: src 845-846 missing]
        d.addCallback(_created)
        # [dump gap: src 848-849 missing — likely `return d`]
    def test_upload_and_download_full_size_keys(self):
        # Same round-trip as test_upload_and_download, but with a
        # full-size (non-test) RSA key generator.
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
        # [dump gap: src 853 missing — likely `def _created(n):`; the rest
        # is its body]
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_READ))
        d.addCallback(lambda smap: smap.dump(StringIO()))
        d.addCallback(lambda sio:
                      self.failUnless("3-of-10" in sio.getvalue()))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
        d.addCallback(lambda res: self.failUnlessIdentical(res, None))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
        d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
        d.addCallback(lambda smap:
                      n.download_version(smap,
                                         smap.best_recoverable_version()))
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
        # [dump gap: src 875 missing — likely inner `return d`]
        d.addCallback(_created)
        # [dump gap: src 877-879 missing — likely `return d`]
    def test_size_after_servermap_update(self):
        # a mutable file node should have something to say about how big
        # it is after a servermap update is performed, since this tells
        # us how large the best version of that mutable file is.
        d = self.nodemaker.create_mutable_file()
        # [dump gap: src 885-886 missing — likely `def _created(n):` which
        # stores the node as self.n (read below)]
        return n.get_servermap(MODE_READ)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 0))
        d.addCallback(lambda ignored:
                      self.n.overwrite(MutableData("foobarbaz")))
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 9))
        d.addCallback(lambda ignored:
                      self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self.failUnlessEqual(self.n.get_size(), 9))
        # [dump gap: src 900-903 missing — likely `return d`]
    def publish_one(self):
        # publish a file and create shares, which can then be manipulated
        # [dump gap: src 906 missing — comment continues]
        self.CONTENTS = "New contents go here" * 1000
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        # [dump gap: src 913-914 missing — likely `def _created(node):`
        # which stores the node (e.g. self._fn) — confirm]
        self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        # [dump gap: src 917-918 missing — likely `return d`]
    def publish_mdmf(self, data=None):
        # like publish_one, except that the result is guaranteed to be
        # [dump gap: src 921 missing — comment continues (MDMF)]
        # self.CONTENTS should have more than one segment.
        # [dump gap: src 923 missing — likely an `if data is None:` guard]
        data = "This is an MDMF file" * 100000
        # [dump gap: src 925 missing — presumably `self.CONTENTS = data`
        # (read on the next line) — confirm]
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
        # [dump gap: src 931-932 missing — likely `def _created(node):`
        # storing the node]
        self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        # [dump gap: src 935-937 missing — likely `return d`]
    def publish_sdmf(self, data=None):
        # like publish_one, except that the result is guaranteed to be
        # [dump gap: src 940-941 missing — comment continues (SDMF) and a
        # likely `if data is None:` guard]
        data = "This is an SDMF file" * 1000
        # [dump gap: src 943 missing — presumably `self.CONTENTS = data`]
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
        # [dump gap: src 949-950 missing — likely `def _created(node):`
        # storing the node]
        self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
        d.addCallback(_created)
        # [dump gap: src 953-955 missing — likely `return d`]
956 def publish_multiple(self, version=0):
957 self.CONTENTS = ["Contents 0",
962 self.uploadables = [MutableData(d) for d in self.CONTENTS]
963 self._copied_shares = {}
964 self._storage = FakeStorage()
965 self._nodemaker = make_nodemaker(self._storage)
966 d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
969 # now create multiple versions of the same file, and accumulate
970 # their shares, so we can mix and match them later.
971 d = defer.succeed(None)
972 d.addCallback(self._copy_shares, 0)
973 d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
974 d.addCallback(self._copy_shares, 1)
975 d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
976 d.addCallback(self._copy_shares, 2)
977 d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
978 d.addCallback(self._copy_shares, 3)
979 # now we replace all the shares with version s3, and upload a new
980 # version to get s4b.
981 rollback = dict([(i,2) for i in range(10)])
982 d.addCallback(lambda res: self._set_versions(rollback))
983 d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
984 d.addCallback(self._copy_shares, 4)
985 # we leave the storage in state 4
987 d.addCallback(_created)
991 def _copy_shares(self, ignored, index):
992 shares = self._storage._peers
993 # we need a deep copy
995 for peerid in shares:
996 new_shares[peerid] = {}
997 for shnum in shares[peerid]:
998 new_shares[peerid][shnum] = shares[peerid][shnum]
999 self._copied_shares[index] = new_shares
1001 def _set_versions(self, versionmap):
1002 # versionmap maps shnums to which version (0,1,2,3,4) we want the
1003 # share to be at. Any shnum which is left out of the map will stay at
1004 # its current version.
1005 shares = self._storage._peers
1006 oldshares = self._copied_shares
1007 for peerid in shares:
1008 for shnum in shares[peerid]:
1009 if shnum in versionmap:
1010 index = versionmap[shnum]
1011 shares[peerid][shnum] = oldshares[index][peerid][shnum]
1013 class Servermap(unittest.TestCase, PublishMixin):
1015 return self.publish_one()
1017 def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
1022 sb = self._storage_broker
1023 smu = ServermapUpdater(fn, sb, Monitor(),
1024 ServerMap(), mode, update_range=update_range)
1028 def update_servermap(self, oldmap, mode=MODE_CHECK):
1029 smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
1034 def failUnlessOneRecoverable(self, sm, num_shares):
1035 self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1036 self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1037 best = sm.best_recoverable_version()
1038 self.failIfEqual(best, None)
1039 self.failUnlessEqual(sm.recoverable_versions(), set([best]))
1040 self.failUnlessEqual(len(sm.shares_available()), 1)
1041 self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
1042 shnum, servers = sm.make_sharemap().items()[0]
1043 server = list(servers)[0]
1044 self.failUnlessEqual(sm.version_on_server(server, shnum), best)
1045 self.failUnlessEqual(sm.version_on_server(server, 666), None)
1048 def test_basic(self):
1049 d = defer.succeed(None)
1050 ms = self.make_servermap
1051 us = self.update_servermap
1053 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1054 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1055 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1056 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1057 d.addCallback(lambda res: ms(mode=MODE_READ))
1058 # this mode stops at k+epsilon, and epsilon=k, so 6 shares
1059 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1060 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1061 # this mode stops at 'k' shares
1062 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1064 # and can we re-use the same servermap? Note that these are sorted in
1065 # increasing order of number of servers queried, since once a server
1066 # gets into the servermap, we'll always ask it for an update.
1067 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1068 d.addCallback(lambda sm: us(sm, mode=MODE_READ))
1069 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1070 d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
1071 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1072 d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
1073 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1074 d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
1075 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1079 def test_fetch_privkey(self):
1080 d = defer.succeed(None)
1081 # use the sibling filenode (which hasn't been used yet), and make
1082 # sure it can fetch the privkey. The file is small, so the privkey
1083 # will be fetched on the first (query) pass.
1084 d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
1085 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1087 # create a new file, which is large enough to knock the privkey out
1088 # of the early part of the file
1089 LARGE = "These are Larger contents" * 200 # about 5KB
1090 LARGE_uploadable = MutableData(LARGE)
1091 d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
1092 def _created(large_fn):
1093 large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
1094 return self.make_servermap(MODE_WRITE, large_fn2)
1095 d.addCallback(_created)
1096 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1100 def test_mark_bad(self):
1101 d = defer.succeed(None)
1102 ms = self.make_servermap
1104 d.addCallback(lambda res: ms(mode=MODE_READ))
1105 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1107 v = sm.best_recoverable_version()
1108 vm = sm.make_versionmap()
1109 shares = list(vm[v])
1110 self.failUnlessEqual(len(shares), 6)
1111 self._corrupted = set()
1112 # mark the first 5 shares as corrupt, then update the servermap.
1113 # The map should not have the marked shares it in any more, and
1114 # new shares should be found to replace the missing ones.
1115 for (shnum, server, timestamp) in shares:
1117 self._corrupted.add( (server, shnum) )
1118 sm.mark_bad_share(server, shnum, "")
1119 return self.update_servermap(sm, MODE_WRITE)
1120 d.addCallback(_made_map)
1122 # this should find all 5 shares that weren't marked bad
1123 v = sm.best_recoverable_version()
1124 vm = sm.make_versionmap()
1125 shares = list(vm[v])
1126 for (server, shnum) in self._corrupted:
1127 server_shares = sm.debug_shares_on_server(server)
1128 self.failIf(shnum in server_shares,
1129 "%d was in %s" % (shnum, server_shares))
1130 self.failUnlessEqual(len(shares), 5)
1131 d.addCallback(_check_map)
1134 def failUnlessNoneRecoverable(self, sm):
1135 self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1136 self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1137 best = sm.best_recoverable_version()
1138 self.failUnlessEqual(best, None)
1139 self.failUnlessEqual(len(sm.shares_available()), 0)
1141 def test_no_shares(self):
1142 self._storage._peers = {} # delete all shares
1143 ms = self.make_servermap
1144 d = defer.succeed(None)
1146 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1147 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1149 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1150 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1152 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1153 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1155 d.addCallback(lambda res: ms(mode=MODE_READ))
1156 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1160 def failUnlessNotQuiteEnough(self, sm):
1161 self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1162 self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
1163 best = sm.best_recoverable_version()
1164 self.failUnlessEqual(best, None)
1165 self.failUnlessEqual(len(sm.shares_available()), 1)
1166 self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
1169 def test_not_quite_enough_shares(self):
1171 ms = self.make_servermap
1172 num_shares = len(s._peers)
1173 for peerid in s._peers:
1174 s._peers[peerid] = {}
1178 # now there ought to be only two shares left
1179 assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
1181 d = defer.succeed(None)
1183 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1184 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1185 d.addCallback(lambda sm:
1186 self.failUnlessEqual(len(sm.make_sharemap()), 2))
1187 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1188 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1189 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1190 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1191 d.addCallback(lambda res: ms(mode=MODE_READ))
1192 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1197 def test_servermapupdater_finds_mdmf_files(self):
1198 # setUp already published an MDMF file for us. We just need to
1199 # make sure that when we run the ServermapUpdater, the file is
1200 # reported to have one recoverable version.
1201 d = defer.succeed(None)
1202 d.addCallback(lambda ignored:
1203 self.publish_mdmf())
1204 d.addCallback(lambda ignored:
1205 self.make_servermap(mode=MODE_CHECK))
1206 # Calling make_servermap also updates the servermap in the mode
1207 # that we specify, so we just need to see what it says.
1208 def _check_servermap(sm):
1209 self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1210 d.addCallback(_check_servermap)
1214 def test_fetch_update(self):
1215 d = defer.succeed(None)
1216 d.addCallback(lambda ignored:
1217 self.publish_mdmf())
1218 d.addCallback(lambda ignored:
1219 self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1220 def _check_servermap(sm):
1222 self.failUnlessEqual(len(sm.update_data), 10)
1224 for data in sm.update_data.itervalues():
1225 self.failUnlessEqual(len(data), 1)
1226 d.addCallback(_check_servermap)
1230 def test_servermapupdater_finds_sdmf_files(self):
1231 d = defer.succeed(None)
1232 d.addCallback(lambda ignored:
1233 self.publish_sdmf())
1234 d.addCallback(lambda ignored:
1235 self.make_servermap(mode=MODE_CHECK))
1236 d.addCallback(lambda servermap:
1237 self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1241 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1243 return self.publish_one()
1245 def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1247 oldmap = ServerMap()
1249 sb = self._storage_broker
1250 smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1254 def abbrev_verinfo(self, verinfo):
1257 (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1258 offsets_tuple) = verinfo
1259 return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1261 def abbrev_verinfo_dict(self, verinfo_d):
1263 for verinfo,value in verinfo_d.items():
1264 (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1265 offsets_tuple) = verinfo
1266 output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1269 def dump_servermap(self, servermap):
1270 print "SERVERMAP", servermap
1271 print "RECOVERABLE", [self.abbrev_verinfo(v)
1272 for v in servermap.recoverable_versions()]
1273 print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1274 print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1276 def do_download(self, servermap, version=None):
1278 version = servermap.best_recoverable_version()
1279 r = Retrieve(self._fn, self._storage_broker, servermap, version)
1280 c = consumer.MemoryConsumer()
1281 d = r.download(consumer=c)
1282 d.addCallback(lambda mc: "".join(mc.chunks))
1286 def test_basic(self):
1287 d = self.make_servermap()
1288 def _do_retrieve(servermap):
1289 self._smap = servermap
1290 #self.dump_servermap(servermap)
1291 self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1292 return self.do_download(servermap)
1293 d.addCallback(_do_retrieve)
1294 def _retrieved(new_contents):
1295 self.failUnlessEqual(new_contents, self.CONTENTS)
1296 d.addCallback(_retrieved)
1297 # we should be able to re-use the same servermap, both with and
1298 # without updating it.
1299 d.addCallback(lambda res: self.do_download(self._smap))
1300 d.addCallback(_retrieved)
1301 d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1302 d.addCallback(lambda res: self.do_download(self._smap))
1303 d.addCallback(_retrieved)
1304 # clobbering the pubkey should make the servermap updater re-fetch it
1305 def _clobber_pubkey(res):
1306 self._fn._pubkey = None
1307 d.addCallback(_clobber_pubkey)
1308 d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1309 d.addCallback(lambda res: self.do_download(self._smap))
1310 d.addCallback(_retrieved)
1313 def test_all_shares_vanished(self):
1314 d = self.make_servermap()
1315 def _remove_shares(servermap):
1316 for shares in self._storage._peers.values():
1318 d1 = self.shouldFail(NotEnoughSharesError,
1319 "test_all_shares_vanished",
1320 "ran out of servers",
1321 self.do_download, servermap)
1323 d.addCallback(_remove_shares)
1326 def test_all_but_two_shares_vanished_updated_servermap(self):
1327 # tests error reporting for ticket #1742
1328 d = self.make_servermap()
1329 def _remove_shares(servermap):
1330 self._version = servermap.best_recoverable_version()
1331 for shares in self._storage._peers.values()[2:]:
1333 return self.make_servermap(servermap)
1334 d.addCallback(_remove_shares)
1335 def _check(updated_servermap):
1336 d1 = self.shouldFail(NotEnoughSharesError,
1337 "test_all_but_two_shares_vanished_updated_servermap",
1338 "ran out of servers",
1339 self.do_download, updated_servermap, version=self._version)
1341 d.addCallback(_check)
1344 def test_no_servers(self):
1345 sb2 = make_storagebroker(num_peers=0)
1346 # if there are no servers, then a MODE_READ servermap should come
1348 d = self.make_servermap(sb=sb2)
1349 def _check_servermap(servermap):
1350 self.failUnlessEqual(servermap.best_recoverable_version(), None)
1351 self.failIf(servermap.recoverable_versions())
1352 self.failIf(servermap.unrecoverable_versions())
1353 self.failIf(servermap.all_servers())
1354 d.addCallback(_check_servermap)
1357 def test_no_servers_download(self):
1358 sb2 = make_storagebroker(num_peers=0)
1359 self._fn._storage_broker = sb2
1360 d = self.shouldFail(UnrecoverableFileError,
1361 "test_no_servers_download",
1362 "no recoverable versions",
1363 self._fn.download_best_version)
1365 # a failed download that occurs while we aren't connected to
1366 # anybody should not prevent a subsequent download from working.
1367 # This isn't quite the webapi-driven test that #463 wants, but it
1368 # should be close enough.
1369 self._fn._storage_broker = self._storage_broker
1370 return self._fn.download_best_version()
1371 def _retrieved(new_contents):
1372 self.failUnlessEqual(new_contents, self.CONTENTS)
1373 d.addCallback(_restore)
1374 d.addCallback(_retrieved)
1378 def _test_corrupt_all(self, offset, substring,
1379 should_succeed=False,
1381 failure_checker=None,
1382 fetch_privkey=False):
1383 d = defer.succeed(None)
1385 d.addCallback(corrupt, self._storage, offset)
1386 d.addCallback(lambda res: self.make_servermap())
1387 if not corrupt_early:
1388 d.addCallback(corrupt, self._storage, offset)
1389 def _do_retrieve(servermap):
1390 ver = servermap.best_recoverable_version()
1391 if ver is None and not should_succeed:
1392 # no recoverable versions == not succeeding. The problem
1393 # should be noted in the servermap's list of problems.
1395 allproblems = [str(f) for f in servermap.get_problems()]
1396 self.failUnlessIn(substring, "".join(allproblems))
1399 d1 = self._fn.download_version(servermap, ver,
1401 d1.addCallback(lambda new_contents:
1402 self.failUnlessEqual(new_contents, self.CONTENTS))
1404 d1 = self.shouldFail(NotEnoughSharesError,
1405 "_corrupt_all(offset=%s)" % (offset,),
1407 self._fn.download_version, servermap,
1411 d1.addCallback(failure_checker)
1412 d1.addCallback(lambda res: servermap)
1414 d.addCallback(_do_retrieve)
1417 def test_corrupt_all_verbyte(self):
1418 # when the version byte is not 0 or 1, we hit an UnknownVersionError
1419 # error in unpack_share().
1420 d = self._test_corrupt_all(0, "UnknownVersionError")
1421 def _check_servermap(servermap):
1422 # and the dump should mention the problems
1424 dump = servermap.dump(s).getvalue()
1425 self.failUnless("30 PROBLEMS" in dump, dump)
1426 d.addCallback(_check_servermap)
1429 def test_corrupt_all_seqnum(self):
1430 # a corrupt sequence number will trigger a bad signature
1431 return self._test_corrupt_all(1, "signature is invalid")
1433 def test_corrupt_all_R(self):
1434 # a corrupt root hash will trigger a bad signature
1435 return self._test_corrupt_all(9, "signature is invalid")
1437 def test_corrupt_all_IV(self):
1438 # a corrupt salt/IV will trigger a bad signature
1439 return self._test_corrupt_all(41, "signature is invalid")
1441 def test_corrupt_all_k(self):
1442 # a corrupt 'k' will trigger a bad signature
1443 return self._test_corrupt_all(57, "signature is invalid")
1445 def test_corrupt_all_N(self):
1446 # a corrupt 'N' will trigger a bad signature
1447 return self._test_corrupt_all(58, "signature is invalid")
1449 def test_corrupt_all_segsize(self):
1450 # a corrupt segsize will trigger a bad signature
1451 return self._test_corrupt_all(59, "signature is invalid")
1453 def test_corrupt_all_datalen(self):
1454 # a corrupt data length will trigger a bad signature
1455 return self._test_corrupt_all(67, "signature is invalid")
1457 def test_corrupt_all_pubkey(self):
1458 # a corrupt pubkey won't match the URI's fingerprint. We need to
1459 # remove the pubkey from the filenode, or else it won't bother trying
1461 self._fn._pubkey = None
1462 return self._test_corrupt_all("pubkey",
1463 "pubkey doesn't match fingerprint")
1465 def test_corrupt_all_sig(self):
1466 # a corrupt signature is a bad one
1467 # the signature runs from about [543:799], depending upon the length
1469 return self._test_corrupt_all("signature", "signature is invalid")
1471 def test_corrupt_all_share_hash_chain_number(self):
1472 # a corrupt share hash chain entry will show up as a bad hash. If we
1473 # mangle the first byte, that will look like a bad hash number,
1474 # causing an IndexError
1475 return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1477 def test_corrupt_all_share_hash_chain_hash(self):
1478 # a corrupt share hash chain entry will show up as a bad hash. If we
1479 # mangle a few bytes in, that will look like a bad hash.
1480 return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1482 def test_corrupt_all_block_hash_tree(self):
1483 return self._test_corrupt_all("block_hash_tree",
1484 "block hash tree failure")
1486 def test_corrupt_all_block(self):
1487 return self._test_corrupt_all("share_data", "block hash tree failure")
1489 def test_corrupt_all_encprivkey(self):
1490 # a corrupted privkey won't even be noticed by the reader, only by a
1492 return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1495 def test_corrupt_all_encprivkey_late(self):
1496 # this should work for the same reason as above, but we corrupt
1497 # after the servermap update to exercise the error handling
1499 # We need to remove the privkey from the node, or the retrieve
1500 # process won't know to update it.
1501 self._fn._privkey = None
1502 return self._test_corrupt_all("enc_privkey",
1503 None, # this shouldn't fail
1504 should_succeed=True,
1505 corrupt_early=False,
1509 # disabled until retrieve tests checkstring on each blockfetch. I didn't
1510 # just use a .todo because the failing-but-ignored test emits about 30kB
1512 def OFF_test_corrupt_all_seqnum_late(self):
1513 # corrupting the seqnum between mapupdate and retrieve should result
1514 # in NotEnoughSharesError, since each share will look invalid
1517 self.failUnless(f.check(NotEnoughSharesError))
1518 self.failUnless("uncoordinated write" in str(f))
1519 return self._test_corrupt_all(1, "ran out of servers",
1520 corrupt_early=False,
1521 failure_checker=_check)
1524 def test_corrupt_all_block_late(self):
1527 self.failUnless(f.check(NotEnoughSharesError))
1528 return self._test_corrupt_all("share_data", "block hash tree failure",
1529 corrupt_early=False,
1530 failure_checker=_check)
1533 def test_basic_pubkey_at_end(self):
1534 # we corrupt the pubkey in all but the last 'k' shares, allowing the
1535 # download to succeed but forcing a bunch of retries first. Note that
1536 # this is rather pessimistic: our Retrieve process will throw away
1537 # the whole share if the pubkey is bad, even though the rest of the
1538 # share might be good.
1540 self._fn._pubkey = None
1541 k = self._fn.get_required_shares()
1542 N = self._fn.get_total_shares()
1543 d = defer.succeed(None)
1544 d.addCallback(corrupt, self._storage, "pubkey",
1545 shnums_to_corrupt=range(0, N-k))
1546 d.addCallback(lambda res: self.make_servermap())
1547 def _do_retrieve(servermap):
1548 self.failUnless(servermap.get_problems())
1549 self.failUnless("pubkey doesn't match fingerprint"
1550 in str(servermap.get_problems()[0]))
1551 ver = servermap.best_recoverable_version()
1552 r = Retrieve(self._fn, self._storage_broker, servermap, ver)
1553 c = consumer.MemoryConsumer()
1554 return r.download(c)
1555 d.addCallback(_do_retrieve)
1556 d.addCallback(lambda mc: "".join(mc.chunks))
1557 d.addCallback(lambda new_contents:
1558 self.failUnlessEqual(new_contents, self.CONTENTS))
1562 def _test_corrupt_some(self, offset, mdmf=False):
1564 d = self.publish_mdmf()
1566 d = defer.succeed(None)
1567 d.addCallback(lambda ignored:
1568 corrupt(None, self._storage, offset, range(5)))
1569 d.addCallback(lambda ignored:
1570 self.make_servermap())
1571 def _do_retrieve(servermap):
1572 ver = servermap.best_recoverable_version()
1573 self.failUnless(ver)
1574 return self._fn.download_best_version()
1575 d.addCallback(_do_retrieve)
1576 d.addCallback(lambda new_contents:
1577 self.failUnlessEqual(new_contents, self.CONTENTS))
1581 def test_corrupt_some(self):
1582 # corrupt the data of first five shares (so the servermap thinks
1583 # they're good but retrieve marks them as bad), so that the
1584 # MODE_READ set of 6 will be insufficient, forcing node.download to
1585 # retry with more servers.
1586 return self._test_corrupt_some("share_data")
1589 def test_download_fails(self):
1590 d = corrupt(None, self._storage, "signature")
1591 d.addCallback(lambda ignored:
1592 self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1593 "no recoverable versions",
1594 self._fn.download_best_version))
1599 def test_corrupt_mdmf_block_hash_tree(self):
1600 d = self.publish_mdmf()
1601 d.addCallback(lambda ignored:
1602 self._test_corrupt_all(("block_hash_tree", 12 * 32),
1603 "block hash tree failure",
1605 should_succeed=False))
1609 def test_corrupt_mdmf_block_hash_tree_late(self):
1610 # Note - there is no SDMF counterpart to this test, as the SDMF
1611 # files are guaranteed to have exactly one block, and therefore
1612 # the block hash tree fits within the initial read (#1240).
1613 d = self.publish_mdmf()
1614 d.addCallback(lambda ignored:
1615 self._test_corrupt_all(("block_hash_tree", 12 * 32),
1616 "block hash tree failure",
1617 corrupt_early=False,
1618 should_succeed=False))
1622 def test_corrupt_mdmf_share_data(self):
1623 d = self.publish_mdmf()
1624 d.addCallback(lambda ignored:
1625 # TODO: Find out what the block size is and corrupt a
1626 # specific block, rather than just guessing.
1627 self._test_corrupt_all(("share_data", 12 * 40),
1628 "block hash tree failure",
1630 should_succeed=False))
1634 def test_corrupt_some_mdmf(self):
1635 return self._test_corrupt_some(("share_data", 12 * 40),
1640 def check_good(self, r, where):
1641 self.failUnless(r.is_healthy(), where)
1644 def check_bad(self, r, where):
1645 self.failIf(r.is_healthy(), where)
1648 def check_expected_failure(self, r, expected_exception, substring, where):
1649 for (peerid, storage_index, shnum, f) in r.get_share_problems():
1650 if f.check(expected_exception):
1651 self.failUnless(substring in str(f),
1652 "%s: substring '%s' not in '%s'" %
1653 (where, substring, str(f)))
1655 self.fail("%s: didn't see expected exception %s in problems %s" %
1656 (where, expected_exception, r.get_share_problems()))
1659 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1661 return self.publish_one()
1664 def test_check_good(self):
1665 d = self._fn.check(Monitor())
1666 d.addCallback(self.check_good, "test_check_good")
1669 def test_check_mdmf_good(self):
1670 d = self.publish_mdmf()
1671 d.addCallback(lambda ignored:
1672 self._fn.check(Monitor()))
1673 d.addCallback(self.check_good, "test_check_mdmf_good")
1676 def test_check_no_shares(self):
1677 for shares in self._storage._peers.values():
1679 d = self._fn.check(Monitor())
1680 d.addCallback(self.check_bad, "test_check_no_shares")
1683 def test_check_mdmf_no_shares(self):
1684 d = self.publish_mdmf()
1686 for share in self._storage._peers.values():
1688 d.addCallback(_then)
1689 d.addCallback(lambda ignored:
1690 self._fn.check(Monitor()))
1691 d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1694 def test_check_not_enough_shares(self):
1695 for shares in self._storage._peers.values():
1696 for shnum in shares.keys():
1699 d = self._fn.check(Monitor())
1700 d.addCallback(self.check_bad, "test_check_not_enough_shares")
1703 def test_check_mdmf_not_enough_shares(self):
1704 d = self.publish_mdmf()
1706 for shares in self._storage._peers.values():
1707 for shnum in shares.keys():
1710 d.addCallback(_then)
1711 d.addCallback(lambda ignored:
1712 self._fn.check(Monitor()))
1713 d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")
1717 def test_check_all_bad_sig(self):
1718 d = corrupt(None, self._storage, 1) # bad sig
1719 d.addCallback(lambda ignored:
1720 self._fn.check(Monitor()))
1721 d.addCallback(self.check_bad, "test_check_all_bad_sig")
1724 def test_check_mdmf_all_bad_sig(self):
1725 d = self.publish_mdmf()
1726 d.addCallback(lambda ignored:
1727 corrupt(None, self._storage, 1))
1728 d.addCallback(lambda ignored:
1729 self._fn.check(Monitor()))
1730 d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1733 def test_verify_mdmf_all_bad_sharedata(self):
1734 d = self.publish_mdmf()
1735 # On 8 of the shares, corrupt the beginning of the share data.
1736 # The signature check during the servermap update won't catch this.
1737 d.addCallback(lambda ignored:
1738 corrupt(None, self._storage, "share_data", range(8)))
1739 # On 2 of the shares, corrupt the end of the share data.
1740 # The signature check during the servermap update won't catch
1741 # this either, and the retrieval process will have to process
1742 # all of the segments before it notices.
1743 d.addCallback(lambda ignored:
1744 # the block hash tree comes right after the share data, so if we
1745 # corrupt a little before the block hash tree, we'll corrupt in the
1746 # last block of each share.
1747 corrupt(None, self._storage, "block_hash_tree", [8, 9], -5))
1748 d.addCallback(lambda ignored:
1749 self._fn.check(Monitor(), verify=True))
1750 # The verifier should flag the file as unhealthy, and should
1751 # list all 10 shares as bad.
1752 d.addCallback(self.check_bad, "test_verify_mdmf_all_bad_sharedata")
1753 def _check_num_bad(r):
1754 self.failIf(r.is_recoverable())
1755 smap = r.get_servermap()
1756 self.failUnlessEqual(len(smap.get_bad_shares()), 10)
1757 d.addCallback(_check_num_bad)
1760 def test_check_all_bad_blocks(self):
1761 d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1762 # the Checker won't notice this.. it doesn't look at actual data
1763 d.addCallback(lambda ignored:
1764 self._fn.check(Monitor()))
1765 d.addCallback(self.check_good, "test_check_all_bad_blocks")
1769 def test_check_mdmf_all_bad_blocks(self):
1770 d = self.publish_mdmf()
1771 d.addCallback(lambda ignored:
1772 corrupt(None, self._storage, "share_data"))
1773 d.addCallback(lambda ignored:
1774 self._fn.check(Monitor()))
1775 d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1778 def test_verify_good(self):
1779 d = self._fn.check(Monitor(), verify=True)
1780 d.addCallback(self.check_good, "test_verify_good")
1783 def test_verify_all_bad_sig(self):
1784 d = corrupt(None, self._storage, 1) # bad sig
1785 d.addCallback(lambda ignored:
1786 self._fn.check(Monitor(), verify=True))
1787 d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1790 def test_verify_one_bad_sig(self):
1791 d = corrupt(None, self._storage, 1, [9]) # bad sig
1792 d.addCallback(lambda ignored:
1793 self._fn.check(Monitor(), verify=True))
1794 d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1797 def test_verify_one_bad_block(self):
1798 d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1799 # the Verifier *will* notice this, since it examines every byte
1800 d.addCallback(lambda ignored:
1801 self._fn.check(Monitor(), verify=True))
1802 d.addCallback(self.check_bad, "test_verify_one_bad_block")
1803 d.addCallback(self.check_expected_failure,
1804 CorruptShareError, "block hash tree failure",
1805 "test_verify_one_bad_block")
1808 def test_verify_one_bad_sharehash(self):
1809 d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1810 d.addCallback(lambda ignored:
1811 self._fn.check(Monitor(), verify=True))
1812 d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1813 d.addCallback(self.check_expected_failure,
1814 CorruptShareError, "corrupt hashes",
1815 "test_verify_one_bad_sharehash")
1818 def test_verify_one_bad_encprivkey(self):
1819 d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1820 d.addCallback(lambda ignored:
1821 self._fn.check(Monitor(), verify=True))
1822 d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1823 d.addCallback(self.check_expected_failure,
1824 CorruptShareError, "invalid privkey",
1825 "test_verify_one_bad_encprivkey")
1828 def test_verify_one_bad_encprivkey_uncheckable(self):
1829 d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1830 readonly_fn = self._fn.get_readonly()
1831 # a read-only node has no way to validate the privkey
1832 d.addCallback(lambda ignored:
1833 readonly_fn.check(Monitor(), verify=True))
1834 d.addCallback(self.check_good,
1835 "test_verify_one_bad_encprivkey_uncheckable")
1839 def test_verify_mdmf_good(self):
1840 d = self.publish_mdmf()
1841 d.addCallback(lambda ignored:
1842 self._fn.check(Monitor(), verify=True))
1843 d.addCallback(self.check_good, "test_verify_mdmf_good")
1847 def test_verify_mdmf_one_bad_block(self):
1848 d = self.publish_mdmf()
1849 d.addCallback(lambda ignored:
1850 corrupt(None, self._storage, "share_data", [1]))
1851 d.addCallback(lambda ignored:
1852 self._fn.check(Monitor(), verify=True))
1853 # We should find one bad block here
1854 d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1855 d.addCallback(self.check_expected_failure,
1856 CorruptShareError, "block hash tree failure",
1857 "test_verify_mdmf_one_bad_block")
1861 def test_verify_mdmf_bad_encprivkey(self):
1862 d = self.publish_mdmf()
1863 d.addCallback(lambda ignored:
1864 corrupt(None, self._storage, "enc_privkey", [0]))
1865 d.addCallback(lambda ignored:
1866 self._fn.check(Monitor(), verify=True))
1867 d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1868 d.addCallback(self.check_expected_failure,
1869 CorruptShareError, "privkey",
1870 "test_verify_mdmf_bad_encprivkey")
1874 def test_verify_mdmf_bad_sig(self):
1875 d = self.publish_mdmf()
1876 d.addCallback(lambda ignored:
1877 corrupt(None, self._storage, 1, [1]))
1878 d.addCallback(lambda ignored:
1879 self._fn.check(Monitor(), verify=True))
1880 d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        """A readonly MDMF cap cannot check the privkey, so a corrupt
        encprivkey should still verify as good through a readonly node."""
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
                      corrupt(None, self._storage, "enc_privkey", [1]))
        d.addCallback(lambda ignored:
                      self._fn.get_readonly())
        d.addCallback(lambda fn:
                      fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
    def test_verify_sdmf_empty(self):
        """Verification of an empty SDMF file should report it healthy."""
        d = self.publish_sdmf("")
        d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_sdmf")
        d.addCallback(flushEventualQueue)
        # NOTE(review): 'return d' appears to be missing from this excerpt.
    def test_verify_mdmf_empty(self):
        """Verification of an empty MDMF file should report it healthy."""
        d = self.publish_mdmf("")
        d.addCallback(lambda ignored: self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf")
        d.addCallback(flushEventualQueue)
        # NOTE(review): 'return d' appears to be missing from this excerpt.
class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
    """Tests for repairing mutable files: no-op repairs, repairability
    thresholds, forced merges of divergent versions, and readcap repair.

    NOTE(review): this excerpt appears truncated -- several 'return'
    statements, nested 'def' headers, and conditional lines are missing.
    Confirm each flagged spot against the complete file.
    """

    def get_shares(self, s):
        # Snapshot every share held by FakeStorage 's'.
        all_shares = {} # maps (peerid, shnum) to share data
        for peerid in s._peers:
            shares = s._peers[peerid]
            for shnum in shares:
                data = shares[shnum]
                all_shares[ (peerid, shnum) ] = data
        # NOTE(review): 'return all_shares' appears to be missing here.

    def copy_shares(self, ignored=None):
        # Record a snapshot of the current shares for later comparison.
        self.old_shares.append(self.get_shares(self._storage))

    def test_repair_nop(self):
        """Repairing a healthy file succeeds; shares stay on the same
        servers but move to a newer seqnum."""
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results
            # NOTE(review): a post-repair 'self.copy_shares()' call appears
            # to be missing before the comparison below.
            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)
            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                # NOTE(review): the opening '(version0, seqnum0, ...' halves
                # of the two unpack_header() destructurings below appear to
                # be missing from this excerpt.
                k0, N0, segsize0, datalen0,
                 o0) = unpack_header(initial_shares[key])
                k1, N1, segsize1, datalen1,
                 o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)

    def failIfSharesChanged(self, ignored=None):
        # The two most recent snapshots in self.old_shares must be equal.
        old_shares = self.old_shares[-2]
        current_shares = self.old_shares[-1]
        self.failUnlessEqual(old_shares, current_shares)

    def _test_whether_repairable(self, publisher, nshares, expected_result):
        """Publish, keep only shares numbered < nshares, then check and
        repair, asserting recoverability and repair success both equal
        expected_result."""
        # NOTE(review): the 'd = publisher()' line appears to be missing.
        def _delete_some_shares(ign):
            # Discard every share whose number is >= nshares.
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum >= nshares:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        # NOTE(review): a 'def _check(cr):' header appears to be missing
        # above the next two assertions.
        self.failIf(cr.is_healthy())
        self.failUnlessEqual(cr.is_recoverable(), expected_result)
        d.addCallback(_check)
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        d.addCallback(lambda crr: self.failUnlessEqual(crr.get_successful(), expected_result))

    def test_unrepairable_0shares(self):
        return self._test_whether_repairable(self.publish_one, 0, False)

    def test_mdmf_unrepairable_0shares(self):
        return self._test_whether_repairable(self.publish_mdmf, 0, False)

    def test_unrepairable_1share(self):
        return self._test_whether_repairable(self.publish_one, 1, False)

    def test_mdmf_unrepairable_1share(self):
        return self._test_whether_repairable(self.publish_mdmf, 1, False)

    def test_repairable_5shares(self):
        return self._test_whether_repairable(self.publish_one, 5, True)

    def test_mdmf_repairable_5shares(self):
        return self._test_whether_repairable(self.publish_mdmf, 5, True)

    def _test_whether_checkandrepairable(self, publisher, nshares, expected_result):
        """
        Like the _test_whether_repairable tests, but invoking check_and_repair
        instead of invoking check and then invoking repair.
        """
        # NOTE(review): the 'd = publisher()' line appears to be missing.
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    if shnum >= nshares:
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check_and_repair(Monitor()))
        d.addCallback(lambda crr: self.failUnlessEqual(crr.get_repair_successful(), expected_result))

    def test_unrepairable_0shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 0, False)

    def test_mdmf_unrepairable_0shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 0, False)

    def test_unrepairable_1share_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 1, False)

    def test_mdmf_unrepairable_1share_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 1, False)

    def test_repairable_5shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_one, 5, True)

    def test_mdmf_repairable_5shares_checkandrepair(self):
        return self._test_whether_checkandrepairable(self.publish_mdmf, 5, True)

    def test_merge(self):
        """repair() must refuse to merge two recoverable versions with the
        same highest seqnum unless force=True is passed."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            # NOTE(review): 'return d2' appears to be missing here.
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            # NOTE(review): an 'else:' line appears to be missing here.
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            # NOTE(review): 'return d2' appears to be missing here.
        d.addCallback(_check_smap)

    def test_non_merge(self):
        """A repair that does not need a merge (one newest recoverable
        version plus an older one) must not be refused."""
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res:
                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                          1:3,3:3,5:3,7:3,9:3}))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            # NOTE(review): 'return d2' appears to be missing here.
        d.addCallback(_check_smap)

    def get_roothash_for(self, index):
        # return the roothash for the first share we see in the saved set
        shares = self._copied_shares[index]
        for peerid in shares:
            for shnum in shares[peerid]:
                share = shares[peerid][shnum]
                (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
                    unpack_header(share)
                # NOTE(review): 'return root_hash' appears to be missing.

    def test_check_and_repair_readcap(self):
        # we can't currently repair from a mutable readcap: #625
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        def _get_readcap(res):
            self._fn3 = self._fn.get_readonly()
            # also delete some shares
            for peerid,shares in self._storage._peers.items():
                # NOTE(review): the loop body (share deletion) appears to be
                # missing from this excerpt.
        d.addCallback(_get_readcap)
        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
        def _check_results(crr):
            self.failUnless(ICheckAndRepairResults.providedBy(crr))
            # we should detect the unhealthy, but skip over mutable-readcap
            # repairs until #625 is fixed
            self.failIf(crr.get_pre_repair_results().is_healthy())
            self.failIf(crr.get_repair_attempted())
            self.failIf(crr.get_post_repair_results().is_healthy())
        d.addCallback(_check_results)

    def test_repair_empty(self):
        # bug 1689: delete one share of an empty mutable file, then repair.
        # In the buggy version, the check that precedes the retrieve+publish
        # cycle uses MODE_READ, instead of MODE_REPAIR, and fails to get the
        # privkey that repair needs.
        d = self.publish_sdmf("")
        def _delete_one_share(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): a conditional guard selecting which share
                    # to delete appears to be missing from this excerpt.
                    del shares[peerid][shnum]
        d.addCallback(_delete_one_share)
        d.addCallback(lambda ign: self._fn2.check(Monitor()))
        d.addCallback(lambda check_results: self._fn2.repair(check_results))
        # NOTE(review): a 'def _check(crr):' header appears to be missing
        # above the next assertion.
        self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check)
class DevNullDictionary(dict):
    """A dict used to disable the nodemaker's node cache in these tests
    (see the '# disable the nodecache' call sites below)."""
    def __setitem__(self, key, value):
        # NOTE(review): the method body (presumably a bare 'return', so
        # stores are discarded) is missing from this excerpt -- confirm.
class MultipleEncodings(unittest.TestCase):
    """Download must survive seeing shares of the same file produced with
    different k-of-n encoding parameters.

    NOTE(review): this excerpt appears truncated -- the 'def setUp(self):'
    header, several nested 'def'/'if' lines, and 'return d' statements are
    missing. Confirm each flagged spot against the complete file.
    """
    # NOTE(review): the following lines appear to be the body of a missing
    # 'def setUp(self):' (with a missing '_created' callback definition).
    self.CONTENTS = "New contents go here"
    self.uploadable = MutableData(self.CONTENTS)
    self._storage = FakeStorage()
    self._nodemaker = make_nodemaker(self._storage, num_peers=20)
    self._storage_broker = self._nodemaker.storage_broker
    d = self._nodemaker.create_mutable_file(self.uploadable)
    d.addCallback(_created)

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.
        # NOTE(review): a 'fn = self._fn' line appears to be missing here
        # ('fn' is referenced below but never assigned in this excerpt).
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n
        # NOTE(review): an 's = self._storage' line appears to be missing.
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # NOTE(review): the body (grab and return the new shares from
            # storage) is missing from this excerpt.
        d.addCallback(_published)
        # NOTE(review): 'return d' appears to be missing here.

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        # Build a fresh ServerMap for self._fn.
        # NOTE(review): the 'if oldmap is None:' guard and the tail of the
        # ServermapUpdater(...) call/update appear to be missing.
        oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),

    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.
        contents1 = "Contents for encoding 1 (3-of-10) go here"*1000
        contents2 = "Contents for encoding 2 (4-of-9) go here"*1000
        contents3 = "Contents for encoding 3 (4-of-7) go here"*1000
        # we make a retrieval object that doesn't know what encoding
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)
        # NOTE(review): a 'def _merge(res):' header appears to be missing
        # above the following merge logic.
        log.msg("merging sharelists")
        # we merge the shares from the two sets, leaving each shnum in
        # its original location, but using a share from set1 or set2
        # according to the following sequence:
        # NOTE(review): the table describing the fetch sequence is missing
        # from this excerpt.
        # so that neither form can be recovered until fetch [f], at which
        # point version-s1 (the 3-of-10 form) should be recoverable. If
        # the implementation latches on to the first version it sees,
        # then s2 will be recoverable at fetch [g].
        # Later, when we implement code that handles multiple versions,
        # we can use this framework to assert that all recoverable
        # versions are retrieved, and test that 'epsilon' does its job
        places = [2, 2, 3, 2, 1, 1, 1, 2]
        sb = self._storage_broker
        # NOTE(review): a 'sharemap = {}' initialization appears missing.
        for peerid in sorted(sb.get_all_serverids()):
            for shnum in self._shares1.get(peerid, {}):
                if shnum < len(places):
                    which = places[shnum]
                self._storage._peers[peerid] = peers = {}
                in_1 = shnum in self._shares1[peerid]
                in_2 = shnum in self._shares2.get(peerid, {})
                in_3 = shnum in self._shares3.get(peerid, {})
                # NOTE(review): the 'if which == ...' dispatch lines around
                # the three share assignments below are missing.
                peers[shnum] = self._shares1[peerid][shnum]
                sharemap[shnum] = peerid
                peers[shnum] = self._shares2[peerid][shnum]
                sharemap[shnum] = peerid
                peers[shnum] = self._shares3[peerid][shnum]
                sharemap[shnum] = peerid
        # we don't bother placing any other shares
        # now sort the sequence so that share 0 is returned first
        new_sequence = [sharemap[shnum]
                        for shnum in sorted(sharemap.keys())]
        self._storage._sequence = new_sequence
        log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    """Download and servermap behavior when the grid holds a mix of share
    versions.

    NOTE(review): this excerpt appears truncated -- a 'def setUp(self):'
    header and trailing 'return d' statements are missing.
    """
    # NOTE(review): this line appears to be the body of a missing
    # 'def setUp(self):'.
    return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")
        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0] # py2: items() returns a list
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))

    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version
        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)
        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
class Exceptions(unittest.TestCase):
    """Sanity checks on the repr() of the mutable-file exception types."""
    def test_repr(self):
        """repr() of each exception must mention the exception's class
        name, so failures are identifiable in logs."""
        cases = [
            ("NeedMoreDataError", NeedMoreDataError(100, 50, 100)),
            ("UncoordinatedWriteError", UncoordinatedWriteError()),
        ]
        for expected_name, exc in cases:
            rendered = repr(exc)
            self.failUnless(expected_name in rendered, rendered)
class SameKeyGenerator:
    """A stand-in key generator that always hands back the single
    (pubkey, privkey) pair it was constructed with, instead of creating
    fresh keys. Used to force a known storage-index in the tests."""
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey
    def generate(self, keysize=None):
        """Ignore the requested keysize; fire with the canned key pair."""
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
class FirstServerGetsKilled:
    """Post-call notifier that marks a server wrapper as broken, so later
    queries to it fail.

    NOTE(review): a 'done' flag attribute and the guard limiting the
    breakage to the *first* responding server appear to be missing from
    this excerpt -- confirm against the full file.
    """
    def notify(self, retval, wrapper, methname):
        wrapper.broken = True
class FirstServerGetsDeleted:
    """Post-call notifier that remembers the first server to answer and
    then makes later slot_testv_and_readv_and_writev calls to it behave as
    if its shares were deleted.

    NOTE(review): the '__init__' header, the 'done' flag handling, and the
    faked return value for silenced calls appear to be missing from this
    excerpt -- confirm against the full file.
    """
        self.silenced = None
    def notify(self, retval, wrapper, methname):
        # this query will work, but later queries should think the share
        self.silenced = wrapper
        if wrapper == self.silenced:
            assert methname == "slot_testv_and_readv_and_writev"
2444 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    def do_publish_surprise(self, version):
        """Overwrite a file, then publish again using the stale
        pre-overwrite servermap; the stale publish must fail with
        UncoordinatedWriteError.

        NOTE(review): several lines appear missing from this excerpt
        (grid setup, 'def _created(n):', the stash of self.old_map, the
        'n.upload' argument, and 'return d' statements).
        """
        self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"),
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        def _got_smap1(smap):
            # stash the old state of the file
        d.addCallback(_got_smap1)
        # then modify the file, leaving the old map untouched
        d.addCallback(lambda res: log.msg("starting winning write"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # now attempt to modify the file with the old servermap. This
        # will look just like an uncoordinated write, in which every
        # single share got updated between our mapupdate and our publish
        d.addCallback(lambda res: log.msg("starting doomed write"))
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "test_publish_surprise", None,
                                      MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)
    def test_publish_surprise_sdmf(self):
        """Exercise the stale-servermap publish failure with SDMF."""
        return self.do_publish_surprise(SDMF_VERSION)
    def test_publish_surprise_mdmf(self):
        """Exercise the stale-servermap publish failure with MDMF."""
        return self.do_publish_surprise(MDMF_VERSION)
    def test_retrieve_surprise(self):
        """Retrieving an old version with a stale servermap after the file
        has changed must fail with NotEnoughSharesError.

        NOTE(review): several lines appear missing from this excerpt
        (grid setup, 'def _created(n):', the stash of self.old_map, the
        download_version call, and 'return d' statements).
        """
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"*4000))
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_READ))
        def _got_smap1(smap):
            # stash the old state of the file
        d.addCallback(_got_smap1)
        # then modify the file, leaving the old map untouched
        d.addCallback(lambda res: log.msg("starting winning write"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # now attempt to retrieve the old version with the old servermap.
        # This will look like someone has changed the file since we
        # updated the servermap.
        d.addCallback(lambda res: log.msg("starting doomed read"))
        d.addCallback(lambda res:
                      self.shouldFail(NotEnoughSharesError,
                                      "test_retrieve_surprise",
                                      "ran out of servers: have 0 of 1",
                                      self.old_map.best_recoverable_version(),
        d.addCallback(_created)
    def test_unexpected_shares(self):
        """Shares appearing on servers the old servermap never saw must
        make the stale publish fail with UncoordinatedWriteError.

        NOTE(review): grid setup, 'def _created(n):', the stash of
        self.old_map, the 'n.upload' argument, and 'return d' lines appear
        to be missing from this excerpt.
        """
        # upload the file, take a servermap, shut down one of the servers,
        # upload it again (causing shares to appear on a new server), then
        # upload using the old servermap. The last upload should fail with an
        # UncoordinatedWriteError, because of the shares that didn't appear
        self.basedir = "mutable/Problems/test_unexpected_shares"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        def _got_smap1(smap):
            # stash the old state of the file
            # now shut down one of the servers
            peer0 = list(smap.make_sharemap()[0])[0].get_serverid()
            self.g.remove_server(peer0)
            # then modify the file, leaving the old map untouched
            log.msg("starting winning write")
            return n.overwrite(MutableData("contents 2"))
        d.addCallback(_got_smap1)
        # now attempt to modify the file with the old servermap. This
        # will look just like an uncoordinated write, in which every
        # single share got updated between our mapupdate and our publish
        d.addCallback(lambda res: log.msg("starting doomed write"))
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "test_surprise", None,
                                      MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)
    def test_multiply_placed_shares(self):
        """After removing and later restoring a server, repeated
        download+overwrite cycles must leave exactly one recoverable
        version (all shares updated, including multiply-placed ones).

        NOTE(review): grid setup and 'def _created(n):' lines appear to be
        missing from this excerpt.
        """
        self.basedir = "mutable/Problems/test_multiply_placed_shares"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # remove one of the servers and reupload the file.
        servers = self.g.get_all_serverids()
        self.ss = self.g.remove_server(servers[len(servers)-1])
        new_server = self.g.make_server(len(servers)-1)
        self.g.add_server(len(servers)-1, new_server)
        return self._node.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data: MutableData(data))
        d.addCallback(lambda data: self._node.overwrite(data))
        # restore the server we removed earlier, then download+upload
        def _overwritten(ign):
            self.g.add_server(len(self.g.servers_by_number), self.ss)
            return self._node.download_best_version()
        d.addCallback(_overwritten)
        d.addCallback(lambda data: MutableData(data))
        d.addCallback(lambda data: self._node.overwrite(data))
        d.addCallback(lambda ignored:
                      self._node.get_servermap(MODE_CHECK))
        def _overwritten_again(smap):
            # Make sure that all shares were updated by making sure that
            # there aren't any other versions in the sharemap.
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
        d.addCallback(_overwritten_again)
    def test_bad_server(self):
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        #
        # NOTE(review): grid setup, the 'def _got_node(n):' header, and
        # 'return' lines appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_bad_server"
        nm = self.g.clients[0].nodemaker
        # to make sure that one of the initial peers is broken, we have to
        # get creative. We create an RSA key and compute its storage-index.
        # Then we make a KeyGenerator that always returns that one key, and
        # use it to create the mutable file. This will get easier when we can
        # use #467 static-server-selection to disable permutation and force
        # the choice of server for share[0].
        d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
        def _got_key( (pubkey, privkey) ): # py2 tuple-parameter unpacking
            nm.key_generator = SameKeyGenerator(pubkey, privkey)
            pubkey_s = pubkey.serialize()
            privkey_s = privkey.serialize()
            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                        ssk_pubkey_fingerprint_hash(pubkey_s))
            self._storage_index = u.get_storage_index()
        d.addCallback(_got_key)
        def _break_peer0(res):
            si = self._storage_index
            servers = nm.storage_broker.get_servers_for_psi(si)
            self.g.break_server(servers[0].get_serverid())
            self.server1 = servers[1]
        d.addCallback(_break_peer0)
        # now "create" the file, using the pre-established key, and let the
        # initial publish finally happen
        d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
        # that ought to work
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break the second peer
        def _break_peer1(res):
            self.g.break_server(self.server1.get_serverid())
        d.addCallback(_break_peer1)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        def _explain_error(f):
            if f.check(NotEnoughServersError):
                print "first_error:", f.value.first_error # py2 print stmt
        d.addErrback(_explain_error)
        d.addCallback(_got_node)
    def test_bad_server_overlap(self):
        # like test_bad_server, but with no extra unused servers to fall back
        # upon. This means that we must re-use a server which we've already
        # used. If we don't remember the fact that we sent them one share
        # already, we'll mistakenly think we're experiencing an
        # UncoordinatedWriteError.
        #
        # NOTE(review): grid setup, the 'def _created(n):' header, and
        # 'return d' lines appear to be missing from this excerpt.
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server_overlap"
        nm = self.g.clients[0].nodemaker
        sb = nm.storage_broker
        peerids = [s.get_serverid() for s in sb.get_connected_servers()]
        self.g.break_server(peerids[0])
        d = nm.create_mutable_file(MutableData("contents 1"))
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break one of the remaining servers
        def _break_second_server(res):
            self.g.break_server(peerids[1])
        d.addCallback(_break_second_server)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(_created)
    def test_publish_all_servers_bad(self):
        # Break all servers: the publish should fail
        # NOTE(review): grid setup and 'return d' appear to be missing
        # from this excerpt.
        self.basedir = "mutable/Problems/test_publish_all_servers_bad"
        nm = self.g.clients[0].nodemaker
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().broken = True
        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_all_servers_bad",
                            "ran out of good servers",
                            nm.create_mutable_file, MutableData("contents"))
    def test_publish_no_servers(self):
        # no servers at all: the publish should fail
        # NOTE(review): 'return d' appears to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_publish_no_servers"
        self.set_up_grid(num_servers=0)
        nm = self.g.clients[0].nodemaker
        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_no_servers",
                            "Ran out of non-bad servers",
                            nm.create_mutable_file, MutableData("contents"))
    def test_privkey_query_error(self):
        # when a servermap is updated with MODE_WRITE, it tries to get the
        # privkey. Something might go wrong during this query attempt.
        # Exercise the code in _privkey_query_failed which tries to handle
        #
        # NOTE(review): the 'def _created(n):' header and a trailing
        # 'return d' appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_privkey_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        nm._node_cache = DevNullDictionary() # disable the nodecache
        # we need some contents that are large enough to push the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 2000 # about 50KB
        LARGE_uploadable = MutableData(LARGE)
        d = nm.create_mutable_file(LARGE_uploadable)
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        # When a mapupdate is performed on a node that doesn't yet know
        # the privkey, a short read is sent to a batch of servers, to get
        # the verinfo and (hopefully, if the file is short enough) the
        # encprivkey. Our file is too large to let this first read
        # contain the encprivkey. Each non-encprivkey-bearing response
        # that arrives (until the node gets the encprivkey) will trigger
        # a second read to specifically read the encprivkey.
        # So, to exercise this case:
        # 1. notice which server gets a read() call first
        # 2. tell that server to start throwing errors
        killer = FirstServerGetsKilled()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = killer.notify
        d.addCallback(_created)
        # now we update a servermap from a new node (which doesn't have the
        # privkey yet, forcing it to use a separate privkey query). Note that
        # the map-update will succeed, since we'll just get a copy from one
        # of the other shares.
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
    def test_privkey_query_missing(self):
        # like test_privkey_query_error, but the shares are deleted by the
        # second query, instead of raising an exception.
        #
        # NOTE(review): the 'def _created(n):' header and a trailing
        # 'return d' appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_privkey_query_missing"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        LARGE = "These are Larger contents" * 2000 # about 50KiB
        LARGE_uploadable = MutableData(LARGE)
        nm._node_cache = DevNullDictionary() # disable the nodecache
        d = nm.create_mutable_file(LARGE_uploadable)
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        deleter = FirstServerGetsDeleted()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = deleter.notify
        d.addCallback(_created)
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
    def test_block_and_hash_query_error(self):
        # This tests for what happens when a query to a remote server
        # fails in either the hash validation step or the block getting
        # step (because of batching, this is the same actual query).
        # We need to have the storage server persist up until the point
        # that its prefix is validated, then suddenly die. This
        # exercises some exception handling code in Retrieve.
        #
        # NOTE(review): the '_created' callback definition and a trailing
        # 'return d' appear to be missing from this excerpt.
        self.basedir = "mutable/Problems/test_block_and_hash_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        CONTENTS = "contents" * 2000
        CONTENTS_uploadable = MutableData(CONTENTS)
        d = nm.create_mutable_file(CONTENTS_uploadable)
        d.addCallback(_created)
        d.addCallback(lambda ignored:
                      self._node.get_servermap(MODE_READ))
        def _then(servermap):
            # we have our servermap. Now we set up the servers like the
            # tests above -- the first one that gets a read call should
            # start throwing errors, but only after returning its prefix
            # for validation. Since we'll download without fetching the
            # private key, the next query to the remote server will be
            # for either a block and salt or for hashes, either of which
            # will exercise the error handling code.
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
            ver = servermap.best_recoverable_version()
            return self._node.download_version(servermap, ver)
        d.addCallback(_then)
        d.addCallback(lambda data:
                      self.failUnlessEqual(data, CONTENTS))
    def test_1654(self):
        # test that the Retrieve object unconditionally verifies the block
        # hash tree root for mutable shares. The failure mode is that
        # carefully crafted shares can cause undetected corruption (the
        # retrieve appears to finish successfully, but the result is
        # corrupted). When fixed, these shares always cause a
        # CorruptShareError, which results in NotEnoughSharesError in this
        # 2-of-2 case.
        self.basedir = "mutable/Problems/test_1654"
        self.set_up_grid(num_servers=2)
        cap = uri.from_string(TEST_1654_CAP)
        si = cap.get_storage_index()

        # plant the two pre-built (deliberately corrupted) shares directly
        # into the storage servers' share directories
        for share, shnum in [(TEST_1654_SH0, 0), (TEST_1654_SH1, 1)]:
            sharedata = base64.b64decode(share)
            storedir = self.get_serverdir(shnum)
            storage_path = os.path.join(storedir, "shares",
                                        storage_index_to_dir(si))
            fileutil.make_dirs(storage_path)
            fileutil.write(os.path.join(storage_path, "%d" % shnum),
                           sharedata)

        nm = self.g.clients[0].nodemaker
        n = nm.create_from_cap(TEST_1654_CAP)
        # to exercise the problem correctly, we must ensure that sh0 is
        # processed first, and sh1 second. NoNetworkGrid has facilities to
        # stall the first request from a single server, but it's not
        # currently easy to extend that to stall the second request (mutable
        # retrievals will see two: first the mapupdate, then the fetch).
        # However, repeated executions of this run without the #1654 fix
        # suggests that we're failing reliably even without explicit stalls,
        # probably because the servers are queried in a fixed order. So I'm
        # ok with relying upon that.
        d = self.shouldFail(NotEnoughSharesError, "test #1654 share corruption",
                            "ran out of servers",
                            n.download_best_version)
        return d
# Writecap for the pre-built corrupted-share fixtures below (ticket #1654).
TEST_1654_CAP = "URI:SSK:6jthysgozssjnagqlcxjq7recm:yxawei54fmf2ijkrvs2shs6iey4kpdp6joi7brj2vrva6sp5nf3a"
2845 TEST_1654_SH0 = """\
2846 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA46m9s5j6lnzsOHytBTs2JOo
2847 AkWe8058hyrDa8igfBSqZMKO3aDOrFuRVt0ySYZ6oihFqPJRAAAAAAAAB8YAAAAA
2848 AAAJmgAAAAFPNgDkK8brSCzKz6n8HFqzbnAlALvnaB0Qpa1Bjo9jiZdmeMyneHR+
2849 UoJcDb1Ls+lVLeUqP2JitBEXdCzcF/X2YMDlmKb2zmPqWfOw4fK0FOzYk6gCRZ7z
2850 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2851 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2852 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2853 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2854 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2855 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2856 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2857 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2858 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2859 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2860 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2861 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2862 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2863 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2864 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2865 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2866 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2867 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2868 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2869 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2870 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp4z9+5yd/pjkVy
2871 bmvc7Jr70bOVpxvRoI2ZEgh/+QdxfcxGzRV0shAW86irr5bDQOyyknYk0p2xw2Wn
2872 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2873 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2874 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2875 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2876 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2877 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2878 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2879 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2880 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2881 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2882 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2883 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2884 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2885 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2886 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2887 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2888 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2889 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2890 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2891 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2892 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2893 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2894 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2895 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2896 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2897 72mXGlqyLyWYuAAAAAA="""
2899 TEST_1654_SH1 = """\
2900 VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA45R4Y4kuV458rSTGDVTqdzz
2901 9Fig3NQ3LermyD+0XLeqbC7KNgvv6cNzMZ9psQQ3FseYsIR1AAAAAAAAB8YAAAAA
2902 AAAJmgAAAAFPNgDkd/Y9Z+cuKctZk9gjwF8thT+fkmNCsulILsJw5StGHAA1f7uL
2903 MG73c5WBcesHB2epwazfbD3/0UZTlxXWXotywVHhjiS5XjnytJMYNVOp3PP0WKDc
2904 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2905 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2906 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2907 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2908 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
2909 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABCDwr
2910 uIlhFlv21pDqyMeA9X1wHp98a1CKY4qfC7gn5exyODAcnhZKHCV18XBerbZLAgIA
2911 AAAAAAAAJgAAAAAAAAAmAAABjwAAAo8AAALTAAAC8wAAAAAAAAMGAAAAAAAAB8Yw
2912 ggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQCXKMor062nfxHVutMbqNcj
2913 vVC92wXTcQulenNWEX+0huK54igTAG60p0lZ6FpBJ9A+dlStT386bn5I6qe50ky5
2914 CFodQSsQX+1yByMFlzqPDo4rclk/6oVySLypxnt/iBs3FPZ4zruhYXcITc6zaYYU
2915 Xqaw/C86g6M06MWQKsGev7PS3tH7q+dtovWzDgU13Q8PG2whGvGNfxPOmEX4j0wL
2916 FCBavpFnLpo3bJrj27V33HXxpPz3NP+fkaG0pKH03ANd/yYHfGf74dC+eD5dvWBM
2917 DU6fZQN4k/T+cth+qzjS52FPPTY9IHXIb4y+1HryVvxcx6JDifKoOzpFc3SDbBAP
2918 AgERKDjOFxVClH81DF/QkqpP0glOh6uTsFNx8Nes02q0d7iip2WqfG9m2+LmiWy8
2919 Pg7RlQQy2M45gert1EDsH4OI69uxteviZP1Mo0wD6HjmWUbGIQRmsT3DmYEZCCMA
2920 /KjhNmlov2+OhVxIaHwE7aN840IfkGdJ/JssB6Z/Ym3+ou4+jAYKhifPQGrpBVjd
2921 73oH6w9StnoGYIrEEQw8LFc4jnAFYciKlPuo6E6E3zDseE7gwkcOpCtVVksZu6Ii
2922 GQgIV8vjFbNz9M//RMXOBTwKFDiG08IAPh7fv2uKzFis0TFrR7sQcMQ/kZZCLPPi
2923 ECIX95NRoFRlxK/1kZ1+FuuDQgABz9+5yd/pjkVybmvc7Jr70bOVpxvRoI2ZEgh/
2924 +QdxfcwAAm5iDnzPtsVdcbuNkKprfI8N4n+QmUOSMbAJ7M8r1cp40cTBnAw+rMKC
2925 98P4pURrotx116Kd0i3XmMZu81ew57H3Zb73r+syQCXZNOP0xhMDclIt0p2xw2Wn
2926 z6QccyXyobXPOFLO3ZBPnKaE58aaN7x3srQZYUKafet5ZMDX8fsQf2mbxnaeG5NF
2927 eO6wG++WBUo9leddnzKBnRcMGRAtJEjwfKMVPE8SmuTlL6kRc7n8wvY2ygClWlRm
2928 d7o95tZfoO+mexB/DLEpWLtlAiqh8yJ8cWaC5rYz4ZC2+z7QkeKXCHWAN3i4C++u
2929 dfZoD7qWnyAldYTydADwL885dVY7WN6NX9YtQrG3JGrp3wZvFrX5x9Jv7hls0A6l
2930 2xI4NlcSSrgWIjzrGdwQEjIUDyfc7DWroEpJEfIaSnjkeTT0D8WV5NqzWH8UwWoF
2931 wjwDltaQ3Y8O/wJPGBqBAJEob+p6QxvP5T2W1jnOvbgsMZLNDuY6FF1XcuR7yvNF
2932 sXKP6aXMV8BKSlrehFlpBMTu4HvJ1rZlKuxgR1A9njiaKD2U0NitCKMIpIXQxT6L
2933 eZn9M8Ky68m0Zjdw/WCsKz22GTljSM5Nfme32BrW+4G+R55ECwZ1oh08nrnWjXmw
2934 PlSHj2lwpnsuOG2fwJkyMnIIoIUII31VLATeLERD9HfMK8/+uZqJ2PftT2fhHL/u
2935 CDCIdEWSUBBHpA7p8BbgiZKCpYzf+pbS2/EJGL8gQAvSH1atGv/o0BiAd10MzTXC
2936 Xn5xDB1Yh+FtYPYloBGAwmxKieDMnsjy6wp5ovdmOc2y6KBr27DzgEGchLyOxHV4
2937 Q7u0Hkm7Om33ir1TUgK6bdPFL8rGNDOZq/SR4yn4qSsQTPD6Y/HQSK5GzkU4dGLw
2938 tU6GNpu142QE36NfWkoUWHKf1YgIYrlAGJWlj93et54ZGUZGVN7pAspZ+mvoMnDU
2939 Jh46nrQsEJiQz8AqgREck4Fi4S7Rmjh/AhXmzFWFca3YD0BmuYU6fxGTRPZ70eys
2940 LV5qPTmTGpX+bpvufAp0vznkiOdqTn1flnxdslM2AukiD6OwkX1dBH8AvzObhbz0
2941 ABhx3c+cAhAnYhJmsYaAwbpWpp8CM5opmsRgwgaz8f8lxiRfXbrWD8vdd4dm2B9J
2942 jaiGCR8/UXHFBGZhCgLB2S+BNXKynIeP+POGQtMIIERUtwOIKt1KfZ9jZwf/ulJK
2943 fv/VmBPmGu+CHvFIlHAzlxwJeUz8wSltUeeHjADZ9Wag5ESN3R6hsmJL+KL4av5v
2944 DFobNPiNWbc+4H+3wg1R0oK/uTQb8u1S7uWIGVmi5fJ4rVVZ/VKKtHGVwm/8OGKF
2945 tcrJFJcJADFVkgpsqN8UINsMJLxfJRoBgABEWih5DTRwNXK76Ma2LjDBrEvxhw8M
2946 7SLKhi5vH7/Cs7jfLZFgh2T6flDV4VM/EA7CYEHgEb8MFmioFGOmhUpqifkA3SdX
2947 jGi2KuZZ5+O+sHFWXsUjiFPEzUJF+syPEzH1aF5R+F8pkhifeYh0KP6OHd6Sgn8s
2948 TStXB+q0MndBXw5ADp/Jac1DVaSWruVAdjemQ+si1olk8xH+uTMXU7PgV9WkpIiy
2949 4BhnFU9IbCr/m7806c13xfeelaffP2pr7EDdgwz5K89VWCa3k9OSDnMtj2CQXlC7
2950 bQHi/oRGA1aHSn84SIt+HpAfRoVdr4N90bYWmYQNqfKoyWCbEr+dge/GSD1nddAJ
2951 72mXGlqyLyWYuAAAAAA="""
class FileHandle(unittest.TestCase):
    # Tests for MutableFileHandle, the IMutableUploadable wrapper around a
    # seekable filehandle.
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)


    def test_filehandle_read(self):
        # read() must yield the underlying data in order, chunk by chunk
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])


    def test_filehandle_get_size(self):
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)


    def test_filehandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])


    def test_filehandle_file(self):
        # Make sure that the MutableFileHandle works on a file as well
        # as a StringIO object, since in some cases it will be asked to
        # deal with files.
        self.basedir = self.mktemp()
        # necessary? What am I doing wrong here?
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        f.close()
        f = open(f_path, "r")
        try:
            uploadable = MutableFileHandle(f)

            data = uploadable.read(len(self.test_data))
            self.failUnlessEqual("".join(data), self.test_data)
            size = uploadable.get_size()
            self.failUnlessEqual(size, len(self.test_data))
        finally:
            # the original version leaked this read handle; close it so the
            # test doesn't leave an open file behind
            f.close()


    def test_close(self):
        # Make sure that the MutableFileHandle closes its handle when
        # it is closed.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
class DataHandle(unittest.TestCase):
    # Tests for MutableData, the IMutableUploadable wrapper around an
    # in-memory string.
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)


    def test_datahandle_read(self):
        # read() must yield the data in order, chunk by chunk
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])


    def test_datahandle_get_size(self):
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)


    def test_datahandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
              PublishMixin):
    # Exercises the IMutableFileVersion interface against both SDMF and
    # MDMF files uploaded to a no-network grid.
    def setUp(self):
        GridTestMixin.setUp(self)
        self.basedir = self.mktemp()
        self.set_up_grid()
        self.c = self.g.clients[0]
        self.nm = self.c.nodemaker
        self.data = "test data" * 100000 # about 900 KiB; MDMF
        self.small_data = "test data" * 10 # 90 B; SDMF
    def do_upload_mdmf(self, data=None):
        # Upload an MDMF file (self.data unless overridden), stash the
        # resulting node in self.mdmf_node, and return it through the
        # Deferred for chaining.
        if data is None:
            data = self.data
        d = self.nm.create_mutable_file(MutableData(data),
                                        version=MDMF_VERSION)
        def _then(n):
            assert isinstance(n, MutableFileNode)
            assert n._protocol_version == MDMF_VERSION
            self.mdmf_node = n
            return n
        d.addCallback(_then)
        return d
    def do_upload_sdmf(self, data=None):
        # Upload an SDMF file (self.small_data unless overridden), stash
        # the resulting node in self.sdmf_node, and return it through the
        # Deferred for chaining.
        if data is None:
            data = self.small_data
        d = self.nm.create_mutable_file(MutableData(data))
        def _then(n):
            assert isinstance(n, MutableFileNode)
            assert n._protocol_version == SDMF_VERSION
            self.sdmf_node = n
            return n
        d.addCallback(_then)
        return d
    def do_upload_empty_sdmf(self):
        # Upload a zero-length SDMF file and stash the node in
        # self.sdmf_zero_length_node.
        d = self.nm.create_mutable_file(MutableData(""))
        def _then(n):
            assert isinstance(n, MutableFileNode)
            self.sdmf_zero_length_node = n
            assert n._protocol_version == SDMF_VERSION
            return n
        d.addCallback(_then)
        return d
    def do_upload(self):
        # Upload one MDMF file and one SDMF file, populating both
        # self.mdmf_node and self.sdmf_node.
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.do_upload_sdmf())
        return d
3117 def test_debug(self):
3118 d = self.do_upload_mdmf()
3120 fso = debug.FindSharesOptions()
3121 storage_index = base32.b2a(n.get_storage_index())
3122 fso.si_s = storage_index
3123 fso.nodedirs = [os.path.dirname(abspath_expanduser_unicode(unicode(storedir)))
3125 in self.iterate_servers()]
3126 fso.stdout = StringIO()
3127 fso.stderr = StringIO()
3128 debug.find_shares(fso)
3129 sharefiles = fso.stdout.getvalue().splitlines()
3130 expected = self.nm.default_encoding_parameters["n"]
3131 self.failUnlessEqual(len(sharefiles), expected)
3133 do = debug.DumpOptions()
3134 do["filename"] = sharefiles[0]
3135 do.stdout = StringIO()
3136 debug.dump_share(do)
3137 output = do.stdout.getvalue()
3138 lines = set(output.splitlines())
3139 self.failUnless("Mutable slot found:" in lines, output)
3140 self.failUnless(" share_type: MDMF" in lines, output)
3141 self.failUnless(" num_extra_leases: 0" in lines, output)
3142 self.failUnless(" MDMF contents:" in lines, output)
3143 self.failUnless(" seqnum: 1" in lines, output)
3144 self.failUnless(" required_shares: 3" in lines, output)
3145 self.failUnless(" total_shares: 10" in lines, output)
3146 self.failUnless(" segsize: 131073" in lines, output)
3147 self.failUnless(" datalen: %d" % len(self.data) in lines, output)
3148 vcap = n.get_verify_cap().to_string()
3149 self.failUnless(" verify-cap: %s" % vcap in lines, output)
3151 cso = debug.CatalogSharesOptions()
3152 cso.nodedirs = fso.nodedirs
3153 cso.stdout = StringIO()
3154 cso.stderr = StringIO()
3155 debug.catalog_shares(cso)
3156 shares = cso.stdout.getvalue().splitlines()
3157 oneshare = shares[0] # all shares should be MDMF
3158 self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
3159 self.failUnless(oneshare.startswith("MDMF"), oneshare)
3160 fields = oneshare.split()
3161 self.failUnlessEqual(fields[0], "MDMF")
3162 self.failUnlessEqual(fields[1], storage_index)
3163 self.failUnlessEqual(fields[2], "3/10")
3164 self.failUnlessEqual(fields[3], "%d" % len(self.data))
3165 self.failUnless(fields[4].startswith("#1:"), fields[3])
3166 # the rest of fields[4] is the roothash, which depends upon
3167 # encryption salts and is not constant. fields[5] is the
3168 # remaining time on the longest lease, which is timing dependent.
3169 # The rest of the line is the quoted pathname to the share.
3170 d.addCallback(_debug)
    def test_get_sequence_number(self):
        # A fresh upload has seqnum 1; each overwrite bumps it by one, for
        # both SDMF and MDMF files.
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 1))
        d.addCallback(lambda ignored:
            self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 1))
        # Now update. The sequence number in both cases should be 1 in
        # each case before the update, and 2 afterward.
        def _do_update(ignored):
            new_data = MutableData("foo bar baz" * 100000)
            new_small_data = MutableData("foo bar baz" * 10)
            d1 = self.mdmf_node.overwrite(new_data)
            d2 = self.sdmf_node.overwrite(new_small_data)
            dl = gatherResults([d1, d2])
            return dl
        d.addCallback(_do_update)
        d.addCallback(lambda ignored:
            self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 2))
        d.addCallback(lambda ignored:
            self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 2))
        return d
    def test_cap_after_upload(self):
        # If we create a new mutable file and upload things to it, and
        # it's an MDMF file, we should get an MDMF cap back from that
        # file and should be able to use that.
        # That's essentially what MDMF node is, so just check that.
        d = self.do_upload_mdmf()
        def _then(ign):
            mdmf_uri = self.mdmf_node.get_uri()
            cap = uri.from_string(mdmf_uri)
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
            cap = uri.from_string(readonly_mdmf_uri)
            self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
        d.addCallback(_then)
        return d
    def test_mutable_version(self):
        # assert that getting parameters from the IMutableVersion object
        # gives us the same data as getting them from the filenode itself
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
        def _check_mdmf(bv):
            n = self.mdmf_node
            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
            self.failIf(bv.is_readonly())
        d.addCallback(_check_mdmf)
        d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
        def _check_sdmf(bv):
            n = self.sdmf_node
            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
            self.failIf(bv.is_readonly())
        d.addCallback(_check_sdmf)
        return d
    def test_get_readonly_version(self):
        # get_best_readable_version() should always hand back a readonly
        # version, for both MDMF and SDMF files.
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))

        # Attempting to get a mutable version of a mutable file from a
        # filenode initialized with a readcap should return a readonly
        # version of that same node.
        d.addCallback(lambda ign: self.mdmf_node.get_readonly())
        d.addCallback(lambda ro: ro.get_best_mutable_version())
        d.addCallback(lambda v: self.failUnless(v.is_readonly()))

        d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))

        d.addCallback(lambda ign: self.sdmf_node.get_readonly())
        d.addCallback(lambda ro: ro.get_best_mutable_version())
        d.addCallback(lambda v: self.failUnless(v.is_readonly()))
        return d
    def test_toplevel_overwrite(self):
        # overwrite() on the filenode itself should replace the full
        # contents of both MDMF and SDMF files.
        new_data = MutableData("foo bar baz" * 100000)
        new_small_data = MutableData("foo bar baz" * 10)
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "foo bar baz" * 100000))
        d.addCallback(lambda ignored:
            self.sdmf_node.overwrite(new_small_data))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "foo bar baz" * 10))
        return d
    def test_toplevel_modify(self):
        # modify() on the filenode itself should apply the modifier
        # function to both MDMF and SDMF files.
        d = self.do_upload()
        def modifier(old_contents, servermap, first_time):
            return old_contents + "modified"
        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        d.addCallback(lambda ignored:
            self.sdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        return d
    def test_version_modify(self):
        # TODO: When we can publish multiple versions, alter this test
        # to modify a version other than the best usable version, then
        # test to see that the best recoverable version is that.
        d = self.do_upload()
        def modifier(old_contents, servermap, first_time):
            return old_contents + "modified"
        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        d.addCallback(lambda ignored:
            self.sdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        return d
    def test_download_version(self):
        # downloading a specific (non-best) version should return the
        # plaintext that was published as that version
        d = self.publish_multiple()
        # We want to have two recoverable versions on the grid.
        d.addCallback(lambda res:
            self._set_versions({0:0,2:0,4:0,6:0,8:0,
                                1:1,3:1,5:1,7:1,9:1}))
        # Now try to download each version. We should get the plaintext
        # associated with that version.
        d.addCallback(lambda ignored:
            self._fn.get_servermap(mode=MODE_READ))
        def _got_servermap(smap):
            versions = smap.recoverable_versions()
            assert len(versions) == 2

            self.servermap = smap
            self.version1, self.version2 = versions
            assert self.version1 != self.version2

            # verinfo[0] is the seqnum; CONTENTS is indexed from zero
            self.version1_seqnum = self.version1[0]
            self.version2_seqnum = self.version2[0]
            self.version1_index = self.version1_seqnum - 1
            self.version2_index = self.version2_seqnum - 1

        d.addCallback(_got_servermap)
        d.addCallback(lambda ignored:
            self._fn.download_version(self.servermap, self.version1))
        d.addCallback(lambda results:
            self.failUnlessEqual(self.CONTENTS[self.version1_index],
                                 results))
        d.addCallback(lambda ignored:
            self._fn.download_version(self.servermap, self.version2))
        d.addCallback(lambda results:
            self.failUnlessEqual(self.CONTENTS[self.version2_index],
                                 results))
        return d
    def test_download_nonexistent_version(self):
        # asking for a verinfo that is not in the servermap should fail
        # with UnrecoverableFileError
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
        def _set_servermap(servermap):
            self.servermap = servermap
        d.addCallback(_set_servermap)
        d.addCallback(lambda ignored:
            self.shouldFail(UnrecoverableFileError, "nonexistent version",
                            None,
                            self.mdmf_node.download_version, self.servermap,
                            "not a version"))
        return d
    def _test_partial_read(self, node, expected, modes, step):
        # Common driver: run each (name, offset, length) mode through
        # _do_partial_read, then stream the whole file 'step' bytes at a
        # time and compare against 'expected'.
        d = node.get_best_readable_version()
        for (name, offset, length) in modes:
            d.addCallback(self._do_partial_read, name, expected, offset, length)
        # then read the whole thing, but only a few bytes at a time, and see
        # that the results are what we expect.
        def _read_data(version):
            c = consumer.MemoryConsumer()
            d2 = defer.succeed(None)
            for i in xrange(0, len(expected), step):
                d2.addCallback(lambda ignored, i=i: version.read(c, i, step))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(expected, "".join(c.chunks)))
            return d2
        d.addCallback(_read_data)
        return d
    def _do_partial_read(self, version, name, expected, offset, length):
        # Read [offset:offset+length] (length=None means "to the end")
        # from 'version' and compare against the same slice of 'expected'.
        c = consumer.MemoryConsumer()
        d = version.read(c, offset, length)
        if length is None:
            expected_range = expected[offset:]
        else:
            expected_range = expected[offset:offset+length]
        d.addCallback(lambda ignored: "".join(c.chunks))
        def _check(results):
            if results != expected_range:
                print "read([%d]+%s) got %d bytes, not %d" % \
                      (offset, length, len(results), len(expected_range))
                print "got: %s ... %s" % (results[:20], results[-20:])
                print "exp: %s ... %s" % (expected_range[:20], expected_range[-20:])
                self.fail("results[%s] != expected_range" % name)
            return version # daisy-chained to next call
        d.addCallback(_check)
        return d
    def test_partial_read_mdmf_0(self):
        # partial reads of a zero-length MDMF file
        data = ""
        d = self.do_upload_mdmf(data=data)
        modes = [("all1", 0,0),
                 ("all2", 0, None),
                 ]
        d.addCallback(self._test_partial_read, data, modes, 1)
        return d
    def test_partial_read_mdmf_large(self):
        # partial reads of a multi-segment MDMF file, with offsets chosen
        # to straddle segment-boundary fenceposts
        segment_boundary = mathutil.next_multiple(128 * 1024, 3)
        modes = [("start_on_segment_boundary", segment_boundary, 50),
                 ("ending_one_byte_after_segment_boundary", segment_boundary-50, 51),
                 ("zero_length_at_start", 0, 0),
                 ("zero_length_in_middle", 50, 0),
                 ("zero_length_at_segment_boundary", segment_boundary, 0),
                 ("complete_file1", 0, len(self.data)),
                 ("complete_file2", 0, None),
                 ]
        d = self.do_upload_mdmf()
        d.addCallback(self._test_partial_read, self.data, modes, 10000)
        return d
    def test_partial_read_sdmf_0(self):
        # partial reads of a zero-length SDMF file
        data = ""
        modes = [("all1", 0,0),
                 ("all2", 0, None),
                 ]
        d = self.do_upload_sdmf(data=data)
        d.addCallback(self._test_partial_read, data, modes, 1)
        return d
    def test_partial_read_sdmf_2(self):
        # partial reads of a two-byte SDMF file
        data = "hi"
        modes = [("one_byte", 0, 1),
                 ("last_byte", 1, 1),
                 ("complete_file", 0, 2),
                 ]
        d = self.do_upload_sdmf(data=data)
        d.addCallback(self._test_partial_read, data, modes, 1)
        return d
    def test_partial_read_sdmf_90(self):
        # partial reads of the default 90-byte SDMF file
        modes = [("start_at_middle", 50, 40),
                 ("zero_length_at_start", 0, 0),
                 ("zero_length_in_middle", 50, 0),
                 ("zero_length_at_end", 90, 0),
                 ("complete_file1", 0, None),
                 ("complete_file2", 0, 90),
                 ]
        d = self.do_upload_sdmf()
        d.addCallback(self._test_partial_read, self.small_data, modes, 10)
        return d
    def test_partial_read_sdmf_100(self):
        # partial reads of a 100-byte SDMF file
        data = "test data "*10
        modes = [("start_at_middle", 50, 50),
                 ("zero_length_at_start", 0, 0),
                 ("zero_length_in_middle", 50, 0),
                 ("complete_file", 0, 100),
                 ]
        d = self.do_upload_sdmf(data=data)
        d.addCallback(self._test_partial_read, data, modes, 10)
        return d
    def _test_read_and_download(self, node, expected):
        # Both read() (with and without explicit offset/size) and
        # download_best_version() must return 'expected'.
        d = node.get_best_readable_version()
        def _read_data(version):
            c = consumer.MemoryConsumer()
            c2 = consumer.MemoryConsumer()
            d2 = defer.succeed(None)
            d2.addCallback(lambda ignored: version.read(c))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(expected, "".join(c.chunks)))

            d2.addCallback(lambda ignored: version.read(c2, offset=0,
                                                        size=len(expected)))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(expected, "".join(c2.chunks)))
            return d2
        d.addCallback(_read_data)
        d.addCallback(lambda ignored: node.download_best_version())
        d.addCallback(lambda data: self.failUnlessEqual(expected, data))
        return d
    def test_read_and_download_mdmf(self):
        # read/download round-trip for an MDMF file
        d = self.do_upload_mdmf()
        d.addCallback(self._test_read_and_download, self.data)
        return d
    def test_read_and_download_sdmf(self):
        # read/download round-trip for an SDMF file
        d = self.do_upload_sdmf()
        d.addCallback(self._test_read_and_download, self.small_data)
        return d
    def test_read_and_download_sdmf_zero_length(self):
        # read/download round-trip for a zero-length SDMF file
        d = self.do_upload_empty_sdmf()
        d.addCallback(self._test_read_and_download, "")
        return d
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    # Exercises in-place update() of MDMF files, including
    # segment-boundary fenceposts and max-shares (255) encodings.
    timeout = 400 # these tests are too big, 120s is not enough on slow
                  # platforms
    def setUp(self):
        GridTestMixin.setUp(self)
        self.basedir = self.mktemp()
        self.set_up_grid(num_servers=13)
        self.c = self.g.clients[0]
        self.nm = self.c.nodemaker
        self.data = "testdata " * 100000 # about 900 KiB; MDMF
        self.small_data = "test data" * 10 # 90 B; SDMF
    def do_upload_sdmf(self):
        # Upload two SDMF files: one with the default encoding
        # (self.sdmf_node) and one with the maximum 255 shares
        # (self.sdmf_max_shares_node).
        d = self.nm.create_mutable_file(MutableData(self.small_data))
        def _then(n):
            assert isinstance(n, MutableFileNode)
            self.sdmf_node = n
            # Make SDMF node that has 255 shares.
            self.nm.default_encoding_parameters['n'] = 255
            self.nm.default_encoding_parameters['k'] = 127
            return self.nm.create_mutable_file(MutableData(self.small_data))
        d.addCallback(_then)
        def _then2(n):
            assert isinstance(n, MutableFileNode)
            self.sdmf_max_shares_node = n
        d.addCallback(_then2)
        return d
    def do_upload_mdmf(self):
        # Upload two MDMF files: one with the default encoding
        # (self.mdmf_node) and one with the maximum 255 shares
        # (self.mdmf_max_shares_node).
        d = self.nm.create_mutable_file(MutableData(self.data),
                                        version=MDMF_VERSION)
        def _then(n):
            assert isinstance(n, MutableFileNode)
            self.mdmf_node = n
            # Make MDMF node that has 255 shares.
            self.nm.default_encoding_parameters['n'] = 255
            self.nm.default_encoding_parameters['k'] = 127
            return self.nm.create_mutable_file(MutableData(self.data),
                                               version=MDMF_VERSION)
        d.addCallback(_then)
        def _then2(n):
            assert isinstance(n, MutableFileNode)
            self.mdmf_max_shares_node = n
        d.addCallback(_then2)
        return d
    def _test_replace(self, offset, new_data):
        # Splice 'new_data' into the file at 'offset' via update(), on both
        # the default-encoding and max-shares MDMF nodes, and verify the
        # downloaded result against a locally computed expectation.
        expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
        d0 = self.do_upload_mdmf()
        def _run(ign):
            d = defer.succeed(None)
            for node in (self.mdmf_node, self.mdmf_max_shares_node):
                # close over 'node'.
                d.addCallback(lambda ign, node=node:
                              node.get_best_mutable_version())
                d.addCallback(lambda mv:
                              mv.update(MutableData(new_data), offset))
                d.addCallback(lambda ign, node=node:
                              node.download_best_version())
                def _check(results):
                    if results != expected:
                        print
                        print "got: %s ... %s" % (results[:20], results[-20:])
                        print "exp: %s ... %s" % (expected[:20], expected[-20:])
                        self.fail("results != expected")
                d.addCallback(_check)
            return d
        d0.addCallback(_run)
        return d0
    def test_append(self):
        # We should be able to append data to a mutable file and get
        # what we expect.
        return self._test_replace(len(self.data), "appended")
    def test_replace_middle(self):
        # We should be able to replace data in the middle of a mutable
        # file and get what we expect back.
        return self._test_replace(100, "replaced")
    def test_replace_beginning(self):
        # We should be able to replace data at the beginning of the file
        # without truncating the file
        return self._test_replace(0, "beginning")
    def test_replace_segstart1(self):
        # replace data starting one byte past a 128KiB segment boundary
        return self._test_replace(128*1024+1, "NNNN")
    def test_replace_zero_length_beginning(self):
        # a zero-length replace at offset 0 must leave the file unchanged
        return self._test_replace(0, "")
    def test_replace_zero_length_middle(self):
        # a zero-length replace mid-file must leave the file unchanged
        return self._test_replace(50, "")
def test_replace_zero_length_segstart1(self):
    # A zero-length update just past a segment boundary must leave
    # the file unchanged.
    return self._test_replace(128*1024+1, "")
def test_replace_and_extend(self):
    # We should be able to replace data in the middle of a mutable
    # file and extend that mutable file and get what we expect.
    return self._test_replace(100, "modified " * 100000)
3611 def _check_differences(self, got, expected):
3612 # displaying arbitrary file corruption is tricky for a
3613 # 1MB file of repeating data,, so look for likely places
3614 # with problems and display them separately
3615 gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3616 expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3617 gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3618 for (start,end) in gotmods]
3619 expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3620 for (start,end) in expmods]
3621 #print "expecting: %s" % expspans
3625 print "differences:"
3626 for segnum in range(len(expected)//SEGSIZE):
3627 start = segnum * SEGSIZE
3628 end = (segnum+1) * SEGSIZE
3629 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3630 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3631 if got_ends != exp_ends:
3632 print "expected[%d]: %s" % (start, exp_ends)
3633 print "got [%d]: %s" % (start, got_ends)
3634 if expspans != gotspans:
3635 print "expected: %s" % expspans
3636 print "got : %s" % gotspans
3637 open("EXPECTED","wb").write(expected)
3638 open("GOT","wb").write(got)
3639 print "wrote data to EXPECTED and GOT"
3640 self.fail("didn't get expected data")
def test_replace_locations(self):
    # exercise fencepost conditions
    SEGSIZE = 128*1024
    # Offsets straddling the first and second segment boundaries.
    suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
    letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    d0 = self.do_upload_mdmf()
    def _run(ign):
        expected = self.data
        d = defer.succeed(None)
        for offset in suspects:
            new_data = letters.next()*2 # "AA", then "BB", etc
            expected = expected[:offset]+new_data+expected[offset+2:]
            d.addCallback(lambda ign:
                          self.mdmf_node.get_best_mutable_version())
            def _modify(mv, offset=offset, new_data=new_data):
                # close over 'offset','new_data'
                md = MutableData(new_data)
                return mv.update(md, offset)
            d.addCallback(_modify)
            d.addCallback(lambda ignored:
                          self.mdmf_node.download_best_version())
            # _check_differences prints a readable diff on mismatch.
            d.addCallback(self._check_differences, expected)
        return d
    d0.addCallback(_run)
    return d0
def test_replace_locations_max_shares(self):
    # exercise fencepost conditions
    # Same as test_replace_locations, but against the 255-share node.
    SEGSIZE = 128*1024
    suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
    letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    d0 = self.do_upload_mdmf()
    def _run(ign):
        expected = self.data
        d = defer.succeed(None)
        for offset in suspects:
            new_data = letters.next()*2 # "AA", then "BB", etc
            expected = expected[:offset]+new_data+expected[offset+2:]
            d.addCallback(lambda ign:
                          self.mdmf_max_shares_node.get_best_mutable_version())
            def _modify(mv, offset=offset, new_data=new_data):
                # close over 'offset','new_data'
                md = MutableData(new_data)
                return mv.update(md, offset)
            d.addCallback(_modify)
            d.addCallback(lambda ignored:
                          self.mdmf_max_shares_node.download_best_version())
            d.addCallback(self._check_differences, expected)
        return d
    d0.addCallback(_run)
    return d0
def test_append_power_of_two(self):
    # If we attempt to extend a mutable file so that its segment
    # count crosses a power-of-two boundary, the update operation
    # should know how to reencode the file.

    # Note that the data populating self.mdmf_node is about 900 KiB
    # long -- this is 7 segments in the default segment size. So we
    # need to add 2 segments worth of data to push it over a
    # power-of-two boundary.
    segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
    new_data = self.data + (segment * 2)
    d0 = self.do_upload_mdmf()
    def _run(ign):
        d = defer.succeed(None)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData(segment * 2), len(self.data)))
            d.addCallback(lambda ign, node=node:
                          node.download_best_version())
            d.addCallback(lambda results:
                          self.failUnlessEqual(results, new_data))
        return d
    d0.addCallback(_run)
    return d0
def test_update_sdmf(self):
    # Running update on a single-segment file should still work.
    new_data = self.small_data + "appended"
    d0 = self.do_upload_sdmf()
    def _run(ign):
        d = defer.succeed(None)
        for node in (self.sdmf_node, self.sdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData("appended"), len(self.small_data)))
            d.addCallback(lambda ign, node=node:
                          node.download_best_version())
            d.addCallback(lambda results:
                          self.failUnlessEqual(results, new_data))
        return d
    d0.addCallback(_run)
    return d0
def test_replace_in_last_segment(self):
    # The wrapper should know how to handle the tail segment
    # appropriately.
    replace_offset = len(self.data) - 100
    new_data = self.data[:replace_offset] + "replaced"
    rest_offset = replace_offset + len("replaced")
    new_data += self.data[rest_offset:]
    d0 = self.do_upload_mdmf()
    def _run(ign):
        d = defer.succeed(None)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData("replaced"), replace_offset))
            d.addCallback(lambda ign, node=node:
                          node.download_best_version())
            d.addCallback(lambda results:
                          self.failUnlessEqual(results, new_data))
        return d
    d0.addCallback(_run)
    return d0
def test_multiple_segment_replace(self):
    # Replace a span covering two full segments plus a bit of the
    # next one, starting at a segment boundary.
    replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
    new_data = self.data[:replace_offset]
    new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
    new_data += 2 * new_segment
    new_data += "replaced"
    rest_offset = len(new_data)
    new_data += self.data[rest_offset:]
    d0 = self.do_upload_mdmf()
    def _run(ign):
        d = defer.succeed(None)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            # close over 'node'.
            d.addCallback(lambda ign, node=node:
                          node.get_best_mutable_version())
            d.addCallback(lambda mv:
                          mv.update(MutableData((2 * new_segment) + "replaced"),
                                    replace_offset))
            d.addCallback(lambda ignored, node=node:
                          node.download_best_version())
            d.addCallback(lambda results:
                          self.failUnlessEqual(results, new_data))
        return d
    d0.addCallback(_run)
    return d0
3794 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3795 sdmf_old_shares = {}
3796 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3797 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3798 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3799 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3800 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3801 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3802 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
# Canned SDMF (v1) shares 7-9, base64-encoded, captured from a grid written
# by an old Tahoe version.  Together with shares 0-6 (assigned above) they
# form the complete 10-share set for sdmf_old_cap, used to check that the
# current downloader can still read shares produced by old code.
sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQv/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
# The SSK writecap those shares belong to, and the plaintext they encode.
sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
sdmf_old_contents = "This is a test file.\n"
def copy_sdmf_shares(self):
    """Short-circuit the upload process: decode the canned SDMF shares
    and write them directly into the storage servers' share directories,
    one share per server, then sanity-check that all 10 are visible."""
    # We'll basically be short-circuiting the upload process.
    servernums = self.g.servers_by_number.keys()
    assert len(servernums) == 10

    # Pair each canned share number with a storage server.
    assignments = zip(self.sdmf_old_shares.keys(), servernums)
    # Get the storage index.
    cap = uri.from_string(self.sdmf_old_cap)
    si = cap.get_storage_index()

    # Now execute each assignment by writing the storage.
    for (share, servernum) in assignments:
        sharedata = base64.b64decode(self.sdmf_old_shares[share])
        storedir = self.get_serverdir(servernum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        # Share files are named by their share number within the
        # storage-index directory.
        fileutil.write(os.path.join(storage_path, "%d" % share),
                       sharedata)
    # ...and verify that the shares are there.
    shares = self.find_uri_shares(self.sdmf_old_cap)
    assert len(shares) == 10
def test_new_downloader_can_read_old_shares(self):
    """A current client must be able to download a mutable file from the
    canned shares written by an old Tahoe version."""
    self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
    # Bring up the 10-server test grid before injecting shares into it.
    self.set_up_grid()
    self.copy_sdmf_shares()
    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(self.sdmf_old_cap)
    d = n.download_best_version()
    d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
    # Return the Deferred so trial waits for the download to complete.
    return d
3841 class DifferentEncoding(unittest.TestCase):
3843 self._storage = s = FakeStorage()
3844 self.nodemaker = make_nodemaker(s)
3846 def test_filenode(self):
3847 # create a file with 3-of-20, then modify it with a client configured
3848 # to do 3-of-10. #1510 tracks a failure here
3849 self.nodemaker.default_encoding_parameters["n"] = 20
3850 d = self.nodemaker.create_mutable_file("old contents")
3852 filecap = n.get_cap().to_string()
3853 del n # we want a new object, not the cached one
3854 self.nodemaker.default_encoding_parameters["n"] = 10
3855 n2 = self.nodemaker.create_from_cap(filecap)
3857 d.addCallback(_created)
3858 def modifier(old_contents, servermap, first_time):
3859 return "new contents"
3860 d.addCallback(lambda n: n.modify(modifier))