3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from allmydata import uri, client
7 from allmydata.nodemaker import NodeMaker
8 from allmydata.util import base32, consumer, fileutil, mathutil
9 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
10 ssk_pubkey_fingerprint_hash
11 from allmydata.util.consumer import MemoryConsumer
12 from allmydata.util.deferredutil import gatherResults
13 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
14 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION, DownloadStopped
15 from allmydata.monitor import Monitor
16 from allmydata.test.common import ShouldFailMixin
17 from allmydata.test.no_network import GridTestMixin
18 from foolscap.api import eventually, fireEventually
19 from foolscap.logging import log
20 from allmydata.storage_client import StorageFarmBroker
21 from allmydata.storage.common import storage_index_to_dir
22 from allmydata.scripts import debug
24 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
25 from allmydata.mutable.common import ResponseCache, \
26 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
27 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
28 NotEnoughServersError, CorruptShareError
29 from allmydata.mutable.retrieve import Retrieve
30 from allmydata.mutable.publish import Publish, MutableFileHandle, \
32 DEFAULT_MAX_SEGMENT_SIZE
33 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
34 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
35 from allmydata.mutable.repairer import MustForceRepairError
37 import allmydata.test.common_util as testutil
38 from allmydata.test.common import TEST_RSA_KEY_SIZE
39 from allmydata.test.test_download import PausingConsumer, \
40 PausingAndStoppingConsumer, StoppingConsumer, \
41 ImmediatelyStoppingConsumer
44 # this "FakeStorage" exists to put the share data in RAM and avoid using real
45 # network connections, both to speed up the tests and to reduce the amount of
46 # non-mutable.py code being exercised.
49 # this class replaces the collection of storage servers, allowing the
50 # tests to examine and manipulate the published shares. It also lets us
51 # control the order in which read queries are answered, to exercise more
52 # of the error-handling code in Retrieve .
54 # Note that we ignore the storage index: this FakeStorage instance can
55 # only be used for a single storage index.
    # _sequence is used to cause the responses to occur in a specific
    # order. If it is in use, then we will defer queries instead of
    # answering them right away, accumulating the Deferreds in a dict. We
    # don't know exactly how many queries we'll get, so exactly one
    # second after the first query arrives, we will release them all (in
    # NOTE(review): fragment -- the __init__ def line and the rest of the
    # attribute setup (e.g. self._peers, self._sequence) are missing from
    # this view.
        self._pending_timer = None
    def read(self, peerid, storage_index):
        # Return a Deferred firing with this peer's {shnum: sharedata}
        # dict.  When a read sequence is active, queue the answer instead
        # of firing immediately, so tests can control response order.
        shares = self._peers.get(peerid, {})
        if self._sequence is None:
            return defer.succeed(shares)
        # NOTE(review): fragment -- the Deferred creation and the
        # first-pending check are missing from this view.
            self._pending_timer = reactor.callLater(1.0, self._fire_readers)
        if peerid not in self._pending:
            self._pending[peerid] = []
        self._pending[peerid].append( (d, shares) )
    def _fire_readers(self):
        # Release all queued read answers: first the peers named in
        # self._sequence (in that order), then any remaining peers.
        self._pending_timer = None
        pending = self._pending
        # NOTE(review): fragment -- the reset of self._pending and the
        # per-peer membership check are missing from this view.
        for peerid in self._sequence:
            for (d, shares) in pending.pop(peerid):
                eventually(d.callback, shares)
        for peerid in pending:
            for (d, shares) in pending[peerid]:
                eventually(d.callback, shares)
    def write(self, peerid, storage_index, shnum, offset, data):
        # Apply a write to the in-RAM share via read-modify-write.
        if peerid not in self._peers:
            self._peers[peerid] = {}
        shares = self._peers[peerid]
        # NOTE(review): fragment -- the StringIO creation, seek(offset),
        # and write(data) lines are missing from this view.
        f.write(shares.get(shnum, ""))
        shares[shnum] = f.getvalue()
class FakeStorageServer:
    # In-RAM stand-in for a remote storage server: forwards share reads
    # and writes to a shared FakeStorage instance.
    # NOTE(review): this class is a fragment -- several lines are missing
    # from this view (self.peerid assignment, the fireEventually turn in
    # callRemote, various returns and loop headers).
    def __init__(self, peerid, storage):
        self.storage = storage

    def callRemote(self, methname, *args, **kwargs):
        # Dispatch by method name after a turn of the event loop.
        meth = getattr(self, methname)
        return meth(*args, **kwargs)
        d.addCallback(lambda res: _call())

    def callRemoteOnly(self, methname, *args, **kwargs):
        # Fire-and-forget variant: swallow the result and any failure.
        d = self.callRemote(methname, *args, **kwargs)
        d.addBoth(lambda ignore: None)

    def advise_corrupt_share(self, share_type, storage_index, shnum, reason):

    def slot_readv(self, storage_index, shnums, readv):
        # Read the requested vectors from each share held for this peer.
        d = self.storage.read(self.peerid, storage_index)
        if shnums and shnum not in shnums:
        vector = response[shnum] = []
        for (offset, length) in readv:
            assert isinstance(offset, (int, long)), offset
            assert isinstance(length, (int, long)), length
            vector.append(shares[shnum][offset:offset+length])

    def slot_testv_and_readv_and_writev(self, storage_index, secrets,
                                        tw_vectors, read_vector):
        # always-pass: parrot the test vectors back to them.
        for shnum, (testv, writev, new_length) in tw_vectors.items():
            for (offset, length, op, specimen) in testv:
                assert op in ("le", "eq", "ge")
            # TODO: this isn't right, the read is controlled by read_vector,
            readv[shnum] = [ specimen
                             for (offset, length, op, specimen)
            for (offset, data) in writev:
                self.storage.write(self.peerid, storage_index, shnum,
        answer = (True, readv)
        return fireEventually(answer)
def flip_bit(original, byte_offset):
    """Return a copy of 'original' with the low bit of one byte inverted.

    The characters before and after byte_offset are left untouched.
    """
    head = original[:byte_offset]
    tail = original[byte_offset+1:]
    flipped = chr(ord(original[byte_offset]) ^ 0x01)
    return head + flipped + tail
def add_two(original, byte_offset):
    """Corrupt a version byte so it becomes invalid.

    It isn't enough to simply flip the low bit of the version number,
    because 1 is a valid version number.  Flipping bit 1 instead maps
    both valid verbytes to invalid ones (0 -> 2, 1 -> 3), which for
    those values is the same as adding two.
    """
    prefix = original[:byte_offset]
    corrupted = chr(ord(original[byte_offset]) ^ 0x02)
    suffix = original[byte_offset+1:]
    return prefix + corrupted + suffix
def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
    # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
    # list of shnums to corrupt.
    # NOTE(review): fragment -- several lines are missing from this view
    # (the per-share loop header, the 'ds' accumulator, the choice of the
    # corruption function 'f', and the final return of 'dl').
    for peerid in s._peers:
        shares = s._peers[peerid]
        if (shnums_to_corrupt is not None
            and shnum not in shnums_to_corrupt):
            # We're feeding the reader all of the share data, so it
            # won't need to use the rref that we didn't provide, nor the
            # storage index that we didn't provide. We do this because
            # the reader will work for both MDMF and SDMF.
            reader = MDMFSlotReadProxy(None, None, shnum, data)
            # We need to get the offsets for the next part.
            d = reader.get_verinfo()
            def _do_corruption(verinfo, data, shnum):
                k, n, prefix, o) = verinfo
                if isinstance(offset, tuple):
                    offset1, offset2 = offset
                if offset1 == "pubkey" and IV:
                    real_offset = o[offset1]
                    real_offset = offset1
                real_offset = int(real_offset) + offset2 + offset_offset
                assert isinstance(real_offset, int), offset
                if offset1 == 0: # verbyte
                shares[shnum] = f(data, real_offset)
            d.addCallback(_do_corruption, data, shnum)
    dl = defer.DeferredList(ds)
    dl.addCallback(lambda ignored: res)
def make_storagebroker(s=None, num_peers=10):
    # Build a StorageFarmBroker whose servers are FakeStorageServers all
    # sharing the single FakeStorage instance 's'.
    # NOTE(review): fragment -- the lines creating a default FakeStorage
    # when 's' is None are missing from this view.
    peerids = [tagged_hash("peerid", "%d" % i)[:20]
               for i in range(num_peers)]
    storage_broker = StorageFarmBroker(None, True)
    for peerid in peerids:
        fss = FakeStorageServer(peerid, s)
        storage_broker.test_add_rref(peerid, fss)
    return storage_broker
def make_nodemaker(s=None, num_peers=10):
    # Build a NodeMaker wired to the fake storage broker, using a small
    # RSA key size (TEST_RSA_KEY_SIZE) to keep tests fast.
    storage_broker = make_storagebroker(s, num_peers)
    sh = client.SecretHolder("lease secret", "convergence secret")
    keygen = client.KeyGenerator()
    keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
    nodemaker = NodeMaker(storage_broker, sh, None,
                          {"k": 3, "n": 10}, keygen)
    # NOTE(review): fragment -- the 'return nodemaker' line is missing
    # from this view.
class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
    # this used to be in Publish, but we removed the limit. Some of
    # these tests test whether the new code correctly allows files
    # larger than the limit.
    OLD_MAX_SEGMENT_SIZE = 3500000
    # NOTE(review): fragment -- the setUp def line is missing from this
    # view; the two statements below are its body.
        self._storage = s = FakeStorage()
        self.nodemaker = make_nodemaker(s)
    def test_create(self):
        # Creating a default (SDMF) mutable file publishes exactly one
        # share per server.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        d = self.nodemaker.create_mutable_file()
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
    def test_create_mdmf(self):
        # Same single-share-per-server check as test_create, for MDMF.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            peer0 = sorted(sb.get_all_serverids())[0]
            shnums = self._storage._peers[peer0].keys()
            self.failUnlessEqual(len(shnums), 1)
        d.addCallback(_created)
    def test_single_share(self):
        # Make sure that we tolerate publishing a single share.
        # NOTE(review): fragment -- the '_created' def line (which saves
        # self._node) and the trailing 'return d' are missing.
        self.nodemaker.default_encoding_parameters['k'] = 1
        self.nodemaker.default_encoding_parameters['happy'] = 1
        self.nodemaker.default_encoding_parameters['n'] = 1
        d = defer.succeed(None)
        for v in (SDMF_VERSION, MDMF_VERSION):
            d.addCallback(lambda ignored:
                self.nodemaker.create_mutable_file(version=v))
                self.failUnless(isinstance(n, MutableFileNode))
            d.addCallback(_created)
            d.addCallback(lambda n:
                n.overwrite(MutableData("Contents" * 50000)))
            d.addCallback(lambda ignored:
                self._node.download_best_version())
            d.addCallback(lambda contents:
                self.failUnlessEqual(contents, "Contents" * 50000))
    def test_max_shares(self):
        # Publishing with n=255 (SDMF) should produce 255 shares total.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
        d.addCallback(_created)
        # Now we upload some contents
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        # ...then download contents
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        # ...and check to make sure everything went okay.
        d.addCallback(lambda contents:
            self.failUnlessEqual("contents" * 50000, contents))
    def test_max_shares_mdmf(self):
        # Test how files behave when there are 255 shares.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        self.nodemaker.default_encoding_parameters['n'] = 255
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n._storage_index)
            sb = self.nodemaker.storage_broker
            num_shares = sum([len(self._storage._peers[x].keys()) for x \
                              in sb.get_all_serverids()])
            self.failUnlessEqual(num_shares, 255)
        d.addCallback(_created)
        d.addCallback(lambda n:
            n.overwrite(MutableData("contents" * 50000)))
        d.addCallback(lambda ignored:
            self._node.download_best_version())
        d.addCallback(lambda contents:
            self.failUnlessEqual(contents, "contents" * 50000))
    def test_mdmf_filenode_cap(self):
        # Test that an MDMF filenode, once created, returns an MDMF URI.
        # NOTE(review): fragment -- the '_created' def line and the
        # 'cap = n.get_cap()' line are missing from this view.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            rcap = n.get_readcap()
            self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
            vcap = n.get_verify_cap()
            self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
        d.addCallback(_created)
    def test_create_from_mdmf_writecap(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode given an MDMF cap.
        # NOTE(review): fragment -- the '_created' def line and the
        # 's = n.get_uri()' line are missing from this view.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            self.failUnless(s.startswith("URI:MDMF"))
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            self.failUnlessEqual(n.get_uri(), n2.get_uri())
        d.addCallback(_created)
    def test_create_from_mdmf_writecap_with_extensions(self):
        # Test that the nodemaker is capable of creating an MDMF
        # filenode when given a writecap with extension parameters in
        # NOTE(review): fragment -- the tail of the comment above, the
        # '_created' def line, and the 's = n.get_uri()' line are missing.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            # We need to cheat a little and delete the nodemaker's
            # cache, otherwise we'll get the same node instance back.
            self.failUnlessIn(":3:131073", s)
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
            self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
            hints = n2._downloader_hints
            self.failUnlessEqual(hints['k'], 3)
            self.failUnlessEqual(hints['segsize'], 131073)
        d.addCallback(_created)
    def test_create_from_mdmf_readcap(self):
        # An MDMF readcap should produce a read-only MutableFileNode.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            # Check that it's a readonly node
            self.failUnless(n2.is_readonly())
        d.addCallback(_created)
    def test_create_from_mdmf_readcap_with_extensions(self):
        # We should be able to create an MDMF filenode with the
        # extension parameters without it breaking.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.failUnless(isinstance(n, MutableFileNode))
            s = n.get_readonly_uri()
            self.failUnlessIn(":3:131073", s)
            n2 = self.nodemaker.create_from_cap(s)
            self.failUnless(isinstance(n2, MutableFileNode))
            self.failUnless(n2.is_readonly())
            self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
            hints = n2._downloader_hints
            self.failUnlessEqual(hints["k"], 3)
            self.failUnlessEqual(hints["segsize"], 131073)
        d.addCallback(_created)
    def test_internal_version_from_cap(self):
        # MutableFileNodes and MutableFileVersions have an internal
        # switch that tells them whether they're dealing with an SDMF or
        # MDMF mutable file when they start doing stuff. We want to make
        # sure that this is set appropriately given an MDMF cap.
        # NOTE(review): fragment -- the callback def lines are missing
        # from this view.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.uri = n.get_uri()
            self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
            n2 = self.nodemaker.create_from_cap(self.uri)
            self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
        d.addCallback(_created)
    def test_serialize(self):
        # _do_serialized must pass args through, propagate results, and
        # convert a raised exception into a Failure callers can catch.
        # NOTE(review): fragment -- the 'calls' list setup, the
        # callback's bookkeeping/return, and the '_errback' def line are
        # missing from this view.
        n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
        def _callback(*args, **kwargs):
            self.failUnlessEqual(args, (4,) )
            self.failUnlessEqual(kwargs, {"foo": 5})
        d = n._do_serialized(_callback, 4, foo=5)
        def _check_callback(res):
            self.failUnlessEqual(res, 6)
            self.failUnlessEqual(calls, [1])
        d.addCallback(_check_callback)
            raise ValueError("heya")
        d.addCallback(lambda res:
            self.shouldFail(ValueError, "_check_errback", "heya",
                            n._do_serialized, _errback))
    def test_upload_and_download(self):
        # End-to-end SDMF round trips: overwrite, download, upload with a
        # servermap, download a specific version, and finally a file big
        # enough to exceed the mapupdate read cache.
        # NOTE(review): fragment -- the '_created' def line is missing
        # from this view.
        d = self.nodemaker.create_mutable_file()
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.get_size_of_best_version())
            d.addCallback(lambda size:
                self.failUnlessEqual(size, len("contents 1")))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                n.download_version(smap,
                                   smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            # test a file that is large enough to overcome the
            # mapupdate-to-retrieve data caching (i.e. make the shares larger
            # than the default readsize, which is 2000 bytes). A 15kB file
            # will have 5kB shares.
            d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res:
                self.failUnlessEqual(res, "large size file" * 1000))
        d.addCallback(_created)
    def test_upload_and_download_mdmf(self):
        # Multi-segment MDMF round trips with ~900KiB and ~9MiB contents.
        # NOTE(review): fragment -- the '_created' def line and a few
        # intervening lines are missing from this view.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            d = defer.succeed(None)
            d.addCallback(lambda ignored:
                n.get_servermap(MODE_READ))
            def _then(servermap):
                dumped = servermap.dump(StringIO())
                self.failUnlessIn("3-of-10", dumped.getvalue())
            # Now overwrite the contents with some new contents. We want
            # to make them big enough to force the file to be uploaded
            # in more than one segment.
            big_contents = "contents1" * 100000 # about 900 KiB
            big_contents_uploadable = MutableData(big_contents)
            d.addCallback(lambda ignored:
                n.overwrite(big_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, big_contents))
            # Overwrite the contents again with some new contents. As
            # before, they need to be big enough to force multiple
            # segments, so that we make the downloader deal with
            bigger_contents = "contents2" * 1000000 # about 9MiB
            bigger_contents_uploadable = MutableData(bigger_contents)
            d.addCallback(lambda ignored:
                n.overwrite(bigger_contents_uploadable))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, bigger_contents))
        d.addCallback(_created)
    def test_retrieve_producer_mdmf(self):
        # We should make sure that the retriever is able to pause and stop
        # NOTE(review): fragment -- the continuation of the comment above
        # and the trailing 'return d' are missing from this view.
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=MDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "MDMF", data)
    # note: SDMF has only one big segment, so we can't use the usual
    # after-the-first-write() trick to pause or stop the download.
    # Disabled until we find a better approach.
    def OFF_test_retrieve_producer_sdmf(self):
        # NOTE(review): fragment -- the trailing 'return d' is missing
        # from this view.
        data = "contents1" * 100000
        d = self.nodemaker.create_mutable_file(MutableData(data),
                                               version=SDMF_VERSION)
        d.addCallback(lambda node: node.get_best_mutable_version())
        d.addCallback(self._test_retrieve_producer, "SDMF", data)
    def _test_retrieve_producer(self, version, kind, data):
        # Drive a download through pausing/stopping consumers and check
        # that stopping raises DownloadStopped with the expected message.
        # NOTE(review): fragment -- the version.read(...) calls that feed
        # each consumer, and several closing parens, are missing from
        # this view.
        # Now we'll retrieve it into a pausing consumer.
        c = PausingConsumer()
        d.addCallback(lambda ign: self.failUnlessEqual(c.size, len(data)))
        c2 = PausingAndStoppingConsumer()
        d.addCallback(lambda ign:
            self.shouldFail(DownloadStopped, kind+"_pause_stop",
                            "our Consumer called stopProducing()",
        c3 = StoppingConsumer()
        d.addCallback(lambda ign:
            self.shouldFail(DownloadStopped, kind+"_stop",
                            "our Consumer called stopProducing()",
        c4 = ImmediatelyStoppingConsumer()
        d.addCallback(lambda ign:
            self.shouldFail(DownloadStopped, kind+"_stop_imm",
                            "our Consumer called stopProducing()",
        c5 = MemoryConsumer()
        d1 = version.read(c5)
        c5.producer.stopProducing()
        return self.shouldFail(DownloadStopped, kind+"_stop_imm2",
                               "our Consumer called stopProducing()",
    def test_download_from_mdmf_cap(self):
        # We should be able to download an MDMF file given its cap
        # NOTE(review): fragment -- the '_created' and intermediate
        # callback def lines, plus one addCallback, are missing from
        # this view.
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
            self.uri = node.get_uri()
            return node.overwrite(MutableData("contents1" * 100000))
            node = self.nodemaker.create_from_cap(self.uri)
            return node.download_best_version()
        def _downloaded(data):
            self.failUnlessEqual(data, "contents1" * 100000)
        d.addCallback(_created)
        d.addCallback(_downloaded)
    def test_create_and_download_from_bare_mdmf_cap(self):
        # MDMF caps have extension parameters on them by default. We
        # need to make sure that they work without extension parameters.
        # NOTE(review): fragment -- several lines (the create call's
        # remaining arguments, the '_created' def, saving self._created)
        # are missing from this view.
        contents = MutableData("contents" * 100000)
        d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
            self.failUnlessIn(":3:131073", uri)
            # Now strip that off the end of the uri, then try creating
            # and downloading the node again.
            bare_uri = uri.replace(":3:131073", "")
            assert ":3:131073" not in bare_uri
            return self.nodemaker.create_from_cap(bare_uri)
        d.addCallback(_created)
        def _created_bare(node):
            self.failUnlessEqual(node.get_writekey(),
                                 self._created.get_writekey())
            self.failUnlessEqual(node.get_readkey(),
                                 self._created.get_readkey())
            self.failUnlessEqual(node.get_storage_index(),
                                 self._created.get_storage_index())
            return node.download_best_version()
        d.addCallback(_created_bare)
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "contents" * 100000))
    def test_mdmf_write_count(self):
        # Publishing an MDMF file should only cause one write for each
        # share that is to be published. Otherwise, we introduce
        # undesirable semantics that are a regression from SDMF
        upload = MutableData("MDMF" * 100000) # about 400 KiB
        d = self.nodemaker.create_mutable_file(upload,
                                               version=MDMF_VERSION)
        def _check_server_write_counts(ignored):
            sb = self.nodemaker.storage_broker
            for server in sb.servers.itervalues():
                self.failUnlessEqual(server.get_rref().queries, 1)
        d.addCallback(_check_server_write_counts)
        # NOTE(review): fragment -- the trailing 'return d' is missing
        # from this view.
    def test_create_with_initial_contents(self):
        # Initial contents supplied at create time must be downloadable,
        # and an overwrite afterwards must take effect.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        upload1 = MutableData("contents 1")
        d = self.nodemaker.create_mutable_file(upload1)
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            upload2 = MutableData("contents 2")
            d.addCallback(lambda res: n.overwrite(upload2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(_created)
    def test_create_mdmf_with_initial_contents(self):
        # Multi-segment MDMF initial contents, plus a slightly-larger
        # overwrite, must both round-trip.
        # NOTE(review): fragment -- the '_created' def line and the tail
        # of the final assertion are missing from this view.
        initial_contents = "foobarbaz" * 131072 # 900KiB
        initial_contents_uploadable = MutableData(initial_contents)
        d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
                                               version=MDMF_VERSION)
            d = n.download_best_version()
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents))
            uploadable2 = MutableData(initial_contents + "foobarbaz")
            d.addCallback(lambda ignored:
                n.overwrite(uploadable2))
            d.addCallback(lambda ignored:
                n.download_best_version())
            d.addCallback(lambda data:
                self.failUnlessEqual(data, initial_contents +
        d.addCallback(_created)
    def test_response_cache_memory_leak(self):
        # Repeated downloads must not grow the node's ResponseCache.
        # NOTE(review): fragment -- the '_created' def line and a couple
        # of intervening lines are missing from this view.
        d = self.nodemaker.create_mutable_file("contents")
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
            d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
            def _check_cache(expected):
                # The total size of cache entries should not increase on the second download;
                # in fact the cache contents should be identical.
                d2 = n.download_best_version()
                d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
            d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
        d.addCallback(_created)
    def test_create_with_initial_contents_function(self):
        # create_mutable_file may take a callable that produces the
        # initial contents once the node (and its writekey) exists.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        data = "initial contents"
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16) # AES key size
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents)
            return n.download_best_version()
        d.addCallback(_created)
        d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
    def test_create_mdmf_with_initial_contents_function(self):
        # Same as test_create_with_initial_contents_function, but with
        # multi-segment contents and an MDMF file.
        data = "initial contents" * 100000
        def _make_contents(n):
            self.failUnless(isinstance(n, MutableFileNode))
            key = n.get_writekey()
            self.failUnless(isinstance(key, str), key)
            self.failUnlessEqual(len(key), 16)
            return MutableData(data)
        d = self.nodemaker.create_mutable_file(_make_contents,
                                               version=MDMF_VERSION)
        d.addCallback(lambda n:
            n.download_best_version())
        d.addCallback(lambda data2:
            self.failUnlessEqual(data2, data))
        # NOTE(review): fragment -- the trailing 'return d' is missing
        # from this view.
    def test_create_with_too_large_contents(self):
        # Files larger than the old SDMF segment limit must now be
        # accepted both at create time and on overwrite.
        # NOTE(review): fragment -- the '_created' def line and return
        # lines are missing from this view.
        BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        BIG_uploadable = MutableData(BIG)
        d = self.nodemaker.create_mutable_file(BIG_uploadable)
            other_BIG_uploadable = MutableData(BIG)
            d = n.overwrite(other_BIG_uploadable)
        d.addCallback(_created)
    def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
        # Assert that the best recoverable version of node 'n' carries
        # the given sequence number; 'which' labels any failure message.
        d = n.get_servermap(MODE_READ)
        d.addCallback(lambda servermap: servermap.best_recoverable_version())
        d.addCallback(lambda verinfo:
            self.failUnlessEqual(verinfo[0], expected_seqnum, which))
        # NOTE(review): fragment -- the trailing 'return d' is missing
        # from this view.
    def test_modify(self):
        # Exercise n.modify() with modifiers that succeed, decline,
        # raise, produce oversize contents, and simulate
        # UncoordinatedWriteErrors; check contents and sequence numbers
        # after each step.
        # NOTE(review): fragment -- several lines ('calls' bookkeeping,
        # modifier returns, the '_created' def, the expected-contents
        # continuations) are missing from this view.
        def _modifier(old_contents, servermap, first_time):
            new_contents = old_contents + "line2"
        def _non_modifier(old_contents, servermap, first_time):
        def _none_modifier(old_contents, servermap, first_time):
        def _error_modifier(old_contents, servermap, first_time):
            raise ValueError("oops")
        def _toobig_modifier(old_contents, servermap, first_time):
            new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            raise UncoordinatedWriteError("simulated")
            new_contents = old_contents + "line3"
        def _ucw_error_non_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once, and don't actually
            # modify the contents on subsequent invocations
            raise UncoordinatedWriteError("simulated")
        initial_contents = "line1"
        d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
            d.addCallback(lambda res: n.modify(_non_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
            d.addCallback(lambda res: n.modify(_none_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
            d.addCallback(lambda res:
                self.shouldFail(ValueError, "error_modifier", None,
                                n.modify, _error_modifier))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
            d.addCallback(lambda res: n.modify(_ucw_error_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
            def _reset_ucw_error_modifier(res):
            d.addCallback(_reset_ucw_error_modifier)
            # in practice, this n.modify call should publish twice: the first
            # one gets a UCWE, the second does not. But our test jig (in
            # which the modifier raises the UCWE) skips over the first one,
            # so in this test there will be only one publish, and the seqnum
            # will only be one larger than the previous test, not two (i.e. 4
            d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
            d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
            d.addCallback(lambda res: n.modify(_toobig_modifier))
        d.addCallback(_created)
    def test_modify_backoffer(self):
        # Like test_modify, but with custom backoffer hooks: a stopper
        # (which surfaces the UCWE), a pauser (which retries after a
        # delay), and a quick-give-up BackoffAgent.
        # NOTE(review): fragment -- several lines ('calls' bookkeeping,
        # the '_created' def, the backoffer keyword arguments, expected
        # contents continuations) are missing from this view.
        def _modifier(old_contents, servermap, first_time):
            return old_contents + "line2"
        def _ucw_error_modifier(old_contents, servermap, first_time):
            # simulate an UncoordinatedWriteError once
            raise UncoordinatedWriteError("simulated")
            return old_contents + "line3"
        def _always_ucw_error_modifier(old_contents, servermap, first_time):
            raise UncoordinatedWriteError("simulated")
        def _backoff_stopper(node, f):
        def _backoff_pauser(node, f):
            reactor.callLater(0.5, d.callback, None)
        # the give-up-er will hit its maximum retry count quickly
        giveuper = BackoffAgent()
        giveuper._delay = 0.1
        d = self.nodemaker.create_mutable_file(MutableData("line1"))
            d = n.modify(_modifier)
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
            d.addCallback(lambda res:
                self.shouldFail(UncoordinatedWriteError,
                                "_backoff_stopper", None,
                                n.modify, _ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
            def _reset_ucw_error_modifier(res):
            d.addCallback(_reset_ucw_error_modifier)
            d.addCallback(lambda res: n.modify(_ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
            d.addCallback(lambda res:
                self.shouldFail(UncoordinatedWriteError,
                                n.modify, _always_ucw_error_modifier,
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res,
            d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
        d.addCallback(_created)
    def test_upload_and_download_full_size_keys(self):
        # Same round-trip sequence as test_upload_and_download, but with
        # full-size (non-test) RSA keys from a fresh KeyGenerator.
        # NOTE(review): fragment -- the '_created' def line and trailing
        # 'return d' are missing from this view.
        self.nodemaker.key_generator = client.KeyGenerator()
        d = self.nodemaker.create_mutable_file()
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            d.addCallback(lambda smap: smap.dump(StringIO()))
            d.addCallback(lambda sio:
                self.failUnless("3-of-10" in sio.getvalue()))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
            d.addCallback(lambda res: self.failUnlessIdentical(res, None))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
            d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
            d.addCallback(lambda smap:
                n.download_version(smap,
                                   smap.best_recoverable_version()))
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
        d.addCallback(_created)
958 def test_size_after_servermap_update(self):
959 # a mutable file node should have something to say about how big
960 # it is after a servermap update is performed, since this tells
961 # us how large the best version of that mutable file is.
# NOTE(review): original lines 963-964 are missing from this excerpt —
# presumably `def _created(n): self.n = n` which binds self.n before the
# `return n.get_servermap(...)` below; confirm against the full file.
962 d = self.nodemaker.create_mutable_file()
965 return n.get_servermap(MODE_READ)
966 d.addCallback(_created)
# a freshly-created (empty) mutable file reports size 0
967 d.addCallback(lambda ignored:
968 self.failUnlessEqual(self.n.get_size(), 0))
969 d.addCallback(lambda ignored:
970 self.n.overwrite(MutableData("foobarbaz")))
# after writing 9 bytes, get_size() should reflect them
971 d.addCallback(lambda ignored:
972 self.failUnlessEqual(self.n.get_size(), 9))
# same expectation when the file is created with initial contents
973 d.addCallback(lambda ignored:
974 self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
975 d.addCallback(_created)
976 d.addCallback(lambda ignored:
977 self.failUnlessEqual(self.n.get_size(), 9))
982 def publish_one(self):
983 # publish a file and create shares, which can then be manipulated
# later. Sets up self._fn (the published node), self._fn2 (a sibling
# node made from the same cap), self._storage, self._nodemaker and
# self._storage_broker for use by the test classes below.
# NOTE(review): original lines 991-992 and 995 are missing from this
# excerpt — presumably `def _created(node): self._fn = node` and a
# trailing `return d`; `node` below has no visible binding.
985 self.CONTENTS = "New contents go here" * 1000
986 self.uploadable = MutableData(self.CONTENTS)
987 self._storage = FakeStorage()
988 self._nodemaker = make_nodemaker(self._storage)
989 self._storage_broker = self._nodemaker.storage_broker
990 d = self._nodemaker.create_mutable_file(self.uploadable)
993 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
994 d.addCallback(_created)
997 def publish_mdmf(self):
998 # like publish_one, except that the result is guaranteed to be
# an MDMF-format file (version=MDMF_VERSION), and
1000 # self.CONTENTS should have more than one segment.
# NOTE(review): original lines 1007-1008 and 1011 appear to be missing
# from this excerpt (the `_created` binder for `node` and `return d`).
1001 self.CONTENTS = "This is an MDMF file" * 100000
1002 self.uploadable = MutableData(self.CONTENTS)
1003 self._storage = FakeStorage()
1004 self._nodemaker = make_nodemaker(self._storage)
1005 self._storage_broker = self._nodemaker.storage_broker
1006 d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
1009 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
1010 d.addCallback(_created)
1014 def publish_sdmf(self):
1015 # like publish_one, except that the result is guaranteed to be
# an SDMF-format file (version=SDMF_VERSION).
# NOTE(review): original lines 1023-1024 and 1027 appear to be missing
# from this excerpt (the `_created` binder for `node` and `return d`).
1017 self.CONTENTS = "This is an SDMF file" * 1000
1018 self.uploadable = MutableData(self.CONTENTS)
1019 self._storage = FakeStorage()
1020 self._nodemaker = make_nodemaker(self._storage)
1021 self._storage_broker = self._nodemaker.storage_broker
1022 d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
1025 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
1026 d.addCallback(_created)
1030 def publish_multiple(self, version=0):
# Publish five successive versions of the same mutable file, snapshotting
# the share state after each write via self._copy_shares, so tests can
# mix-and-match versions later with self._set_versions. Storage ends in
# state 4 (the "s4b" write made on top of a rollback to s3).
# NOTE(review): this excerpt drops original lines 1032-1035 (the rest of
# the CONTENTS list literal), 1041-1042 (the `_created` binder for `node`)
# and 1060 (presumably `return d`).
1031 self.CONTENTS = ["Contents 0",
1036 self.uploadables = [MutableData(d) for d in self.CONTENTS]
1037 self._copied_shares = {}
1038 self._storage = FakeStorage()
1039 self._nodemaker = make_nodemaker(self._storage)
1040 d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
1043 # now create multiple versions of the same file, and accumulate
1044 # their shares, so we can mix and match them later.
1045 d = defer.succeed(None)
1046 d.addCallback(self._copy_shares, 0)
1047 d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
1048 d.addCallback(self._copy_shares, 1)
1049 d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
1050 d.addCallback(self._copy_shares, 2)
1051 d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
1052 d.addCallback(self._copy_shares, 3)
1053 # now we replace all the shares with version s3, and upload a new
1054 # version to get s4b.
1055 rollback = dict([(i,2) for i in range(10)])
1056 d.addCallback(lambda res: self._set_versions(rollback))
1057 d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
1058 d.addCallback(self._copy_shares, 4)
1059 # we leave the storage in state 4
1061 d.addCallback(_created)
1065 def _copy_shares(self, ignored, index):
1066 shares = self._storage._peers
1067 # we need a deep copy
1069 for peerid in shares:
1070 new_shares[peerid] = {}
1071 for shnum in shares[peerid]:
1072 new_shares[peerid][shnum] = shares[peerid][shnum]
1073 self._copied_shares[index] = new_shares
1075 def _set_versions(self, versionmap):
1076 # versionmap maps shnums to which version (0,1,2,3,4) we want the
1077 # share to be at. Any shnum which is left out of the map will stay at
1078 # its current version.
1079 shares = self._storage._peers
1080 oldshares = self._copied_shares
1081 for peerid in shares:
1082 for shnum in shares[peerid]:
1083 if shnum in versionmap:
1084 index = versionmap[shnum]
1085 shares[peerid][shnum] = oldshares[index][peerid][shnum]
1087 class Servermap(unittest.TestCase, PublishMixin):
# Exercise ServermapUpdater against FakeStorage under each MODE_* query
# strategy, including re-use/update of an existing map, bad-share marking,
# no-share / not-enough-share situations, and MDMF vs SDMF detection.
# NOTE(review): this excerpt is a numbered listing with gaps — e.g. the
# `def setUp(self):` header before line 1089, parts of the make_servermap
# signature (1092-1095), and `return` lines are missing; the code below is
# reproduced exactly as extracted.
1089 return self.publish_one()
# -- make_servermap: build a fresh ServerMap for `fn` (default self._fn)
#    using broker `sb` (default self._storage_broker), updated in `mode`.
1091 def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
1096 sb = self._storage_broker
1097 smu = ServermapUpdater(fn, sb, Monitor(),
1098 ServerMap(), mode, update_range=update_range)
# -- update_servermap: refresh an existing map in the given mode.
1102 def update_servermap(self, oldmap, mode=MODE_CHECK):
1103 smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
# -- failUnlessOneRecoverable: assert the map holds exactly one
#    recoverable (and zero unrecoverable) versions, with num_shares of
#    the 3-of-10 encoding located, and consistent version_on_peer data.
#    NOTE(review): `.items()[0]` is Python-2-only (py3 returns a view).
1108 def failUnlessOneRecoverable(self, sm, num_shares):
1109 self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1110 self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1111 best = sm.best_recoverable_version()
1112 self.failIfEqual(best, None)
1113 self.failUnlessEqual(sm.recoverable_versions(), set([best]))
1114 self.failUnlessEqual(len(sm.shares_available()), 1)
1115 self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
1116 shnum, peerids = sm.make_sharemap().items()[0]
1117 peerid = list(peerids)[0]
1118 self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
1119 self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
# -- test_basic: each mode finds the expected number of shares, both for
#    fresh maps and when updating an already-populated map.
1122 def test_basic(self):
1123 d = defer.succeed(None)
1124 ms = self.make_servermap
1125 us = self.update_servermap
1127 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1128 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1129 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1130 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1131 d.addCallback(lambda res: ms(mode=MODE_READ))
1132 # this mode stops at k+epsilon, and epsilon=k, so 6 shares
1133 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1134 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1135 # this mode stops at 'k' shares
1136 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1138 # and can we re-use the same servermap? Note that these are sorted in
1139 # increasing order of number of servers queried, since once a server
1140 # gets into the servermap, we'll always ask it for an update.
1141 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1142 d.addCallback(lambda sm: us(sm, mode=MODE_READ))
1143 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1144 d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
1145 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1146 d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
1147 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1148 d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
1149 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
# -- test_fetch_privkey: a MODE_WRITE mapupdate must fetch the privkey,
#    both when it sits early in a small file and when a large file pushes
#    it past the initial read.
1153 def test_fetch_privkey(self):
1154 d = defer.succeed(None)
1155 # use the sibling filenode (which hasn't been used yet), and make
1156 # sure it can fetch the privkey. The file is small, so the privkey
1157 # will be fetched on the first (query) pass.
1158 d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
1159 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1161 # create a new file, which is large enough to knock the privkey out
1162 # of the early part of the file
1163 LARGE = "These are Larger contents" * 200 # about 5KB
1164 LARGE_uploadable = MutableData(LARGE)
1165 d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
1166 def _created(large_fn):
1167 large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
1168 return self.make_servermap(MODE_WRITE, large_fn2)
1169 d.addCallback(_created)
1170 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
# -- test_mark_bad: shares marked bad must vanish from the map, and a
#    MODE_WRITE update should locate replacements.
1174 def test_mark_bad(self):
1175 d = defer.succeed(None)
1176 ms = self.make_servermap
1178 d.addCallback(lambda res: ms(mode=MODE_READ))
1179 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1181 v = sm.best_recoverable_version()
1182 vm = sm.make_versionmap()
1183 shares = list(vm[v])
1184 self.failUnlessEqual(len(shares), 6)
1185 self._corrupted = set()
1186 # mark the first 5 shares as corrupt, then update the servermap.
1187 # The map should not have the marked shares it in any more, and
1188 # new shares should be found to replace the missing ones.
1189 for (shnum, peerid, timestamp) in shares:
1191 self._corrupted.add( (peerid, shnum) )
1192 sm.mark_bad_share(peerid, shnum, "")
1193 return self.update_servermap(sm, MODE_WRITE)
1194 d.addCallback(_made_map)
1196 # this should find all 5 shares that weren't marked bad
1197 v = sm.best_recoverable_version()
1198 vm = sm.make_versionmap()
1199 shares = list(vm[v])
1200 for (peerid, shnum) in self._corrupted:
1201 peer_shares = sm.shares_on_peer(peerid)
1202 self.failIf(shnum in peer_shares,
1203 "%d was in %s" % (shnum, peer_shares))
1204 self.failUnlessEqual(len(shares), 5)
1205 d.addCallback(_check_map)
# -- failUnlessNoneRecoverable: the map sees no versions at all.
1208 def failUnlessNoneRecoverable(self, sm):
1209 self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1210 self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1211 best = sm.best_recoverable_version()
1212 self.failUnlessEqual(best, None)
1213 self.failUnlessEqual(len(sm.shares_available()), 0)
# -- test_no_shares: with storage emptied, every mode reports nothing.
1215 def test_no_shares(self):
1216 self._storage._peers = {} # delete all shares
1217 ms = self.make_servermap
1218 d = defer.succeed(None)
1220 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1221 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1223 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1224 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1226 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1227 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1229 d.addCallback(lambda res: ms(mode=MODE_READ))
1230 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
# -- failUnlessNotQuiteEnough: exactly one *unrecoverable* version with
#    only 2 of the needed 3 shares.  NOTE(review): `.values()[0]` is
#    Python-2-only.
1234 def failUnlessNotQuiteEnough(self, sm):
1235 self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1236 self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
1237 best = sm.best_recoverable_version()
1238 self.failUnlessEqual(best, None)
1239 self.failUnlessEqual(len(sm.shares_available()), 1)
1240 self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
# -- test_not_quite_enough_shares: leave only 2 shares (k=3), so every
#    mode sees an unrecoverable version.
#    NOTE(review): the binding of `s` (presumably `s = self._storage`)
#    and the re-insertion of two shares fall in lines 1244/1249-1251,
#    which are missing from this excerpt.
1243 def test_not_quite_enough_shares(self):
1245 ms = self.make_servermap
1246 num_shares = len(s._peers)
1247 for peerid in s._peers:
1248 s._peers[peerid] = {}
1252 # now there ought to be only two shares left
1253 assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
1255 d = defer.succeed(None)
1257 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1258 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1259 d.addCallback(lambda sm:
1260 self.failUnlessEqual(len(sm.make_sharemap()), 2))
1261 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1262 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1263 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1264 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1265 d.addCallback(lambda res: ms(mode=MODE_READ))
1266 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1271 def test_servermapupdater_finds_mdmf_files(self):
1272 # setUp already published an MDMF file for us. We just need to
1273 # make sure that when we run the ServermapUpdater, the file is
1274 # reported to have one recoverable version.
1275 d = defer.succeed(None)
1276 d.addCallback(lambda ignored:
1277 self.publish_mdmf())
1278 d.addCallback(lambda ignored:
1279 self.make_servermap(mode=MODE_CHECK))
1280 # Calling make_servermap also updates the servermap in the mode
1281 # that we specify, so we just need to see what it says.
1282 def _check_servermap(sm):
1283 self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1284 d.addCallback(_check_servermap)
# -- test_fetch_update: a ranged MODE_WRITE mapupdate on an MDMF file
#    collects per-share update data for all 10 shares.
#    NOTE(review): `itervalues` is Python-2-only.
1288 def test_fetch_update(self):
1289 d = defer.succeed(None)
1290 d.addCallback(lambda ignored:
1291 self.publish_mdmf())
1292 d.addCallback(lambda ignored:
1293 self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1294 def _check_servermap(sm):
1296 self.failUnlessEqual(len(sm.update_data), 10)
1298 for data in sm.update_data.itervalues():
1299 self.failUnlessEqual(len(data), 1)
1300 d.addCallback(_check_servermap)
# -- test_servermapupdater_finds_sdmf_files: SDMF analog of the MDMF
#    discovery test above.
1304 def test_servermapupdater_finds_sdmf_files(self):
1305 d = defer.succeed(None)
1306 d.addCallback(lambda ignored:
1307 self.publish_sdmf())
1308 d.addCallback(lambda ignored:
1309 self.make_servermap(mode=MODE_CHECK))
1310 d.addCallback(lambda servermap:
1311 self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1315 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
# Publish-then-Retrieve round-trip tests, including a large matrix of
# deliberate share corruptions (via the module-level corrupt() helper,
# defined elsewhere in this file) applied either before or after the
# servermap update.
# NOTE(review): this excerpt is a numbered listing with gaps — def headers
# (e.g. `setUp` before 1317), `return` statements, and some keyword
# arguments are missing; code is reproduced exactly as extracted.
1317 return self.publish_one()
# -- make_servermap: fresh or re-used ServerMap for self._fn.
1319 def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1321 oldmap = ServerMap()
1323 sb = self._storage_broker
1324 smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
# -- abbrev_verinfo: "seqnum-roothash4" short label for a verinfo tuple.
1328 def abbrev_verinfo(self, verinfo):
1331 (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1332 offsets_tuple) = verinfo
1333 return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
# -- abbrev_verinfo_dict: same abbreviation applied to a verinfo-keyed
#    dict.  NOTE(review): the initialization of `output` (and its return)
#    fall in lines missing from this excerpt.
1335 def abbrev_verinfo_dict(self, verinfo_d):
1337 for verinfo,value in verinfo_d.items():
1338 (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1339 offsets_tuple) = verinfo
1340 output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
# -- dump_servermap: debugging aid (Python-2 print statements).
1343 def dump_servermap(self, servermap):
1344 print "SERVERMAP", servermap
1345 print "RECOVERABLE", [self.abbrev_verinfo(v)
1346 for v in servermap.recoverable_versions()]
1347 print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1348 print "available", self.abbrev_verinfo_dict(servermap.shares_available())
# -- do_download: Retrieve `version` (default: best recoverable) into a
#    MemoryConsumer and fire with the joined bytes.
1350 def do_download(self, servermap, version=None):
1352 version = servermap.best_recoverable_version()
1353 r = Retrieve(self._fn, servermap, version)
1354 c = consumer.MemoryConsumer()
1355 d = r.download(consumer=c)
1356 d.addCallback(lambda mc: "".join(mc.chunks))
# -- test_basic: download via a fresh map, a re-used map, an updated map,
#    and after clobbering the cached pubkey.
1360 def test_basic(self):
1361 d = self.make_servermap()
1362 def _do_retrieve(servermap):
1363 self._smap = servermap
1364 #self.dump_servermap(servermap)
1365 self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1366 return self.do_download(servermap)
1367 d.addCallback(_do_retrieve)
1368 def _retrieved(new_contents):
1369 self.failUnlessEqual(new_contents, self.CONTENTS)
1370 d.addCallback(_retrieved)
1371 # we should be able to re-use the same servermap, both with and
1372 # without updating it.
1373 d.addCallback(lambda res: self.do_download(self._smap))
1374 d.addCallback(_retrieved)
1375 d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1376 d.addCallback(lambda res: self.do_download(self._smap))
1377 d.addCallback(_retrieved)
1378 # clobbering the pubkey should make the servermap updater re-fetch it
1379 def _clobber_pubkey(res):
1380 self._fn._pubkey = None
1381 d.addCallback(_clobber_pubkey)
1382 d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1383 d.addCallback(lambda res: self.do_download(self._smap))
1384 d.addCallback(_retrieved)
# -- test_all_shares_vanished: deleting every share between mapupdate and
#    download must raise NotEnoughSharesError.
1387 def test_all_shares_vanished(self):
1388 d = self.make_servermap()
1389 def _remove_shares(servermap):
1390 for shares in self._storage._peers.values():
1392 d1 = self.shouldFail(NotEnoughSharesError,
1393 "test_all_shares_vanished",
1395 self.do_download, servermap)
1397 d.addCallback(_remove_shares)
# -- test_no_servers: an empty broker yields an empty, quiet servermap.
1400 def test_no_servers(self):
1401 sb2 = make_storagebroker(num_peers=0)
1402 # if there are no servers, then a MODE_READ servermap should come
1404 d = self.make_servermap(sb=sb2)
1405 def _check_servermap(servermap):
1406 self.failUnlessEqual(servermap.best_recoverable_version(), None)
1407 self.failIf(servermap.recoverable_versions())
1408 self.failIf(servermap.unrecoverable_versions())
1409 self.failIf(servermap.all_peers())
1410 d.addCallback(_check_servermap)
# -- test_no_servers_download: download with no servers fails, and a
#    later download with servers restored still works (ticket #463-ish).
1413 def test_no_servers_download(self):
1414 sb2 = make_storagebroker(num_peers=0)
1415 self._fn._storage_broker = sb2
1416 d = self.shouldFail(UnrecoverableFileError,
1417 "test_no_servers_download",
1418 "no recoverable versions",
1419 self._fn.download_best_version)
1421 # a failed download that occurs while we aren't connected to
1422 # anybody should not prevent a subsequent download from working.
1423 # This isn't quite the webapi-driven test that #463 wants, but it
1424 # should be close enough.
1425 self._fn._storage_broker = self._storage_broker
1426 return self._fn.download_best_version()
1427 def _retrieved(new_contents):
1428 self.failUnlessEqual(new_contents, self.CONTENTS)
1429 d.addCallback(_restore)
1430 d.addCallback(_retrieved)
# -- _test_corrupt_all: corrupt `offset` in every share (before or after
#    the mapupdate, per corrupt_early — TODO confirm: that keyword's line
#    is missing from this excerpt), then check the download either
#    succeeds with the original contents or fails with `substring`.
1434 def _test_corrupt_all(self, offset, substring,
1435 should_succeed=False,
1437 failure_checker=None,
1438 fetch_privkey=False):
1439 d = defer.succeed(None)
1441 d.addCallback(corrupt, self._storage, offset)
1442 d.addCallback(lambda res: self.make_servermap())
1443 if not corrupt_early:
1444 d.addCallback(corrupt, self._storage, offset)
1445 def _do_retrieve(servermap):
1446 ver = servermap.best_recoverable_version()
1447 if ver is None and not should_succeed:
1448 # no recoverable versions == not succeeding. The problem
1449 # should be noted in the servermap's list of problems.
1451 allproblems = [str(f) for f in servermap.problems]
1452 self.failUnlessIn(substring, "".join(allproblems))
1455 d1 = self._fn.download_version(servermap, ver,
1457 d1.addCallback(lambda new_contents:
1458 self.failUnlessEqual(new_contents, self.CONTENTS))
1460 d1 = self.shouldFail(NotEnoughSharesError,
1461 "_corrupt_all(offset=%s)" % (offset,),
1463 self._fn.download_version, servermap,
1467 d1.addCallback(failure_checker)
1468 d1.addCallback(lambda res: servermap)
1470 d.addCallback(_do_retrieve)
1473 def test_corrupt_all_verbyte(self):
1474 # when the version byte is not 0 or 1, we hit an UnknownVersionError
1475 # error in unpack_share().
1476 d = self._test_corrupt_all(0, "UnknownVersionError")
1477 def _check_servermap(servermap):
1478 # and the dump should mention the problems
1480 dump = servermap.dump(s).getvalue()
1481 self.failUnless("30 PROBLEMS" in dump, dump)
1482 d.addCallback(_check_servermap)
1485 def test_corrupt_all_seqnum(self):
1486 # a corrupt sequence number will trigger a bad signature
1487 return self._test_corrupt_all(1, "signature is invalid")
1489 def test_corrupt_all_R(self):
1490 # a corrupt root hash will trigger a bad signature
1491 return self._test_corrupt_all(9, "signature is invalid")
1493 def test_corrupt_all_IV(self):
1494 # a corrupt salt/IV will trigger a bad signature
1495 return self._test_corrupt_all(41, "signature is invalid")
1497 def test_corrupt_all_k(self):
1498 # a corrupt 'k' will trigger a bad signature
1499 return self._test_corrupt_all(57, "signature is invalid")
1501 def test_corrupt_all_N(self):
1502 # a corrupt 'N' will trigger a bad signature
1503 return self._test_corrupt_all(58, "signature is invalid")
1505 def test_corrupt_all_segsize(self):
1506 # a corrupt segsize will trigger a bad signature
1507 return self._test_corrupt_all(59, "signature is invalid")
1509 def test_corrupt_all_datalen(self):
1510 # a corrupt data length will trigger a bad signature
1511 return self._test_corrupt_all(67, "signature is invalid")
1513 def test_corrupt_all_pubkey(self):
1514 # a corrupt pubkey won't match the URI's fingerprint. We need to
1515 # remove the pubkey from the filenode, or else it won't bother trying
1517 self._fn._pubkey = None
1518 return self._test_corrupt_all("pubkey",
1519 "pubkey doesn't match fingerprint")
1521 def test_corrupt_all_sig(self):
1522 # a corrupt signature is a bad one
1523 # the signature runs from about [543:799], depending upon the length
1525 return self._test_corrupt_all("signature", "signature is invalid")
1527 def test_corrupt_all_share_hash_chain_number(self):
1528 # a corrupt share hash chain entry will show up as a bad hash. If we
1529 # mangle the first byte, that will look like a bad hash number,
1530 # causing an IndexError
1531 return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1533 def test_corrupt_all_share_hash_chain_hash(self):
1534 # a corrupt share hash chain entry will show up as a bad hash. If we
1535 # mangle a few bytes in, that will look like a bad hash.
1536 return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1538 def test_corrupt_all_block_hash_tree(self):
1539 return self._test_corrupt_all("block_hash_tree",
1540 "block hash tree failure")
1542 def test_corrupt_all_block(self):
1543 return self._test_corrupt_all("share_data", "block hash tree failure")
1545 def test_corrupt_all_encprivkey(self):
1546 # a corrupted privkey won't even be noticed by the reader, only by a
1548 return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1551 def test_corrupt_all_encprivkey_late(self):
1552 # this should work for the same reason as above, but we corrupt
1553 # after the servermap update to exercise the error handling
1555 # We need to remove the privkey from the node, or the retrieve
1556 # process won't know to update it.
1557 self._fn._privkey = None
1558 return self._test_corrupt_all("enc_privkey",
1559 None, # this shouldn't fail
1560 should_succeed=True,
1561 corrupt_early=False,
1565 # disabled until retrieve tests checkstring on each blockfetch. I didn't
1566 # just use a .todo because the failing-but-ignored test emits about 30kB
1568 def OFF_test_corrupt_all_seqnum_late(self):
1569 # corrupting the seqnum between mapupdate and retrieve should result
1570 # in NotEnoughSharesError, since each share will look invalid
1573 self.failUnless(f.check(NotEnoughSharesError))
1574 self.failUnless("uncoordinated write" in str(f))
1575 return self._test_corrupt_all(1, "ran out of peers",
1576 corrupt_early=False,
1577 failure_checker=_check)
1579 def test_corrupt_all_block_hash_tree_late(self):
1582 self.failUnless(f.check(NotEnoughSharesError))
1583 return self._test_corrupt_all("block_hash_tree",
1584 "block hash tree failure",
1585 corrupt_early=False,
1586 failure_checker=_check)
1589 def test_corrupt_all_block_late(self):
1592 self.failUnless(f.check(NotEnoughSharesError))
1593 return self._test_corrupt_all("share_data", "block hash tree failure",
1594 corrupt_early=False,
1595 failure_checker=_check)
# -- test_basic_pubkey_at_end: corrupt the pubkey in all but the last k
#    shares; the download must still succeed after retries.
1598 def test_basic_pubkey_at_end(self):
1599 # we corrupt the pubkey in all but the last 'k' shares, allowing the
1600 # download to succeed but forcing a bunch of retries first. Note that
1601 # this is rather pessimistic: our Retrieve process will throw away
1602 # the whole share if the pubkey is bad, even though the rest of the
1603 # share might be good.
1605 self._fn._pubkey = None
1606 k = self._fn.get_required_shares()
1607 N = self._fn.get_total_shares()
1608 d = defer.succeed(None)
1609 d.addCallback(corrupt, self._storage, "pubkey",
1610 shnums_to_corrupt=range(0, N-k))
1611 d.addCallback(lambda res: self.make_servermap())
1612 def _do_retrieve(servermap):
1613 self.failUnless(servermap.problems)
1614 self.failUnless("pubkey doesn't match fingerprint"
1615 in str(servermap.problems[0]))
1616 ver = servermap.best_recoverable_version()
1617 r = Retrieve(self._fn, servermap, ver)
1618 c = consumer.MemoryConsumer()
1619 return r.download(c)
1620 d.addCallback(_do_retrieve)
1621 d.addCallback(lambda mc: "".join(mc.chunks))
1622 d.addCallback(lambda new_contents:
1623 self.failUnlessEqual(new_contents, self.CONTENTS))
# -- _test_corrupt_some: corrupt only shares 0-4 so the file remains
#    recoverable; download must still return the original contents.
1627 def _test_corrupt_some(self, offset, mdmf=False):
1629 d = self.publish_mdmf()
1631 d = defer.succeed(None)
1632 d.addCallback(lambda ignored:
1633 corrupt(None, self._storage, offset, range(5)))
1634 d.addCallback(lambda ignored:
1635 self.make_servermap())
1636 def _do_retrieve(servermap):
1637 ver = servermap.best_recoverable_version()
1638 self.failUnless(ver)
1639 return self._fn.download_best_version()
1640 d.addCallback(_do_retrieve)
1641 d.addCallback(lambda new_contents:
1642 self.failUnlessEqual(new_contents, self.CONTENTS))
1646 def test_corrupt_some(self):
1647 # corrupt the data of first five shares (so the servermap thinks
1648 # they're good but retrieve marks them as bad), so that the
1649 # MODE_READ set of 6 will be insufficient, forcing node.download to
1650 # retry with more servers.
1651 return self._test_corrupt_some("share_data")
# -- test_download_fails: corrupt every signature; download_best_version
#    must fail with UnrecoverableFileError.
1654 def test_download_fails(self):
1655 d = corrupt(None, self._storage, "signature")
1656 d.addCallback(lambda ignored:
1657 self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1658 "no recoverable versions",
1659 self._fn.download_best_version))
# -- MDMF variants of the corruption tests above.
1664 def test_corrupt_mdmf_block_hash_tree(self):
1665 d = self.publish_mdmf()
1666 d.addCallback(lambda ignored:
1667 self._test_corrupt_all(("block_hash_tree", 12 * 32),
1668 "block hash tree failure",
1669 corrupt_early=False,
1670 should_succeed=False))
1674 def test_corrupt_mdmf_block_hash_tree_late(self):
1675 d = self.publish_mdmf()
1676 d.addCallback(lambda ignored:
1677 self._test_corrupt_all(("block_hash_tree", 12 * 32),
1678 "block hash tree failure",
1680 should_succeed=False))
1684 def test_corrupt_mdmf_share_data(self):
1685 d = self.publish_mdmf()
1686 d.addCallback(lambda ignored:
1687 # TODO: Find out what the block size is and corrupt a
1688 # specific block, rather than just guessing.
1689 self._test_corrupt_all(("share_data", 12 * 40),
1690 "block hash tree failure",
1692 should_succeed=False))
1696 def test_corrupt_some_mdmf(self):
1697 return self._test_corrupt_some(("share_data", 12 * 40),
# Assert that checker results `r` report a healthy file; `where` labels
# any failure message.
# NOTE(review): callers chain further callbacks on the result (see
# check_expected_failure usage below), so a trailing `return r` appears
# to be missing from this excerpt — confirm against the full file.
1702 def check_good(self, r, where):
1703 self.failUnless(r.is_healthy(), where)
# Assert that checker results `r` report an UNhealthy file; `where`
# labels any failure message.
# NOTE(review): as with check_good, a trailing `return r` appears to be
# missing from this excerpt — downstream callbacks consume the result.
1706 def check_bad(self, r, where):
1707 self.failIf(r.is_healthy(), where)
# Scan r.problems for a failure of type `expected_exception` whose text
# contains `substring`; self.fail() if none is found.  `where` labels
# the assertion messages.
# NOTE(review): original line 1716 is missing from this excerpt —
# presumably a `return` that exits after the first matching problem,
# without which the final self.fail() would always run.
1710 def check_expected_failure(self, r, expected_exception, substring, where):
1711 for (peerid, storage_index, shnum, f) in r.problems:
1712 if f.check(expected_exception):
1713 self.failUnless(substring in str(f),
1714 "%s: substring '%s' not in '%s'" %
1715 (where, substring, str(f)))
1717 self.fail("%s: didn't see expected exception %s in problems %s" %
1718 (where, expected_exception, r.problems))
1721 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1723 return self.publish_one()
1726 def test_check_good(self):
1727 d = self._fn.check(Monitor())
1728 d.addCallback(self.check_good, "test_check_good")
1731 def test_check_mdmf_good(self):
1732 d = self.publish_mdmf()
1733 d.addCallback(lambda ignored:
1734 self._fn.check(Monitor()))
1735 d.addCallback(self.check_good, "test_check_mdmf_good")
1738 def test_check_no_shares(self):
1739 for shares in self._storage._peers.values():
1741 d = self._fn.check(Monitor())
1742 d.addCallback(self.check_bad, "test_check_no_shares")
1745 def test_check_mdmf_no_shares(self):
1746 d = self.publish_mdmf()
1748 for share in self._storage._peers.values():
1750 d.addCallback(_then)
1751 d.addCallback(lambda ignored:
1752 self._fn.check(Monitor()))
1753 d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1756 def test_check_not_enough_shares(self):
1757 for shares in self._storage._peers.values():
1758 for shnum in shares.keys():
1761 d = self._fn.check(Monitor())
1762 d.addCallback(self.check_bad, "test_check_not_enough_shares")
1765 def test_check_mdmf_not_enough_shares(self):
1766 d = self.publish_mdmf()
1768 for shares in self._storage._peers.values():
1769 for shnum in shares.keys():
1772 d.addCallback(_then)
1773 d.addCallback(lambda ignored:
1774 self._fn.check(Monitor()))
1775 d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")
1779 def test_check_all_bad_sig(self):
1780 d = corrupt(None, self._storage, 1) # bad sig
1781 d.addCallback(lambda ignored:
1782 self._fn.check(Monitor()))
1783 d.addCallback(self.check_bad, "test_check_all_bad_sig")
1786 def test_check_mdmf_all_bad_sig(self):
1787 d = self.publish_mdmf()
1788 d.addCallback(lambda ignored:
1789 corrupt(None, self._storage, 1))
1790 d.addCallback(lambda ignored:
1791 self._fn.check(Monitor()))
1792 d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1795 def test_check_all_bad_blocks(self):
1796 d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1797 # the Checker won't notice this.. it doesn't look at actual data
1798 d.addCallback(lambda ignored:
1799 self._fn.check(Monitor()))
1800 d.addCallback(self.check_good, "test_check_all_bad_blocks")
1804 def test_check_mdmf_all_bad_blocks(self):
1805 d = self.publish_mdmf()
1806 d.addCallback(lambda ignored:
1807 corrupt(None, self._storage, "share_data"))
1808 d.addCallback(lambda ignored:
1809 self._fn.check(Monitor()))
1810 d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1813 def test_verify_good(self):
1814 d = self._fn.check(Monitor(), verify=True)
1815 d.addCallback(self.check_good, "test_verify_good")
1818 def test_verify_all_bad_sig(self):
1819 d = corrupt(None, self._storage, 1) # bad sig
1820 d.addCallback(lambda ignored:
1821 self._fn.check(Monitor(), verify=True))
1822 d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1825 def test_verify_one_bad_sig(self):
1826 d = corrupt(None, self._storage, 1, [9]) # bad sig
1827 d.addCallback(lambda ignored:
1828 self._fn.check(Monitor(), verify=True))
1829 d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1832 def test_verify_one_bad_block(self):
1833 d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1834 # the Verifier *will* notice this, since it examines every byte
1835 d.addCallback(lambda ignored:
1836 self._fn.check(Monitor(), verify=True))
1837 d.addCallback(self.check_bad, "test_verify_one_bad_block")
1838 d.addCallback(self.check_expected_failure,
1839 CorruptShareError, "block hash tree failure",
1840 "test_verify_one_bad_block")
1843 def test_verify_one_bad_sharehash(self):
1844 d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1845 d.addCallback(lambda ignored:
1846 self._fn.check(Monitor(), verify=True))
1847 d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1848 d.addCallback(self.check_expected_failure,
1849 CorruptShareError, "corrupt hashes",
1850 "test_verify_one_bad_sharehash")
1853 def test_verify_one_bad_encprivkey(self):
1854 d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1855 d.addCallback(lambda ignored:
1856 self._fn.check(Monitor(), verify=True))
1857 d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1858 d.addCallback(self.check_expected_failure,
1859 CorruptShareError, "invalid privkey",
1860 "test_verify_one_bad_encprivkey")
1863 def test_verify_one_bad_encprivkey_uncheckable(self):
1864 d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1865 readonly_fn = self._fn.get_readonly()
1866 # a read-only node has no way to validate the privkey
1867 d.addCallback(lambda ignored:
1868 readonly_fn.check(Monitor(), verify=True))
1869 d.addCallback(self.check_good,
1870 "test_verify_one_bad_encprivkey_uncheckable")
1874 def test_verify_mdmf_good(self):
1875 d = self.publish_mdmf()
1876 d.addCallback(lambda ignored:
1877 self._fn.check(Monitor(), verify=True))
1878 d.addCallback(self.check_good, "test_verify_mdmf_good")
1882 def test_verify_mdmf_one_bad_block(self):
1883 d = self.publish_mdmf()
1884 d.addCallback(lambda ignored:
1885 corrupt(None, self._storage, "share_data", [1]))
1886 d.addCallback(lambda ignored:
1887 self._fn.check(Monitor(), verify=True))
1888 # We should find one bad block here
1889 d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1890 d.addCallback(self.check_expected_failure,
1891 CorruptShareError, "block hash tree failure",
1892 "test_verify_mdmf_one_bad_block")
# MDMF variant of the bad-encprivkey test: corrupt share 0's encrypted
# private key and expect a CorruptShareError mentioning "privkey".
1896 def test_verify_mdmf_bad_encprivkey(self):
1897 d = self.publish_mdmf()
1898 d.addCallback(lambda ignored:
1899 corrupt(None, self._storage, "enc_privkey", [0]))
1900 d.addCallback(lambda ignored:
1901 self._fn.check(Monitor(), verify=True))
1902 d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1903 d.addCallback(self.check_expected_failure,
1904 CorruptShareError, "privkey",
1905 "test_verify_mdmf_bad_encprivkey")
# Corrupt field selector 1 of share 1 -- presumably the signature field,
# given the test name (TODO confirm what the integer selector maps to in
# corrupt()) -- and expect the verifying check to report the file bad.
1909 def test_verify_mdmf_bad_sig(self):
1910 d = self.publish_mdmf()
1911 d.addCallback(lambda ignored:
1912 corrupt(None, self._storage, 1, [1]))
1913 d.addCallback(lambda ignored:
1914 self._fn.check(Monitor(), verify=True))
1915 d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
# MDMF read-only variant: corrupt the encprivkey, then verify through a
# read-only node. The read-only verifier cannot check the privkey, so the
# check is expected to pass (check_good).
1919 def test_verify_mdmf_bad_encprivkey_uncheckable(self):
1920 d = self.publish_mdmf()
1921 d.addCallback(lambda ignored:
1922 corrupt(None, self._storage, "enc_privkey", [1]))
1923 d.addCallback(lambda ignored:
1924 self._fn.get_readonly())
1925 d.addCallback(lambda fn:
1926 fn.check(Monitor(), verify=True))
1927 d.addCallback(self.check_good,
1928 "test_verify_mdmf_bad_encprivkey_uncheckable")
# Tests for repairing mutable files: no-op repairs, unrepairable files
# (0 or 1 surviving shares), repairable files (k or more shares), merges
# of parallel highest-seqnum versions, and the readcap-repair limitation
# tracked as ticket #625. PublishMixin presumably supplies publish_one /
# publish_mdmf / publish_multiple / _set_versions -- not visible here.
# NOTE(review): this chunk is missing interleaved source lines throughout
# (loop bodies such as the "del shares[...]" under several "for" loops,
# "def _check(...)" headers, "else:" branches, and "return d" statements).
# Code below is preserved verbatim; do not treat it as runnable as-is.
1932 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
# Snapshot a FakeStorage instance into a {(peerid, shnum): data} dict.
# NOTE(review): the "return all_shares" line is not visible in this chunk.
1934 def get_shares(self, s):
1935 all_shares = {} # maps (peerid, shnum) to share data
1936 for peerid in s._peers:
1937 shares = s._peers[peerid]
1938 for shnum in shares:
1939 data = shares[shnum]
1940 all_shares[ (peerid, shnum) ] = data
# Append a snapshot of current storage to self.old_shares, so later
# callbacks can compare before/after states.
1943 def copy_shares(self, ignored=None):
1944 self.old_shares.append(self.get_shares(self._storage))
# Repairing a healthy file: repair succeeds, every share stays on the
# same server, and each lands at seqnum+1 (IV and roothash change).
1946 def test_repair_nop(self):
1947 self.old_shares = []
1948 d = self.publish_one()
1949 d.addCallback(self.copy_shares)
1950 d.addCallback(lambda res: self._fn.check(Monitor()))
1951 d.addCallback(lambda check_results: self._fn.repair(check_results))
1952 def _check_results(rres):
1953 self.failUnless(IRepairResults.providedBy(rres))
1954 self.failUnless(rres.get_successful())
1955 # TODO: examine results
1959 initial_shares = self.old_shares[0]
1960 new_shares = self.old_shares[1]
1961 # TODO: this really shouldn't change anything. When we implement
1962 # a "minimal-bandwidth" repairer", change this test to assert:
1963 #self.failUnlessEqual(new_shares, initial_shares)
1965 # all shares should be in the same place as before
1966 self.failUnlessEqual(set(initial_shares.keys()),
1967 set(new_shares.keys()))
1968 # but they should all be at a newer seqnum. The IV will be
1969 # different, so the roothash will be too.
1970 for key in initial_shares:
1975 k0, N0, segsize0, datalen0,
1976 o0) = unpack_header(initial_shares[key])
1981 k1, N1, segsize1, datalen1,
1982 o1) = unpack_header(new_shares[key])
1983 self.failUnlessEqual(version0, version1)
1984 self.failUnlessEqual(seqnum0+1, seqnum1)
1985 self.failUnlessEqual(k0, k1)
1986 self.failUnlessEqual(N0, N1)
1987 self.failUnlessEqual(segsize0, segsize1)
1988 self.failUnlessEqual(datalen0, datalen1)
1989 d.addCallback(_check_results)
# Assert the two most recent snapshots in self.old_shares are identical,
# i.e. a failed repair attempt did not modify any share.
1992 def failIfSharesChanged(self, ignored=None):
1993 old_shares = self.old_shares[-2]
1994 current_shares = self.old_shares[-1]
1995 self.failUnlessEqual(old_shares, current_shares)
# With every share deleted, repair must report failure.
# NOTE(review): the body of the "for peerid in shares:" loop (presumably
# the share-deleting statement) is not visible in this chunk.
1998 def test_unrepairable_0shares(self):
1999 d = self.publish_one()
2000 def _delete_all_shares(ign):
2001 shares = self._storage._peers
2002 for peerid in shares:
2004 d.addCallback(_delete_all_shares)
2005 d.addCallback(lambda ign: self._fn.check(Monitor()))
2006 d.addCallback(lambda check_results: self._fn.repair(check_results))
2008 self.failUnlessEqual(crr.get_successful(), False)
2009 d.addCallback(_check)
# MDMF flavor of the 0-shares case above.
2012 def test_mdmf_unrepairable_0shares(self):
2013 d = self.publish_mdmf()
2014 def _delete_all_shares(ign):
2015 shares = self._storage._peers
2016 for peerid in shares:
2018 d.addCallback(_delete_all_shares)
2019 d.addCallback(lambda ign: self._fn.check(Monitor()))
2020 d.addCallback(lambda check_results: self._fn.repair(check_results))
2021 d.addCallback(lambda crr: self.failIf(crr.get_successful()))
# With only one share surviving (fewer than k), repair must fail.
# NOTE(review): the condition selecting which shnums to delete (between
# the inner "for" and the "del") is missing from this chunk.
2025 def test_unrepairable_1share(self):
2026 d = self.publish_one()
2027 def _delete_all_shares(ign):
2028 shares = self._storage._peers
2029 for peerid in shares:
2030 for shnum in list(shares[peerid]):
2032 del shares[peerid][shnum]
2033 d.addCallback(_delete_all_shares)
2034 d.addCallback(lambda ign: self._fn.check(Monitor()))
2035 d.addCallback(lambda check_results: self._fn.repair(check_results))
2037 self.failUnlessEqual(crr.get_successful(), False)
2038 d.addCallback(_check)
# MDMF flavor of the 1-share case.
2041 def test_mdmf_unrepairable_1share(self):
2042 d = self.publish_mdmf()
2043 def _delete_all_shares(ign):
2044 shares = self._storage._peers
2045 for peerid in shares:
2046 for shnum in list(shares[peerid]):
2048 del shares[peerid][shnum]
2049 d.addCallback(_delete_all_shares)
2050 d.addCallback(lambda ign: self._fn.check(Monitor()))
2051 d.addCallback(lambda check_results: self._fn.repair(check_results))
2053 self.failUnlessEqual(crr.get_successful(), False)
2054 d.addCallback(_check)
# With enough shares left (name suggests 5), repair must succeed.
2057 def test_repairable_5shares(self):
2058 d = self.publish_mdmf()
2059 def _delete_all_shares(ign):
2060 shares = self._storage._peers
2061 for peerid in shares:
2062 for shnum in list(shares[peerid]):
2064 del shares[peerid][shnum]
2065 d.addCallback(_delete_all_shares)
2066 d.addCallback(lambda ign: self._fn.check(Monitor()))
2067 d.addCallback(lambda check_results: self._fn.repair(check_results))
2069 self.failUnlessEqual(crr.get_successful(), True)
2070 d.addCallback(_check)
# MDMF repairable case; additionally asserts the checker sees the file
# as unhealthy-but-recoverable before repairing.
2073 def test_mdmf_repairable_5shares(self):
2074 d = self.publish_mdmf()
2075 def _delete_some_shares(ign):
2076 shares = self._storage._peers
2077 for peerid in shares:
2078 for shnum in list(shares[peerid]):
2080 del shares[peerid][shnum]
2081 d.addCallback(_delete_some_shares)
2082 d.addCallback(lambda ign: self._fn.check(Monitor()))
2084 self.failIf(cr.is_healthy())
2085 self.failUnless(cr.is_recoverable())
2087 d.addCallback(_check)
2088 d.addCallback(lambda check_results: self._fn.repair(check_results))
2090 self.failUnlessEqual(crr.get_successful(), True)
2091 d.addCallback(_check1)
# Two recoverable versions share the highest seqnum: repair() must raise
# MustForceRepairError (leaving shares untouched) unless force=True, in
# which case the version with the larger roothash wins and is republished
# at seqnum 5.
2095 def test_merge(self):
2096 self.old_shares = []
2097 d = self.publish_multiple()
2098 # repair will refuse to merge multiple highest seqnums unless you
2100 d.addCallback(lambda res:
2101 self._set_versions({0:3,2:3,4:3,6:3,8:3,
2102 1:4,3:4,5:4,7:4,9:4}))
2103 d.addCallback(self.copy_shares)
2104 d.addCallback(lambda res: self._fn.check(Monitor()))
2105 def _try_repair(check_results):
2106 ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
2107 d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
2108 self._fn.repair, check_results)
2109 d2.addCallback(self.copy_shares)
2110 d2.addCallback(self.failIfSharesChanged)
2111 d2.addCallback(lambda res: check_results)
2113 d.addCallback(_try_repair)
2114 d.addCallback(lambda check_results:
2115 self._fn.repair(check_results, force=True))
2116 # this should give us 10 shares of the highest roothash
2117 def _check_repair_results(rres):
2118 self.failUnless(rres.get_successful())
2120 d.addCallback(_check_repair_results)
2121 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2122 def _check_smap(smap):
2123 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2124 self.failIf(smap.unrecoverable_versions())
2125 # now, which should have won?
2126 roothash_s4a = self.get_roothash_for(3)
2127 roothash_s4b = self.get_roothash_for(4)
2128 if roothash_s4b > roothash_s4a:
2129 expected_contents = self.CONTENTS[4]
2131 expected_contents = self.CONTENTS[3]
2132 new_versionid = smap.best_recoverable_version()
2133 self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2134 d2 = self._fn.download_version(smap, new_versionid)
2135 d2.addCallback(self.failUnlessEqual, expected_contents)
2137 d.addCallback(_check_smap)
# Distinct seqnums (v2 + v3) need no merge: repair proceeds without
# force=True, ignores v2, and republishes v3 as a new v5.
2140 def test_non_merge(self):
2141 self.old_shares = []
2142 d = self.publish_multiple()
2143 # repair should not refuse a repair that doesn't need to merge. In
2144 # this case, we combine v2 with v3. The repair should ignore v2 and
2145 # copy v3 into a new v5.
2146 d.addCallback(lambda res:
2147 self._set_versions({0:2,2:2,4:2,6:2,8:2,
2148 1:3,3:3,5:3,7:3,9:3}))
2149 d.addCallback(lambda res: self._fn.check(Monitor()))
2150 d.addCallback(lambda check_results: self._fn.repair(check_results))
2151 # this should give us 10 shares of v3
2152 def _check_repair_results(rres):
2153 self.failUnless(rres.get_successful())
2155 d.addCallback(_check_repair_results)
2156 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2157 def _check_smap(smap):
2158 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2159 self.failIf(smap.unrecoverable_versions())
2160 # now, which should have won?
2161 expected_contents = self.CONTENTS[3]
2162 new_versionid = smap.best_recoverable_version()
2163 self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
2164 d2 = self._fn.download_version(smap, new_versionid)
2165 d2.addCallback(self.failUnlessEqual, expected_contents)
2167 d.addCallback(_check_smap)
# Helper for test_merge: unpack the header of the first share found in
# saved snapshot 'index'. NOTE(review): the "return root_hash" line is
# not visible in this chunk.
2170 def get_roothash_for(self, index):
2171 # return the roothash for the first share we see in the saved set
2172 shares = self._copied_shares[index]
2173 for peerid in shares:
2174 for shnum in shares[peerid]:
2175 share = shares[peerid][shnum]
2176 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2177 unpack_header(share)
# Ticket #625: repairing via a readcap is not yet supported --
# check_and_repair on a read-only node should detect unhealth but skip
# the repair attempt entirely.
2180 def test_check_and_repair_readcap(self):
2181 # we can't currently repair from a mutable readcap: #625
2182 self.old_shares = []
2183 d = self.publish_one()
2184 d.addCallback(self.copy_shares)
2185 def _get_readcap(res):
2186 self._fn3 = self._fn.get_readonly()
2187 # also delete some shares
2188 for peerid,shares in self._storage._peers.items():
2190 d.addCallback(_get_readcap)
2191 d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
2192 def _check_results(crr):
2193 self.failUnless(ICheckAndRepairResults.providedBy(crr))
2194 # we should detect the unhealthy, but skip over mutable-readcap
2195 # repairs until #625 is fixed
2196 self.failIf(crr.get_pre_repair_results().is_healthy())
2197 self.failIf(crr.get_repair_attempted())
2198 self.failIf(crr.get_post_repair_results().is_healthy())
2199 d.addCallback(_check_results)
# A dict subclass that discards every store: assigning to it is a no-op,
# used below to disable the nodemaker's node cache so each create_from_cap
# call builds a fresh node. NOTE(review): the __setitem__ body (presumably
# a bare "return") is not visible in this chunk.
2202 class DevNullDictionary(dict):
2203 def __setitem__(self, key, value):
# Publish the same mutable file under several different k-of-N encodings,
# interleave the resulting shares in storage, and verify that download
# copes with the mixture and returns the first recoverable version.
# NOTE(review): this chunk is missing interleaved lines throughout the
# class (the "def setUp(self):" header, parts of _encode and
# make_servermap, if/elif branches in _merge, "return d" statements).
# Code below is preserved verbatim; do not treat it as runnable as-is.
2206 class MultipleEncodings(unittest.TestCase):
2208 self.CONTENTS = "New contents go here"
2209 self.uploadable = MutableData(self.CONTENTS)
2210 self._storage = FakeStorage()
2211 self._nodemaker = make_nodemaker(self._storage, num_peers=20)
2212 self._storage_broker = self._nodemaker.storage_broker
2213 d = self._nodemaker.create_mutable_file(self.uploadable)
2216 d.addCallback(_created)
# Re-encode 'data' as k-of-n by cloning the node, grafting on the keys,
# overriding the encoding parameters, and publishing into empty storage.
2219 def _encode(self, k, n, data, version=SDMF_VERSION):
2220 # encode 'data' into a peerid->shares dict.
2223 # disable the nodecache, since for these tests we explicitly need
2224 # multiple nodes pointing at the same file
2225 self._nodemaker._node_cache = DevNullDictionary()
2226 fn2 = self._nodemaker.create_from_cap(fn.get_uri())
2227 # then we copy over other fields that are normally fetched from the
2229 fn2._pubkey = fn._pubkey
2230 fn2._privkey = fn._privkey
2231 fn2._encprivkey = fn._encprivkey
2232 # and set the encoding parameters to something completely different
2233 fn2._required_shares = k
2234 fn2._total_shares = n
2237 s._peers = {} # clear existing storage
2238 p2 = Publish(fn2, self._storage_broker, None)
2239 uploadable = MutableData(data)
2240 d = p2.publish(uploadable)
2241 def _published(res):
2245 d.addCallback(_published)
# Build a fresh (or updated) ServerMap for self._fn.
2248 def make_servermap(self, mode=MODE_READ, oldmap=None):
2250 oldmap = ServerMap()
2251 smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
2256 def test_multiple_encodings(self):
2257 # we encode the same file in two different ways (3-of-10 and 4-of-9),
2258 # then mix up the shares, to make sure that download survives seeing
2259 # a variety of encodings. This is actually kind of tricky to set up.
2261 contents1 = "Contents for encoding 1 (3-of-10) go here"
2262 contents2 = "Contents for encoding 2 (4-of-9) go here"
2263 contents3 = "Contents for encoding 3 (4-of-7) go here"
2265 # we make a retrieval object that doesn't know what encoding
2267 fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
2269 # now we upload a file through fn1, and grab its shares
2270 d = self._encode(3, 10, contents1)
2271 def _encoded_1(shares):
2272 self._shares1 = shares
2273 d.addCallback(_encoded_1)
2274 d.addCallback(lambda res: self._encode(4, 9, contents2))
2275 def _encoded_2(shares):
2276 self._shares2 = shares
2277 d.addCallback(_encoded_2)
2278 d.addCallback(lambda res: self._encode(4, 7, contents3))
2279 def _encoded_3(shares):
2280 self._shares3 = shares
2281 d.addCallback(_encoded_3)
2284 log.msg("merging sharelists")
2285 # we merge the shares from the two sets, leaving each shnum in
2286 # its original location, but using a share from set1 or set2
2287 # according to the following sequence:
2298 # so that neither form can be recovered until fetch [f], at which
2299 # point version-s1 (the 3-of-10 form) should be recoverable. If
2300 # the implementation latches on to the first version it sees,
2301 # then s2 will be recoverable at fetch [g].
2303 # Later, when we implement code that handles multiple versions,
2304 # we can use this framework to assert that all recoverable
2305 # versions are retrieved, and test that 'epsilon' does its job
2307 places = [2, 2, 3, 2, 1, 1, 1, 2]
2310 sb = self._storage_broker
2312 for peerid in sorted(sb.get_all_serverids()):
2313 for shnum in self._shares1.get(peerid, {}):
2314 if shnum < len(places):
2315 which = places[shnum]
2318 self._storage._peers[peerid] = peers = {}
2319 in_1 = shnum in self._shares1[peerid]
2320 in_2 = shnum in self._shares2.get(peerid, {})
2321 in_3 = shnum in self._shares3.get(peerid, {})
2324 peers[shnum] = self._shares1[peerid][shnum]
2325 sharemap[shnum] = peerid
2328 peers[shnum] = self._shares2[peerid][shnum]
2329 sharemap[shnum] = peerid
2332 peers[shnum] = self._shares3[peerid][shnum]
2333 sharemap[shnum] = peerid
2335 # we don't bother placing any other shares
2336 # now sort the sequence so that share 0 is returned first
2337 new_sequence = [sharemap[shnum]
2338 for shnum in sorted(sharemap.keys())]
2339 self._storage._sequence = new_sequence
2340 log.msg("merge done")
2341 d.addCallback(_merge)
2342 d.addCallback(lambda res: fn3.download_best_version())
2343 def _retrieved(new_contents):
2344 # the current specified behavior is "first version recoverable"
2345 self.failUnlessEqual(new_contents, contents1)
2346 d.addCallback(_retrieved)
# Tests of download/servermap behavior when the grid holds shares from
# several versions of a mutable file at once.
# NOTE(review): this chunk is missing interleaved lines (e.g. the
# "def setUp(self):" header before line 2353, "return d" statements);
# code below is preserved verbatim.
2350 class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
2353 return self.publish_multiple()
2355 def test_multiple_versions(self):
2356 # if we see a mix of versions in the grid, download_best_version
2357 # should get the latest one
2358 self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
2359 d = self._fn.download_best_version()
2360 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
2361 # and the checker should report problems
2362 d.addCallback(lambda res: self._fn.check(Monitor()))
2363 d.addCallback(self.check_bad, "test_multiple_versions")
2365 # but if everything is at version 2, that's what we should download
2366 d.addCallback(lambda res:
2367 self._set_versions(dict([(i,2) for i in range(10)])))
2368 d.addCallback(lambda res: self._fn.download_best_version())
2369 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2370 # if exactly one share is at version 3, we should still get v2
2371 d.addCallback(lambda res:
2372 self._set_versions({0:3}))
2373 d.addCallback(lambda res: self._fn.download_best_version())
2374 d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
2375 # but the servermap should see the unrecoverable version. This
2376 # depends upon the single newer share being queried early.
2377 d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2378 def _check_smap(smap):
2379 self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
2380 newer = smap.unrecoverable_newer_versions()
2381 self.failUnlessEqual(len(newer), 1)
2382 verinfo, health = newer.items()[0]
2383 self.failUnlessEqual(verinfo[0], 4)
2384 self.failUnlessEqual(health, (1,3))
2385 self.failIf(smap.needs_merge())
2386 d.addCallback(_check_smap)
2387 # if we have a mix of two parallel versions (s4a and s4b), we could
2389 d.addCallback(lambda res:
2390 self._set_versions({0:3,2:3,4:3,6:3,8:3,
2391 1:4,3:4,5:4,7:4,9:4}))
2392 d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
2393 def _check_smap_mixed(smap):
2394 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2395 newer = smap.unrecoverable_newer_versions()
2396 self.failUnlessEqual(len(newer), 0)
2397 self.failUnless(smap.needs_merge())
2398 d.addCallback(_check_smap_mixed)
2399 d.addCallback(lambda res: self._fn.download_best_version())
2400 d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
2401 res == self.CONTENTS[4]))
# modify() against a mixed-version grid must replace every share with a
# single new recoverable version at the highest seqnum.
2404 def test_replace(self):
2405 # if we see a mix of versions in the grid, we should be able to
2406 # replace them all with a newer version
2408 # if exactly one share is at version 3, we should download (and
2409 # replace) v2, and the result should be v4. Note that the index we
2410 # give to _set_versions is different than the sequence number.
2411 target = dict([(i,2) for i in range(10)]) # seqnum3
2412 target[0] = 3 # seqnum4
2413 self._set_versions(target)
2415 def _modify(oldversion, servermap, first_time):
2416 return oldversion + " modified"
2417 d = self._fn.modify(_modify)
2418 d.addCallback(lambda res: self._fn.download_best_version())
2419 expected = self.CONTENTS[2] + " modified"
2420 d.addCallback(lambda res: self.failUnlessEqual(res, expected))
2421 # and the servermap should indicate that the outlier was replaced too
2422 d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
2423 def _check_smap(smap):
2424 self.failUnlessEqual(smap.highest_seqnum(), 5)
2425 self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
2426 self.failUnlessEqual(len(smap.recoverable_versions()), 1)
2427 d.addCallback(_check_smap)
# Exercise the response cache: two non-adjacent 100-byte segments are
# added at verinfo "v1"/shnum 1 (offsets 0 and 2000); reads must return
# exact cached bytes for fully-covered ranges and None for any miss,
# wrong verinfo/shnum, or partially-covered range. The second section
# verifies that adjacent fragments are joined into one readable span.
# NOTE(review): the line constructing the cache object 'c' (and a later
# re-construction before the "joining fragments" section) is not visible
# in this chunk -- presumably 'c = ResponseCache()' given the imports.
2431 class Utils(unittest.TestCase):
2432 def test_cache(self):
2434 # xdata = base62.b2a(os.urandom(100))[:100]
2435 xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
2436 ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
2437 c.add("v1", 1, 0, xdata)
2438 c.add("v1", 1, 2000, ydata)
2439 self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
2440 self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
2441 self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
2442 self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
2443 self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
2444 self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
2445 self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
2446 self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
2447 self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
2448 self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
2449 self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
2450 self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
2451 self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
2452 self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
2453 self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
2454 self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
2455 self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
2456 self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
2458 # test joining fragments
2460 c.add("v1", 1, 0, xdata[:10])
2461 c.add("v1", 1, 10, xdata[10:20])
2462 self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
class Exceptions(unittest.TestCase):
    """Sanity checks on the repr() of the mutable-file exception types."""

    def test_repr(self):
        # Each exception's repr() must contain its own class name, so that
        # log output and failure reports identify the error unambiguously.
        for exc, wanted in [(NeedMoreDataError(100, 50, 100),
                             "NeedMoreDataError"),
                            (UncoordinatedWriteError(),
                             "UncoordinatedWriteError")]:
            rendered = repr(exc)
            self.failUnless(wanted in rendered, rendered)
class SameKeyGenerator:
    """A key 'generator' that always hands back one pre-made keypair.

    Tests that must know, ahead of time, where a new mutable file will
    land install one of these as the nodemaker's key_generator: with a
    fixed keypair the derived storage index is deterministic.
    """

    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # Match the real generator's interface: accept (and ignore) a
        # keysize argument, and deliver the pair via an already-fired
        # Deferred just as the asynchronous original would.
        return defer.succeed((self.pubkey, self.privkey))
# A post_call_notifier helper: installed on every server wrapper, it marks
# a wrapper as broken (wrapper.broken = True) so that server starts
# failing. NOTE(review): interleaved lines are missing from this chunk
# (presumably a 'done' flag initializer and the guard that restricts the
# breakage to the first server queried, per the class name).
2478 class FirstServerGetsKilled:
2480 def notify(self, retval, wrapper, methname):
2482 wrapper.broken = True
# A post_call_notifier helper: remembers the first server wrapper it sees
# (self.silenced) so that later queries to that server can be made to
# behave as if its shares have vanished -- the assert shows the special
# handling applies to slot_testv_and_readv_and_writev calls.
# NOTE(review): interleaved lines (the __init__ header, the first-call
# guard, and whatever is returned for the silenced server) are missing
# from this chunk; code below is preserved verbatim.
2486 class FirstServerGetsDeleted:
2489 self.silenced = None
2490 def notify(self, retval, wrapper, methname):
2492 # this query will work, but later queries should think the share
2495 self.silenced = wrapper
2497 if wrapper == self.silenced:
2498 assert methname == "slot_testv_and_readv_and_writev"
# Failure-injection tests run against a real (in-process) grid via
# GridTestMixin: stale servermaps, broken/missing servers, and servers
# that die mid-query.
# NOTE(review): this chunk is missing interleaved source lines throughout
# the class (set_up_grid() calls after each self.basedir assignment,
# "def _created(n):" headers, "return d" statements, else-branches).
# Code below is preserved verbatim; do not treat it as runnable as-is.
2502 class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
# Shared body for the publish-surprise tests: write with a servermap
# captured before a competing write, and expect UncoordinatedWriteError.
2503 def do_publish_surprise(self, version):
2504 self.basedir = "mutable/Problems/test_publish_surprise_%s" % version
2506 nm = self.g.clients[0].nodemaker
2507 d = nm.create_mutable_file(MutableData("contents 1"),
2510 d = defer.succeed(None)
2511 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2512 def _got_smap1(smap):
2513 # stash the old state of the file
2515 d.addCallback(_got_smap1)
2516 # then modify the file, leaving the old map untouched
2517 d.addCallback(lambda res: log.msg("starting winning write"))
2518 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2519 # now attempt to modify the file with the old servermap. This
2520 # will look just like an uncoordinated write, in which every
2521 # single share got updated between our mapupdate and our publish
2522 d.addCallback(lambda res: log.msg("starting doomed write"))
2523 d.addCallback(lambda res:
2524 self.shouldFail(UncoordinatedWriteError,
2525 "test_publish_surprise", None,
2527 MutableData("contents 2a"), self.old_map))
2529 d.addCallback(_created)
# SDMF and MDMF flavors of the publish-surprise scenario.
2532 def test_publish_surprise_sdmf(self):
2533 return self.do_publish_surprise(SDMF_VERSION)
2535 def test_publish_surprise_mdmf(self):
2536 return self.do_publish_surprise(MDMF_VERSION)
# Read with a stale servermap after a competing write: the download
# should fail with NotEnoughSharesError ("have 0 of 1").
2538 def test_retrieve_surprise(self):
2539 self.basedir = "mutable/Problems/test_retrieve_surprise"
2541 nm = self.g.clients[0].nodemaker
2542 d = nm.create_mutable_file(MutableData("contents 1"))
2544 d = defer.succeed(None)
2545 d.addCallback(lambda res: n.get_servermap(MODE_READ))
2546 def _got_smap1(smap):
2547 # stash the old state of the file
2549 d.addCallback(_got_smap1)
2550 # then modify the file, leaving the old map untouched
2551 d.addCallback(lambda res: log.msg("starting winning write"))
2552 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2553 # now attempt to retrieve the old version with the old servermap.
2554 # This will look like someone has changed the file since we
2555 # updated the servermap.
2556 d.addCallback(lambda res: n._cache._clear())
2557 d.addCallback(lambda res: log.msg("starting doomed read"))
2558 d.addCallback(lambda res:
2559 self.shouldFail(NotEnoughSharesError,
2560 "test_retrieve_surprise",
2561 "ran out of peers: have 0 of 1",
2564 self.old_map.best_recoverable_version(),
2567 d.addCallback(_created)
# Shares appearing on an unexpected server (after one server is removed
# and the file is rewritten) must trigger UncoordinatedWriteError when
# publishing with the stale map.
2571 def test_unexpected_shares(self):
2572 # upload the file, take a servermap, shut down one of the servers,
2573 # upload it again (causing shares to appear on a new server), then
2574 # upload using the old servermap. The last upload should fail with an
2575 # UncoordinatedWriteError, because of the shares that didn't appear
2577 self.basedir = "mutable/Problems/test_unexpected_shares"
2579 nm = self.g.clients[0].nodemaker
2580 d = nm.create_mutable_file(MutableData("contents 1"))
2582 d = defer.succeed(None)
2583 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
2584 def _got_smap1(smap):
2585 # stash the old state of the file
2587 # now shut down one of the servers
2588 peer0 = list(smap.make_sharemap()[0])[0]
2589 self.g.remove_server(peer0)
2590 # then modify the file, leaving the old map untouched
2591 log.msg("starting winning write")
2592 return n.overwrite(MutableData("contents 2"))
2593 d.addCallback(_got_smap1)
2594 # now attempt to modify the file with the old servermap. This
2595 # will look just like an uncoordinated write, in which every
2596 # single share got updated between our mapupdate and our publish
2597 d.addCallback(lambda res: log.msg("starting doomed write"))
2598 d.addCallback(lambda res:
2599 self.shouldFail(UncoordinatedWriteError,
2600 "test_surprise", None,
2602 MutableData("contents 2a"), self.old_map))
2604 d.addCallback(_created)
# Publish/update must survive individual broken servers; uses
# SameKeyGenerator so the target storage index -- and hence which server
# to break -- is known before the file is created.
2607 def test_bad_server(self):
2608 # Break one server, then create the file: the initial publish should
2609 # complete with an alternate server. Breaking a second server should
2610 # not prevent an update from succeeding either.
2611 self.basedir = "mutable/Problems/test_bad_server"
2613 nm = self.g.clients[0].nodemaker
2615 # to make sure that one of the initial peers is broken, we have to
2616 # get creative. We create an RSA key and compute its storage-index.
2617 # Then we make a KeyGenerator that always returns that one key, and
2618 # use it to create the mutable file. This will get easier when we can
2619 # use #467 static-server-selection to disable permutation and force
2620 # the choice of server for share[0].
2622 d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
2623 def _got_key( (pubkey, privkey) ):
2624 nm.key_generator = SameKeyGenerator(pubkey, privkey)
2625 pubkey_s = pubkey.serialize()
2626 privkey_s = privkey.serialize()
2627 u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
2628 ssk_pubkey_fingerprint_hash(pubkey_s))
2629 self._storage_index = u.get_storage_index()
2630 d.addCallback(_got_key)
2631 def _break_peer0(res):
2632 si = self._storage_index
2633 servers = nm.storage_broker.get_servers_for_psi(si)
2634 self.g.break_server(servers[0].get_serverid())
2635 self.server1 = servers[1]
2636 d.addCallback(_break_peer0)
2637 # now "create" the file, using the pre-established key, and let the
2638 # initial publish finally happen
2639 d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
2640 # that ought to work
2642 d = n.download_best_version()
2643 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2644 # now break the second peer
2645 def _break_peer1(res):
2646 self.g.break_server(self.server1.get_serverid())
2647 d.addCallback(_break_peer1)
2648 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2649 # that ought to work too
2650 d.addCallback(lambda res: n.download_best_version())
2651 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2652 def _explain_error(f):
2654 if f.check(NotEnoughServersError):
2655 print "first_error:", f.value.first_error
2657 d.addErrback(_explain_error)
2659 d.addCallback(_got_node)
# Same as test_bad_server but with no spare servers, so the publisher
# must reuse a server that already holds one of its shares.
2662 def test_bad_server_overlap(self):
2663 # like test_bad_server, but with no extra unused servers to fall back
2664 # upon. This means that we must re-use a server which we've already
2665 # used. If we don't remember the fact that we sent them one share
2666 # already, we'll mistakenly think we're experiencing an
2667 # UncoordinatedWriteError.
2669 # Break one server, then create the file: the initial publish should
2670 # complete with an alternate server. Breaking a second server should
2671 # not prevent an update from succeeding either.
2672 self.basedir = "mutable/Problems/test_bad_server_overlap"
2674 nm = self.g.clients[0].nodemaker
2675 sb = nm.storage_broker
2677 peerids = [s.get_serverid() for s in sb.get_connected_servers()]
2678 self.g.break_server(peerids[0])
2680 d = nm.create_mutable_file(MutableData("contents 1"))
2682 d = n.download_best_version()
2683 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
2684 # now break one of the remaining servers
2685 def _break_second_server(res):
2686 self.g.break_server(peerids[1])
2687 d.addCallback(_break_second_server)
2688 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
2689 # that ought to work too
2690 d.addCallback(lambda res: n.download_best_version())
2691 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
2693 d.addCallback(_created)
# With every connected server marked broken, publish must fail with
# NotEnoughServersError ("ran out of good servers").
2696 def test_publish_all_servers_bad(self):
2697 # Break all servers: the publish should fail
2698 self.basedir = "mutable/Problems/test_publish_all_servers_bad"
2700 nm = self.g.clients[0].nodemaker
2701 for s in nm.storage_broker.get_connected_servers():
2702 s.get_rref().broken = True
2704 d = self.shouldFail(NotEnoughServersError,
2705 "test_publish_all_servers_bad",
2706 "ran out of good servers",
2707 nm.create_mutable_file, MutableData("contents"))
# With zero servers in the grid, publish must also fail, with a
# different message ("Ran out of non-bad servers").
2710 def test_publish_no_servers(self):
2711 # no servers at all: the publish should fail
2712 self.basedir = "mutable/Problems/test_publish_no_servers"
2713 self.set_up_grid(num_servers=0)
2714 nm = self.g.clients[0].nodemaker
2716 d = self.shouldFail(NotEnoughServersError,
2717 "test_publish_no_servers",
2718 "Ran out of non-bad servers",
2719 nm.create_mutable_file, MutableData("contents"))
# Exercise _privkey_query_failed: the first server to answer the initial
# mapupdate read is made to throw on the follow-up privkey query; the
# mapupdate must still succeed via the other servers.
2723 def test_privkey_query_error(self):
2724 # when a servermap is updated with MODE_WRITE, it tries to get the
2725 # privkey. Something might go wrong during this query attempt.
2726 # Exercise the code in _privkey_query_failed which tries to handle
2728 self.basedir = "mutable/Problems/test_privkey_query_error"
2729 self.set_up_grid(num_servers=20)
2730 nm = self.g.clients[0].nodemaker
2731 nm._node_cache = DevNullDictionary() # disable the nodecache
2733 # we need some contents that are large enough to push the privkey out
2734 # of the early part of the file
2735 LARGE = "These are Larger contents" * 2000 # about 50KB
2736 LARGE_uploadable = MutableData(LARGE)
2737 d = nm.create_mutable_file(LARGE_uploadable)
2739 self.uri = n.get_uri()
2740 self.n2 = nm.create_from_cap(self.uri)
2742 # When a mapupdate is performed on a node that doesn't yet know
2743 # the privkey, a short read is sent to a batch of servers, to get
2744 # the verinfo and (hopefully, if the file is short enough) the
2745 # encprivkey. Our file is too large to let this first read
2746 # contain the encprivkey. Each non-encprivkey-bearing response
2747 # that arrives (until the node gets the encprivkey) will trigger
2748 # a second read to specifically read the encprivkey.
2750 # So, to exercise this case:
2751 # 1. notice which server gets a read() call first
2752 # 2. tell that server to start throwing errors
2753 killer = FirstServerGetsKilled()
2754 for s in nm.storage_broker.get_connected_servers():
2755 s.get_rref().post_call_notifier = killer.notify
2756 d.addCallback(_created)
2758 # now we update a servermap from a new node (which doesn't have the
2759 # privkey yet, forcing it to use a separate privkey query). Note that
2760 # the map-update will succeed, since we'll just get a copy from one
2761 # of the other shares.
2762 d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
# Like test_privkey_query_error, but the share silently disappears on
# the follow-up query instead of raising.
2766 def test_privkey_query_missing(self):
2767 # like test_privkey_query_error, but the shares are deleted by the
2768 # second query, instead of raising an exception.
2769 self.basedir = "mutable/Problems/test_privkey_query_missing"
2770 self.set_up_grid(num_servers=20)
2771 nm = self.g.clients[0].nodemaker
2772 LARGE = "These are Larger contents" * 2000 # about 50KiB
2773 LARGE_uploadable = MutableData(LARGE)
2774 nm._node_cache = DevNullDictionary() # disable the nodecache
2776 d = nm.create_mutable_file(LARGE_uploadable)
2778 self.uri = n.get_uri()
2779 self.n2 = nm.create_from_cap(self.uri)
2780 deleter = FirstServerGetsDeleted()
2781 for s in nm.storage_broker.get_connected_servers():
2782 s.get_rref().post_call_notifier = deleter.notify
2783 d.addCallback(_created)
2784 d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
# A server that answers the prefix-validation read and then dies should
# be handled by Retrieve's block/hash-query error path; the download must
# still return the full contents.
2788 def test_block_and_hash_query_error(self):
2789 # This tests for what happens when a query to a remote server
2790 # fails in either the hash validation step or the block getting
2791 # step (because of batching, this is the same actual query).
2792 # We need to have the storage server persist up until the point
2793 # that its prefix is validated, then suddenly die. This
2794 # exercises some exception handling code in Retrieve.
2795 self.basedir = "mutable/Problems/test_block_and_hash_query_error"
2796 self.set_up_grid(num_servers=20)
2797 nm = self.g.clients[0].nodemaker
2798 CONTENTS = "contents" * 2000
2799 CONTENTS_uploadable = MutableData(CONTENTS)
2800 d = nm.create_mutable_file(CONTENTS_uploadable)
2803 d.addCallback(_created)
2804 d.addCallback(lambda ignored:
2805 self._node.get_servermap(MODE_READ))
2806 def _then(servermap):
2807 # we have our servermap. Now we set up the servers like the
2808 # tests above -- the first one that gets a read call should
2809 # start throwing errors, but only after returning its prefix
2810 # for validation. Since we'll download without fetching the
2811 # private key, the next query to the remote server will be
2812 # for either a block and salt or for hashes, either of which
2813 # will exercise the error handling code.
2814 killer = FirstServerGetsKilled()
2815 for s in nm.storage_broker.get_connected_servers():
2816 s.get_rref().post_call_notifier = killer.notify
2817 ver = servermap.best_recoverable_version()
2819 return self._node.download_version(servermap, ver)
2820 d.addCallback(_then)
2821 d.addCallback(lambda data:
2822 self.failUnlessEqual(data, CONTENTS))
class FileHandle(unittest.TestCase):
    # Tests for the MutableFileHandle uploadable wrapper.
    #
    # NOTE(review): the `def setUp(self):` line appears to have been
    # dropped from this excerpt; the three assignments below clearly
    # belong to it -- confirm against the full source.
    self.test_data = "Test Data" * 50000
    self.sio = StringIO(self.test_data)
    self.uploadable = MutableFileHandle(self.sio)
def test_filehandle_read(self):
    # Reading the handle in fixed-size chunks must reproduce the
    # original data, in order.
    self.basedir = "mutable/FileHandle/test_filehandle_read"
    # NOTE(review): a `chunk_size = ...` binding appears to have been
    # dropped from this excerpt (it is referenced below) -- confirm.
    for i in xrange(0, len(self.test_data), chunk_size):
        data = self.uploadable.read(chunk_size)
        # read() evidently yields an iterable of strings; join them.
        data = "".join(data)
        # NOTE(review): a `start = i` binding also appears to be
        # missing from this excerpt.
        end = i + chunk_size
        self.failUnlessEqual(data, self.test_data[start:end])
def test_filehandle_get_size(self):
    # get_size() must report exactly the number of bytes wrapped by
    # the handle.
    self.basedir = "mutable/FileHandle/test_filehandle_get_size"
    reported = self.uploadable.get_size()
    expected = len(self.test_data)
    self.failUnlessEqual(reported, expected)
def test_filehandle_get_size_out_of_order(self):
    # We should be able to call get_size whenever we want without
    # disturbing the location of the seek pointer.
    # NOTE(review): a `chunk_size = ...` binding appears to have been
    # dropped from this excerpt -- confirm against the full source.
    data = self.uploadable.read(chunk_size)
    self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
    # Querying the size must not move the read cursor.
    size = self.uploadable.get_size()
    self.failUnlessEqual(size, len(self.test_data))
    # Now get more data. We should be right where we left off.
    more_data = self.uploadable.read(chunk_size)
    # NOTE(review): a `start = chunk_size` binding also appears to be
    # missing from this excerpt.
    end = chunk_size * 2
    self.failUnlessEqual("".join(more_data), self.test_data[start:end])
def test_filehandle_file(self):
    # Make sure that the MutableFileHandle works on a file as well
    # as a StringIO object, since in some cases it will be asked to
    # deal with a real file.
    self.basedir = self.mktemp()
    # necessary? What am I doing wrong here?
    os.mkdir(self.basedir)
    f_path = os.path.join(self.basedir, "test_file")
    f = open(f_path, "w")
    f.write(self.test_data)
    # NOTE(review): an `f.close()` between the write and the re-open
    # appears to have been dropped from this excerpt -- without it the
    # written data may not be flushed before the read. Confirm.
    f = open(f_path, "r")
    uploadable = MutableFileHandle(f)
    data = uploadable.read(len(self.test_data))
    self.failUnlessEqual("".join(data), self.test_data)
    size = uploadable.get_size()
    self.failUnlessEqual(size, len(self.test_data))
def test_close(self):
    # Closing the MutableFileHandle must propagate to the wrapped
    # filehandle, leaving the underlying StringIO closed as well.
    handle = self.uploadable
    handle.close()
    self.failUnless(self.sio.closed)
class DataHandle(unittest.TestCase):
    # Tests for the MutableData uploadable wrapper.
    #
    # NOTE(review): the `def setUp(self):` line appears to have been
    # dropped from this excerpt; the two assignments below clearly
    # belong to it -- confirm against the full source.
    self.test_data = "Test Data" * 50000
    self.uploadable = MutableData(self.test_data)
def test_datahandle_read(self):
    # Reading the data handle in fixed-size chunks must reproduce
    # the original string, in order.
    # NOTE(review): a `chunk_size = ...` binding appears to have been
    # dropped from this excerpt (it is referenced below) -- confirm.
    for i in xrange(0, len(self.test_data), chunk_size):
        data = self.uploadable.read(chunk_size)
        data = "".join(data)
        # NOTE(review): a `start = i` binding also appears to be
        # missing from this excerpt.
        end = i + chunk_size
        self.failUnlessEqual(data, self.test_data[start:end])
def test_datahandle_get_size(self):
    # The reported size must equal the length of the wrapped string.
    reported = self.uploadable.get_size()
    self.failUnlessEqual(reported, len(self.test_data))
def test_datahandle_get_size_out_of_order(self):
    # We should be able to call get_size whenever we want without
    # disturbing the location of the seek pointer.
    # NOTE(review): a `chunk_size = ...` binding appears to have been
    # dropped from this excerpt -- confirm against the full source.
    data = self.uploadable.read(chunk_size)
    self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
    # Querying the size must not move the read cursor.
    size = self.uploadable.get_size()
    self.failUnlessEqual(size, len(self.test_data))
    # Now get more data. We should be right where we left off.
    more_data = self.uploadable.read(chunk_size)
    # NOTE(review): a `start = chunk_size` binding also appears to be
    # missing from this excerpt.
    end = chunk_size * 2
    self.failUnlessEqual("".join(more_data), self.test_data[start:end])
# NOTE(review): the continuation line of this class statement (after
# the trailing backslash), the `def setUp(self):` line, and a
# grid-setup call (self.g is used two lines below its creation point)
# all appear to have been dropped from this excerpt -- confirm
# against the full source.
class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
    GridTestMixin.setUp(self)
    self.basedir = self.mktemp()
    self.c = self.g.clients[0]
    self.nm = self.c.nodemaker
    self.data = "test data" * 100000 # about 900 KiB; MDMF
    self.small_data = "test data" * 10 # about 90 B; SDMF
def do_upload_mdmf(self):
    # Create an MDMF file from self.data; used by most Version tests.
    d = self.nm.create_mutable_file(MutableData(self.data),
                                    version=MDMF_VERSION)
    # NOTE(review): the `def _then(n):` wrapper (which presumably
    # stores self.mdmf_node and returns n) appears to have been
    # dropped from this excerpt.
    assert isinstance(n, MutableFileNode)
    assert n._protocol_version == MDMF_VERSION
    d.addCallback(_then)
    # NOTE(review): a trailing `return d` is likely missing here.
def do_upload_sdmf(self):
    # Create an SDMF file from self.small_data.
    d = self.nm.create_mutable_file(MutableData(self.small_data))
    # NOTE(review): the `def _then(n):` wrapper (storing
    # self.sdmf_node) appears to have been dropped from this excerpt.
    assert isinstance(n, MutableFileNode)
    assert n._protocol_version == SDMF_VERSION
    d.addCallback(_then)
    # NOTE(review): a trailing `return d` is likely missing here.
def do_upload_empty_sdmf(self):
    # Create a zero-length SDMF file.
    d = self.nm.create_mutable_file(MutableData(""))
    # NOTE(review): the `def _then(n):` wrapper appears to have been
    # dropped from this excerpt.
    assert isinstance(n, MutableFileNode)
    self.sdmf_zero_length_node = n
    assert n._protocol_version == SDMF_VERSION
    d.addCallback(_then)
    # NOTE(review): a trailing `return d` is likely missing here.
def do_upload(self):
    # Upload both an MDMF and an SDMF file, sequentially.
    d = self.do_upload_mdmf()
    d.addCallback(lambda ign: self.do_upload_sdmf())
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_debug(self):
    # Exercise the CLI debug tools (find-shares, dump-share,
    # catalog-shares) against a freshly uploaded MDMF file.
    d = self.do_upload_mdmf()
    # NOTE(review): the `def _debug(n):` wrapper appears to have been
    # dropped from this excerpt (`_debug` is registered as a callback
    # at the bottom) -- confirm against the full source.
    fso = debug.FindSharesOptions()
    storage_index = base32.b2a(n.get_storage_index())
    fso.si_s = storage_index
    # NOTE(review): the `for (...)` clause of this list comprehension
    # appears to have been dropped between the next two lines.
    fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
                    in self.iterate_servers()]
    fso.stdout = StringIO()
    fso.stderr = StringIO()
    debug.find_shares(fso)
    sharefiles = fso.stdout.getvalue().splitlines()
    expected = self.nm.default_encoding_parameters["n"]
    self.failUnlessEqual(len(sharefiles), expected)
    # Dump the first share and sanity-check the printed fields.
    do = debug.DumpOptions()
    do["filename"] = sharefiles[0]
    do.stdout = StringIO()
    debug.dump_share(do)
    output = do.stdout.getvalue()
    lines = set(output.splitlines())
    self.failUnless("Mutable slot found:" in lines, output)
    self.failUnless(" share_type: MDMF" in lines, output)
    self.failUnless(" num_extra_leases: 0" in lines, output)
    self.failUnless(" MDMF contents:" in lines, output)
    self.failUnless(" seqnum: 1" in lines, output)
    self.failUnless(" required_shares: 3" in lines, output)
    self.failUnless(" total_shares: 10" in lines, output)
    self.failUnless(" segsize: 131073" in lines, output)
    self.failUnless(" datalen: %d" % len(self.data) in lines, output)
    vcap = n.get_verify_cap().to_string()
    self.failUnless(" verify-cap: %s" % vcap in lines, output)
    # Catalog all shares; every one should be an MDMF share.
    cso = debug.CatalogSharesOptions()
    cso.nodedirs = fso.nodedirs
    cso.stdout = StringIO()
    cso.stderr = StringIO()
    debug.catalog_shares(cso)
    shares = cso.stdout.getvalue().splitlines()
    oneshare = shares[0] # all shares should be MDMF
    self.failIf(oneshare.startswith("UNKNOWN"), oneshare)
    self.failUnless(oneshare.startswith("MDMF"), oneshare)
    fields = oneshare.split()
    self.failUnlessEqual(fields[0], "MDMF")
    self.failUnlessEqual(fields[1], storage_index)
    self.failUnlessEqual(fields[2], "3/10")
    self.failUnlessEqual(fields[3], "%d" % len(self.data))
    # NOTE(review): the failure-message argument below is fields[3]
    # although the assertion is about fields[4] -- probably meant to
    # be fields[4]; worth fixing when the full file is edited.
    self.failUnless(fields[4].startswith("#1:"), fields[3])
    # the rest of fields[4] is the roothash, which depends upon
    # encryption salts and is not constant. fields[5] is the
    # remaining time on the longest lease, which is timing dependent.
    # The rest of the line is the quoted pathname to the share.
    d.addCallback(_debug)
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_get_sequence_number(self):
    # A freshly created file starts at sequence number 1; after one
    # overwrite, both the MDMF and SDMF files should report 2.
    d = self.do_upload()
    d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
    d.addCallback(lambda bv:
                  self.failUnlessEqual(bv.get_sequence_number(), 1))
    d.addCallback(lambda ignored:
                  self.sdmf_node.get_best_readable_version())
    d.addCallback(lambda bv:
                  self.failUnlessEqual(bv.get_sequence_number(), 1))
    # Now update both files; each overwrite should bump the sequence
    # number to 2.
    def _do_update(ignored):
        new_data = MutableData("foo bar baz" * 100000)
        new_small_data = MutableData("foo bar baz" * 10)
        d1 = self.mdmf_node.overwrite(new_data)
        d2 = self.sdmf_node.overwrite(new_small_data)
        dl = gatherResults([d1, d2])
        # NOTE(review): a `return dl` appears to have been dropped
        # from this excerpt.
    d.addCallback(_do_update)
    d.addCallback(lambda ignored:
                  self.mdmf_node.get_best_readable_version())
    d.addCallback(lambda bv:
                  self.failUnlessEqual(bv.get_sequence_number(), 2))
    d.addCallback(lambda ignored:
                  self.sdmf_node.get_best_readable_version())
    d.addCallback(lambda bv:
                  self.failUnlessEqual(bv.get_sequence_number(), 2))
    # NOTE(review): a trailing `return d` is likely missing here.
def test_version_extension_api(self):
    # We need to define an API by which an uploader can set the
    # extension parameters, and by which a downloader can retrieve
    # them (the k and segsize hints carried in the cap).
    d = self.do_upload_mdmf()
    d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
    def _got_version(version):
        hints = version.get_downloader_hints()
        # Should be empty at this point.
        # NOTE(review): the comment above contradicts the assertions
        # below, which expect the hints to be populated -- the
        # comment looks stale.
        self.failUnlessIn("k", hints)
        self.failUnlessEqual(hints['k'], 3)
        self.failUnlessIn('segsize', hints)
        self.failUnlessEqual(hints['segsize'], 131073)
    d.addCallback(_got_version)
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_extensions_from_cap(self):
    # If we initialize a mutable file with a cap that has extension
    # parameters in it and then grab the extension parameters using
    # our API, we should see that they're set correctly.
    d = self.do_upload_mdmf()
    # NOTE(review): the `def _then(ign):` line wrapping the next
    # three lines appears to have been dropped from this excerpt.
    mdmf_uri = self.mdmf_node.get_uri()
    new_node = self.nm.create_from_cap(mdmf_uri)
    return new_node.get_best_mutable_version()
    d.addCallback(_then)
    def _got_version(version):
        hints = version.get_downloader_hints()
        self.failUnlessIn("k", hints)
        self.failUnlessEqual(hints["k"], 3)
        self.failUnlessIn("segsize", hints)
        self.failUnlessEqual(hints["segsize"], 131073)
    d.addCallback(_got_version)
    # NOTE(review): a trailing `return d` is likely missing here.
def test_extensions_from_upload(self):
    # If we create a new mutable file with some contents, we should
    # get back an MDMF cap with the right hints in place.
    contents = "foo bar baz" * 100000
    d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
    def _got_mutable_file(n):
        rw_uri = n.get_uri()
        expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
        self.failUnlessIn(expected_k, rw_uri)
        # XXX: Get this more intelligently.
        self.failUnlessIn("131073", rw_uri)
        # The read-only cap should carry the same hints.
        ro_uri = n.get_readonly_uri()
        self.failUnlessIn(expected_k, ro_uri)
        self.failUnlessIn("131073", ro_uri)
    d.addCallback(_got_mutable_file)
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_cap_after_upload(self):
    # If we create a new mutable file and upload things to it, and
    # it's an MDMF file, we should get an MDMF cap back from that
    # file and should be able to use that.
    # That's essentially what MDMF node is, so just check that.
    d = self.do_upload_mdmf()
    # NOTE(review): the `def _then(ign):` line wrapping the block
    # below appears to have been dropped from this excerpt.
    mdmf_uri = self.mdmf_node.get_uri()
    cap = uri.from_string(mdmf_uri)
    self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
    readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
    cap = uri.from_string(readonly_mdmf_uri)
    self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
    d.addCallback(_then)
    # NOTE(review): a trailing `return d` is likely missing here.
def test_mutable_version(self):
    # assert that getting parameters from the IMutableVersion object
    # gives us the same data as getting them from the filenode itself
    d = self.do_upload()
    d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
    def _check_mdmf(bv):
        # NOTE(review): an `n = self.mdmf_node` binding appears to
        # have been dropped here (`n` is referenced below).
        self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
        self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
        self.failIf(bv.is_readonly())
    d.addCallback(_check_mdmf)
    d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
    def _check_sdmf(bv):
        # NOTE(review): likewise, an `n = self.sdmf_node` binding
        # appears to have been dropped here.
        self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
        self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
        self.failIf(bv.is_readonly())
    d.addCallback(_check_sdmf)
    # NOTE(review): a trailing `return d` is likely missing here.
def test_get_readonly_version(self):
    # Best-readable versions must be read-only; asking a read-only
    # node for its best *mutable* version must also yield a
    # read-only view.
    d = self.do_upload()
    d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
    d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
    # Attempting to get a mutable version of a mutable file from a
    # filenode initialized with a readcap should return a readonly
    # version of that same node.
    d.addCallback(lambda ign: self.mdmf_node.get_readonly())
    d.addCallback(lambda ro: ro.get_best_mutable_version())
    d.addCallback(lambda v: self.failUnless(v.is_readonly()))
    # Same checks for the SDMF flavor.
    d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
    d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
    d.addCallback(lambda ign: self.sdmf_node.get_readonly())
    d.addCallback(lambda ro: ro.get_best_mutable_version())
    d.addCallback(lambda v: self.failUnless(v.is_readonly()))
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_toplevel_overwrite(self):
    # overwrite() on the node itself must replace the full contents
    # of both the MDMF and the SDMF file.
    new_data = MutableData("foo bar baz" * 100000)
    new_small_data = MutableData("foo bar baz" * 10)
    d = self.do_upload()
    d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
    d.addCallback(lambda ignored:
                  self.mdmf_node.download_best_version())
    d.addCallback(lambda data:
                  self.failUnlessEqual(data, "foo bar baz" * 100000))
    d.addCallback(lambda ignored:
                  self.sdmf_node.overwrite(new_small_data))
    d.addCallback(lambda ignored:
                  self.sdmf_node.download_best_version())
    d.addCallback(lambda data:
                  self.failUnlessEqual(data, "foo bar baz" * 10))
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_toplevel_modify(self):
    # modify() on the node itself must apply the modifier function
    # to both the MDMF and the SDMF file.
    d = self.do_upload()
    def modifier(old_contents, servermap, first_time):
        return old_contents + "modified"
    d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
    d.addCallback(lambda ignored:
                  self.mdmf_node.download_best_version())
    d.addCallback(lambda data:
                  self.failUnlessIn("modified", data))
    d.addCallback(lambda ignored:
                  self.sdmf_node.modify(modifier))
    d.addCallback(lambda ignored:
                  self.sdmf_node.download_best_version())
    d.addCallback(lambda data:
                  self.failUnlessIn("modified", data))
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_version_modify(self):
    # TODO: When we can publish multiple versions, alter this test
    # to modify a version other than the best usable version, then
    # test to see that the best recoverable version is that.
    d = self.do_upload()
    def modifier(old_contents, servermap, first_time):
        return old_contents + "modified"
    d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
    d.addCallback(lambda ignored:
                  self.mdmf_node.download_best_version())
    d.addCallback(lambda data:
                  self.failUnlessIn("modified", data))
    d.addCallback(lambda ignored:
                  self.sdmf_node.modify(modifier))
    d.addCallback(lambda ignored:
                  self.sdmf_node.download_best_version())
    d.addCallback(lambda data:
                  self.failUnlessIn("modified", data))
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_download_version(self):
    # After publishing two recoverable versions, downloading each
    # one must return the plaintext associated with that version.
    d = self.publish_multiple()
    # We want to have two recoverable versions on the grid.
    d.addCallback(lambda res:
                  self._set_versions({0:0,2:0,4:0,6:0,8:0,
                                      1:1,3:1,5:1,7:1,9:1}))
    # Now try to download each version. We should get the plaintext
    # associated with that version.
    d.addCallback(lambda ignored:
                  self._fn.get_servermap(mode=MODE_READ))
    def _got_servermap(smap):
        versions = smap.recoverable_versions()
        assert len(versions) == 2
        self.servermap = smap
        self.version1, self.version2 = versions
        assert self.version1 != self.version2
        # seqnum is element 0 of the verinfo tuple.
        self.version1_seqnum = self.version1[0]
        self.version2_seqnum = self.version2[0]
        self.version1_index = self.version1_seqnum - 1
        self.version2_index = self.version2_seqnum - 1
    d.addCallback(_got_servermap)
    d.addCallback(lambda ignored:
                  self._fn.download_version(self.servermap, self.version1))
    # NOTE(review): the `results))` line closing this assertion
    # appears to have been dropped from the excerpt.
    d.addCallback(lambda results:
                  self.failUnlessEqual(self.CONTENTS[self.version1_index],
    d.addCallback(lambda ignored:
                  self._fn.download_version(self.servermap, self.version2))
    d.addCallback(lambda results:
                  self.failUnlessEqual(self.CONTENTS[self.version2_index],
    # NOTE(review): likewise the closer of the call above, and a
    # trailing `return d`, appear to have been dropped.
def test_download_nonexistent_version(self):
    # Asking for a version that is not on the grid must fail with
    # UnrecoverableFileError.
    d = self.do_upload_mdmf()
    d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
    def _set_servermap(servermap):
        self.servermap = servermap
    d.addCallback(_set_servermap)
    d.addCallback(lambda ignored:
                  self.shouldFail(UnrecoverableFileError, "nonexistent version",
                                  self.mdmf_node.download_version, self.servermap,
    # NOTE(review): the remaining argument line(s) closing this
    # shouldFail call, and a trailing `return d`, appear to have been
    # dropped from this excerpt.
def test_partial_read(self):
    # Partial reads at assorted offsets/lengths (segment boundaries,
    # zero-length reads) must return exactly the requested bytes.
    d = self.do_upload_mdmf()
    d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
    modes = [("start_on_segment_boundary",
              mathutil.next_multiple(128 * 1024, 3), 50),
             ("ending_one_byte_after_segment_boundary",
              mathutil.next_multiple(128 * 1024, 3)-50, 51),
             ("zero_length_at_start", 0, 0),
             ("zero_length_in_middle", 50, 0),
             ("zero_length_at_segment_boundary",
              mathutil.next_multiple(128 * 1024, 3), 0),
    # NOTE(review): the `]` closing this list appears to have been
    # dropped from this excerpt.
    for (name, offset, length) in modes:
        d.addCallback(self._do_partial_read, name, offset, length)
    # then read only a few bytes at a time, and see that the results
    # are what we expect.
    def _read_data(version):
        c = consumer.MemoryConsumer()
        d2 = defer.succeed(None)
        for i in xrange(0, len(self.data), 10000):
            d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
        d2.addCallback(lambda ignored:
                       self.failUnlessEqual(self.data, "".join(c.chunks)))
        # NOTE(review): a `return d2` appears to have been dropped
        # here.
    d.addCallback(_read_data)
    # NOTE(review): a trailing `return d` is likely missing here.
def _do_partial_read(self, version, name, offset, length):
    # Read [offset, offset+length) from `version` and compare it to
    # the same slice of self.data; print context on mismatch.
    c = consumer.MemoryConsumer()
    d = version.read(c, offset, length)
    expected = self.data[offset:offset+length]
    d.addCallback(lambda ignored: "".join(c.chunks))
    def _check(results):
        if results != expected:
            print "got: %s ... %s" % (results[:20], results[-20:])
            print "exp: %s ... %s" % (expected[:20], expected[-20:])
            self.fail("results[%s] != expected" % name)
        return version # daisy-chained to next call
    d.addCallback(_check)
    # NOTE(review): a trailing `return d` appears to have been
    # dropped from this excerpt.
def _test_read_and_download(self, node, expected):
    # Both the version.read() API and download_best_version() must
    # return `expected` for this node.
    d = node.get_best_readable_version()
    def _read_data(version):
        c = consumer.MemoryConsumer()
        d2 = defer.succeed(None)
        d2.addCallback(lambda ignored: version.read(c))
        d2.addCallback(lambda ignored:
                       self.failUnlessEqual(expected, "".join(c.chunks)))
        # NOTE(review): a `return d2` appears to have been dropped
        # here.
    d.addCallback(_read_data)
    d.addCallback(lambda ignored: node.download_best_version())
    d.addCallback(lambda data: self.failUnlessEqual(expected, data))
    # NOTE(review): a trailing `return d` is likely missing here.
def test_read_and_download_mdmf(self):
    # MDMF flavor of the read/download equivalence check.
    d = self.do_upload_mdmf()
    d.addCallback(self._test_read_and_download, self.data)
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_read_and_download_sdmf(self):
    # SDMF flavor of the read/download equivalence check.
    d = self.do_upload_sdmf()
    d.addCallback(self._test_read_and_download, self.small_data)
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
def test_read_and_download_sdmf_zero_length(self):
    # Zero-length SDMF flavor of the read/download equivalence check.
    d = self.do_upload_empty_sdmf()
    d.addCallback(self._test_read_and_download, "")
    # NOTE(review): a trailing `return d` is likely missing from this
    # excerpt.
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    timeout = 400 # these tests are too big, 120s is not enough on slow
    # NOTE(review): the continuation of the comment above, the
    # `def setUp(self):` line, and a grid-setup call (self.g is used
    # below its creation point) appear to have been dropped from this
    # excerpt -- confirm against the full source.
    GridTestMixin.setUp(self)
    self.basedir = self.mktemp()
    self.c = self.g.clients[0]
    self.nm = self.c.nodemaker
    self.data = "testdata " * 100000 # about 900 KiB; MDMF
    self.small_data = "test data" * 10 # about 90 B; SDMF
def do_upload_sdmf(self):
    # Upload one normal SDMF file, then a second one encoded with
    # n=255/k=127, stashing both nodes on self.
    d = self.nm.create_mutable_file(MutableData(self.small_data))
    # NOTE(review): the `def _then(n):` line (and presumably the
    # assignment of self.sdmf_node) appear to have been dropped from
    # this excerpt.
    assert isinstance(n, MutableFileNode)
    # Make SDMF node that has 255 shares.
    self.nm.default_encoding_parameters['n'] = 255
    self.nm.default_encoding_parameters['k'] = 127
    return self.nm.create_mutable_file(MutableData(self.small_data))
    d.addCallback(_then)
    # NOTE(review): the `def _then2(n):` line appears to have been
    # dropped here as well.
    assert isinstance(n, MutableFileNode)
    self.sdmf_max_shares_node = n
    d.addCallback(_then2)
    # NOTE(review): a trailing `return d` is likely missing here.
def do_upload_mdmf(self):
    # Upload one normal MDMF file, then a second one encoded with
    # n=255/k=127, stashing both nodes on self.
    d = self.nm.create_mutable_file(MutableData(self.data),
                                    version=MDMF_VERSION)
    # NOTE(review): the `def _then(n):` line (and presumably the
    # assignment of self.mdmf_node) appear to have been dropped from
    # this excerpt.
    assert isinstance(n, MutableFileNode)
    # Make MDMF node that has 255 shares.
    self.nm.default_encoding_parameters['n'] = 255
    self.nm.default_encoding_parameters['k'] = 127
    return self.nm.create_mutable_file(MutableData(self.data),
                                       version=MDMF_VERSION)
    d.addCallback(_then)
    # NOTE(review): the `def _then2(n):` line appears to have been
    # dropped here as well.
    assert isinstance(n, MutableFileNode)
    self.mdmf_max_shares_node = n
    d.addCallback(_then2)
    # NOTE(review): a trailing `return d` is likely missing here.
def _test_replace(self, offset, new_data):
    # Splice `new_data` into the MDMF contents at `offset`, for both
    # the normal and the max-shares node, and verify the download
    # matches the expected spliced plaintext.
    expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
    d0 = self.do_upload_mdmf()
    # NOTE(review): the `def _run(ign):` wrapper for the block below
    # appears to have been dropped from this excerpt (it is
    # registered as a callback at the bottom).
    d = defer.succeed(None)
    for node in (self.mdmf_node, self.mdmf_max_shares_node):
        d.addCallback(lambda ign: node.get_best_mutable_version())
        d.addCallback(lambda mv:
                      mv.update(MutableData(new_data), offset))
        # close around node.
        d.addCallback(lambda ignored, node=node:
                      node.download_best_version())
        def _check(results):
            if results != expected:
                print "got: %s ... %s" % (results[:20], results[-20:])
                print "exp: %s ... %s" % (expected[:20], expected[-20:])
                self.fail("results != expected")
        d.addCallback(_check)
    # NOTE(review): a `return d` (closing _run) and a trailing
    # `return d0` appear to have been dropped from this excerpt.
    d0.addCallback(_run)
def test_append(self):
    # Appending past the current end of the file must grow it by
    # exactly the appended bytes.
    return self._test_replace(len(self.data), "appended")
def test_replace_middle(self):
    # Overwriting a span in the interior of the file must leave the
    # surrounding bytes untouched.
    replace_at = 100
    return self._test_replace(replace_at, "replaced")
def test_replace_beginning(self):
    # Writing at offset 0 must not truncate the remainder of the
    # file.
    return self._test_replace(0, "beginning")
def test_replace_segstart1(self):
    # Fencepost check: a write starting one byte into the second
    # segment.
    offset = 128 * 1024 + 1
    return self._test_replace(offset, "NNNN")
def test_replace_zero_length_beginning(self):
    # A zero-byte write at the start of the file must be a no-op.
    return self._test_replace(0, "")
def test_replace_zero_length_middle(self):
    # A zero-byte write in the interior of the file must be a no-op.
    return self._test_replace(50, "")
def test_replace_zero_length_segstart1(self):
    # A zero-byte write just past a segment boundary must be a
    # no-op.
    offset = 128 * 1024 + 1
    return self._test_replace(offset, "")
def test_replace_and_extend(self):
    # A write that starts inside the file but runs past its end must
    # both replace the tail and extend the file.
    return self._test_replace(100, "modified " * 100000)
def _check_differences(self, got, expected):
    # displaying arbitrary file corruption is tricky for a
    # 1MB file of repeating data, so look for likely places
    # with problems and display them separately
    gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
    expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
    gotspans = ["%d:%d=%s" % (start,end,got[start:end])
                for (start,end) in gotmods]
    expspans = ["%d:%d=%s" % (start,end,expected[start:end])
                for (start,end) in expmods]
    #print "expecting: %s" % expspans
    # NOTE(review): a SEGSIZE binding (used below) and, presumably,
    # an `if got != expected:` guard around this reporting block
    # appear to have been dropped from this excerpt -- as excerpted
    # the EXPECTED/GOT dump and self.fail() would run unconditionally.
    print "differences:"
    for segnum in range(len(expected)//SEGSIZE):
        start = segnum * SEGSIZE
        end = (segnum+1) * SEGSIZE
        got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
        exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
        if got_ends != exp_ends:
            print "expected[%d]: %s" % (start, exp_ends)
            print "got [%d]: %s" % (start, got_ends)
    if expspans != gotspans:
        print "expected: %s" % expspans
        print "got : %s" % gotspans
    open("EXPECTED","wb").write(expected)
    open("GOT","wb").write(got)
    print "wrote data to EXPECTED and GOT"
    self.fail("didn't get expected data")
def test_replace_locations(self):
    # exercise fencepost conditions
    # NOTE(review): a `SEGSIZE = ...` binding appears to have been
    # dropped from this excerpt (SEGSIZE is used on the next line).
    suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
    letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    d0 = self.do_upload_mdmf()
    # NOTE(review): the `def _run(ign):` wrapper for the block below
    # appears to have been dropped from this excerpt.
    expected = self.data
    d = defer.succeed(None)
    for offset in suspects:
        new_data = letters.next()*2 # "AA", then "BB", etc
        expected = expected[:offset]+new_data+expected[offset+2:]
        d.addCallback(lambda ign:
                      self.mdmf_node.get_best_mutable_version())
        def _modify(mv, offset=offset, new_data=new_data):
            # close over 'offset','new_data'
            md = MutableData(new_data)
            return mv.update(md, offset)
        d.addCallback(_modify)
        d.addCallback(lambda ignored:
                      self.mdmf_node.download_best_version())
        d.addCallback(self._check_differences, expected)
    # NOTE(review): a `return d` and a trailing `return d0` appear to
    # have been dropped from this excerpt.
    d0.addCallback(_run)
def test_replace_locations_max_shares(self):
    # exercise fencepost conditions
    # NOTE(review): a `SEGSIZE = ...` binding appears to have been
    # dropped from this excerpt (SEGSIZE is used on the next line).
    suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
    letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
    d0 = self.do_upload_mdmf()
    # NOTE(review): the `def _run(ign):` wrapper for the block below
    # appears to have been dropped from this excerpt.
    expected = self.data
    d = defer.succeed(None)
    for offset in suspects:
        new_data = letters.next()*2 # "AA", then "BB", etc
        expected = expected[:offset]+new_data+expected[offset+2:]
        d.addCallback(lambda ign:
                      self.mdmf_max_shares_node.get_best_mutable_version())
        def _modify(mv, offset=offset, new_data=new_data):
            # close over 'offset','new_data'
            md = MutableData(new_data)
            return mv.update(md, offset)
        d.addCallback(_modify)
        d.addCallback(lambda ignored:
                      self.mdmf_max_shares_node.download_best_version())
        d.addCallback(self._check_differences, expected)
    # NOTE(review): a `return d` and a trailing `return d0` appear to
    # have been dropped from this excerpt.
    d0.addCallback(_run)
def test_append_power_of_two(self):
    # If we attempt to extend a mutable file so that its segment
    # count crosses a power-of-two boundary, the update operation
    # should know how to reencode the file.
    # Note that the data populating self.mdmf_node is about 900 KiB
    # long -- this is 7 segments in the default segment size. So we
    # need to add 2 segments worth of data to push it over a
    # power-of-two boundary.
    segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
    new_data = self.data + (segment * 2)
    d0 = self.do_upload_mdmf()
    # NOTE(review): the `def _run(ign):` wrapper for the block below
    # appears to have been dropped from this excerpt.
    d = defer.succeed(None)
    for node in (self.mdmf_node, self.mdmf_max_shares_node):
        d.addCallback(lambda ign: node.get_best_mutable_version())
        d.addCallback(lambda mv:
                      mv.update(MutableData(segment * 2), len(self.data)))
        d.addCallback(lambda ignored, node=node:
                      node.download_best_version())
        d.addCallback(lambda results:
                      self.failUnlessEqual(results, new_data))
    # NOTE(review): a `return d` and a trailing `return d0` appear to
    # have been dropped from this excerpt.
    d0.addCallback(_run)
def test_update_sdmf(self):
    # Running update on a single-segment file should still work.
    new_data = self.small_data + "appended"
    d0 = self.do_upload_sdmf()
    # NOTE(review): the `def _run(ign):` wrapper for the block below
    # appears to have been dropped from this excerpt.
    d = defer.succeed(None)
    for node in (self.sdmf_node, self.sdmf_max_shares_node):
        d.addCallback(lambda ign: node.get_best_mutable_version())
        d.addCallback(lambda mv:
                      mv.update(MutableData("appended"), len(self.small_data)))
        d.addCallback(lambda ignored, node=node:
                      node.download_best_version())
        d.addCallback(lambda results:
                      self.failUnlessEqual(results, new_data))
    # NOTE(review): a `return d` and a trailing `return d0` appear to
    # have been dropped from this excerpt.
    d0.addCallback(_run)
def test_replace_in_last_segment(self):
    # The wrapper should know how to handle the tail segment.
    replace_offset = len(self.data) - 100
    new_data = self.data[:replace_offset] + "replaced"
    rest_offset = replace_offset + len("replaced")
    new_data += self.data[rest_offset:]
    d0 = self.do_upload_mdmf()
    # NOTE(review): the `def _run(ign):` wrapper for the block below
    # appears to have been dropped from this excerpt.
    d = defer.succeed(None)
    for node in (self.mdmf_node, self.mdmf_max_shares_node):
        d.addCallback(lambda ign: node.get_best_mutable_version())
        d.addCallback(lambda mv:
                      mv.update(MutableData("replaced"), replace_offset))
        d.addCallback(lambda ignored, node=node:
                      node.download_best_version())
        d.addCallback(lambda results:
                      self.failUnlessEqual(results, new_data))
    # NOTE(review): a `return d` and a trailing `return d0` appear to
    # have been dropped from this excerpt.
    d0.addCallback(_run)
def test_multiple_segment_replace(self):
    # Replace a span that covers two whole segments plus a short
    # tail, and verify the rest of the file is preserved.
    replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
    new_data = self.data[:replace_offset]
    new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
    new_data += 2 * new_segment
    new_data += "replaced"
    rest_offset = len(new_data)
    new_data += self.data[rest_offset:]
    d0 = self.do_upload_mdmf()
    # NOTE(review): the `def _run(ign):` wrapper for the block below
    # appears to have been dropped from this excerpt.
    d = defer.succeed(None)
    for node in (self.mdmf_node, self.mdmf_max_shares_node):
        d.addCallback(lambda ign: node.get_best_mutable_version())
        d.addCallback(lambda mv:
                      mv.update(MutableData((2 * new_segment) + "replaced"),
        # NOTE(review): the `replace_offset))` line closing the call
        # above appears to have been dropped from this excerpt.
        d.addCallback(lambda ignored, node=node:
                      node.download_best_version())
        d.addCallback(lambda results:
                      self.failUnlessEqual(results, new_data))
    # NOTE(review): a `return d` and a trailing `return d0` appear to
    # have been dropped from this excerpt.
    d0.addCallback(_run)
3643 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3644 sdmf_old_shares = {}
3645 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3646 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3647 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3648 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3649 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3650 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3651 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3652 sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3653 sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3654 sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
# The write cap corresponding to the canned sdmf_old_shares above, and the
# plaintext those shares decode to.  Together they let the tests verify that
# a current client can still read shares produced by an old SDMF writer.
sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
sdmf_old_contents = "This is a test file.\n"
def copy_sdmf_shares(self):
    """
    Plant the canned pre-generated SDMF shares directly into the grid's
    server storage directories, short-circuiting the normal upload process.

    Each of the 10 canned shares is assigned to one of the 10 grid servers,
    base64-decoded, and written into that server's share directory for the
    canned cap's storage index.  Assumes self.set_up_grid() has already
    created a 10-server grid (self.g).
    """
    servernums = self.g.servers_by_number.keys()
    assert len(servernums) == 10

    # Pair each share number with a distinct server.
    assignments = zip(self.sdmf_old_shares.keys(), servernums)
    # Get the storage index from the canned cap; it determines the on-disk
    # directory the storage server expects the shares to live in.
    cap = uri.from_string(self.sdmf_old_cap)
    si = cap.get_storage_index()

    # Now execute each assignment by writing the share data into place.
    for (share, servernum) in assignments:
        sharedata = base64.b64decode(self.sdmf_old_shares[share])
        storedir = self.get_serverdir(servernum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        fileutil.write(os.path.join(storage_path, "%d" % share),
                       sharedata)
    # ...and verify that the shares are there.
    shares = self.find_uri_shares(self.sdmf_old_cap)
    assert len(shares) == 10
def test_new_downloader_can_read_old_shares(self):
    """
    Verify that the current downloader can read SDMF shares written by an
    old version of the code: plant the canned shares, then download the
    best version and compare it to the known plaintext.
    """
    self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
    # Build the 10-server grid before planting shares; copy_sdmf_shares()
    # reads self.g and the per-server storage directories.
    self.set_up_grid()
    self.copy_sdmf_shares()
    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(self.sdmf_old_cap)
    d = n.download_best_version()
    d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
    # Return the Deferred so trial waits for the download and the
    # comparison actually gates the test result.
    return d
3690 class DifferentEncoding(unittest.TestCase):
3692 self._storage = s = FakeStorage()
3693 self.nodemaker = make_nodemaker(s)
3695 def test_filenode(self):
3696 # create a file with 3-of-20, then modify it with a client configured
3697 # to do 3-of-10. #1510 tracks a failure here
3698 self.nodemaker.default_encoding_parameters["n"] = 20
3699 d = self.nodemaker.create_mutable_file("old contents")
3701 filecap = n.get_cap().to_string()
3702 del n # we want a new object, not the cached one
3703 self.nodemaker.default_encoding_parameters["n"] = 10
3704 n2 = self.nodemaker.create_from_cap(filecap)
3706 d.addCallback(_created)
3707 def modifier(old_contents, servermap, first_time):
3708 return "new contents"
3709 d.addCallback(lambda n: n.modify(modifier))