3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from twisted.internet.interfaces import IConsumer
7 from zope.interface import implements
8 from allmydata import uri, client
9 from allmydata.nodemaker import NodeMaker
10 from allmydata.util import base32, consumer, fileutil, mathutil
11 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
12 ssk_pubkey_fingerprint_hash
13 from allmydata.util.deferredutil import gatherResults
14 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
15 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
16 from allmydata.monitor import Monitor
17 from allmydata.test.common import ShouldFailMixin
18 from allmydata.test.no_network import GridTestMixin
19 from foolscap.api import eventually, fireEventually
20 from foolscap.logging import log
21 from allmydata.storage_client import StorageFarmBroker
22 from allmydata.storage.common import storage_index_to_dir
24 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
25 from allmydata.mutable.common import ResponseCache, \
26 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
27 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
28 NotEnoughServersError, CorruptShareError
29 from allmydata.mutable.retrieve import Retrieve
30 from allmydata.mutable.publish import Publish, MutableFileHandle, \
32 DEFAULT_MAX_SEGMENT_SIZE
33 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
34 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
35 from allmydata.mutable.repairer import MustForceRepairError
37 import allmydata.test.common_util as testutil
38 from allmydata.test.common import TEST_RSA_KEY_SIZE
41 # this "FakeStorage" exists to put the share data in RAM and avoid using real
42 # network connections, both to speed up the tests and to reduce the amount of
43 # non-mutable.py code being exercised.
46 # this class replaces the collection of storage servers, allowing the
47 # tests to examine and manipulate the published shares. It also lets us
48 # control the order in which read queries are answered, to exercise more
49 # of the error-handling code in Retrieve .
51 # Note that we ignore the storage index: this FakeStorage instance can
52 # only be used for a single storage index.
57 # _sequence is used to cause the responses to occur in a specific
58 # order. If it is in use, then we will defer queries instead of
59 # answering them right away, accumulating the Deferreds in a dict. We
60 # don't know exactly how many queries we'll get, so exactly one
61 # second after the first query arrives, we will release them all (in
# NOTE(review): this excerpt is gappy — the `class FakeStorage` statement and
# most of __init__ are elided (original line numbers jump 61 -> 65), so the
# initialization of _peers/_sequence/_pending is not visible here. Code below
# is kept verbatim.
65 self._pending_timer = None
# read(): return this peer's shares immediately, or — when self._sequence is
# in use — stash a Deferred in self._pending and start a 1-second timer that
# will release every pending read at once via _fire_readers.
# NOTE(review): the lines creating `d` (presumably a Deferred) and returning
# it are elided here (numbering jumps 70 -> 73) — confirm against upstream.
67 def read(self, peerid, storage_index):
68 shares = self._peers.get(peerid, {})
69 if self._sequence is None:
70 return defer.succeed(shares)
73 self._pending_timer = reactor.callLater(1.0, self._fire_readers)
74 self._pending[peerid] = (d, shares)
# _fire_readers(): answer the deferred reads listed in self._sequence first
# (in that order), then any leftovers in dict-iteration order. eventually()
# keeps each callback on a fresh reactor turn.
77 def _fire_readers(self):
78 self._pending_timer = None
79 pending = self._pending
81 for peerid in self._sequence:
83 d, shares = pending.pop(peerid)
84 eventually(d.callback, shares)
85 for (d, shares) in pending.values():
86 eventually(d.callback, shares)
# write(): splice `data` into the peer's share string at `offset` by way of a
# file-like buffer; missing shares default to "".
# NOTE(review): the lines that construct `f` (presumably a StringIO), seek to
# `offset` and write `data` are elided (numbering jumps 91 -> 93 -> 96).
88 def write(self, peerid, storage_index, shnum, offset, data):
89 if peerid not in self._peers:
90 self._peers[peerid] = {}
91 shares = self._peers[peerid]
93 f.write(shares.get(shnum, ""))
96 shares[shnum] = f.getvalue()
# In-RAM stand-in for a remote storage server. It forwards "remote" calls to
# local methods backed by a shared FakeStorage instance, so the mutable-file
# code under test never touches the network.
# NOTE(review): this excerpt is gappy (original numbering jumps throughout);
# e.g. the `self.peerid = peerid` assignment, the Deferred setup inside
# callRemote, the return statements, and the loop headers inside slot_readv /
# the writev handler are all elided. Code kept verbatim.
99 class FakeStorageServer:
100 def __init__(self, peerid, storage):
102 self.storage = storage
# callRemote(): dispatch by method name; the (elided) Deferred plumbing defers
# the actual call to a later reactor turn, mimicking a network round trip.
104 def callRemote(self, methname, *args, **kwargs):
107 meth = getattr(self, methname)
108 return meth(*args, **kwargs)
110 d.addCallback(lambda res: _call())
# callRemoteOnly(): fire-and-forget variant — results and errors are dropped.
113 def callRemoteOnly(self, methname, *args, **kwargs):
115 d = self.callRemote(methname, *args, **kwargs)
116 d.addBoth(lambda ignore: None)
119 def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
# slot_readv(): read the requested byte ranges out of each stored share
# (restricted to `shnums` when given).
122 def slot_readv(self, storage_index, shnums, readv):
123 d = self.storage.read(self.peerid, storage_index)
127 if shnums and shnum not in shnums:
129 vector = response[shnum] = []
130 for (offset, length) in readv:
131 assert isinstance(offset, (int, long)), offset
132 assert isinstance(length, (int, long)), length
133 vector.append(shares[shnum][offset:offset+length])
# slot_testv_and_readv_and_writev(): always-pass test-and-set — it echoes the
# test-vector specimens back as the read result and applies every write.
138 def slot_testv_and_readv_and_writev(self, storage_index, secrets,
139 tw_vectors, read_vector):
140 # always-pass: parrot the test vectors back to them.
142 for shnum, (testv, writev, new_length) in tw_vectors.items():
143 for (offset, length, op, specimen) in testv:
144 assert op in ("le", "eq", "ge")
145 # TODO: this isn't right, the read is controlled by read_vector,
147 readv[shnum] = [ specimen
148 for (offset, length, op, specimen)
150 for (offset, data) in writev:
151 self.storage.write(self.peerid, storage_index, shnum,
153 answer = (True, readv)
154 return fireEventually(answer)
def flip_bit(original, byte_offset):
    """Return a copy of `original` with the low bit of one byte toggled.

    Used by the tests to introduce a single-bit corruption into share data.
    """
    tweaked = chr(ord(original[byte_offset]) ^ 0x01)
    return original[:byte_offset] + tweaked + original[byte_offset+1:]
def add_two(original, byte_offset):
    # It isn't enough to simply flip the bit for the version number,
    # because 1 is a valid version number. So we add two instead.
    # (Implemented as XOR with 0x02: it maps the legal version bytes
    # 0 and 1 to the illegal values 2 and 3.)
    tweaked = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + tweaked + original[byte_offset+1:]
# corrupt(): damage the shares held in FakeStorage `s` at a given offset, to
# let tests exercise the download/repair error paths. `offset` may name a
# layout field (looked up in the share's offset table) or be a raw byte
# position; a tuple (offset1, offset2) adds a relative adjustment.
# NOTE(review): several lines are elided in this excerpt (the `ds` accumulator,
# the per-share loop header, the choice of corruption function `f` — presumably
# flip_bit or add_two for the verbyte case — and parts of the verinfo unpack).
# Code kept verbatim.
169 def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
170 # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
171 # list of shnums to corrupt.
173 for peerid in s._peers:
174 shares = s._peers[peerid]
176 if (shnums_to_corrupt is not None
177 and shnum not in shnums_to_corrupt):
180 # We're feeding the reader all of the share data, so it
181 # won't need to use the rref that we didn't provide, nor the
182 # storage index that we didn't provide. We do this because
183 # the reader will work for both MDMF and SDMF.
184 reader = MDMFSlotReadProxy(None, None, shnum, data)
185 # We need to get the offsets for the next part.
186 d = reader.get_verinfo()
187 def _do_corruption(verinfo, data, shnum):
193 k, n, prefix, o) = verinfo
194 if isinstance(offset, tuple):
195 offset1, offset2 = offset
199 if offset1 == "pubkey" and IV:
202 real_offset = o[offset1]
204 real_offset = offset1
205 real_offset = int(real_offset) + offset2 + offset_offset
206 assert isinstance(real_offset, int), offset
207 if offset1 == 0: # verbyte
211 shares[shnum] = f(data, real_offset)
212 d.addCallback(_do_corruption, data, shnum)
214 dl = defer.DeferredList(ds)
215 dl.addCallback(lambda ignored: res)
# make_storagebroker(): build a StorageFarmBroker populated with `num_peers`
# FakeStorageServer instances, all sharing one FakeStorage backend `s`.
# Peer ids are deterministic 20-byte tagged hashes, so test runs are stable.
# NOTE(review): the default handling when s is None (presumably creating a
# fresh FakeStorage) is elided here — numbering jumps 218 -> 221.
218 def make_storagebroker(s=None, num_peers=10):
221 peerids = [tagged_hash("peerid", "%d" % i)[:20]
222 for i in range(num_peers)]
223 storage_broker = StorageFarmBroker(None, True)
224 for peerid in peerids:
225 fss = FakeStorageServer(peerid, s)
226 storage_broker.test_add_rref(peerid, fss)
227 return storage_broker
# make_nodemaker(): build a NodeMaker wired to a fake storage broker, with a
# fixed lease/convergence SecretHolder, the small TEST_RSA_KEY_SIZE keys, and
# 3-of-10 default encoding.
# NOTE(review): one NodeMaker constructor argument line and the trailing
# `return nodemaker` appear to be elided here (numbering jumps 234 -> 236
# and stops at 236).
229 def make_nodemaker(s=None, num_peers=10):
230 storage_broker = make_storagebroker(s, num_peers)
231 sh = client.SecretHolder("lease secret", "convergence secret")
232 keygen = client.KeyGenerator()
233 keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
234 nodemaker = NodeMaker(storage_broker, sh, None,
236 {"k": 3, "n": 10}, keygen)
# Filenode: unit tests for MutableFileNode create/read/write/modify behavior,
# run entirely against the in-RAM FakeStorage grid.
# NOTE(review): the `def setUp(self):` header (original line 244) is elided
# in this excerpt; the two assignments below are its body.
239 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
240 # this used to be in Publish, but we removed the limit. Some of
241 # these tests test whether the new code correctly allows files
242 # larger than the limit.
243 OLD_MAX_SEGMENT_SIZE = 3500000
245 self._storage = s = FakeStorage()
246 self.nodemaker = make_nodemaker(s)
# test_create / test_create_mdmf: create a mutable file (SDMF default / MDMF)
# and check that exactly one share lands on the first server of the 10-peer
# fake grid (k=3, n=10 defaults spread one share per server).
# NOTE(review): the `def _created(n):` callback headers and trailing
# `return d` lines are elided in this excerpt; code kept verbatim.
248 def test_create(self):
249 d = self.nodemaker.create_mutable_file()
251 self.failUnless(isinstance(n, MutableFileNode))
252 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
253 sb = self.nodemaker.storage_broker
254 peer0 = sorted(sb.get_all_serverids())[0]
255 shnums = self._storage._peers[peer0].keys()
256 self.failUnlessEqual(len(shnums), 1)
257 d.addCallback(_created)
259 test_create.timeout = 15
262 def test_create_mdmf(self):
263 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
265 self.failUnless(isinstance(n, MutableFileNode))
266 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
267 sb = self.nodemaker.storage_broker
268 peer0 = sorted(sb.get_all_serverids())[0]
269 shnums = self._storage._peers[peer0].keys()
270 self.failUnlessEqual(len(shnums), 1)
271 d.addCallback(_created)
# test_single_share: publish with k = happy = n = 1 for both SDMF and MDMF
# and verify round-trip contents.
# NOTE(review): throughout this region the `def _created(n):` headers,
# `self._node = n` assignments and `return d` lines are elided; code verbatim.
274 def test_single_share(self):
275 # Make sure that we tolerate publishing a single share.
276 self.nodemaker.default_encoding_parameters['k'] = 1
277 self.nodemaker.default_encoding_parameters['happy'] = 1
278 self.nodemaker.default_encoding_parameters['n'] = 1
279 d = defer.succeed(None)
280 for v in (SDMF_VERSION, MDMF_VERSION):
281 d.addCallback(lambda ignored:
282 self.nodemaker.create_mutable_file(version=v))
284 self.failUnless(isinstance(n, MutableFileNode))
287 d.addCallback(_created)
288 d.addCallback(lambda n:
289 n.overwrite(MutableData("Contents" * 50000)))
290 d.addCallback(lambda ignored:
291 self._node.download_best_version())
292 d.addCallback(lambda contents:
293 self.failUnlessEqual(contents, "Contents" * 50000))
# test_max_shares / test_max_shares_mdmf: n=255 (the protocol maximum);
# verify 255 shares exist across all servers and contents round-trip.
296 def test_max_shares(self):
297 self.nodemaker.default_encoding_parameters['n'] = 255
298 d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
300 self.failUnless(isinstance(n, MutableFileNode))
301 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
302 sb = self.nodemaker.storage_broker
303 num_shares = sum([len(self._storage._peers[x].keys()) for x \
304 in sb.get_all_serverids()])
305 self.failUnlessEqual(num_shares, 255)
308 d.addCallback(_created)
309 # Now we upload some contents
310 d.addCallback(lambda n:
311 n.overwrite(MutableData("contents" * 50000)))
312 # ...then download contents
313 d.addCallback(lambda ignored:
314 self._node.download_best_version())
315 # ...and check to make sure everything went okay.
316 d.addCallback(lambda contents:
317 self.failUnlessEqual("contents" * 50000, contents))
320 def test_max_shares_mdmf(self):
321 # Test how files behave when there are 255 shares.
322 self.nodemaker.default_encoding_parameters['n'] = 255
323 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
325 self.failUnless(isinstance(n, MutableFileNode))
326 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
327 sb = self.nodemaker.storage_broker
328 num_shares = sum([len(self._storage._peers[x].keys()) for x \
329 in sb.get_all_serverids()])
330 self.failUnlessEqual(num_shares, 255)
333 d.addCallback(_created)
334 d.addCallback(lambda n:
335 n.overwrite(MutableData("contents" * 50000)))
336 d.addCallback(lambda ignored:
337 self._node.download_best_version())
338 d.addCallback(lambda contents:
339 self.failUnlessEqual(contents, "contents" * 50000))
# Cap-handling tests: an MDMF filenode must hand out MDMF-flavored write/read/
# verify caps, and the nodemaker must be able to rebuild an equivalent node
# from each cap form, with and without the ":k:segsize" extension suffix.
# NOTE(review): `def _created(n):` headers, cap-fetch lines (e.g. the
# `cap = n.get_cap()` / `s = n.get_uri()` assignments) and `return d` lines
# are elided throughout; code verbatim.
342 def test_mdmf_filenode_cap(self):
343 # Test that an MDMF filenode, once created, returns an MDMF URI.
344 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
346 self.failUnless(isinstance(n, MutableFileNode))
348 self.failUnless(isinstance(cap, uri.WritableMDMFFileURI))
349 rcap = n.get_readcap()
350 self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
351 vcap = n.get_verify_cap()
352 self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
353 d.addCallback(_created)
357 def test_create_from_mdmf_writecap(self):
358 # Test that the nodemaker is capable of creating an MDMF
359 # filenode given an MDMF cap.
360 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
362 self.failUnless(isinstance(n, MutableFileNode))
364 self.failUnless(s.startswith("URI:MDMF"))
365 n2 = self.nodemaker.create_from_cap(s)
366 self.failUnless(isinstance(n2, MutableFileNode))
367 self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
368 self.failUnlessEqual(n.get_uri(), n2.get_uri())
369 d.addCallback(_created)
373 def test_create_from_mdmf_writecap_with_extensions(self):
374 # Test that the nodemaker is capable of creating an MDMF
375 # filenode when given a writecap with extension parameters in
377 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
379 self.failUnless(isinstance(n, MutableFileNode))
381 # We need to cheat a little and delete the nodemaker's
382 # cache, otherwise we'll get the same node instance back.
383 self.failUnlessIn(":3:131073", s)
384 n2 = self.nodemaker.create_from_cap(s)
386 self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
387 self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
388 hints = n2._downloader_hints
389 self.failUnlessEqual(hints['k'], 3)
390 self.failUnlessEqual(hints['segsize'], 131073)
391 d.addCallback(_created)
395 def test_create_from_mdmf_readcap(self):
396 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
398 self.failUnless(isinstance(n, MutableFileNode))
399 s = n.get_readonly_uri()
400 n2 = self.nodemaker.create_from_cap(s)
401 self.failUnless(isinstance(n2, MutableFileNode))
403 # Check that it's a readonly node
404 self.failUnless(n2.is_readonly())
405 d.addCallback(_created)
409 def test_create_from_mdmf_readcap_with_extensions(self):
410 # We should be able to create an MDMF filenode with the
411 # extension parameters without it breaking.
412 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
414 self.failUnless(isinstance(n, MutableFileNode))
415 s = n.get_readonly_uri()
416 self.failUnlessIn(":3:131073", s)
418 n2 = self.nodemaker.create_from_cap(s)
419 self.failUnless(isinstance(n2, MutableFileNode))
420 self.failUnless(n2.is_readonly())
421 self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
422 hints = n2._downloader_hints
423 self.failUnlessEqual(hints["k"], 3)
424 self.failUnlessEqual(hints["segsize"], 131073)
425 d.addCallback(_created)
429 def test_internal_version_from_cap(self):
430 # MutableFileNodes and MutableFileVersions have an internal
431 # switch that tells them whether they're dealing with an SDMF or
432 # MDMF mutable file when they start doing stuff. We want to make
433 # sure that this is set appropriately given an MDMF cap.
434 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
436 self.uri = n.get_uri()
437 self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
439 n2 = self.nodemaker.create_from_cap(self.uri)
440 self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
441 d.addCallback(_created)
# test_serialize: MutableFileNode._do_serialized must run callbacks in order
# with their args/kwargs, propagate return values, and convert raised
# exceptions into Deferred failures.
# NOTE(review): the `calls` list setup, the callback's append/return lines,
# the `def _errback():` header and the trailing `return d` are elided.
445 def test_serialize(self):
446 n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
448 def _callback(*args, **kwargs):
449 self.failUnlessEqual(args, (4,) )
450 self.failUnlessEqual(kwargs, {"foo": 5})
453 d = n._do_serialized(_callback, 4, foo=5)
454 def _check_callback(res):
455 self.failUnlessEqual(res, 6)
456 self.failUnlessEqual(calls, [1])
457 d.addCallback(_check_callback)
460 raise ValueError("heya")
461 d.addCallback(lambda res:
462 self.shouldFail(ValueError, "_check_errback", "heya",
463 n._do_serialized, _errback))
# test_upload_and_download: full SDMF life cycle — servermap dump shows
# "3-of-10", overwrite/download round-trips, upload against a MODE_WRITE
# servermap, download of a specific recoverable version, and a 15kB file to
# defeat the mapupdate-to-retrieve read cache.
# NOTE(review): `def _created(n):` headers and `return d` lines elided.
466 def test_upload_and_download(self):
467 d = self.nodemaker.create_mutable_file()
469 d = defer.succeed(None)
470 d.addCallback(lambda res: n.get_servermap(MODE_READ))
471 d.addCallback(lambda smap: smap.dump(StringIO()))
472 d.addCallback(lambda sio:
473 self.failUnless("3-of-10" in sio.getvalue()))
474 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
475 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
476 d.addCallback(lambda res: n.download_best_version())
477 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
478 d.addCallback(lambda res: n.get_size_of_best_version())
479 d.addCallback(lambda size:
480 self.failUnlessEqual(size, len("contents 1")))
481 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
482 d.addCallback(lambda res: n.download_best_version())
483 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
484 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
485 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
486 d.addCallback(lambda res: n.download_best_version())
487 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
488 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
489 d.addCallback(lambda smap:
490 n.download_version(smap,
491 smap.best_recoverable_version()))
492 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
493 # test a file that is large enough to overcome the
494 # mapupdate-to-retrieve data caching (i.e. make the shares larger
495 # than the default readsize, which is 2000 bytes). A 15kB file
496 # will have 5kB shares.
497 d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
498 d.addCallback(lambda res: n.download_best_version())
499 d.addCallback(lambda res:
500 self.failUnlessEqual(res, "large size file" * 1000))
502 d.addCallback(_created)
# test_upload_and_download_mdmf: same round trips for MDMF, with ~900KiB and
# ~9MiB payloads to force multi-segment upload and download.
506 def test_upload_and_download_mdmf(self):
507 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
509 d = defer.succeed(None)
510 d.addCallback(lambda ignored:
511 n.get_servermap(MODE_READ))
512 def _then(servermap):
513 dumped = servermap.dump(StringIO())
514 self.failUnlessIn("3-of-10", dumped.getvalue())
516 # Now overwrite the contents with some new contents. We want
517 # to make them big enough to force the file to be uploaded
518 # in more than one segment.
519 big_contents = "contents1" * 100000 # about 900 KiB
520 big_contents_uploadable = MutableData(big_contents)
521 d.addCallback(lambda ignored:
522 n.overwrite(big_contents_uploadable))
523 d.addCallback(lambda ignored:
524 n.download_best_version())
525 d.addCallback(lambda data:
526 self.failUnlessEqual(data, big_contents))
527 # Overwrite the contents again with some new contents. As
528 # before, they need to be big enough to force multiple
529 # segments, so that we make the downloader deal with
531 bigger_contents = "contents2" * 1000000 # about 9MiB
532 bigger_contents_uploadable = MutableData(bigger_contents)
533 d.addCallback(lambda ignored:
534 n.overwrite(bigger_contents_uploadable))
535 d.addCallback(lambda ignored:
536 n.download_best_version())
537 d.addCallback(lambda data:
538 self.failUnlessEqual(data, bigger_contents))
540 d.addCallback(_created)
# test_retrieve_pause: read an MDMF file into a PausingConsumer (defined
# elsewhere in the file) to prove the retriever honors pause/resume.
# NOTE(review): `def _created(node):` headers, `self.node = node` assignments
# and `return d` lines are elided throughout this region; code verbatim.
544 def test_retrieve_pause(self):
545 # We should make sure that the retriever is able to pause
547 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
551 return node.overwrite(MutableData("contents1" * 100000))
552 d.addCallback(_created)
553 # Now we'll retrieve it into a pausing consumer.
554 d.addCallback(lambda ignored:
555 self.node.get_best_mutable_version())
556 def _got_version(version):
557 self.c = PausingConsumer()
558 return version.read(self.c)
559 d.addCallback(_got_version)
560 d.addCallback(lambda ignored:
561 self.failUnlessEqual(self.c.data, "contents1" * 100000))
563 test_retrieve_pause.timeout = 25
# test_download_from_mdmf_cap: rebuild a node from its cap and download.
566 def test_download_from_mdmf_cap(self):
567 # We should be able to download an MDMF file given its cap
568 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
570 self.uri = node.get_uri()
572 return node.overwrite(MutableData("contents1" * 100000))
574 node = self.nodemaker.create_from_cap(self.uri)
575 return node.download_best_version()
576 def _downloaded(data):
577 self.failUnlessEqual(data, "contents1" * 100000)
578 d.addCallback(_created)
580 d.addCallback(_downloaded)
# test_create_and_download_from_bare_mdmf_cap: strip the ":k:segsize"
# extension suffix off an MDMF cap and confirm the bare cap still yields the
# same keys/storage index and downloads correctly.
584 def test_create_and_download_from_bare_mdmf_cap(self):
585 # MDMF caps have extension parameters on them by default. We
586 # need to make sure that they work without extension parameters.
587 contents = MutableData("contents" * 100000)
588 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
593 self.failUnlessIn(":3:131073", uri)
594 # Now strip that off the end of the uri, then try creating
595 # and downloading the node again.
596 bare_uri = uri.replace(":3:131073", "")
597 assert ":3:131073" not in bare_uri
599 return self.nodemaker.create_from_cap(bare_uri)
600 d.addCallback(_created)
601 def _created_bare(node):
602 self.failUnlessEqual(node.get_writekey(),
603 self._created.get_writekey())
604 self.failUnlessEqual(node.get_readkey(),
605 self._created.get_readkey())
606 self.failUnlessEqual(node.get_storage_index(),
607 self._created.get_storage_index())
608 return node.download_best_version()
609 d.addCallback(_created_bare)
610 d.addCallback(lambda data:
611 self.failUnlessEqual(data, "contents" * 100000))
# test_mdmf_write_count: publishing MDMF must issue exactly one write query
# per server, checked via the fake rref's `queries` counter.
615 def test_mdmf_write_count(self):
616 # Publishing an MDMF file should only cause one write for each
617 # share that is to be published. Otherwise, we introduce
618 # undesirable semantics that are a regression from SDMF
619 upload = MutableData("MDMF" * 100000) # about 400 KiB
620 d = self.nodemaker.create_mutable_file(upload,
621 version=MDMF_VERSION)
622 def _check_server_write_counts(ignored):
623 sb = self.nodemaker.storage_broker
624 for server in sb.servers.itervalues():
625 self.failUnlessEqual(server.get_rref().queries, 1)
626 d.addCallback(_check_server_write_counts)
# Initial-contents tests: create_mutable_file may be given data (or a callable
# producing data) up front; verify the data round-trips, can be overwritten,
# and that oversized SDMF contents are accepted now that the old segment-size
# limit is gone.
# NOTE(review): `def _created(n):` headers and `return d` lines are elided
# throughout; code verbatim.
630 def test_create_with_initial_contents(self):
631 upload1 = MutableData("contents 1")
632 d = self.nodemaker.create_mutable_file(upload1)
634 d = n.download_best_version()
635 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
636 upload2 = MutableData("contents 2")
637 d.addCallback(lambda res: n.overwrite(upload2))
638 d.addCallback(lambda res: n.download_best_version())
639 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
641 d.addCallback(_created)
643 test_create_with_initial_contents.timeout = 15
646 def test_create_mdmf_with_initial_contents(self):
647 initial_contents = "foobarbaz" * 131072 # 900KiB
648 initial_contents_uploadable = MutableData(initial_contents)
649 d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
650 version=MDMF_VERSION)
652 d = n.download_best_version()
653 d.addCallback(lambda data:
654 self.failUnlessEqual(data, initial_contents))
655 uploadable2 = MutableData(initial_contents + "foobarbaz")
656 d.addCallback(lambda ignored:
657 n.overwrite(uploadable2))
658 d.addCallback(lambda ignored:
659 n.download_best_version())
660 d.addCallback(lambda data:
661 self.failUnlessEqual(data, initial_contents +
664 d.addCallback(_created)
666 test_create_mdmf_with_initial_contents.timeout = 20
# test_response_cache_memory_leak: a second download must not grow the
# node's ResponseCache — repr of the cache is compared before/after.
669 def test_response_cache_memory_leak(self):
670 d = self.nodemaker.create_mutable_file("contents")
672 d = n.download_best_version()
673 d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
674 d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
676 def _check_cache(expected):
677 # The total size of cache entries should not increase on the second download;
678 # in fact the cache contents should be identical.
679 d2 = n.download_best_version()
680 d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
682 d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
684 d.addCallback(_created)
# Contents-function variants: the callable receives the freshly-made node
# (so it can read the 16-byte AES writekey) and returns the uploadable.
687 def test_create_with_initial_contents_function(self):
688 data = "initial contents"
689 def _make_contents(n):
690 self.failUnless(isinstance(n, MutableFileNode))
691 key = n.get_writekey()
692 self.failUnless(isinstance(key, str), key)
693 self.failUnlessEqual(len(key), 16) # AES key size
694 return MutableData(data)
695 d = self.nodemaker.create_mutable_file(_make_contents)
697 return n.download_best_version()
698 d.addCallback(_created)
699 d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
703 def test_create_mdmf_with_initial_contents_function(self):
704 data = "initial contents" * 100000
705 def _make_contents(n):
706 self.failUnless(isinstance(n, MutableFileNode))
707 key = n.get_writekey()
708 self.failUnless(isinstance(key, str), key)
709 self.failUnlessEqual(len(key), 16)
710 return MutableData(data)
711 d = self.nodemaker.create_mutable_file(_make_contents,
712 version=MDMF_VERSION)
713 d.addCallback(lambda n:
714 n.download_best_version())
715 d.addCallback(lambda data2:
716 self.failUnlessEqual(data2, data))
720 def test_create_with_too_large_contents(self):
721 BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
722 BIG_uploadable = MutableData(BIG)
723 d = self.nodemaker.create_mutable_file(BIG_uploadable)
725 other_BIG_uploadable = MutableData(BIG)
726 d = n.overwrite(other_BIG_uploadable)
728 d.addCallback(_created)
# failUnlessCurrentSeqnumIs: helper — assert the best recoverable version of
# node `n` carries sequence number `expected_seqnum`; `which` labels the
# failure message. verinfo[0] is the seqnum.
# NOTE(review): the trailing `return d` appears to be elided.
731 def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
732 d = n.get_servermap(MODE_READ)
733 d.addCallback(lambda servermap: servermap.best_recoverable_version())
734 d.addCallback(lambda verinfo:
735 self.failUnlessEqual(verinfo[0], expected_seqnum, which))
# test_modify: exercise MutableFileNode.modify with a range of modifier
# callbacks — a real change, no-ops (same/None contents), an exception,
# oversized contents, and simulated one-shot UncoordinatedWriteErrors — and
# check the resulting contents and seqnum after each.
# NOTE(review): several modifier bodies and return lines, the `calls` list
# setup, `def _created(n):` headers and `return d` lines are elided in this
# excerpt; code verbatim.
738 def test_modify(self):
739 def _modifier(old_contents, servermap, first_time):
740 new_contents = old_contents + "line2"
742 def _non_modifier(old_contents, servermap, first_time):
744 def _none_modifier(old_contents, servermap, first_time):
746 def _error_modifier(old_contents, servermap, first_time):
747 raise ValueError("oops")
748 def _toobig_modifier(old_contents, servermap, first_time):
749 new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
752 def _ucw_error_modifier(old_contents, servermap, first_time):
753 # simulate an UncoordinatedWriteError once
756 raise UncoordinatedWriteError("simulated")
757 new_contents = old_contents + "line3"
759 def _ucw_error_non_modifier(old_contents, servermap, first_time):
760 # simulate an UncoordinatedWriteError once, and don't actually
761 # modify the contents on subsequent invocations
764 raise UncoordinatedWriteError("simulated")
767 initial_contents = "line1"
768 d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
770 d = n.modify(_modifier)
771 d.addCallback(lambda res: n.download_best_version())
772 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
773 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
775 d.addCallback(lambda res: n.modify(_non_modifier))
776 d.addCallback(lambda res: n.download_best_version())
777 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
778 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
780 d.addCallback(lambda res: n.modify(_none_modifier))
781 d.addCallback(lambda res: n.download_best_version())
782 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
783 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
785 d.addCallback(lambda res:
786 self.shouldFail(ValueError, "error_modifier", None,
787 n.modify, _error_modifier))
788 d.addCallback(lambda res: n.download_best_version())
789 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
790 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
793 d.addCallback(lambda res: n.download_best_version())
794 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
795 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
797 d.addCallback(lambda res: n.modify(_ucw_error_modifier))
798 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
799 d.addCallback(lambda res: n.download_best_version())
800 d.addCallback(lambda res: self.failUnlessEqual(res,
802 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
804 def _reset_ucw_error_modifier(res):
807 d.addCallback(_reset_ucw_error_modifier)
809 # in practice, this n.modify call should publish twice: the first
810 # one gets a UCWE, the second does not. But our test jig (in
811 # which the modifier raises the UCWE) skips over the first one,
812 # so in this test there will be only one publish, and the seqnum
813 # will only be one larger than the previous test, not two (i.e. 4
815 d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
816 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
817 d.addCallback(lambda res: n.download_best_version())
818 d.addCallback(lambda res: self.failUnlessEqual(res,
820 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
821 d.addCallback(lambda res: n.modify(_toobig_modifier))
823 d.addCallback(_created)
825 test_modify.timeout = 15
# test_modify_backoffer: like test_modify, but plugs custom backoffers into
# n.modify — one that aborts immediately on UCWE, one that pauses 0.5s before
# retrying, and a BackoffAgent tuned to give up quickly — and checks contents
# and seqnums after each outcome.
# NOTE(review): backoffer bodies (_backoff_stopper's raise, _backoff_pauser's
# Deferred setup), the `calls` list, `def _created(n):` headers, several
# continuation lines and `return d` lines are elided; code verbatim.
828 def test_modify_backoffer(self):
829 def _modifier(old_contents, servermap, first_time):
830 return old_contents + "line2"
832 def _ucw_error_modifier(old_contents, servermap, first_time):
833 # simulate an UncoordinatedWriteError once
836 raise UncoordinatedWriteError("simulated")
837 return old_contents + "line3"
838 def _always_ucw_error_modifier(old_contents, servermap, first_time):
839 raise UncoordinatedWriteError("simulated")
840 def _backoff_stopper(node, f):
842 def _backoff_pauser(node, f):
844 reactor.callLater(0.5, d.callback, None)
847 # the give-up-er will hit its maximum retry count quickly
848 giveuper = BackoffAgent()
849 giveuper._delay = 0.1
852 d = self.nodemaker.create_mutable_file(MutableData("line1"))
854 d = n.modify(_modifier)
855 d.addCallback(lambda res: n.download_best_version())
856 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
857 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
859 d.addCallback(lambda res:
860 self.shouldFail(UncoordinatedWriteError,
861 "_backoff_stopper", None,
862 n.modify, _ucw_error_modifier,
864 d.addCallback(lambda res: n.download_best_version())
865 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
866 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
868 def _reset_ucw_error_modifier(res):
871 d.addCallback(_reset_ucw_error_modifier)
872 d.addCallback(lambda res: n.modify(_ucw_error_modifier,
874 d.addCallback(lambda res: n.download_best_version())
875 d.addCallback(lambda res: self.failUnlessEqual(res,
877 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
879 d.addCallback(lambda res:
880 self.shouldFail(UncoordinatedWriteError,
882 n.modify, _always_ucw_error_modifier,
884 d.addCallback(lambda res: n.download_best_version())
885 d.addCallback(lambda res: self.failUnlessEqual(res,
887 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
890 d.addCallback(_created)
# test_upload_and_download_full_size_keys: same life-cycle checks as
# test_upload_and_download, but with a stock KeyGenerator (full-size RSA keys
# instead of the small TEST_RSA_KEY_SIZE test keys).
# NOTE(review): `def _created(n):` headers, `self.n = n` assignment and
# `return d` lines are elided; code verbatim.
893 def test_upload_and_download_full_size_keys(self):
894 self.nodemaker.key_generator = client.KeyGenerator()
895 d = self.nodemaker.create_mutable_file()
897 d = defer.succeed(None)
898 d.addCallback(lambda res: n.get_servermap(MODE_READ))
899 d.addCallback(lambda smap: smap.dump(StringIO()))
900 d.addCallback(lambda sio:
901 self.failUnless("3-of-10" in sio.getvalue()))
902 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
903 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
904 d.addCallback(lambda res: n.download_best_version())
905 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
906 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
907 d.addCallback(lambda res: n.download_best_version())
908 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
909 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
910 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
911 d.addCallback(lambda res: n.download_best_version())
912 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
913 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
914 d.addCallback(lambda smap:
915 n.download_version(smap,
916 smap.best_recoverable_version()))
917 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
919 d.addCallback(_created)
# test_size_after_servermap_update: after a servermap update, get_size() must
# report the best version's size — 0 for a fresh empty file, 9 after writing
# "foobarbaz", and 9 for a file created with initial contents.
923 def test_size_after_servermap_update(self):
924 # a mutable file node should have something to say about how big
925 # it is after a servermap update is performed, since this tells
926 # us how large the best version of that mutable file is.
927 d = self.nodemaker.create_mutable_file()
930 return n.get_servermap(MODE_READ)
931 d.addCallback(_created)
932 d.addCallback(lambda ignored:
933 self.failUnlessEqual(self.n.get_size(), 0))
934 d.addCallback(lambda ignored:
935 self.n.overwrite(MutableData("foobarbaz")))
936 d.addCallback(lambda ignored:
937 self.failUnlessEqual(self.n.get_size(), 9))
938 d.addCallback(lambda ignored:
939 self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
940 d.addCallback(_created)
941 d.addCallback(lambda ignored:
942 self.failUnlessEqual(self.n.get_size(), 9))
# Publish a single SDMF file into a fresh FakeStorage and stash the node
# (and a sibling node created from the same cap) for later tests.
# NOTE(review): the "def _created(node):" header and the trailing
# "return d" are missing from this excerpt (gaps at lines 956-957, 960).
947 def publish_one(self):
948 # publish a file and create shares, which can then be manipulated
950 self.CONTENTS = "New contents go here" * 1000
951 self.uploadable = MutableData(self.CONTENTS)
952 self._storage = FakeStorage()
953 self._nodemaker = make_nodemaker(self._storage)
954 self._storage_broker = self._nodemaker.storage_broker
955 d = self._nodemaker.create_mutable_file(self.uploadable)
# _fn2 shares the cap but starts with no cached state (e.g. no privkey)
958 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
959 d.addCallback(_created)
# Like publish_one, but forces MDMF and uses contents large enough to
# span multiple segments.
# NOTE(review): "def _created(node):" header and "return d" are missing
# from this excerpt.
962 def publish_mdmf(self):
963 # like publish_one, except that the result is guaranteed to be
965 # self.CONTENTS should have more than one segment.
966 self.CONTENTS = "This is an MDMF file" * 100000
967 self.uploadable = MutableData(self.CONTENTS)
968 self._storage = FakeStorage()
969 self._nodemaker = make_nodemaker(self._storage)
970 self._storage_broker = self._nodemaker.storage_broker
971 d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
974 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
975 d.addCallback(_created)
# Like publish_one, but explicitly requests SDMF_VERSION.
# NOTE(review): "def _created(node):" header and "return d" are missing
# from this excerpt.
979 def publish_sdmf(self):
980 # like publish_one, except that the result is guaranteed to be
982 self.CONTENTS = "This is an SDMF file" * 1000
983 self.uploadable = MutableData(self.CONTENTS)
984 self._storage = FakeStorage()
985 self._nodemaker = make_nodemaker(self._storage)
986 self._storage_broker = self._nodemaker.storage_broker
987 d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
990 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
991 d.addCallback(_created)
# Publish five successive versions (seqnums 1..4, with two competing
# version-4s) of one mutable file, snapshotting the share state after
# each via _copy_shares, so tests can mix and match versions later.
# NOTE(review): lines 997-1000 (the remaining CONTENTS list items),
# 1006-1007 (the "def _created(node):" header) and 1025 (presumably
# "return d") are missing from this excerpt.
995 def publish_multiple(self, version=0):
996 self.CONTENTS = ["Contents 0",
1001 self.uploadables = [MutableData(d) for d in self.CONTENTS]
1002 self._copied_shares = {}
1003 self._storage = FakeStorage()
1004 self._nodemaker = make_nodemaker(self._storage)
1005 d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
1008 # now create multiple versions of the same file, and accumulate
1009 # their shares, so we can mix and match them later.
1010 d = defer.succeed(None)
1011 d.addCallback(self._copy_shares, 0)
1012 d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
1013 d.addCallback(self._copy_shares, 1)
1014 d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
1015 d.addCallback(self._copy_shares, 2)
1016 d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
1017 d.addCallback(self._copy_shares, 3)
1018 # now we replace all the shares with version s3, and upload a new
1019 # version to get s4b.
1020 rollback = dict([(i,2) for i in range(10)])
1021 d.addCallback(lambda res: self._set_versions(rollback))
1022 d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
1023 d.addCallback(self._copy_shares, 4)
1024 # we leave the storage in state 4
1026 d.addCallback(_created)
# Snapshot the current {peerid: {shnum: data}} share map into
# self._copied_shares[index] so _set_versions can restore it later.
# NOTE(review): line 1033 (presumably "new_shares = {}") is missing from
# this excerpt; the copy is one level deep — share data strings are
# shared, the per-peer dicts are fresh.
1030 def _copy_shares(self, ignored, index):
1031 shares = self._storage._peers
1032 # we need a deep copy
1034 for peerid in shares:
1035 new_shares[peerid] = {}
1036 for shnum in shares[peerid]:
1037 new_shares[peerid][shnum] = shares[peerid][shnum]
1038 self._copied_shares[index] = new_shares
1040 def _set_versions(self, versionmap):
1041 # versionmap maps shnums to which version (0,1,2,3,4) we want the
1042 # share to be at. Any shnum which is left out of the map will stay at
1043 # its current version.
1044 shares = self._storage._peers
1045 oldshares = self._copied_shares
1046 for peerid in shares:
1047 for shnum in shares[peerid]:
1048 if shnum in versionmap:
1049 index = versionmap[shnum]
1050 shares[peerid][shnum] = oldshares[index][peerid][shnum]
# An IConsumer that pauses its producer after the first write() and
# resumes it 15 seconds later via the reactor, to exercise the
# pause/resume path of the download producer machinery.
# NOTE(review): lines 1054-1055 (presumably the "def __init__(self):"
# header) and 1069 (a statement at the top of write()) are missing from
# this excerpt.
1052 class PausingConsumer:
1053 implements(IConsumer)
1056 self.already_paused = False
1058 def registerProducer(self, producer, streaming):
1059 self.producer = producer
1060 self.producer.resumeProducing()
1062 def unregisterProducer(self):
1063 self.producer = None
1065 def _unpause(self, ignored):
1066 self.producer.resumeProducing()
1068 def write(self, data):
# pause exactly once, then schedule the resume well in the future
1070 if not self.already_paused:
1071 self.producer.pauseProducing()
1072 self.already_paused = True
1073 reactor.callLater(15, self._unpause, None)
# Tests for ServermapUpdater against the FakeStorage grid populated by
# PublishMixin.publish_one().
# NOTE(review): lines 1077 ("def setUp(self):"), 1081-1084 (the rest of
# make_servermap's signature/defaults) and 1088-1090/1093-1095 (the
# update() calls and returns) are missing from this excerpt.
1076 class Servermap(unittest.TestCase, PublishMixin):
1078 return self.publish_one()
1080 def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
1085 sb = self._storage_broker
1086 smu = ServermapUpdater(fn, sb, Monitor(),
1087 ServerMap(), mode, update_range=update_range)
1091 def update_servermap(self, oldmap, mode=MODE_CHECK):
1092 smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
# Assert that servermap *sm* sees exactly one (recoverable) version with
# num_shares shares available under 3-of-10 encoding.
# NOTE(review): the trailing "return sm" (line ~1109) is missing from
# this excerpt — callers chain the return value, so it must exist in the
# full file. ".items()[0]" on line 1105 is Python-2-only (dict views are
# not indexable in Python 3).
1097 def failUnlessOneRecoverable(self, sm, num_shares):
1098 self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1099 self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1100 best = sm.best_recoverable_version()
1101 self.failIfEqual(best, None)
1102 self.failUnlessEqual(sm.recoverable_versions(), set([best]))
1103 self.failUnlessEqual(len(sm.shares_available()), 1)
1104 self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
1105 shnum, peerids = sm.make_sharemap().items()[0]
1106 peerid = list(peerids)[0]
1107 self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
# shnum 666 does not exist, so version_on_peer must report None
1108 self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
# Exercise all four servermap modes (CHECK/WRITE find all 10 shares,
# READ stops at k+epsilon=6, ANYTHING stops at k=3), then verify that
# re-updating an existing map in each mode converges to the same counts.
# NOTE(review): blank lines and the trailing "return d" are missing from
# this excerpt.
1111 def test_basic(self):
1112 d = defer.succeed(None)
1113 ms = self.make_servermap
1114 us = self.update_servermap
1116 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1117 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1118 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1119 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1120 d.addCallback(lambda res: ms(mode=MODE_READ))
1121 # this mode stops at k+epsilon, and epsilon=k, so 6 shares
1122 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1123 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1124 # this mode stops at 'k' shares
1125 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1127 # and can we re-use the same servermap? Note that these are sorted in
1128 # increasing order of number of servers queried, since once a server
1129 # gets into the servermap, we'll always ask it for an update.
1130 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1131 d.addCallback(lambda sm: us(sm, mode=MODE_READ))
1132 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1133 d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
1134 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1135 d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
1136 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1137 d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
1138 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
# A MODE_WRITE servermap update must fetch the write-enabler privkey,
# both when it fits in the initial read (small file) and when it sits
# beyond the early part of the share (larger file).
# NOTE(review): blank lines and the trailing "return d" are missing from
# this excerpt.
1142 def test_fetch_privkey(self):
1143 d = defer.succeed(None)
1144 # use the sibling filenode (which hasn't been used yet), and make
1145 # sure it can fetch the privkey. The file is small, so the privkey
1146 # will be fetched on the first (query) pass.
1147 d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
1148 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1150 # create a new file, which is large enough to knock the privkey out
1151 # of the early part of the file
1152 LARGE = "These are Larger contents" * 200 # about 5KB
1153 LARGE_uploadable = MutableData(LARGE)
1154 d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
1155 def _created(large_fn):
# a fresh node from the same cap has no cached privkey, forcing a fetch
1156 large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
1157 return self.make_servermap(MODE_WRITE, large_fn2)
1158 d.addCallback(_created)
1159 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
# After marking shares bad, a MODE_WRITE update must drop them from the
# map and locate replacements.
# NOTE(review): the "def _made_map(sm):" header (line ~1169), the
# "def _check_map(sm):" header (line ~1184), a statement at line 1179,
# and the trailing "return d" are missing from this excerpt.
1163 def test_mark_bad(self):
1164 d = defer.succeed(None)
1165 ms = self.make_servermap
1167 d.addCallback(lambda res: ms(mode=MODE_READ))
1168 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1170 v = sm.best_recoverable_version()
1171 vm = sm.make_versionmap()
1172 shares = list(vm[v])
# MODE_READ stops at k+epsilon = 6 shares
1173 self.failUnlessEqual(len(shares), 6)
1174 self._corrupted = set()
1175 # mark the first 5 shares as corrupt, then update the servermap.
1176 # The map should not have the marked shares it in any more, and
1177 # new shares should be found to replace the missing ones.
1178 for (shnum, peerid, timestamp) in shares:
1180 self._corrupted.add( (peerid, shnum) )
1181 sm.mark_bad_share(peerid, shnum, "")
1182 return self.update_servermap(sm, MODE_WRITE)
1183 d.addCallback(_made_map)
1185 # this should find all 5 shares that weren't marked bad
1186 v = sm.best_recoverable_version()
1187 vm = sm.make_versionmap()
1188 shares = list(vm[v])
1189 for (peerid, shnum) in self._corrupted:
1190 peer_shares = sm.shares_on_peer(peerid)
1191 self.failIf(shnum in peer_shares,
1192 "%d was in %s" % (shnum, peer_shares))
1193 self.failUnlessEqual(len(shares), 5)
1194 d.addCallback(_check_map)
def failUnlessNoneRecoverable(self, sm):
    """Assert that servermap *sm* found no file versions at all —
    neither recoverable nor unrecoverable — and hence no best version
    and no available shares."""
    self.failUnlessEqual(len(sm.shares_available()), 0)
    self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
    self.failUnlessEqual(len(sm.recoverable_versions()), 0)
    winner = sm.best_recoverable_version()
    self.failUnlessEqual(winner, None)
# With every share deleted, all four servermap modes must report nothing
# recoverable.
# NOTE(review): blank lines and the trailing "return d" are missing from
# this excerpt.
1204 def test_no_shares(self):
1205 self._storage._peers = {} # delete all shares
1206 ms = self.make_servermap
1207 d = defer.succeed(None)
1209 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1210 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1212 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1213 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1215 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1216 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1218 d.addCallback(lambda res: ms(mode=MODE_READ))
1219 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
# Assert that *sm* sees one version with only 2 of the required 3 shares
# — known but unrecoverable.
# NOTE(review): the trailing "return sm" (line ~1230) is missing from
# this excerpt — callers chain the return value, so it must exist in the
# full file. ".values()[0]" on line 1229 is Python-2-only.
1223 def failUnlessNotQuiteEnough(self, sm):
1224 self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1225 self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
1226 best = sm.best_recoverable_version()
1227 self.failUnlessEqual(best, None)
1228 self.failUnlessEqual(len(sm.shares_available()), 1)
1229 self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
# Reduce the grid to exactly 2 shares (k=3 needed) and confirm every
# servermap mode reports the version as known-but-unrecoverable.
# NOTE(review): line 1233 (presumably "s = self._storage"), lines
# 1238-1240 (the statements that re-add two shares) and the trailing
# "return d" are missing from this excerpt.
1232 def test_not_quite_enough_shares(self):
1234 ms = self.make_servermap
1235 num_shares = len(s._peers)
1236 for peerid in s._peers:
1237 s._peers[peerid] = {}
1241 # now there ought to be only two shares left
1242 assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2
1244 d = defer.succeed(None)
1246 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1247 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1248 d.addCallback(lambda sm:
1249 self.failUnlessEqual(len(sm.make_sharemap()), 2))
1250 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1251 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1252 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1253 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1254 d.addCallback(lambda res: ms(mode=MODE_READ))
1255 d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
1260 def test_servermapupdater_finds_mdmf_files(self):
1261 # setUp already published an MDMF file for us. We just need to
1262 # make sure that when we run the ServermapUpdater, the file is
1263 # reported to have one recoverable version.
1264 d = defer.succeed(None)
1265 d.addCallback(lambda ignored:
1266 self.publish_mdmf())
1267 d.addCallback(lambda ignored:
1268 self.make_servermap(mode=MODE_CHECK))
1269 # Calling make_servermap also updates the servermap in the mode
1270 # that we specify, so we just need to see what it says.
1271 def _check_servermap(sm):
1272 self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1273 d.addCallback(_check_servermap)
1277 def test_fetch_update(self):
1278 d = defer.succeed(None)
1279 d.addCallback(lambda ignored:
1280 self.publish_mdmf())
1281 d.addCallback(lambda ignored:
1282 self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1283 def _check_servermap(sm):
1285 self.failUnlessEqual(len(sm.update_data), 10)
1287 for data in sm.update_data.itervalues():
1288 self.failUnlessEqual(len(data), 1)
1289 d.addCallback(_check_servermap)
1293 def test_servermapupdater_finds_sdmf_files(self):
1294 d = defer.succeed(None)
1295 d.addCallback(lambda ignored:
1296 self.publish_sdmf())
1297 d.addCallback(lambda ignored:
1298 self.make_servermap(mode=MODE_CHECK))
1299 d.addCallback(lambda servermap:
1300 self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
# End-to-end publish/retrieve tests plus helpers to abbreviate verinfo
# tuples for debug output.
# NOTE(review): lines 1305 ("def setUp(self):"), 1309/1311 (the
# "if oldmap is None:" / "if sb is None:" guards), 1314-1316 (update and
# return), 1318-1319, 1325 ("output = {}") and 1330 ("return output")
# are missing from this excerpt.
1304 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1306 return self.publish_one()
1308 def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1310 oldmap = ServerMap()
1312 sb = self._storage_broker
1313 smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1317 def abbrev_verinfo(self, verinfo):
1320 (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1321 offsets_tuple) = verinfo
# "seqnum-roothashprefix", e.g. "3-abcd"
1322 return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1324 def abbrev_verinfo_dict(self, verinfo_d):
1326 for verinfo,value in verinfo_d.items():
1327 (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1328 offsets_tuple) = verinfo
1329 output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
# Debug helper: print a human-readable summary of a servermap.
# Python-2 print statements; not called by the tests (see the
# commented-out call in test_basic below).
1332 def dump_servermap(self, servermap):
1333 print "SERVERMAP", servermap
1334 print "RECOVERABLE", [self.abbrev_verinfo(v)
1335 for v in servermap.recoverable_versions()]
1336 print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1337 print "available", self.abbrev_verinfo_dict(servermap.shares_available())
# Retrieve *version* (default: best recoverable) through a
# MemoryConsumer and fire with the assembled bytes.
# NOTE(review): line 1340 (presumably "if version is None:") and line
# 1346 ("return d") are missing from this excerpt.
1339 def do_download(self, servermap, version=None):
1341 version = servermap.best_recoverable_version()
1342 r = Retrieve(self._fn, servermap, version)
1343 c = consumer.MemoryConsumer()
1344 d = r.download(consumer=c)
1345 d.addCallback(lambda mc: "".join(mc.chunks))
# Publish-then-retrieve roundtrip, including servermap reuse and
# recovery after the cached pubkey is cleared.
# NOTE(review): the trailing "return d" is missing from this excerpt.
1349 def test_basic(self):
1350 d = self.make_servermap()
1351 def _do_retrieve(servermap):
1352 self._smap = servermap
1353 #self.dump_servermap(servermap)
1354 self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1355 return self.do_download(servermap)
1356 d.addCallback(_do_retrieve)
1357 def _retrieved(new_contents):
1358 self.failUnlessEqual(new_contents, self.CONTENTS)
1359 d.addCallback(_retrieved)
1360 # we should be able to re-use the same servermap, both with and
1361 # without updating it.
1362 d.addCallback(lambda res: self.do_download(self._smap))
1363 d.addCallback(_retrieved)
1364 d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1365 d.addCallback(lambda res: self.do_download(self._smap))
1366 d.addCallback(_retrieved)
1367 # clobbering the pubkey should make the servermap updater re-fetch it
1368 def _clobber_pubkey(res):
1369 self._fn._pubkey = None
1370 d.addCallback(_clobber_pubkey)
1371 d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1372 d.addCallback(lambda res: self.do_download(self._smap))
1373 d.addCallback(_retrieved)
# If every share disappears between mapupdate and retrieve, the download
# must fail with NotEnoughSharesError.
# NOTE(review): line 1380 (the statement clearing each peer's shares),
# the shouldFail substring argument (line 1383), "return d1" and the
# trailing "return d" are missing from this excerpt.
1376 def test_all_shares_vanished(self):
1377 d = self.make_servermap()
1378 def _remove_shares(servermap):
1379 for shares in self._storage._peers.values():
1381 d1 = self.shouldFail(NotEnoughSharesError,
1382 "test_all_shares_vanished",
1384 self.do_download, servermap)
1386 d.addCallback(_remove_shares)
# With zero storage servers, a MODE_READ servermap must come back empty
# rather than raising.
# NOTE(review): line 1392 (rest of the comment), line 1400 ("return d")
# are missing from this excerpt.
1389 def test_no_servers(self):
1390 sb2 = make_storagebroker(num_peers=0)
1391 # if there are no servers, then a MODE_READ servermap should come
1393 d = self.make_servermap(sb=sb2)
1394 def _check_servermap(servermap):
1395 self.failUnlessEqual(servermap.best_recoverable_version(), None)
1396 self.failIf(servermap.recoverable_versions())
1397 self.failIf(servermap.unrecoverable_versions())
1398 self.failIf(servermap.all_peers())
1399 d.addCallback(_check_servermap)
# trial attribute: cap this test at 15 seconds
1401 test_no_servers.timeout = 15
# A download attempted with no servers must raise UnrecoverableFileError,
# and restoring the real broker afterwards must let downloads succeed
# again (regression guard for a stuck-after-failure bug).
# NOTE(review): line 1410 (the "def _restore(res):" header) and the
# trailing "return d" are missing from this excerpt.
1403 def test_no_servers_download(self):
1404 sb2 = make_storagebroker(num_peers=0)
1405 self._fn._storage_broker = sb2
1406 d = self.shouldFail(UnrecoverableFileError,
1407 "test_no_servers_download",
1408 "no recoverable versions",
1409 self._fn.download_best_version)
1411 # a failed download that occurs while we aren't connected to
1412 # anybody should not prevent a subsequent download from working.
1413 # This isn't quite the webapi-driven test that #463 wants, but it
1414 # should be close enough.
1415 self._fn._storage_broker = self._storage_broker
1416 return self._fn.download_best_version()
1417 def _retrieved(new_contents):
1418 self.failUnlessEqual(new_contents, self.CONTENTS)
1419 d.addCallback(_restore)
1420 d.addCallback(_retrieved)
1422 test_no_servers_download.timeout = 15
# Core corruption driver: corrupt every share at *offset* (before or
# after the mapupdate, per corrupt_early), then either expect the
# download to succeed with the original contents or fail with
# NotEnoughSharesError containing *substring*.
# NOTE(review): several lines are missing from this excerpt — the
# corrupt_early=True keyword default (line 1427), the "if corrupt_early:"
# guard (line 1431), parts of the success/failure branches (1441,
# 1444-1445, 1447, 1450, 1453, 1455-1457), "return d1" and the trailing
# "return d".
1425 def _test_corrupt_all(self, offset, substring,
1426 should_succeed=False,
1428 failure_checker=None,
1429 fetch_privkey=False):
1430 d = defer.succeed(None)
1432 d.addCallback(corrupt, self._storage, offset)
1433 d.addCallback(lambda res: self.make_servermap())
1434 if not corrupt_early:
1435 d.addCallback(corrupt, self._storage, offset)
1436 def _do_retrieve(servermap):
1437 ver = servermap.best_recoverable_version()
1438 if ver is None and not should_succeed:
1439 # no recoverable versions == not succeeding. The problem
1440 # should be noted in the servermap's list of problems.
1442 allproblems = [str(f) for f in servermap.problems]
1443 self.failUnlessIn(substring, "".join(allproblems))
1446 d1 = self._fn.download_version(servermap, ver,
1448 d1.addCallback(lambda new_contents:
1449 self.failUnlessEqual(new_contents, self.CONTENTS))
1451 d1 = self.shouldFail(NotEnoughSharesError,
1452 "_corrupt_all(offset=%s)" % (offset,),
1454 self._fn.download_version, servermap,
1458 d1.addCallback(failure_checker)
1459 d1.addCallback(lambda res: servermap)
1461 d.addCallback(_do_retrieve)
# Corrupting the version byte (offset 0) makes every share unparseable,
# and the servermap dump must report all 30 problems (10 shares x 3
# retries — TODO confirm the multiplier against the full file).
# NOTE(review): line 1470 (presumably "s = StringIO()") and the trailing
# "return d" are missing from this excerpt.
1464 def test_corrupt_all_verbyte(self):
1465 # when the version byte is not 0 or 1, we hit an UnknownVersionError
1466 # error in unpack_share().
1467 d = self._test_corrupt_all(0, "UnknownVersionError")
1468 def _check_servermap(servermap):
1469 # and the dump should mention the problems
1471 dump = servermap.dump(s).getvalue()
1472 self.failUnless("30 PROBLEMS" in dump, dump)
1473 d.addCallback(_check_servermap)
# Each of these one-liners corrupts a field covered by the share's
# signed prefix (seqnum, root hash, IV/salt, k, N, segsize, datalen);
# every one must surface as "signature is invalid". The numeric offsets
# index into the SDMF header layout.
1476 def test_corrupt_all_seqnum(self):
1477 # a corrupt sequence number will trigger a bad signature
1478 return self._test_corrupt_all(1, "signature is invalid")
1480 def test_corrupt_all_R(self):
1481 # a corrupt root hash will trigger a bad signature
1482 return self._test_corrupt_all(9, "signature is invalid")
1484 def test_corrupt_all_IV(self):
1485 # a corrupt salt/IV will trigger a bad signature
1486 return self._test_corrupt_all(41, "signature is invalid")
1488 def test_corrupt_all_k(self):
1489 # a corrupt 'k' will trigger a bad signature
1490 return self._test_corrupt_all(57, "signature is invalid")
1492 def test_corrupt_all_N(self):
1493 # a corrupt 'N' will trigger a bad signature
1494 return self._test_corrupt_all(58, "signature is invalid")
1496 def test_corrupt_all_segsize(self):
1497 # a corrupt segsize will trigger a bad signature
1498 return self._test_corrupt_all(59, "signature is invalid")
1500 def test_corrupt_all_datalen(self):
1501 # a corrupt data length will trigger a bad signature
1502 return self._test_corrupt_all(67, "signature is invalid")
# Named-field corruption: a bad embedded pubkey fails the URI
# fingerprint check (cached pubkey must be cleared first so it is
# actually re-read); a bad signature fails verification directly.
# NOTE(review): lines 1507 (rest of the pubkey comment) and 1515 are
# missing from this excerpt.
1504 def test_corrupt_all_pubkey(self):
1505 # a corrupt pubkey won't match the URI's fingerprint. We need to
1506 # remove the pubkey from the filenode, or else it won't bother trying
1508 self._fn._pubkey = None
1509 return self._test_corrupt_all("pubkey",
1510 "pubkey doesn't match fingerprint")
1512 def test_corrupt_all_sig(self):
1513 # a corrupt signature is a bad one
1514 # the signature runs from about [543:799], depending upon the length
1516 return self._test_corrupt_all("signature", "signature is invalid")
# Hash-chain / block corruption surfaces as hash failures; a corrupt
# encrypted privkey is invisible to a plain read (only a MODE_WRITE /
# verify pass would touch it), so that download must still succeed.
# NOTE(review): lines 1538 and 1540-1541 are missing from this excerpt.
1518 def test_corrupt_all_share_hash_chain_number(self):
1519 # a corrupt share hash chain entry will show up as a bad hash. If we
1520 # mangle the first byte, that will look like a bad hash number,
1521 # causing an IndexError
1522 return self._test_corrupt_all("share_hash_chain", "corrupt hashes")
1524 def test_corrupt_all_share_hash_chain_hash(self):
1525 # a corrupt share hash chain entry will show up as a bad hash. If we
1526 # mangle a few bytes in, that will look like a bad hash.
1527 return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")
1529 def test_corrupt_all_block_hash_tree(self):
1530 return self._test_corrupt_all("block_hash_tree",
1531 "block hash tree failure")
1533 def test_corrupt_all_block(self):
1534 return self._test_corrupt_all("share_data", "block hash tree failure")
1536 def test_corrupt_all_encprivkey(self):
1537 # a corrupted privkey won't even be noticed by the reader, only by a
1539 return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
# Same as test_corrupt_all_encprivkey, but corrupts after the mapupdate
# and clears the cached privkey so the retrieve path exercises its
# privkey-fetch error handling.
# NOTE(review): lines 1545, 1553 (presumably "fetch_privkey=True)") and
# the closing paren are missing from this excerpt.
1542 def test_corrupt_all_encprivkey_late(self):
1543 # this should work for the same reason as above, but we corrupt
1544 # after the servermap update to exercise the error handling
1546 # We need to remove the privkey from the node, or the retrieve
1547 # process won't know to update it.
1548 self._fn._privkey = None
1549 return self._test_corrupt_all("enc_privkey",
1550 None, # this shouldn't fail
1551 should_succeed=True,
1552 corrupt_early=False,
# Late-corruption variants (corrupt_early=False): shares look fine at
# mapupdate but fail during retrieve, so the expected failure is
# NotEnoughSharesError, optionally checked further by failure_checker.
# NOTE(review): the "def _check(res):" headers (lines ~1559-1560, 1568-
# 1569, 1578-1579) are missing from this excerpt; "res" there is a
# Failure from shouldFail — TODO confirm.
1556 def test_corrupt_all_seqnum_late(self):
1557 # corrupting the seqnum between mapupdate and retrieve should result
1558 # in NotEnoughSharesError, since each share will look invalid
1561 self.failUnless(f.check(NotEnoughSharesError))
1562 self.failUnless("uncoordinated write" in str(f))
1563 return self._test_corrupt_all(1, "ran out of peers",
1564 corrupt_early=False,
1565 failure_checker=_check)
1567 def test_corrupt_all_block_hash_tree_late(self):
1570 self.failUnless(f.check(NotEnoughSharesError))
1571 return self._test_corrupt_all("block_hash_tree",
1572 "block hash tree failure",
1573 corrupt_early=False,
1574 failure_checker=_check)
1577 def test_corrupt_all_block_late(self):
1580 self.failUnless(f.check(NotEnoughSharesError))
1581 return self._test_corrupt_all("share_data", "block hash tree failure",
1582 corrupt_early=False,
1583 failure_checker=_check)
# Corrupt the pubkey in all but the last k shares: the download must
# still succeed after retries, and the servermap must record the
# fingerprint-mismatch problems.
# NOTE(review): line 1592 and the trailing "return d" are missing from
# this excerpt.
1586 def test_basic_pubkey_at_end(self):
1587 # we corrupt the pubkey in all but the last 'k' shares, allowing the
1588 # download to succeed but forcing a bunch of retries first. Note that
1589 # this is rather pessimistic: our Retrieve process will throw away
1590 # the whole share if the pubkey is bad, even though the rest of the
1591 # share might be good.
1593 self._fn._pubkey = None
1594 k = self._fn.get_required_shares()
1595 N = self._fn.get_total_shares()
1596 d = defer.succeed(None)
1597 d.addCallback(corrupt, self._storage, "pubkey",
1598 shnums_to_corrupt=range(0, N-k))
1599 d.addCallback(lambda res: self.make_servermap())
1600 def _do_retrieve(servermap):
1601 self.failUnless(servermap.problems)
1602 self.failUnless("pubkey doesn't match fingerprint"
1603 in str(servermap.problems[0]))
1604 ver = servermap.best_recoverable_version()
1605 r = Retrieve(self._fn, servermap, ver)
1606 c = consumer.MemoryConsumer()
1607 return r.download(c)
1608 d.addCallback(_do_retrieve)
1609 d.addCallback(lambda mc: "".join(mc.chunks))
1610 d.addCallback(lambda new_contents:
1611 self.failUnlessEqual(new_contents, self.CONTENTS))
# Corrupt only the first 5 shares at *offset* (optionally on an MDMF
# file): the servermap still finds a recoverable version and the
# download must succeed from the remaining good shares.
# NOTE(review): lines 1616/1618 (the "if mdmf:" / "else:" guards around
# the two "d =" assignments) and the trailing "return d" are missing
# from this excerpt.
1615 def _test_corrupt_some(self, offset, mdmf=False):
1617 d = self.publish_mdmf()
1619 d = defer.succeed(None)
1620 d.addCallback(lambda ignored:
1621 corrupt(None, self._storage, offset, range(5)))
1622 d.addCallback(lambda ignored:
1623 self.make_servermap())
1624 def _do_retrieve(servermap):
1625 ver = servermap.best_recoverable_version()
1626 self.failUnless(ver)
1627 return self._fn.download_best_version()
1628 d.addCallback(_do_retrieve)
1629 d.addCallback(lambda new_contents:
1630 self.failUnlessEqual(new_contents, self.CONTENTS))
# test_corrupt_some: partial corruption forces node.download to retry
# past the MODE_READ share set. test_download_fails: corrupting every
# signature leaves nothing recoverable, so download_best_version must
# raise UnrecoverableFileError.
# NOTE(review): the trailing "return d" of test_download_fails is
# missing from this excerpt.
1634 def test_corrupt_some(self):
1635 # corrupt the data of first five shares (so the servermap thinks
1636 # they're good but retrieve marks them as bad), so that the
1637 # MODE_READ set of 6 will be insufficient, forcing node.download to
1638 # retry with more servers.
1639 return self._test_corrupt_some("share_data")
1642 def test_download_fails(self):
1643 d = corrupt(None, self._storage, "signature")
1644 d.addCallback(lambda ignored:
1645 self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1646 "no recoverable versions",
1647 self._fn.download_best_version))
# MDMF corruption variants: offsets are (field, byte-offset) tuples so
# the corruption lands beyond the first segment's hashes/blocks.
# NOTE(review): "corrupt_early=False," lines (1667, 1679), the
# "mdmf=True)" argument of test_corrupt_some_mdmf (1686) and the
# trailing "return d" lines are missing from this excerpt.
1652 def test_corrupt_mdmf_block_hash_tree(self):
1653 d = self.publish_mdmf()
1654 d.addCallback(lambda ignored:
1655 self._test_corrupt_all(("block_hash_tree", 12 * 32),
1656 "block hash tree failure",
1657 corrupt_early=False,
1658 should_succeed=False))
1662 def test_corrupt_mdmf_block_hash_tree_late(self):
1663 d = self.publish_mdmf()
1664 d.addCallback(lambda ignored:
1665 self._test_corrupt_all(("block_hash_tree", 12 * 32),
1666 "block hash tree failure",
1668 should_succeed=False))
1672 def test_corrupt_mdmf_share_data(self):
1673 d = self.publish_mdmf()
1674 d.addCallback(lambda ignored:
1675 # TODO: Find out what the block size is and corrupt a
1676 # specific block, rather than just guessing.
1677 self._test_corrupt_all(("share_data", 12 * 40),
1678 "block hash tree failure",
1680 should_succeed=False))
1684 def test_corrupt_some_mdmf(self):
1685 return self._test_corrupt_some(("share_data", 12 * 40),
# Shared assertions for checker results: healthy / unhealthy / a
# specific expected failure among r.problems.
# NOTE(review): the enclosing "class CheckerMixin:" header (~line 1689),
# the "return r" lines after each helper, and line 1704 (presumably the
# for-else or an early return in check_expected_failure) are missing
# from this excerpt.
1690 def check_good(self, r, where):
1691 self.failUnless(r.is_healthy(), where)
1694 def check_bad(self, r, where):
1695 self.failIf(r.is_healthy(), where)
1698 def check_expected_failure(self, r, expected_exception, substring, where):
1699 for (peerid, storage_index, shnum, f) in r.problems:
1700 if f.check(expected_exception):
1701 self.failUnless(substring in str(f),
1702 "%s: substring '%s' not in '%s'" %
1703 (where, substring, str(f)))
# reached only if no problem matched expected_exception
1705 self.fail("%s: didn't see expected exception %s in problems %s" %
1706 (where, expected_exception, r.problems))
# Checker tests: an untouched SDMF or MDMF file must report healthy.
# NOTE(review): the "def setUp(self):" header (line 1710) and the
# trailing "return d" lines are missing from this excerpt.
1709 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1711 return self.publish_one()
1714 def test_check_good(self):
1715 d = self._fn.check(Monitor())
1716 d.addCallback(self.check_good, "test_check_good")
1719 def test_check_mdmf_good(self):
1720 d = self.publish_mdmf()
1721 d.addCallback(lambda ignored:
1722 self._fn.check(Monitor()))
1723 d.addCallback(self.check_good, "test_check_mdmf_good")
# Missing-share checker tests: no shares at all, or too few to reach k,
# must both report unhealthy, for SDMF and MDMF alike.
# NOTE(review): the share-deleting statements inside the loops (lines
# 1728, 1735/1737, 1747-1748, 1758-1759), the "def _then(ignored):"
# headers and the trailing "return d" lines are missing from this
# excerpt. The "not_enougH" typo on line 1763 is a label string only —
# left untouched here since doc updates must not alter runtime strings.
1726 def test_check_no_shares(self):
1727 for shares in self._storage._peers.values():
1729 d = self._fn.check(Monitor())
1730 d.addCallback(self.check_bad, "test_check_no_shares")
1733 def test_check_mdmf_no_shares(self):
1734 d = self.publish_mdmf()
1736 for share in self._storage._peers.values():
1738 d.addCallback(_then)
1739 d.addCallback(lambda ignored:
1740 self._fn.check(Monitor()))
1741 d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1744 def test_check_not_enough_shares(self):
1745 for shares in self._storage._peers.values():
1746 for shnum in shares.keys():
1749 d = self._fn.check(Monitor())
1750 d.addCallback(self.check_bad, "test_check_not_enough_shares")
1753 def test_check_mdmf_not_enough_shares(self):
1754 d = self.publish_mdmf()
1756 for shares in self._storage._peers.values():
1757 for shnum in shares.keys():
1760 d.addCallback(_then)
1761 d.addCallback(lambda ignored:
1762 self._fn.check(Monitor()))
1763 d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")
# Checker vs. corruption: a bad signature is detected (checker reads the
# signed prefix), but corrupt block data is NOT — the plain checker
# never reads share_data, so those files still report healthy.
# NOTE(review): trailing "return d" lines are missing from this excerpt.
1767 def test_check_all_bad_sig(self):
1768 d = corrupt(None, self._storage, 1) # bad sig
1769 d.addCallback(lambda ignored:
1770 self._fn.check(Monitor()))
1771 d.addCallback(self.check_bad, "test_check_all_bad_sig")
1774 def test_check_mdmf_all_bad_sig(self):
1775 d = self.publish_mdmf()
1776 d.addCallback(lambda ignored:
1777 corrupt(None, self._storage, 1))
1778 d.addCallback(lambda ignored:
1779 self._fn.check(Monitor()))
1780 d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1783 def test_check_all_bad_blocks(self):
1784 d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1785 # the Checker won't notice this.. it doesn't look at actual data
1786 d.addCallback(lambda ignored:
1787 self._fn.check(Monitor()))
1788 d.addCallback(self.check_good, "test_check_all_bad_blocks")
1792 def test_check_mdmf_all_bad_blocks(self):
1793 d = self.publish_mdmf()
1794 d.addCallback(lambda ignored:
1795 corrupt(None, self._storage, "share_data"))
1796 d.addCallback(lambda ignored:
1797 self._fn.check(Monitor()))
1798 d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
# Verifier (check with verify=True) tests: unlike the plain checker, the
# verifier reads every byte, so single-share corruption of blocks, hash
# chains, and privkeys is detected — except a read-only node, which has
# no way to validate the privkey and therefore still reports healthy.
# NOTE(review): trailing "return d" lines are missing from this excerpt.
1801 def test_verify_good(self):
1802 d = self._fn.check(Monitor(), verify=True)
1803 d.addCallback(self.check_good, "test_verify_good")
1805 test_verify_good.timeout = 15
1807 def test_verify_all_bad_sig(self):
1808 d = corrupt(None, self._storage, 1) # bad sig
1809 d.addCallback(lambda ignored:
1810 self._fn.check(Monitor(), verify=True))
1811 d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1814 def test_verify_one_bad_sig(self):
1815 d = corrupt(None, self._storage, 1, [9]) # bad sig
1816 d.addCallback(lambda ignored:
1817 self._fn.check(Monitor(), verify=True))
1818 d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1821 def test_verify_one_bad_block(self):
1822 d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1823 # the Verifier *will* notice this, since it examines every byte
1824 d.addCallback(lambda ignored:
1825 self._fn.check(Monitor(), verify=True))
1826 d.addCallback(self.check_bad, "test_verify_one_bad_block")
1827 d.addCallback(self.check_expected_failure,
1828 CorruptShareError, "block hash tree failure",
1829 "test_verify_one_bad_block")
1832 def test_verify_one_bad_sharehash(self):
1833 d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1834 d.addCallback(lambda ignored:
1835 self._fn.check(Monitor(), verify=True))
1836 d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1837 d.addCallback(self.check_expected_failure,
1838 CorruptShareError, "corrupt hashes",
1839 "test_verify_one_bad_sharehash")
1842 def test_verify_one_bad_encprivkey(self):
1843 d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1844 d.addCallback(lambda ignored:
1845 self._fn.check(Monitor(), verify=True))
1846 d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1847 d.addCallback(self.check_expected_failure,
1848 CorruptShareError, "invalid privkey",
1849 "test_verify_one_bad_encprivkey")
1852 def test_verify_one_bad_encprivkey_uncheckable(self):
1853 d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1854 readonly_fn = self._fn.get_readonly()
1855 # a read-only node has no way to validate the privkey
1856 d.addCallback(lambda ignored:
1857 readonly_fn.check(Monitor(), verify=True))
1858 d.addCallback(self.check_good,
1859 "test_verify_one_bad_encprivkey_uncheckable")
1863 def test_verify_mdmf_good(self):
1864 d = self.publish_mdmf()
1865 d.addCallback(lambda ignored:
1866 self._fn.check(Monitor(), verify=True))
1867 d.addCallback(self.check_good, "test_verify_mdmf_good")
1871 def test_verify_mdmf_one_bad_block(self):
1872 d = self.publish_mdmf()
1873 d.addCallback(lambda ignored:
1874 corrupt(None, self._storage, "share_data", [1]))
1875 d.addCallback(lambda ignored:
1876 self._fn.check(Monitor(), verify=True))
1877 # We should find one bad block here
1878 d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1879 d.addCallback(self.check_expected_failure,
1880 CorruptShareError, "block hash tree failure",
1881 "test_verify_mdmf_one_bad_block")
1885 def test_verify_mdmf_bad_encprivkey(self):
1886 d = self.publish_mdmf()
1887 d.addCallback(lambda ignored:
1888 corrupt(None, self._storage, "enc_privkey", [0]))
1889 d.addCallback(lambda ignored:
1890 self._fn.check(Monitor(), verify=True))
1891 d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1892 d.addCallback(self.check_expected_failure,
1893 CorruptShareError, "privkey",
1894 "test_verify_mdmf_bad_encprivkey")
1898 def test_verify_mdmf_bad_sig(self):
1899 d = self.publish_mdmf()
1900 d.addCallback(lambda ignored:
1901 corrupt(None, self._storage, 1, [1]))
1902 d.addCallback(lambda ignored:
1903 self._fn.check(Monitor(), verify=True))
1904 d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
def test_verify_mdmf_bad_encprivkey_uncheckable(self):
    """Like the SDMF variant above: a read-only cap cannot validate the
    encprivkey, so verify through a readcap must still report an MDMF file
    with a corrupted encprivkey as healthy."""
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
        corrupt(None, self._storage, "enc_privkey", [1]))
    d.addCallback(lambda ignored:
        self._fn.get_readonly())
    d.addCallback(lambda fn:
        fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good,
                  "test_verify_mdmf_bad_encprivkey_uncheckable")
    # Return the Deferred so trial waits for the callback chain.
    return d
class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
    # Exercises MutableFileNode.check()/repair() against shares held in the
    # in-RAM FakeStorage.
    # NOTE(review): this extract is missing many interior lines (several
    # 'return d' statements, '_check' def headers, and loop bodies); the
    # gaps are marked below and should be restored from the full file.

    def get_shares(self, s):
        # Flatten FakeStorage 's' into one dict for later comparison.
        all_shares = {} # maps (peerid, shnum) to share data
        for peerid in s._peers:
            shares = s._peers[peerid]
            for shnum in shares:
                data = shares[shnum]
                all_shares[ (peerid, shnum) ] = data
        # NOTE(review): 'return all_shares' appears to be elided here.

    def copy_shares(self, ignored=None):
        # Snapshot current shares; 'ignored' lets this sit in a Deferred
        # callback chain.
        self.old_shares.append(self.get_shares(self._storage))

    def test_repair_nop(self):
        # Repair of a healthy file should succeed, keep every share in
        # place, and bump each share's sequence number.
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results
            # NOTE(review): a second copy_shares() call appears elided here.
            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)
            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                # NOTE(review): the opening lines of both unpack tuples are
                # elided in this extract.
                 k0, N0, segsize0, datalen0,
                 o0) = unpack_header(initial_shares[key])
                 k1, N1, segsize1, datalen1,
                 o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)
        # NOTE(review): 'return d' appears elided here.

    def failIfSharesChanged(self, ignored=None):
        # Assert that the last two snapshots taken by copy_shares() match.
        old_shares = self.old_shares[-2]
        current_shares = self.old_shares[-1]
        self.failUnlessEqual(old_shares, current_shares)

    def test_unrepairable_0shares(self):
        # With every share deleted, repair must report failure.
        d = self.publish_one()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                # NOTE(review): the loop body (share deletion) is elided.
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): 'def _check(crr):' is elided above the next line.
            self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)

    def test_mdmf_unrepairable_0shares(self):
        # Same as above, for an MDMF file.
        d = self.publish_mdmf()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                # NOTE(review): the loop body (share deletion) is elided.
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        d.addCallback(lambda crr: self.failIf(crr.get_successful()))

    def test_unrepairable_1share(self):
        # With too few shares remaining, repair must report failure.
        d = self.publish_one()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): the guard keeping one share is elided.
                        del shares[peerid][shnum]
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): 'def _check(crr):' is elided above the next line.
            self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)

    def test_mdmf_unrepairable_1share(self):
        # Same as above, for an MDMF file.
        d = self.publish_mdmf()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): the guard keeping one share is elided.
                        del shares[peerid][shnum]
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): 'def _check(crr):' is elided above the next line.
            self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)

    def test_repairable_5shares(self):
        # With enough shares still present, repair must succeed.
        d = self.publish_mdmf()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): the guard choosing which shares to
                    # delete is elided.
                        del shares[peerid][shnum]
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): 'def _check(crr):' is elided above the next line.
            self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check)

    def test_mdmf_repairable_5shares(self):
        # Delete some shares, confirm the file is unhealthy-but-recoverable,
        # then verify repair succeeds.
        d = self.publish_mdmf()
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): the guard choosing which shares to
                    # delete is elided.
                        del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        # NOTE(review): 'def _check(cr):' is elided above the next line.
            self.failIf(cr.is_healthy())
            self.failUnless(cr.is_recoverable())
            # NOTE(review): a 'return cr' line appears elided here.
        d.addCallback(_check)
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): 'def _check1(crr):' is elided above the next line.
            self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check1)

    def test_merge(self):
        # Two recoverable versions with identical seqnums: repair must
        # refuse unless force=True, then converge on one version.
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        # NOTE(review): the continuation of this comment is elided.
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            # NOTE(review): 'return d2' appears elided here.
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
            # NOTE(review): the 'else:' branch header is elided here.
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            # NOTE(review): 'return d2' appears elided here.
        d.addCallback(_check_smap)

    def test_non_merge(self):
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res:
                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                          1:3,3:3,5:3,7:3,9:3}))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            # NOTE(review): 'return d2' appears elided here.
        d.addCallback(_check_smap)

    def get_roothash_for(self, index):
        # return the roothash for the first share we see in the saved set
        shares = self._copied_shares[index]
        for peerid in shares:
            for shnum in shares[peerid]:
                share = shares[peerid][shnum]
                (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
                          unpack_header(share)
                # NOTE(review): 'return root_hash' appears elided here.

    def test_check_and_repair_readcap(self):
        # we can't currently repair from a mutable readcap: #625
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        def _get_readcap(res):
            self._fn3 = self._fn.get_readonly()
            # also delete some shares
            for peerid,shares in self._storage._peers.items():
                # NOTE(review): the deletion statement is elided here.
        d.addCallback(_get_readcap)
        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
        def _check_results(crr):
            self.failUnless(ICheckAndRepairResults.providedBy(crr))
            # we should detect the unhealthy, but skip over mutable-readcap
            # repairs until #625 is fixed
            self.failIf(crr.get_pre_repair_results().is_healthy())
            self.failIf(crr.get_repair_attempted())
            self.failIf(crr.get_post_repair_results().is_healthy())
        d.addCallback(_check_results)
class DevNullDictionary(dict):
    """A dict that silently discards every item stored into it.

    Used by these tests to disable the nodemaker's node cache, so that
    multiple node objects can point at the same file without sharing state
    (see the "disable the nodecache" call sites in this file).
    """
    def __setitem__(self, key, value):
        # Drop the item on the floor: reads will always miss.
        # (The original method body was missing here, leaving the def
        # without a statement; a bare return restores the intended
        # discard-everything behavior.)
        return
class MultipleEncodings(unittest.TestCase):
    # Uploads the same mutable file under several different k-of-n
    # encodings and mixes the shares, to check that download copes with a
    # grid holding a variety of encodings of one file.
    # NOTE(review): 'def setUp(self):' and several other interior lines
    # are elided in this extract; the gaps are marked below.
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        # NOTE(review): the '_created' callback definition is elided here.
        d.addCallback(_created)

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.
        # NOTE(review): the line binding 'fn' (presumably 'fn = self._fn')
        # is elided here.
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n
        # NOTE(review): the line binding 's' (presumably
        # 's = self._storage') is elided here.
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # NOTE(review): the body (capturing s._peers) is elided here.
        d.addCallback(_published)
        # NOTE(review): 'return d' appears elided here.

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        # NOTE(review): the 'if oldmap is None:' guard is elided here.
            oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
        # NOTE(review): the remainder of this call and the update/return
        # statements are elided here.

    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.
        contents1 = "Contents for encoding 1 (3-of-10) go here"
        contents2 = "Contents for encoding 2 (4-of-9) go here"
        contents3 = "Contents for encoding 3 (4-of-7) go here"
        # we make a retrieval object that doesn't know what encoding
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)
        # NOTE(review): the '_merge' callback definition header is elided
        # above the next line.
            log.msg("merging sharelists")
            # we merge the shares from the two sets, leaving each shnum in
            # its original location, but using a share from set1 or set2
            # according to the following sequence:
            # NOTE(review): a long explanatory comment block is elided here.
            # so that neither form can be recovered until fetch [f], at which
            # point version-s1 (the 3-of-10 form) should be recoverable. If
            # the implementation latches on to the first version it sees,
            # then s2 will be recoverable at fetch [g].
            # Later, when we implement code that handles multiple versions,
            # we can use this framework to assert that all recoverable
            # versions are retrieved, and test that 'epsilon' does its job
            places = [2, 2, 3, 2, 1, 1, 1, 2]
            sb = self._storage_broker
            for peerid in sorted(sb.get_all_serverids()):
                for shnum in self._shares1.get(peerid, {}):
                    if shnum < len(places):
                        which = places[shnum]
                    # NOTE(review): the 'else' branch and the 'sharemap'
                    # initialization are elided in this extract.
                    self._storage._peers[peerid] = peers = {}
                    in_1 = shnum in self._shares1[peerid]
                    in_2 = shnum in self._shares2.get(peerid, {})
                    in_3 = shnum in self._shares3.get(peerid, {})
                    # NOTE(review): the 'if which == ...' guards selecting
                    # between the three share sets are elided here.
                        peers[shnum] = self._shares1[peerid][shnum]
                        sharemap[shnum] = peerid
                        peers[shnum] = self._shares2[peerid][shnum]
                        sharemap[shnum] = peerid
                        peers[shnum] = self._shares3[peerid][shnum]
                        sharemap[shnum] = peerid
            # we don't bother placing any other shares
            # now sort the sequence so that share 0 is returned first
            new_sequence = [sharemap[shnum]
                            for shnum in sorted(sharemap.keys())]
            self._storage._sequence = new_sequence
            log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    # Behavior of download/servermap when the grid holds a mix of share
    # versions of one mutable file.
    # NOTE(review): the 'def setUp(self):' header is elided above the next
    # line in this extract.
        return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")
        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        # NOTE(review): 'return d' appears elided here.

    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version
        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)
        def _modify(oldversion, servermap, first_time):
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
        # NOTE(review): 'return d' appears elided here.
class Utils(unittest.TestCase):
    def test_cache(self):
        # Exercise the response cache: reads must miss on a wrong verinfo
        # or shnum, hit only inside cached ranges, and join fragments that
        # were added contiguously.
        # NOTE(review): the cache construction (presumably
        # 'c = ResponseCache()') is elided from this extract.
        # xdata = base62.b2a(os.urandom(100))[:100]
        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
        c.add("v1", 1, 0, xdata)
        c.add("v1", 1, 2000, ydata)
        # wrong verinfo or wrong shnum: miss
        self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
        self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
        # reads fully inside the cached [0,100) range: hit
        self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
        self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
        self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
        self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
        self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
        self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
        # reads that fall in (or straddle) the uncached gap before 2000: miss
        self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
        self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)

        # test joining fragments
        # NOTE(review): a fresh cache construction appears elided here.
        c.add("v1", 1, 0, xdata[:10])
        c.add("v1", 1, 10, xdata[10:20])
        self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
class Exceptions(unittest.TestCase):
    """Sanity-check that the mutable-file exception classes have
    informative reprs (their class name must appear in repr())."""
    def test_repr(self):
        checks = [(NeedMoreDataError(100, 50, 100), "NeedMoreDataError"),
                  (UncoordinatedWriteError(), "UncoordinatedWriteError")]
        for exc, classname in checks:
            self.failUnless(classname in repr(exc), repr(exc))
class SameKeyGenerator:
    """A key 'generator' that always hands back one fixed RSA keypair.

    Substituted for the nodemaker's real key generator so that every
    mutable-file creation lands on the same storage index, letting tests
    predict (and break) the servers that will hold the shares.
    """
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # keysize is accepted for interface compatibility but ignored;
        # the canned pair is returned via an already-fired Deferred.
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
class FirstServerGetsKilled:
    # post_call_notifier hook: after the first remote call completes, mark
    # that server's wrapper as broken so every later call to it fails.
    # NOTE(review): the 'done' flag initialization and its guard are
    # elided in this extract.
    def notify(self, retval, wrapper, methname):
        # NOTE(review): a first-call guard (presumably 'if not self.done:')
        # is elided above the next line.
            wrapper.broken = True
class FirstServerGetsDeleted:
    # post_call_notifier hook: the first remote query succeeds, after which
    # write queries to that same server act as if its share were deleted.
    # NOTE(review): the '__init__' def line and several guards are elided
    # in this extract.
        self.silenced = None
    def notify(self, retval, wrapper, methname):
        # this query will work, but later queries should think the share
        # NOTE(review): the continuation of this comment and the guard that
        # selects the first wrapper are elided above the next line.
            self.silenced = wrapper
        if wrapper == self.silenced:
            assert methname == "slot_testv_and_readv_and_writev"
class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    # Failure-injection tests run against a no_network grid: stale
    # servermaps, broken servers, and mid-download server death.
    # NOTE(review): this extract is missing interior lines throughout
    # (set_up_grid() calls, '_created' callback headers, 'return d'
    # statements); the gaps are marked below.

    def test_publish_surprise(self):
        # Publishing with a stale servermap must look like an
        # uncoordinated write.
        self.basedir = "mutable/Problems/test_publish_surprise"
        # NOTE(review): the set_up_grid() call is elided here.
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): the '_created(n)' callback header is elided above
        # the next line.
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                # NOTE(review): the 'self.old_map = smap' line is elided.
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_publish_surprise", None,
                                          MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)

    def test_retrieve_surprise(self):
        # Retrieving with a stale servermap must fail once the file has
        # been rewritten underneath it.
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        # NOTE(review): the set_up_grid() call is elided here.
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): the '_created(n)' callback header is elided above
        # the next line.
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            def _got_smap1(smap):
                # stash the old state of the file
                # NOTE(review): the 'self.old_map = smap' line is elided.
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # now attempt to retrieve the old version with the old servermap.
            # This will look like someone has changed the file since we
            # updated the servermap.
            d.addCallback(lambda res: n._cache._clear())
            d.addCallback(lambda res: log.msg("starting doomed read"))
            d.addCallback(lambda res:
                          self.shouldFail(NotEnoughSharesError,
                                          "test_retrieve_surprise",
                                          "ran out of peers: have 0 of 1",
                                          self.old_map.best_recoverable_version(),
            # NOTE(review): surrounding lines of this shouldFail call are
            # elided in this extract.
        d.addCallback(_created)

    def test_unexpected_shares(self):
        # upload the file, take a servermap, shut down one of the servers,
        # upload it again (causing shares to appear on a new server), then
        # upload using the old servermap. The last upload should fail with an
        # UncoordinatedWriteError, because of the shares that didn't appear
        self.basedir = "mutable/Problems/test_unexpected_shares"
        # NOTE(review): the set_up_grid() call is elided here.
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): the '_created(n)' callback header is elided above
        # the next line.
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                # NOTE(review): the 'self.old_map = smap' line is elided.
                # now shut down one of the servers
                peer0 = list(smap.make_sharemap()[0])[0]
                self.g.remove_server(peer0)
                # then modify the file, leaving the old map untouched
                log.msg("starting winning write")
                return n.overwrite(MutableData("contents 2"))
            d.addCallback(_got_smap1)
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_surprise", None,
                                          MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)
    test_unexpected_shares.timeout = 15

    def test_bad_server(self):
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server"
        # NOTE(review): the set_up_grid() call is elided here.
        nm = self.g.clients[0].nodemaker
        # to make sure that one of the initial peers is broken, we have to
        # get creative. We create an RSA key and compute its storage-index.
        # Then we make a KeyGenerator that always returns that one key, and
        # use it to create the mutable file. This will get easier when we can
        # use #467 static-server-selection to disable permutation and force
        # the choice of server for share[0].
        d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
        def _got_key( (pubkey, privkey) ):
            nm.key_generator = SameKeyGenerator(pubkey, privkey)
            pubkey_s = pubkey.serialize()
            privkey_s = privkey.serialize()
            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                        ssk_pubkey_fingerprint_hash(pubkey_s))
            self._storage_index = u.get_storage_index()
        d.addCallback(_got_key)
        def _break_peer0(res):
            si = self._storage_index
            servers = nm.storage_broker.get_servers_for_psi(si)
            self.g.break_server(servers[0].get_serverid())
            self.server1 = servers[1]
        d.addCallback(_break_peer0)
        # now "create" the file, using the pre-established key, and let the
        # initial publish finally happen
        d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
        # that ought to work
        # NOTE(review): the '_got_node(n)' callback header is elided above
        # the next line.
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            # now break the second peer
            def _break_peer1(res):
                self.g.break_server(self.server1.get_serverid())
            d.addCallback(_break_peer1)
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # that ought to work too
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
            def _explain_error(f):
                # NOTE(review): a line printing the failure is elided here.
                if f.check(NotEnoughServersError):
                    print "first_error:", f.value.first_error
                # NOTE(review): 'return f' appears elided here.
            d.addErrback(_explain_error)
        d.addCallback(_got_node)

    def test_bad_server_overlap(self):
        # like test_bad_server, but with no extra unused servers to fall back
        # upon. This means that we must re-use a server which we've already
        # used. If we don't remember the fact that we sent them one share
        # already, we'll mistakenly think we're experiencing an
        # UncoordinatedWriteError.
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server_overlap"
        # NOTE(review): the set_up_grid() call is elided here.
        nm = self.g.clients[0].nodemaker
        sb = nm.storage_broker
        peerids = [s.get_serverid() for s in sb.get_connected_servers()]
        self.g.break_server(peerids[0])
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): the '_created(n)' callback header is elided above
        # the next line.
            d = n.download_best_version()
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
            # now break one of the remaining servers
            def _break_second_server(res):
                self.g.break_server(peerids[1])
            d.addCallback(_break_second_server)
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # that ought to work too
            d.addCallback(lambda res: n.download_best_version())
            d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(_created)

    def test_publish_all_servers_bad(self):
        # Break all servers: the publish should fail
        self.basedir = "mutable/Problems/test_publish_all_servers_bad"
        # NOTE(review): the set_up_grid() call is elided here.
        nm = self.g.clients[0].nodemaker
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().broken = True
        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_all_servers_bad",
                            "ran out of good servers",
                            nm.create_mutable_file, MutableData("contents"))
        # NOTE(review): 'return d' appears elided here.

    def test_publish_no_servers(self):
        # no servers at all: the publish should fail
        self.basedir = "mutable/Problems/test_publish_no_servers"
        self.set_up_grid(num_servers=0)
        nm = self.g.clients[0].nodemaker
        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_no_servers",
                            "Ran out of non-bad servers",
                            nm.create_mutable_file, MutableData("contents"))
        # NOTE(review): 'return d' appears elided here.
    test_publish_no_servers.timeout = 30

    def test_privkey_query_error(self):
        # when a servermap is updated with MODE_WRITE, it tries to get the
        # privkey. Something might go wrong during this query attempt.
        # Exercise the code in _privkey_query_failed which tries to handle
        self.basedir = "mutable/Problems/test_privkey_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        nm._node_cache = DevNullDictionary() # disable the nodecache
        # we need some contents that are large enough to push the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 2000 # about 50KB
        LARGE_uploadable = MutableData(LARGE)
        d = nm.create_mutable_file(LARGE_uploadable)
        # NOTE(review): the '_created(n)' callback header is elided above
        # the next line.
            self.uri = n.get_uri()
            self.n2 = nm.create_from_cap(self.uri)
            # When a mapupdate is performed on a node that doesn't yet know
            # the privkey, a short read is sent to a batch of servers, to get
            # the verinfo and (hopefully, if the file is short enough) the
            # encprivkey. Our file is too large to let this first read
            # contain the encprivkey. Each non-encprivkey-bearing response
            # that arrives (until the node gets the encprivkey) will trigger
            # a second read to specifically read the encprivkey.
            # So, to exercise this case:
            # 1. notice which server gets a read() call first
            # 2. tell that server to start throwing errors
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
        d.addCallback(_created)
        # now we update a servermap from a new node (which doesn't have the
        # privkey yet, forcing it to use a separate privkey query). Note that
        # the map-update will succeed, since we'll just get a copy from one
        # of the other shares.
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
        # NOTE(review): 'return d' appears elided here.

    def test_privkey_query_missing(self):
        # like test_privkey_query_error, but the shares are deleted by the
        # second query, instead of raising an exception.
        self.basedir = "mutable/Problems/test_privkey_query_missing"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        LARGE = "These are Larger contents" * 2000 # about 50KiB
        LARGE_uploadable = MutableData(LARGE)
        nm._node_cache = DevNullDictionary() # disable the nodecache
        d = nm.create_mutable_file(LARGE_uploadable)
        # NOTE(review): the '_created(n)' callback header is elided above
        # the next line.
            self.uri = n.get_uri()
            self.n2 = nm.create_from_cap(self.uri)
            deleter = FirstServerGetsDeleted()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = deleter.notify
        d.addCallback(_created)
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
        # NOTE(review): 'return d' appears elided here.

    def test_block_and_hash_query_error(self):
        # This tests for what happens when a query to a remote server
        # fails in either the hash validation step or the block getting
        # step (because of batching, this is the same actual query).
        # We need to have the storage server persist up until the point
        # that its prefix is validated, then suddenly die. This
        # exercises some exception handling code in Retrieve.
        self.basedir = "mutable/Problems/test_block_and_hash_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        CONTENTS = "contents" * 2000
        CONTENTS_uploadable = MutableData(CONTENTS)
        d = nm.create_mutable_file(CONTENTS_uploadable)
        # NOTE(review): the '_created' callback definition (stashing the
        # node on self._node) is elided here.
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self._node.get_servermap(MODE_READ))
        def _then(servermap):
            # we have our servermap. Now we set up the servers like the
            # tests above -- the first one that gets a read call should
            # start throwing errors, but only after returning its prefix
            # for validation. Since we'll download without fetching the
            # private key, the next query to the remote server will be
            # for either a block and salt or for hashes, either of which
            # will exercise the error handling code.
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
            ver = servermap.best_recoverable_version()
            # NOTE(review): an assertion on 'ver' appears elided here.
            return self._node.download_version(servermap, ver)
        d.addCallback(_then)
        d.addCallback(lambda data:
            self.failUnlessEqual(data, CONTENTS))
        # NOTE(review): 'return d' appears elided here.
class FileHandle(unittest.TestCase):
    """Unit tests for MutableFileHandle, the file-like uploadable wrapper."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)


    def test_filehandle_read(self):
        # read() in fixed-size chunks should walk the data sequentially
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            # read() returns a list of strings, not a string
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])


    def test_filehandle_get_size(self):
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)


    def test_filehandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size; this must not move the seek pointer.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])


    def test_filehandle_file(self):
        # Make sure that the MutableFileHandle works on a file as well
        # as a StringIO object, since in some cases it will be asked to
        # deal with files.
        self.basedir = self.mktemp()
        # necessary? What am I doing wrong here?
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        f.close()
        f = open(f_path, "r")

        uploadable = MutableFileHandle(f)

        data = uploadable.read(len(self.test_data))
        self.failUnlessEqual("".join(data), self.test_data)
        size = uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))


    def test_close(self):
        # Make sure that the MutableFileHandle closes its handle when
        # told to do so.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
class DataHandle(unittest.TestCase):
    """Unit tests for MutableData, the in-memory string uploadable."""
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)


    def test_datahandle_read(self):
        # read() in fixed-size chunks should walk the data sequentially
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            # read() returns a list of strings, not a string
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])


    def test_datahandle_get_size(self):
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)


    def test_datahandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size; this must not move the seek pointer.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2921 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
2924 GridTestMixin.setUp(self)
2925 self.basedir = self.mktemp()
2927 self.c = self.g.clients[0]
2928 self.nm = self.c.nodemaker
2929 self.data = "test data" * 100000 # about 900 KiB; MDMF
2930 self.small_data = "test data" * 10 # about 90 B; SDMF
2931 return self.do_upload()
2934 def do_upload(self):
2935 d1 = self.nm.create_mutable_file(MutableData(self.data),
2936 version=MDMF_VERSION)
2937 d2 = self.nm.create_mutable_file(MutableData(self.small_data))
2938 dl = gatherResults([d1, d2])
2939 def _then((n1, n2)):
2940 assert isinstance(n1, MutableFileNode)
2941 assert isinstance(n2, MutableFileNode)
2945 dl.addCallback(_then)
2949 def test_get_readonly_mutable_version(self):
2950 # Attempting to get a mutable version of a mutable file from a
2951 # filenode initialized with a readcap should return a readonly
2952 # version of that same node.
2953 ro = self.mdmf_node.get_readonly()
2954 d = ro.get_best_mutable_version()
2955 d.addCallback(lambda version:
2956 self.failUnless(version.is_readonly()))
2957 d.addCallback(lambda ignored:
2958 self.sdmf_node.get_readonly())
2959 d.addCallback(lambda version:
2960 self.failUnless(version.is_readonly()))
2964 def test_get_sequence_number(self):
2965 d = self.mdmf_node.get_best_readable_version()
2966 d.addCallback(lambda bv:
2967 self.failUnlessEqual(bv.get_sequence_number(), 1))
2968 d.addCallback(lambda ignored:
2969 self.sdmf_node.get_best_readable_version())
2970 d.addCallback(lambda bv:
2971 self.failUnlessEqual(bv.get_sequence_number(), 1))
2972 # Now update. The sequence number in both cases should be 1 in
2974 def _do_update(ignored):
2975 new_data = MutableData("foo bar baz" * 100000)
2976 new_small_data = MutableData("foo bar baz" * 10)
2977 d1 = self.mdmf_node.overwrite(new_data)
2978 d2 = self.sdmf_node.overwrite(new_small_data)
2979 dl = gatherResults([d1, d2])
2981 d.addCallback(_do_update)
2982 d.addCallback(lambda ignored:
2983 self.mdmf_node.get_best_readable_version())
2984 d.addCallback(lambda bv:
2985 self.failUnlessEqual(bv.get_sequence_number(), 2))
2986 d.addCallback(lambda ignored:
2987 self.sdmf_node.get_best_readable_version())
2988 d.addCallback(lambda bv:
2989 self.failUnlessEqual(bv.get_sequence_number(), 2))
2993 def test_version_extension_api(self):
2994 # We need to define an API by which an uploader can set the
2995 # extension parameters, and by which a downloader can retrieve
2997 d = self.mdmf_node.get_best_mutable_version()
2998 def _got_version(version):
2999 hints = version.get_downloader_hints()
3000 # Should be empty at this point.
3001 self.failUnlessIn("k", hints)
3002 self.failUnlessEqual(hints['k'], 3)
3003 self.failUnlessIn('segsize', hints)
3004 self.failUnlessEqual(hints['segsize'], 131073)
3005 d.addCallback(_got_version)
3009 def test_extensions_from_cap(self):
3010 # If we initialize a mutable file with a cap that has extension
3011 # parameters in it and then grab the extension parameters using
3012 # our API, we should see that they're set correctly.
3013 mdmf_uri = self.mdmf_node.get_uri()
3014 new_node = self.nm.create_from_cap(mdmf_uri)
3015 d = new_node.get_best_mutable_version()
3016 def _got_version(version):
3017 hints = version.get_downloader_hints()
3018 self.failUnlessIn("k", hints)
3019 self.failUnlessEqual(hints["k"], 3)
3020 self.failUnlessIn("segsize", hints)
3021 self.failUnlessEqual(hints["segsize"], 131073)
3022 d.addCallback(_got_version)
3026 def test_extensions_from_upload(self):
3027 # If we create a new mutable file with some contents, we should
3028 # get back an MDMF cap with the right hints in place.
3029 contents = "foo bar baz" * 100000
3030 d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3031 def _got_mutable_file(n):
3032 rw_uri = n.get_uri()
3033 expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3034 self.failUnlessIn(expected_k, rw_uri)
3035 # XXX: Get this more intelligently.
3036 self.failUnlessIn("131073", rw_uri)
3038 ro_uri = n.get_readonly_uri()
3039 self.failUnlessIn(expected_k, ro_uri)
3040 self.failUnlessIn("131073", ro_uri)
3041 d.addCallback(_got_mutable_file)
3045 def test_cap_after_upload(self):
3046 # If we create a new mutable file and upload things to it, and
3047 # it's an MDMF file, we should get an MDMF cap back from that
3048 # file and should be able to use that.
3049 # That's essentially what MDMF node is, so just check that.
3050 mdmf_uri = self.mdmf_node.get_uri()
3051 cap = uri.from_string(mdmf_uri)
3052 self.failUnless(isinstance(cap, uri.WritableMDMFFileURI))
3053 readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3054 cap = uri.from_string(readonly_mdmf_uri)
3055 self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3058 def test_get_writekey(self):
3059 d = self.mdmf_node.get_best_mutable_version()
3060 d.addCallback(lambda bv:
3061 self.failUnlessEqual(bv.get_writekey(),
3062 self.mdmf_node.get_writekey()))
3063 d.addCallback(lambda ignored:
3064 self.sdmf_node.get_best_mutable_version())
3065 d.addCallback(lambda bv:
3066 self.failUnlessEqual(bv.get_writekey(),
3067 self.sdmf_node.get_writekey()))
3071 def test_get_storage_index(self):
3072 d = self.mdmf_node.get_best_mutable_version()
3073 d.addCallback(lambda bv:
3074 self.failUnlessEqual(bv.get_storage_index(),
3075 self.mdmf_node.get_storage_index()))
3076 d.addCallback(lambda ignored:
3077 self.sdmf_node.get_best_mutable_version())
3078 d.addCallback(lambda bv:
3079 self.failUnlessEqual(bv.get_storage_index(),
3080 self.sdmf_node.get_storage_index()))
3084 def test_get_readonly_version(self):
3085 d = self.mdmf_node.get_best_readable_version()
3086 d.addCallback(lambda bv:
3087 self.failUnless(bv.is_readonly()))
3088 d.addCallback(lambda ignored:
3089 self.sdmf_node.get_best_readable_version())
3090 d.addCallback(lambda bv:
3091 self.failUnless(bv.is_readonly()))
3095 def test_get_mutable_version(self):
3096 d = self.mdmf_node.get_best_mutable_version()
3097 d.addCallback(lambda bv:
3098 self.failIf(bv.is_readonly()))
3099 d.addCallback(lambda ignored:
3100 self.sdmf_node.get_best_mutable_version())
3101 d.addCallback(lambda bv:
3102 self.failIf(bv.is_readonly()))
3106 def test_toplevel_overwrite(self):
3107 new_data = MutableData("foo bar baz" * 100000)
3108 new_small_data = MutableData("foo bar baz" * 10)
3109 d = self.mdmf_node.overwrite(new_data)
3110 d.addCallback(lambda ignored:
3111 self.mdmf_node.download_best_version())
3112 d.addCallback(lambda data:
3113 self.failUnlessEqual(data, "foo bar baz" * 100000))
3114 d.addCallback(lambda ignored:
3115 self.sdmf_node.overwrite(new_small_data))
3116 d.addCallback(lambda ignored:
3117 self.sdmf_node.download_best_version())
3118 d.addCallback(lambda data:
3119 self.failUnlessEqual(data, "foo bar baz" * 10))
3123 def test_toplevel_modify(self):
3124 def modifier(old_contents, servermap, first_time):
3125 return old_contents + "modified"
3126 d = self.mdmf_node.modify(modifier)
3127 d.addCallback(lambda ignored:
3128 self.mdmf_node.download_best_version())
3129 d.addCallback(lambda data:
3130 self.failUnlessIn("modified", data))
3131 d.addCallback(lambda ignored:
3132 self.sdmf_node.modify(modifier))
3133 d.addCallback(lambda ignored:
3134 self.sdmf_node.download_best_version())
3135 d.addCallback(lambda data:
3136 self.failUnlessIn("modified", data))
3140 def test_version_modify(self):
3141 # TODO: When we can publish multiple versions, alter this test
3142 # to modify a version other than the best usable version, then
3143 # test to see that the best recoverable version is that.
3144 def modifier(old_contents, servermap, first_time):
3145 return old_contents + "modified"
3146 d = self.mdmf_node.modify(modifier)
3147 d.addCallback(lambda ignored:
3148 self.mdmf_node.download_best_version())
3149 d.addCallback(lambda data:
3150 self.failUnlessIn("modified", data))
3151 d.addCallback(lambda ignored:
3152 self.sdmf_node.modify(modifier))
3153 d.addCallback(lambda ignored:
3154 self.sdmf_node.download_best_version())
3155 d.addCallback(lambda data:
3156 self.failUnlessIn("modified", data))
3160 def test_download_version(self):
3161 d = self.publish_multiple()
3162 # We want to have two recoverable versions on the grid.
3163 d.addCallback(lambda res:
3164 self._set_versions({0:0,2:0,4:0,6:0,8:0,
3165 1:1,3:1,5:1,7:1,9:1}))
3166 # Now try to download each version. We should get the plaintext
3167 # associated with that version.
3168 d.addCallback(lambda ignored:
3169 self._fn.get_servermap(mode=MODE_READ))
3170 def _got_servermap(smap):
3171 versions = smap.recoverable_versions()
3172 assert len(versions) == 2
3174 self.servermap = smap
3175 self.version1, self.version2 = versions
3176 assert self.version1 != self.version2
3178 self.version1_seqnum = self.version1[0]
3179 self.version2_seqnum = self.version2[0]
3180 self.version1_index = self.version1_seqnum - 1
3181 self.version2_index = self.version2_seqnum - 1
3183 d.addCallback(_got_servermap)
3184 d.addCallback(lambda ignored:
3185 self._fn.download_version(self.servermap, self.version1))
3186 d.addCallback(lambda results:
3187 self.failUnlessEqual(self.CONTENTS[self.version1_index],
3189 d.addCallback(lambda ignored:
3190 self._fn.download_version(self.servermap, self.version2))
3191 d.addCallback(lambda results:
3192 self.failUnlessEqual(self.CONTENTS[self.version2_index],
3197 def test_download_nonexistent_version(self):
3198 d = self.mdmf_node.get_servermap(mode=MODE_WRITE)
3199 def _set_servermap(servermap):
3200 self.servermap = servermap
3201 d.addCallback(_set_servermap)
3202 d.addCallback(lambda ignored:
3203 self.shouldFail(UnrecoverableFileError, "nonexistent version",
3205 self.mdmf_node.download_version, self.servermap,
3210 def test_partial_read(self):
3211 # read only a few bytes at a time, and see that the results are
3213 d = self.mdmf_node.get_best_readable_version()
3214 def _read_data(version):
3215 c = consumer.MemoryConsumer()
3216 d2 = defer.succeed(None)
3217 for i in xrange(0, len(self.data), 10000):
3218 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3219 d2.addCallback(lambda ignored:
3220 self.failUnlessEqual(self.data, "".join(c.chunks)))
3222 d.addCallback(_read_data)
3225 def test_partial_read_starting_on_segment_boundary(self):
3226 d = self.mdmf_node.get_best_readable_version()
3227 c = consumer.MemoryConsumer()
3228 offset = mathutil.next_multiple(128 * 1024, 3)
3229 d.addCallback(lambda version:
3230 version.read(c, offset, 50))
3231 expected = self.data[offset:offset+50]
3232 d.addCallback(lambda ignored:
3233 self.failUnlessEqual(expected, "".join(c.chunks)))
3236 def test_partial_read_ending_on_segment_boundary(self):
3237 d = self.mdmf_node.get_best_readable_version()
3238 c = consumer.MemoryConsumer()
3239 offset = mathutil.next_multiple(128 * 1024, 3)
3241 d.addCallback(lambda version:
3242 version.read(c, start, 51))
3243 expected = self.data[offset-50:offset+1]
3244 d.addCallback(lambda ignored:
3245 self.failUnlessEqual(expected, "".join(c.chunks)))
3248 def test_read(self):
3249 d = self.mdmf_node.get_best_readable_version()
3250 def _read_data(version):
3251 c = consumer.MemoryConsumer()
3252 d2 = defer.succeed(None)
3253 d2.addCallback(lambda ignored: version.read(c))
3254 d2.addCallback(lambda ignored:
3255 self.failUnlessEqual("".join(c.chunks), self.data))
3257 d.addCallback(_read_data)
3261 def test_download_best_version(self):
3262 d = self.mdmf_node.download_best_version()
3263 d.addCallback(lambda data:
3264 self.failUnlessEqual(data, self.data))
3265 d.addCallback(lambda ignored:
3266 self.sdmf_node.download_best_version())
3267 d.addCallback(lambda data:
3268 self.failUnlessEqual(data, self.small_data))
3272 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3274 GridTestMixin.setUp(self)
3275 self.basedir = self.mktemp()
3277 self.c = self.g.clients[0]
3278 self.nm = self.c.nodemaker
3279 self.data = "testdata " * 100000 # about 900 KiB; MDMF
3280 self.small_data = "test data" * 10 # about 90 B; SDMF
3281 return self.do_upload()
3284 def do_upload(self):
3285 d1 = self.nm.create_mutable_file(MutableData(self.data),
3286 version=MDMF_VERSION)
3287 d2 = self.nm.create_mutable_file(MutableData(self.small_data))
3288 dl = gatherResults([d1, d2])
3289 def _then((n1, n2)):
3290 assert isinstance(n1, MutableFileNode)
3291 assert isinstance(n2, MutableFileNode)
3295 dl.addCallback(_then)
3296 # Make SDMF and MDMF mutable file nodes that have 255 shares.
3297 def _make_max_shares(ign):
3298 self.nm.default_encoding_parameters['n'] = 255
3299 self.nm.default_encoding_parameters['k'] = 127
3300 d1 = self.nm.create_mutable_file(MutableData(self.data),
3301 version=MDMF_VERSION)
3303 self.nm.create_mutable_file(MutableData(self.small_data))
3304 return gatherResults([d1, d2])
3305 dl.addCallback(_make_max_shares)
3306 def _stash((n1, n2)):
3307 assert isinstance(n1, MutableFileNode)
3308 assert isinstance(n2, MutableFileNode)
3310 self.mdmf_max_shares_node = n1
3311 self.sdmf_max_shares_node = n2
3312 dl.addCallback(_stash)
3315 def test_append(self):
3316 # We should be able to append data to the middle of a mutable
3317 # file and get what we expect.
3318 new_data = self.data + "appended"
3319 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3320 d = node.get_best_mutable_version()
3321 d.addCallback(lambda mv:
3322 mv.update(MutableData("appended"), len(self.data)))
3323 d.addCallback(lambda ignored, node=node:
3324 node.download_best_version())
3325 d.addCallback(lambda results:
3326 self.failUnlessEqual(results, new_data))
3329 def test_replace(self):
3330 # We should be able to replace data in the middle of a mutable
3331 # file and get what we expect back.
3332 new_data = self.data[:100]
3333 new_data += "appended"
3334 new_data += self.data[108:]
3335 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3336 d = node.get_best_mutable_version()
3337 d.addCallback(lambda mv:
3338 mv.update(MutableData("appended"), 100))
3339 d.addCallback(lambda ignored, node=node:
3340 node.download_best_version())
3341 d.addCallback(lambda results:
3342 self.failUnlessEqual(results, new_data))
3345 def test_replace_beginning(self):
3346 # We should be able to replace data at the beginning of the file
3347 # without truncating the file
3349 new_data = B + self.data[len(B):]
3350 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3351 d = node.get_best_mutable_version()
3352 d.addCallback(lambda mv: mv.update(MutableData(B), 0))
3353 d.addCallback(lambda ignored, node=node:
3354 node.download_best_version())
3355 d.addCallback(lambda results: self.failUnlessEqual(results, new_data))
3358 def test_replace_segstart1(self):
3361 expected = self.data[:offset]+new_data+self.data[offset+4:]
3362 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3363 d = node.get_best_mutable_version()
3364 d.addCallback(lambda mv:
3365 mv.update(MutableData(new_data), offset))
3366 # close around node.
3367 d.addCallback(lambda ignored, node=node:
3368 node.download_best_version())
3369 def _check(results):
3370 if results != expected:
3372 print "got: %s ... %s" % (results[:20], results[-20:])
3373 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3374 self.fail("results != expected")
3375 d.addCallback(_check)
3378 def _check_differences(self, got, expected):
3379 # displaying arbitrary file corruption is tricky for a
3380 # 1MB file of repeating data,, so look for likely places
3381 # with problems and display them separately
3382 gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3383 expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3384 gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3385 for (start,end) in gotmods]
3386 expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3387 for (start,end) in expmods]
3388 #print "expecting: %s" % expspans
3392 print "differences:"
3393 for segnum in range(len(expected)//SEGSIZE):
3394 start = segnum * SEGSIZE
3395 end = (segnum+1) * SEGSIZE
3396 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3397 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3398 if got_ends != exp_ends:
3399 print "expected[%d]: %s" % (start, exp_ends)
3400 print "got [%d]: %s" % (start, got_ends)
3401 if expspans != gotspans:
3402 print "expected: %s" % expspans
3403 print "got : %s" % gotspans
3404 open("EXPECTED","wb").write(expected)
3405 open("GOT","wb").write(got)
3406 print "wrote data to EXPECTED and GOT"
3407 self.fail("didn't get expected data")
3410 def test_replace_locations(self):
3411 # exercise fencepost conditions
3412 expected = self.data
3414 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3415 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3416 d = defer.succeed(None)
3417 for offset in suspects:
3418 new_data = letters.next()*2 # "AA", then "BB", etc
3419 expected = expected[:offset]+new_data+expected[offset+2:]
3420 d.addCallback(lambda ign:
3421 self.mdmf_node.get_best_mutable_version())
3422 def _modify(mv, offset=offset, new_data=new_data):
3423 # close over 'offset','new_data'
3424 md = MutableData(new_data)
3425 return mv.update(md, offset)
3426 d.addCallback(_modify)
3427 d.addCallback(lambda ignored:
3428 self.mdmf_node.download_best_version())
3429 d.addCallback(self._check_differences, expected)
3432 def test_replace_locations_max_shares(self):
3433 # exercise fencepost conditions
3434 expected = self.data
3436 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3437 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3438 d = defer.succeed(None)
3439 for offset in suspects:
3440 new_data = letters.next()*2 # "AA", then "BB", etc
3441 expected = expected[:offset]+new_data+expected[offset+2:]
3442 d.addCallback(lambda ign:
3443 self.mdmf_max_shares_node.get_best_mutable_version())
3444 def _modify(mv, offset=offset, new_data=new_data):
3445 # close over 'offset','new_data'
3446 md = MutableData(new_data)
3447 return mv.update(md, offset)
3448 d.addCallback(_modify)
3449 d.addCallback(lambda ignored:
3450 self.mdmf_max_shares_node.download_best_version())
3451 d.addCallback(self._check_differences, expected)
3454 def test_replace_and_extend(self):
3455 # We should be able to replace data in the middle of a mutable
3456 # file and extend that mutable file and get what we expect.
3457 new_data = self.data[:100]
3458 new_data += "modified " * 100000
3459 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3460 d = node.get_best_mutable_version()
3461 d.addCallback(lambda mv:
3462 mv.update(MutableData("modified " * 100000), 100))
3463 d.addCallback(lambda ignored, node=node:
3464 node.download_best_version())
3465 d.addCallback(lambda results:
3466 self.failUnlessEqual(results, new_data))
3470 def test_append_power_of_two(self):
3471 # If we attempt to extend a mutable file so that its segment
3472 # count crosses a power-of-two boundary, the update operation
3473 # should know how to reencode the file.
3475 # Note that the data populating self.mdmf_node is about 900 KiB
3476 # long -- this is 7 segments in the default segment size. So we
3477 # need to add 2 segments worth of data to push it over a
3478 # power-of-two boundary.
3479 segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3480 new_data = self.data + (segment * 2)
3481 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3482 d = node.get_best_mutable_version()
3483 d.addCallback(lambda mv:
3484 mv.update(MutableData(segment * 2), len(self.data)))
3485 d.addCallback(lambda ignored, node=node:
3486 node.download_best_version())
3487 d.addCallback(lambda results:
3488 self.failUnlessEqual(results, new_data))
3490 test_append_power_of_two.timeout = 15
3493 def test_update_sdmf(self):
3494 # Running update on a single-segment file should still work.
3495 new_data = self.small_data + "appended"
3496 for node in (self.sdmf_node, self.sdmf_max_shares_node):
3497 d = node.get_best_mutable_version()
3498 d.addCallback(lambda mv:
3499 mv.update(MutableData("appended"), len(self.small_data)))
3500 d.addCallback(lambda ignored, node=node:
3501 node.download_best_version())
3502 d.addCallback(lambda results:
3503 self.failUnlessEqual(results, new_data))
3506 def test_replace_in_last_segment(self):
3507 # The wrapper should know how to handle the tail segment
3509 replace_offset = len(self.data) - 100
3510 new_data = self.data[:replace_offset] + "replaced"
3511 rest_offset = replace_offset + len("replaced")
3512 new_data += self.data[rest_offset:]
3513 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3514 d = node.get_best_mutable_version()
3515 d.addCallback(lambda mv:
3516 mv.update(MutableData("replaced"), replace_offset))
3517 d.addCallback(lambda ignored, node=node:
3518 node.download_best_version())
3519 d.addCallback(lambda results:
3520 self.failUnlessEqual(results, new_data))
3524 def test_multiple_segment_replace(self):
3525 replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3526 new_data = self.data[:replace_offset]
3527 new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3528 new_data += 2 * new_segment
3529 new_data += "replaced"
3530 rest_offset = len(new_data)
3531 new_data += self.data[rest_offset:]
3532 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3533 d = node.get_best_mutable_version()
3534 d.addCallback(lambda mv:
3535 mv.update(MutableData((2 * new_segment) + "replaced"),
3537 d.addCallback(lambda ignored, node=node:
3538 node.download_best_version())
3539 d.addCallback(lambda results:
3540 self.failUnlessEqual(results, new_data))
3543 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3544 sdmf_old_shares = {}
3545 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3546 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3547 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3548 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3549 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3550 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3551 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3552 sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3553 sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3554 sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3555 sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3556 sdmf_old_contents = "This is a test file.\n"
def copy_sdmf_shares(self):
    """Plant the canned SDMF-v1 shares directly into the grid's storage.

    Short-circuits the normal upload path: assigns one pre-generated
    share to each of the 10 no-network storage servers, base64-decodes
    it, and writes it straight into that server's on-disk share
    directory for the old cap's storage index.  Finishes by asserting
    that all 10 shares are now discoverable via find_uri_shares().
    """
    # We'll basically be short-circuiting the upload process.
    servernums = self.g.servers_by_number.keys()
    # The canned data provides exactly one share per server.
    assert len(servernums) == 10

    # Pair each share number with a distinct server.
    assignments = zip(self.sdmf_old_shares.keys(), servernums)
    # Get the storage index: it determines the on-disk subdirectory
    # under each server's "shares" directory.
    cap = uri.from_string(self.sdmf_old_cap)
    si = cap.get_storage_index()

    # Now execute each assignment by writing the storage.
    for (share, servernum) in assignments:
        sharedata = base64.b64decode(self.sdmf_old_shares[share])
        storedir = self.get_serverdir(servernum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        # BUGFIX: the write call was missing its data argument, so the
        # decoded share bytes were never written; pass sharedata.
        fileutil.write(os.path.join(storage_path, "%d" % share),
                       sharedata)
    # ...and verify that the shares are there.
    shares = self.find_uri_shares(self.sdmf_old_cap)
    assert len(shares) == 10
3580 def test_new_downloader_can_read_old_shares(self):
3581 self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
3583 self.copy_sdmf_shares()
3584 nm = self.g.clients[0].nodemaker
3585 n = nm.create_from_cap(self.sdmf_old_cap)
3586 d = n.download_best_version()
3587 d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)