3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from twisted.internet.interfaces import IConsumer
7 from zope.interface import implements
8 from allmydata import uri, client
9 from allmydata.nodemaker import NodeMaker
10 from allmydata.util import base32, consumer, fileutil, mathutil
11 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
12 ssk_pubkey_fingerprint_hash
13 from allmydata.util.deferredutil import gatherResults
14 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
15 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
16 from allmydata.monitor import Monitor
17 from allmydata.test.common import ShouldFailMixin
18 from allmydata.test.no_network import GridTestMixin
19 from foolscap.api import eventually, fireEventually
20 from foolscap.logging import log
21 from allmydata.storage_client import StorageFarmBroker
22 from allmydata.storage.common import storage_index_to_dir
23 from allmydata.scripts import debug
25 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
26 from allmydata.mutable.common import ResponseCache, \
27 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
28 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
29 NotEnoughServersError, CorruptShareError
30 from allmydata.mutable.retrieve import Retrieve
31 from allmydata.mutable.publish import Publish, MutableFileHandle, \
33 DEFAULT_MAX_SEGMENT_SIZE
34 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
35 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
36 from allmydata.mutable.repairer import MustForceRepairError
38 import allmydata.test.common_util as testutil
39 from allmydata.test.common import TEST_RSA_KEY_SIZE
42 # this "FakeStorage" exists to put the share data in RAM and avoid using real
43 # network connections, both to speed up the tests and to reduce the amount of
44 # non-mutable.py code being exercised.
47 # this class replaces the collection of storage servers, allowing the
48 # tests to examine and manipulate the published shares. It also lets us
49 # control the order in which read queries are answered, to exercise more
50 # of the error-handling code in Retrieve.
52 # Note that we ignore the storage index: this FakeStorage instance can
53 # only be used for a single storage index.
58 # _sequence is used to cause the responses to occur in a specific
59 # order. If it is in use, then we will defer queries instead of
60 # answering them right away, accumulating the Deferreds in a dict. We
61 # don't know exactly how many queries we'll get, so exactly one
62 # second after the first query arrives, we will release them all (in
66 self._pending_timer = None
68 def read(self, peerid, storage_index):
69 shares = self._peers.get(peerid, {})
70 if self._sequence is None:
71 return defer.succeed(shares)
74 self._pending_timer = reactor.callLater(1.0, self._fire_readers)
75 self._pending[peerid] = (d, shares)
78 def _fire_readers(self):
79 self._pending_timer = None
80 pending = self._pending
82 for peerid in self._sequence:
84 d, shares = pending.pop(peerid)
85 eventually(d.callback, shares)
86 for (d, shares) in pending.values():
87 eventually(d.callback, shares)
89 def write(self, peerid, storage_index, shnum, offset, data):
90 if peerid not in self._peers:
91 self._peers[peerid] = {}
92 shares = self._peers[peerid]
94 f.write(shares.get(shnum, ""))
97 shares[shnum] = f.getvalue()
100 class FakeStorageServer:
101 def __init__(self, peerid, storage):
103 self.storage = storage
105 def callRemote(self, methname, *args, **kwargs):
108 meth = getattr(self, methname)
109 return meth(*args, **kwargs)
111 d.addCallback(lambda res: _call())
114 def callRemoteOnly(self, methname, *args, **kwargs):
116 d = self.callRemote(methname, *args, **kwargs)
117 d.addBoth(lambda ignore: None)
120 def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
123 def slot_readv(self, storage_index, shnums, readv):
124 d = self.storage.read(self.peerid, storage_index)
128 if shnums and shnum not in shnums:
130 vector = response[shnum] = []
131 for (offset, length) in readv:
132 assert isinstance(offset, (int, long)), offset
133 assert isinstance(length, (int, long)), length
134 vector.append(shares[shnum][offset:offset+length])
139 def slot_testv_and_readv_and_writev(self, storage_index, secrets,
140 tw_vectors, read_vector):
141 # always-pass: parrot the test vectors back to them.
143 for shnum, (testv, writev, new_length) in tw_vectors.items():
144 for (offset, length, op, specimen) in testv:
145 assert op in ("le", "eq", "ge")
146 # TODO: this isn't right, the read is controlled by read_vector,
148 readv[shnum] = [ specimen
149 for (offset, length, op, specimen)
151 for (offset, data) in writev:
152 self.storage.write(self.peerid, storage_index, shnum,
154 answer = (True, readv)
155 return fireEventually(answer)
def flip_bit(original, byte_offset):
    """Return a copy of *original* with the low bit of the byte at
    *byte_offset* inverted. Used by the tests to deliberately corrupt
    share data."""
    corrupted_char = chr(ord(original[byte_offset]) ^ 0x01)
    return "%s%s%s" % (original[:byte_offset],
                       corrupted_char,
                       original[byte_offset+1:])
def add_two(original, byte_offset):
    """Return a copy of *original* with bit 1 of the byte at *byte_offset*
    flipped (XOR with 0x02).

    It isn't enough to simply flip the low bit of a version number,
    because 1 is a valid version number. For the version-byte values this
    is used on (0 and 1), XOR-ing with 0x02 is equivalent to adding two,
    which is where the name comes from — but note the operation is a bit
    flip, not arithmetic addition, for arbitrary byte values.
    """
    return (original[:byte_offset] +
            chr(ord(original[byte_offset]) ^ 0x02) +
            original[byte_offset+1:])
170 def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
171 # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
172 # list of shnums to corrupt.
174 for peerid in s._peers:
175 shares = s._peers[peerid]
177 if (shnums_to_corrupt is not None
178 and shnum not in shnums_to_corrupt):
181 # We're feeding the reader all of the share data, so it
182 # won't need to use the rref that we didn't provide, nor the
183 # storage index that we didn't provide. We do this because
184 # the reader will work for both MDMF and SDMF.
185 reader = MDMFSlotReadProxy(None, None, shnum, data)
186 # We need to get the offsets for the next part.
187 d = reader.get_verinfo()
188 def _do_corruption(verinfo, data, shnum):
194 k, n, prefix, o) = verinfo
195 if isinstance(offset, tuple):
196 offset1, offset2 = offset
200 if offset1 == "pubkey" and IV:
203 real_offset = o[offset1]
205 real_offset = offset1
206 real_offset = int(real_offset) + offset2 + offset_offset
207 assert isinstance(real_offset, int), offset
208 if offset1 == 0: # verbyte
212 shares[shnum] = f(data, real_offset)
213 d.addCallback(_do_corruption, data, shnum)
215 dl = defer.DeferredList(ds)
216 dl.addCallback(lambda ignored: res)
219 def make_storagebroker(s=None, num_peers=10):
222 peerids = [tagged_hash("peerid", "%d" % i)[:20]
223 for i in range(num_peers)]
224 storage_broker = StorageFarmBroker(None, True)
225 for peerid in peerids:
226 fss = FakeStorageServer(peerid, s)
227 storage_broker.test_add_rref(peerid, fss)
228 return storage_broker
230 def make_nodemaker(s=None, num_peers=10):
231 storage_broker = make_storagebroker(s, num_peers)
232 sh = client.SecretHolder("lease secret", "convergence secret")
233 keygen = client.KeyGenerator()
234 keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
235 nodemaker = NodeMaker(storage_broker, sh, None,
237 {"k": 3, "n": 10}, keygen)
240 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
241 # this used to be in Publish, but we removed the limit. Some of
242 # these tests test whether the new code correctly allows files
243 # larger than the limit.
244 OLD_MAX_SEGMENT_SIZE = 3500000
246 self._storage = s = FakeStorage()
247 self.nodemaker = make_nodemaker(s)
249 def test_create(self):
250 d = self.nodemaker.create_mutable_file()
252 self.failUnless(isinstance(n, MutableFileNode))
253 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
254 sb = self.nodemaker.storage_broker
255 peer0 = sorted(sb.get_all_serverids())[0]
256 shnums = self._storage._peers[peer0].keys()
257 self.failUnlessEqual(len(shnums), 1)
258 d.addCallback(_created)
262 def test_create_mdmf(self):
263 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
265 self.failUnless(isinstance(n, MutableFileNode))
266 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
267 sb = self.nodemaker.storage_broker
268 peer0 = sorted(sb.get_all_serverids())[0]
269 shnums = self._storage._peers[peer0].keys()
270 self.failUnlessEqual(len(shnums), 1)
271 d.addCallback(_created)
274 def test_single_share(self):
275 # Make sure that we tolerate publishing a single share.
276 self.nodemaker.default_encoding_parameters['k'] = 1
277 self.nodemaker.default_encoding_parameters['happy'] = 1
278 self.nodemaker.default_encoding_parameters['n'] = 1
279 d = defer.succeed(None)
280 for v in (SDMF_VERSION, MDMF_VERSION):
281 d.addCallback(lambda ignored:
282 self.nodemaker.create_mutable_file(version=v))
284 self.failUnless(isinstance(n, MutableFileNode))
287 d.addCallback(_created)
288 d.addCallback(lambda n:
289 n.overwrite(MutableData("Contents" * 50000)))
290 d.addCallback(lambda ignored:
291 self._node.download_best_version())
292 d.addCallback(lambda contents:
293 self.failUnlessEqual(contents, "Contents" * 50000))
296 def test_max_shares(self):
297 self.nodemaker.default_encoding_parameters['n'] = 255
298 d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
300 self.failUnless(isinstance(n, MutableFileNode))
301 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
302 sb = self.nodemaker.storage_broker
303 num_shares = sum([len(self._storage._peers[x].keys()) for x \
304 in sb.get_all_serverids()])
305 self.failUnlessEqual(num_shares, 255)
308 d.addCallback(_created)
309 # Now we upload some contents
310 d.addCallback(lambda n:
311 n.overwrite(MutableData("contents" * 50000)))
312 # ...then download contents
313 d.addCallback(lambda ignored:
314 self._node.download_best_version())
315 # ...and check to make sure everything went okay.
316 d.addCallback(lambda contents:
317 self.failUnlessEqual("contents" * 50000, contents))
320 def test_max_shares_mdmf(self):
321 # Test how files behave when there are 255 shares.
322 self.nodemaker.default_encoding_parameters['n'] = 255
323 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
325 self.failUnless(isinstance(n, MutableFileNode))
326 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
327 sb = self.nodemaker.storage_broker
328 num_shares = sum([len(self._storage._peers[x].keys()) for x \
329 in sb.get_all_serverids()])
330 self.failUnlessEqual(num_shares, 255)
333 d.addCallback(_created)
334 d.addCallback(lambda n:
335 n.overwrite(MutableData("contents" * 50000)))
336 d.addCallback(lambda ignored:
337 self._node.download_best_version())
338 d.addCallback(lambda contents:
339 self.failUnlessEqual(contents, "contents" * 50000))
342 def test_mdmf_filenode_cap(self):
343 # Test that an MDMF filenode, once created, returns an MDMF URI.
344 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
346 self.failUnless(isinstance(n, MutableFileNode))
348 self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
349 rcap = n.get_readcap()
350 self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
351 vcap = n.get_verify_cap()
352 self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
353 d.addCallback(_created)
357 def test_create_from_mdmf_writecap(self):
358 # Test that the nodemaker is capable of creating an MDMF
359 # filenode given an MDMF cap.
360 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
362 self.failUnless(isinstance(n, MutableFileNode))
364 self.failUnless(s.startswith("URI:MDMF"))
365 n2 = self.nodemaker.create_from_cap(s)
366 self.failUnless(isinstance(n2, MutableFileNode))
367 self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
368 self.failUnlessEqual(n.get_uri(), n2.get_uri())
369 d.addCallback(_created)
373 def test_create_from_mdmf_writecap_with_extensions(self):
374 # Test that the nodemaker is capable of creating an MDMF
375 # filenode when given a writecap with extension parameters in
377 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
379 self.failUnless(isinstance(n, MutableFileNode))
381 # We need to cheat a little and delete the nodemaker's
382 # cache, otherwise we'll get the same node instance back.
383 self.failUnlessIn(":3:131073", s)
384 n2 = self.nodemaker.create_from_cap(s)
386 self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
387 self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
388 hints = n2._downloader_hints
389 self.failUnlessEqual(hints['k'], 3)
390 self.failUnlessEqual(hints['segsize'], 131073)
391 d.addCallback(_created)
395 def test_create_from_mdmf_readcap(self):
396 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
398 self.failUnless(isinstance(n, MutableFileNode))
399 s = n.get_readonly_uri()
400 n2 = self.nodemaker.create_from_cap(s)
401 self.failUnless(isinstance(n2, MutableFileNode))
403 # Check that it's a readonly node
404 self.failUnless(n2.is_readonly())
405 d.addCallback(_created)
409 def test_create_from_mdmf_readcap_with_extensions(self):
410 # We should be able to create an MDMF filenode with the
411 # extension parameters without it breaking.
412 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
414 self.failUnless(isinstance(n, MutableFileNode))
415 s = n.get_readonly_uri()
416 self.failUnlessIn(":3:131073", s)
418 n2 = self.nodemaker.create_from_cap(s)
419 self.failUnless(isinstance(n2, MutableFileNode))
420 self.failUnless(n2.is_readonly())
421 self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
422 hints = n2._downloader_hints
423 self.failUnlessEqual(hints["k"], 3)
424 self.failUnlessEqual(hints["segsize"], 131073)
425 d.addCallback(_created)
429 def test_internal_version_from_cap(self):
430 # MutableFileNodes and MutableFileVersions have an internal
431 # switch that tells them whether they're dealing with an SDMF or
432 # MDMF mutable file when they start doing stuff. We want to make
433 # sure that this is set appropriately given an MDMF cap.
434 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
436 self.uri = n.get_uri()
437 self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
439 n2 = self.nodemaker.create_from_cap(self.uri)
440 self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
441 d.addCallback(_created)
445 def test_serialize(self):
446 n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
448 def _callback(*args, **kwargs):
449 self.failUnlessEqual(args, (4,) )
450 self.failUnlessEqual(kwargs, {"foo": 5})
453 d = n._do_serialized(_callback, 4, foo=5)
454 def _check_callback(res):
455 self.failUnlessEqual(res, 6)
456 self.failUnlessEqual(calls, [1])
457 d.addCallback(_check_callback)
460 raise ValueError("heya")
461 d.addCallback(lambda res:
462 self.shouldFail(ValueError, "_check_errback", "heya",
463 n._do_serialized, _errback))
466 def test_upload_and_download(self):
467 d = self.nodemaker.create_mutable_file()
469 d = defer.succeed(None)
470 d.addCallback(lambda res: n.get_servermap(MODE_READ))
471 d.addCallback(lambda smap: smap.dump(StringIO()))
472 d.addCallback(lambda sio:
473 self.failUnless("3-of-10" in sio.getvalue()))
474 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
475 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
476 d.addCallback(lambda res: n.download_best_version())
477 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
478 d.addCallback(lambda res: n.get_size_of_best_version())
479 d.addCallback(lambda size:
480 self.failUnlessEqual(size, len("contents 1")))
481 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
482 d.addCallback(lambda res: n.download_best_version())
483 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
484 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
485 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
486 d.addCallback(lambda res: n.download_best_version())
487 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
488 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
489 d.addCallback(lambda smap:
490 n.download_version(smap,
491 smap.best_recoverable_version()))
492 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
493 # test a file that is large enough to overcome the
494 # mapupdate-to-retrieve data caching (i.e. make the shares larger
495 # than the default readsize, which is 2000 bytes). A 15kB file
496 # will have 5kB shares.
497 d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
498 d.addCallback(lambda res: n.download_best_version())
499 d.addCallback(lambda res:
500 self.failUnlessEqual(res, "large size file" * 1000))
502 d.addCallback(_created)
506 def test_upload_and_download_mdmf(self):
507 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
509 d = defer.succeed(None)
510 d.addCallback(lambda ignored:
511 n.get_servermap(MODE_READ))
512 def _then(servermap):
513 dumped = servermap.dump(StringIO())
514 self.failUnlessIn("3-of-10", dumped.getvalue())
516 # Now overwrite the contents with some new contents. We want
517 # to make them big enough to force the file to be uploaded
518 # in more than one segment.
519 big_contents = "contents1" * 100000 # about 900 KiB
520 big_contents_uploadable = MutableData(big_contents)
521 d.addCallback(lambda ignored:
522 n.overwrite(big_contents_uploadable))
523 d.addCallback(lambda ignored:
524 n.download_best_version())
525 d.addCallback(lambda data:
526 self.failUnlessEqual(data, big_contents))
527 # Overwrite the contents again with some new contents. As
528 # before, they need to be big enough to force multiple
529 # segments, so that we make the downloader deal with
531 bigger_contents = "contents2" * 1000000 # about 9MiB
532 bigger_contents_uploadable = MutableData(bigger_contents)
533 d.addCallback(lambda ignored:
534 n.overwrite(bigger_contents_uploadable))
535 d.addCallback(lambda ignored:
536 n.download_best_version())
537 d.addCallback(lambda data:
538 self.failUnlessEqual(data, bigger_contents))
540 d.addCallback(_created)
544 def test_retrieve_pause(self):
545 # We should make sure that the retriever is able to pause
547 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
551 return node.overwrite(MutableData("contents1" * 100000))
552 d.addCallback(_created)
553 # Now we'll retrieve it into a pausing consumer.
554 d.addCallback(lambda ignored:
555 self.node.get_best_mutable_version())
556 def _got_version(version):
557 self.c = PausingConsumer()
558 return version.read(self.c)
559 d.addCallback(_got_version)
560 d.addCallback(lambda ignored:
561 self.failUnlessEqual(self.c.data, "contents1" * 100000))
565 def test_download_from_mdmf_cap(self):
566 # We should be able to download an MDMF file given its cap
567 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
569 self.uri = node.get_uri()
571 return node.overwrite(MutableData("contents1" * 100000))
573 node = self.nodemaker.create_from_cap(self.uri)
574 return node.download_best_version()
575 def _downloaded(data):
576 self.failUnlessEqual(data, "contents1" * 100000)
577 d.addCallback(_created)
579 d.addCallback(_downloaded)
583 def test_create_and_download_from_bare_mdmf_cap(self):
584 # MDMF caps have extension parameters on them by default. We
585 # need to make sure that they work without extension parameters.
586 contents = MutableData("contents" * 100000)
587 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
592 self.failUnlessIn(":3:131073", uri)
593 # Now strip that off the end of the uri, then try creating
594 # and downloading the node again.
595 bare_uri = uri.replace(":3:131073", "")
596 assert ":3:131073" not in bare_uri
598 return self.nodemaker.create_from_cap(bare_uri)
599 d.addCallback(_created)
600 def _created_bare(node):
601 self.failUnlessEqual(node.get_writekey(),
602 self._created.get_writekey())
603 self.failUnlessEqual(node.get_readkey(),
604 self._created.get_readkey())
605 self.failUnlessEqual(node.get_storage_index(),
606 self._created.get_storage_index())
607 return node.download_best_version()
608 d.addCallback(_created_bare)
609 d.addCallback(lambda data:
610 self.failUnlessEqual(data, "contents" * 100000))
614 def test_mdmf_write_count(self):
615 # Publishing an MDMF file should only cause one write for each
616 # share that is to be published. Otherwise, we introduce
617 # undesirable semantics that are a regression from SDMF
618 upload = MutableData("MDMF" * 100000) # about 400 KiB
619 d = self.nodemaker.create_mutable_file(upload,
620 version=MDMF_VERSION)
621 def _check_server_write_counts(ignored):
622 sb = self.nodemaker.storage_broker
623 for server in sb.servers.itervalues():
624 self.failUnlessEqual(server.get_rref().queries, 1)
625 d.addCallback(_check_server_write_counts)
629 def test_create_with_initial_contents(self):
630 upload1 = MutableData("contents 1")
631 d = self.nodemaker.create_mutable_file(upload1)
633 d = n.download_best_version()
634 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
635 upload2 = MutableData("contents 2")
636 d.addCallback(lambda res: n.overwrite(upload2))
637 d.addCallback(lambda res: n.download_best_version())
638 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
640 d.addCallback(_created)
644 def test_create_mdmf_with_initial_contents(self):
645 initial_contents = "foobarbaz" * 131072 # 900KiB
646 initial_contents_uploadable = MutableData(initial_contents)
647 d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
648 version=MDMF_VERSION)
650 d = n.download_best_version()
651 d.addCallback(lambda data:
652 self.failUnlessEqual(data, initial_contents))
653 uploadable2 = MutableData(initial_contents + "foobarbaz")
654 d.addCallback(lambda ignored:
655 n.overwrite(uploadable2))
656 d.addCallback(lambda ignored:
657 n.download_best_version())
658 d.addCallback(lambda data:
659 self.failUnlessEqual(data, initial_contents +
662 d.addCallback(_created)
666 def test_response_cache_memory_leak(self):
667 d = self.nodemaker.create_mutable_file("contents")
669 d = n.download_best_version()
670 d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
671 d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
673 def _check_cache(expected):
674 # The total size of cache entries should not increase on the second download;
675 # in fact the cache contents should be identical.
676 d2 = n.download_best_version()
677 d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
679 d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
681 d.addCallback(_created)
684 def test_create_with_initial_contents_function(self):
685 data = "initial contents"
686 def _make_contents(n):
687 self.failUnless(isinstance(n, MutableFileNode))
688 key = n.get_writekey()
689 self.failUnless(isinstance(key, str), key)
690 self.failUnlessEqual(len(key), 16) # AES key size
691 return MutableData(data)
692 d = self.nodemaker.create_mutable_file(_make_contents)
694 return n.download_best_version()
695 d.addCallback(_created)
696 d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
700 def test_create_mdmf_with_initial_contents_function(self):
701 data = "initial contents" * 100000
702 def _make_contents(n):
703 self.failUnless(isinstance(n, MutableFileNode))
704 key = n.get_writekey()
705 self.failUnless(isinstance(key, str), key)
706 self.failUnlessEqual(len(key), 16)
707 return MutableData(data)
708 d = self.nodemaker.create_mutable_file(_make_contents,
709 version=MDMF_VERSION)
710 d.addCallback(lambda n:
711 n.download_best_version())
712 d.addCallback(lambda data2:
713 self.failUnlessEqual(data2, data))
717 def test_create_with_too_large_contents(self):
718 BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
719 BIG_uploadable = MutableData(BIG)
720 d = self.nodemaker.create_mutable_file(BIG_uploadable)
722 other_BIG_uploadable = MutableData(BIG)
723 d = n.overwrite(other_BIG_uploadable)
725 d.addCallback(_created)
728 def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
729 d = n.get_servermap(MODE_READ)
730 d.addCallback(lambda servermap: servermap.best_recoverable_version())
731 d.addCallback(lambda verinfo:
732 self.failUnlessEqual(verinfo[0], expected_seqnum, which))
735 def test_modify(self):
736 def _modifier(old_contents, servermap, first_time):
737 new_contents = old_contents + "line2"
739 def _non_modifier(old_contents, servermap, first_time):
741 def _none_modifier(old_contents, servermap, first_time):
743 def _error_modifier(old_contents, servermap, first_time):
744 raise ValueError("oops")
745 def _toobig_modifier(old_contents, servermap, first_time):
746 new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
749 def _ucw_error_modifier(old_contents, servermap, first_time):
750 # simulate an UncoordinatedWriteError once
753 raise UncoordinatedWriteError("simulated")
754 new_contents = old_contents + "line3"
756 def _ucw_error_non_modifier(old_contents, servermap, first_time):
757 # simulate an UncoordinatedWriteError once, and don't actually
758 # modify the contents on subsequent invocations
761 raise UncoordinatedWriteError("simulated")
764 initial_contents = "line1"
765 d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
767 d = n.modify(_modifier)
768 d.addCallback(lambda res: n.download_best_version())
769 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
770 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
772 d.addCallback(lambda res: n.modify(_non_modifier))
773 d.addCallback(lambda res: n.download_best_version())
774 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
775 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
777 d.addCallback(lambda res: n.modify(_none_modifier))
778 d.addCallback(lambda res: n.download_best_version())
779 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
780 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
782 d.addCallback(lambda res:
783 self.shouldFail(ValueError, "error_modifier", None,
784 n.modify, _error_modifier))
785 d.addCallback(lambda res: n.download_best_version())
786 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
787 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
790 d.addCallback(lambda res: n.download_best_version())
791 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
792 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
794 d.addCallback(lambda res: n.modify(_ucw_error_modifier))
795 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
796 d.addCallback(lambda res: n.download_best_version())
797 d.addCallback(lambda res: self.failUnlessEqual(res,
799 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
801 def _reset_ucw_error_modifier(res):
804 d.addCallback(_reset_ucw_error_modifier)
806 # in practice, this n.modify call should publish twice: the first
807 # one gets a UCWE, the second does not. But our test jig (in
808 # which the modifier raises the UCWE) skips over the first one,
809 # so in this test there will be only one publish, and the seqnum
810 # will only be one larger than the previous test, not two (i.e. 4
812 d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
813 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
814 d.addCallback(lambda res: n.download_best_version())
815 d.addCallback(lambda res: self.failUnlessEqual(res,
817 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
818 d.addCallback(lambda res: n.modify(_toobig_modifier))
820 d.addCallback(_created)
824 def test_modify_backoffer(self):
825 def _modifier(old_contents, servermap, first_time):
826 return old_contents + "line2"
828 def _ucw_error_modifier(old_contents, servermap, first_time):
829 # simulate an UncoordinatedWriteError once
832 raise UncoordinatedWriteError("simulated")
833 return old_contents + "line3"
834 def _always_ucw_error_modifier(old_contents, servermap, first_time):
835 raise UncoordinatedWriteError("simulated")
836 def _backoff_stopper(node, f):
838 def _backoff_pauser(node, f):
840 reactor.callLater(0.5, d.callback, None)
843 # the give-up-er will hit its maximum retry count quickly
844 giveuper = BackoffAgent()
845 giveuper._delay = 0.1
848 d = self.nodemaker.create_mutable_file(MutableData("line1"))
850 d = n.modify(_modifier)
851 d.addCallback(lambda res: n.download_best_version())
852 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
853 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
855 d.addCallback(lambda res:
856 self.shouldFail(UncoordinatedWriteError,
857 "_backoff_stopper", None,
858 n.modify, _ucw_error_modifier,
860 d.addCallback(lambda res: n.download_best_version())
861 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
862 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
864 def _reset_ucw_error_modifier(res):
867 d.addCallback(_reset_ucw_error_modifier)
868 d.addCallback(lambda res: n.modify(_ucw_error_modifier,
870 d.addCallback(lambda res: n.download_best_version())
871 d.addCallback(lambda res: self.failUnlessEqual(res,
873 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
875 d.addCallback(lambda res:
876 self.shouldFail(UncoordinatedWriteError,
878 n.modify, _always_ucw_error_modifier,
880 d.addCallback(lambda res: n.download_best_version())
881 d.addCallback(lambda res: self.failUnlessEqual(res,
883 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
886 d.addCallback(_created)
889 def test_upload_and_download_full_size_keys(self):
890 self.nodemaker.key_generator = client.KeyGenerator()
891 d = self.nodemaker.create_mutable_file()
893 d = defer.succeed(None)
894 d.addCallback(lambda res: n.get_servermap(MODE_READ))
895 d.addCallback(lambda smap: smap.dump(StringIO()))
896 d.addCallback(lambda sio:
897 self.failUnless("3-of-10" in sio.getvalue()))
898 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
899 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
900 d.addCallback(lambda res: n.download_best_version())
901 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
902 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
903 d.addCallback(lambda res: n.download_best_version())
904 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
905 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
906 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
907 d.addCallback(lambda res: n.download_best_version())
908 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
909 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
910 d.addCallback(lambda smap:
911 n.download_version(smap,
912 smap.best_recoverable_version()))
913 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
915 d.addCallback(_created)
919 def test_size_after_servermap_update(self):
920 # a mutable file node should have something to say about how big
921 # it is after a servermap update is performed, since this tells
922 # us how large the best version of that mutable file is.
923 d = self.nodemaker.create_mutable_file()
926 return n.get_servermap(MODE_READ)
927 d.addCallback(_created)
928 d.addCallback(lambda ignored:
929 self.failUnlessEqual(self.n.get_size(), 0))
930 d.addCallback(lambda ignored:
931 self.n.overwrite(MutableData("foobarbaz")))
932 d.addCallback(lambda ignored:
933 self.failUnlessEqual(self.n.get_size(), 9))
934 d.addCallback(lambda ignored:
935 self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
936 d.addCallback(_created)
937 d.addCallback(lambda ignored:
938 self.failUnlessEqual(self.n.get_size(), 9))
943 def publish_one(self):
944 # publish a file and create shares, which can then be manipulated
946 self.CONTENTS = "New contents go here" * 1000
947 self.uploadable = MutableData(self.CONTENTS)
948 self._storage = FakeStorage()
949 self._nodemaker = make_nodemaker(self._storage)
950 self._storage_broker = self._nodemaker.storage_broker
951 d = self._nodemaker.create_mutable_file(self.uploadable)
954 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
955 d.addCallback(_created)
958 def publish_mdmf(self):
959 # like publish_one, except that the result is guaranteed to be
961 # self.CONTENTS should have more than one segment.
962 self.CONTENTS = "This is an MDMF file" * 100000
963 self.uploadable = MutableData(self.CONTENTS)
964 self._storage = FakeStorage()
965 self._nodemaker = make_nodemaker(self._storage)
966 self._storage_broker = self._nodemaker.storage_broker
967 d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
970 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
971 d.addCallback(_created)
975 def publish_sdmf(self):
976 # like publish_one, except that the result is guaranteed to be
978 self.CONTENTS = "This is an SDMF file" * 1000
979 self.uploadable = MutableData(self.CONTENTS)
980 self._storage = FakeStorage()
981 self._nodemaker = make_nodemaker(self._storage)
982 self._storage_broker = self._nodemaker.storage_broker
983 d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
986 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
987 d.addCallback(_created)
991 def publish_multiple(self, version=0):
992 self.CONTENTS = ["Contents 0",
997 self.uploadables = [MutableData(d) for d in self.CONTENTS]
998 self._copied_shares = {}
999 self._storage = FakeStorage()
1000 self._nodemaker = make_nodemaker(self._storage)
1001 d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
1004 # now create multiple versions of the same file, and accumulate
1005 # their shares, so we can mix and match them later.
1006 d = defer.succeed(None)
1007 d.addCallback(self._copy_shares, 0)
1008 d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
1009 d.addCallback(self._copy_shares, 1)
1010 d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
1011 d.addCallback(self._copy_shares, 2)
1012 d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
1013 d.addCallback(self._copy_shares, 3)
1014 # now we replace all the shares with version s3, and upload a new
1015 # version to get s4b.
1016 rollback = dict([(i,2) for i in range(10)])
1017 d.addCallback(lambda res: self._set_versions(rollback))
1018 d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
1019 d.addCallback(self._copy_shares, 4)
1020 # we leave the storage in state 4
1022 d.addCallback(_created)
1026 def _copy_shares(self, ignored, index):
1027 shares = self._storage._peers
1028 # we need a deep copy
1030 for peerid in shares:
1031 new_shares[peerid] = {}
1032 for shnum in shares[peerid]:
1033 new_shares[peerid][shnum] = shares[peerid][shnum]
1034 self._copied_shares[index] = new_shares
1036 def _set_versions(self, versionmap):
1037 # versionmap maps shnums to which version (0,1,2,3,4) we want the
1038 # share to be at. Any shnum which is left out of the map will stay at
1039 # its current version.
1040 shares = self._storage._peers
1041 oldshares = self._copied_shares
1042 for peerid in shares:
1043 for shnum in shares[peerid]:
1044 if shnum in versionmap:
1045 index = versionmap[shnum]
1046 shares[peerid][shnum] = oldshares[index][peerid][shnum]
class PausingConsumer:
    # An IConsumer that pauses its producer after the first write and
    # resumes it later from a reactor timer, to exercise the downloader's
    # pause/resume handling.
    # NOTE(review): __init__ was reconstructed; confirm the tracked
    # counters (size) against the callers of this helper.
    implements(IConsumer)
    def __init__(self):
        self.size = 0
        self.already_paused = False

    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def _unpause(self, ignored):
        self.producer.resumeProducing()

    def write(self, data):
        self.size += len(data)
        if not self.already_paused:
            # pause exactly once, then resume after a generous delay
            self.producer.pauseProducing()
            self.already_paused = True
            reactor.callLater(15, self._unpause, None)
1072 class Servermap(unittest.TestCase, PublishMixin):
1074 return self.publish_one()
1076 def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
1081 sb = self._storage_broker
1082 smu = ServermapUpdater(fn, sb, Monitor(),
1083 ServerMap(), mode, update_range=update_range)
1087 def update_servermap(self, oldmap, mode=MODE_CHECK):
1088 smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
1093 def failUnlessOneRecoverable(self, sm, num_shares):
1094 self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1095 self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1096 best = sm.best_recoverable_version()
1097 self.failIfEqual(best, None)
1098 self.failUnlessEqual(sm.recoverable_versions(), set([best]))
1099 self.failUnlessEqual(len(sm.shares_available()), 1)
1100 self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
1101 shnum, peerids = sm.make_sharemap().items()[0]
1102 peerid = list(peerids)[0]
1103 self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
1104 self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
1107 def test_basic(self):
1108 d = defer.succeed(None)
1109 ms = self.make_servermap
1110 us = self.update_servermap
1112 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1113 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1114 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1115 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1116 d.addCallback(lambda res: ms(mode=MODE_READ))
1117 # this mode stops at k+epsilon, and epsilon=k, so 6 shares
1118 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1119 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1120 # this mode stops at 'k' shares
1121 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1123 # and can we re-use the same servermap? Note that these are sorted in
1124 # increasing order of number of servers queried, since once a server
1125 # gets into the servermap, we'll always ask it for an update.
1126 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
1127 d.addCallback(lambda sm: us(sm, mode=MODE_READ))
1128 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1129 d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
1130 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1131 d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
1132 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1133 d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
1134 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1138 def test_fetch_privkey(self):
1139 d = defer.succeed(None)
1140 # use the sibling filenode (which hasn't been used yet), and make
1141 # sure it can fetch the privkey. The file is small, so the privkey
1142 # will be fetched on the first (query) pass.
1143 d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
1144 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1146 # create a new file, which is large enough to knock the privkey out
1147 # of the early part of the file
1148 LARGE = "These are Larger contents" * 200 # about 5KB
1149 LARGE_uploadable = MutableData(LARGE)
1150 d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
1151 def _created(large_fn):
1152 large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
1153 return self.make_servermap(MODE_WRITE, large_fn2)
1154 d.addCallback(_created)
1155 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
1159 def test_mark_bad(self):
1160 d = defer.succeed(None)
1161 ms = self.make_servermap
1163 d.addCallback(lambda res: ms(mode=MODE_READ))
1164 d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
1166 v = sm.best_recoverable_version()
1167 vm = sm.make_versionmap()
1168 shares = list(vm[v])
1169 self.failUnlessEqual(len(shares), 6)
1170 self._corrupted = set()
1171 # mark the first 5 shares as corrupt, then update the servermap.
1172 # The map should not have the marked shares it in any more, and
1173 # new shares should be found to replace the missing ones.
1174 for (shnum, peerid, timestamp) in shares:
1176 self._corrupted.add( (peerid, shnum) )
1177 sm.mark_bad_share(peerid, shnum, "")
1178 return self.update_servermap(sm, MODE_WRITE)
1179 d.addCallback(_made_map)
1181 # this should find all 5 shares that weren't marked bad
1182 v = sm.best_recoverable_version()
1183 vm = sm.make_versionmap()
1184 shares = list(vm[v])
1185 for (peerid, shnum) in self._corrupted:
1186 peer_shares = sm.shares_on_peer(peerid)
1187 self.failIf(shnum in peer_shares,
1188 "%d was in %s" % (shnum, peer_shares))
1189 self.failUnlessEqual(len(shares), 5)
1190 d.addCallback(_check_map)
1193 def failUnlessNoneRecoverable(self, sm):
1194 self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1195 self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
1196 best = sm.best_recoverable_version()
1197 self.failUnlessEqual(best, None)
1198 self.failUnlessEqual(len(sm.shares_available()), 0)
1200 def test_no_shares(self):
1201 self._storage._peers = {} # delete all shares
1202 ms = self.make_servermap
1203 d = defer.succeed(None)
1205 d.addCallback(lambda res: ms(mode=MODE_CHECK))
1206 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1208 d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
1209 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1211 d.addCallback(lambda res: ms(mode=MODE_WRITE))
1212 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1214 d.addCallback(lambda res: ms(mode=MODE_READ))
1215 d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
1219 def failUnlessNotQuiteEnough(self, sm):
1220 self.failUnlessEqual(len(sm.recoverable_versions()), 0)
1221 self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
1222 best = sm.best_recoverable_version()
1223 self.failUnlessEqual(best, None)
1224 self.failUnlessEqual(len(sm.shares_available()), 1)
1225 self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
    def test_not_quite_enough_shares(self):
        # Reduce the grid to fewer shares than k=3, then verify that every
        # servermap mode reports the version as not-quite-recoverable.
        # NOTE(review): this block appears truncated — the binding of `s`
        # (presumably `s = self._storage`) and the code that leaves exactly
        # two shares in place after the wipe are missing; confirm against
        # upstream before relying on this test.
        ms = self.make_servermap
        num_shares = len(s._peers)
        for peerid in s._peers:
            s._peers[peerid] = {}
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        # NOTE(review): a trailing `return d` is likely missing here;
        # without it Trial will not wait for these callbacks.
1256 def test_servermapupdater_finds_mdmf_files(self):
1257 # setUp already published an MDMF file for us. We just need to
1258 # make sure that when we run the ServermapUpdater, the file is
1259 # reported to have one recoverable version.
1260 d = defer.succeed(None)
1261 d.addCallback(lambda ignored:
1262 self.publish_mdmf())
1263 d.addCallback(lambda ignored:
1264 self.make_servermap(mode=MODE_CHECK))
1265 # Calling make_servermap also updates the servermap in the mode
1266 # that we specify, so we just need to see what it says.
1267 def _check_servermap(sm):
1268 self.failUnlessEqual(len(sm.recoverable_versions()), 1)
1269 d.addCallback(_check_servermap)
1273 def test_fetch_update(self):
1274 d = defer.succeed(None)
1275 d.addCallback(lambda ignored:
1276 self.publish_mdmf())
1277 d.addCallback(lambda ignored:
1278 self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
1279 def _check_servermap(sm):
1281 self.failUnlessEqual(len(sm.update_data), 10)
1283 for data in sm.update_data.itervalues():
1284 self.failUnlessEqual(len(data), 1)
1285 d.addCallback(_check_servermap)
1289 def test_servermapupdater_finds_sdmf_files(self):
1290 d = defer.succeed(None)
1291 d.addCallback(lambda ignored:
1292 self.publish_sdmf())
1293 d.addCallback(lambda ignored:
1294 self.make_servermap(mode=MODE_CHECK))
1295 d.addCallback(lambda servermap:
1296 self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
1300 class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
1302 return self.publish_one()
1304 def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
1306 oldmap = ServerMap()
1308 sb = self._storage_broker
1309 smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
1313 def abbrev_verinfo(self, verinfo):
1316 (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1317 offsets_tuple) = verinfo
1318 return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])
1320 def abbrev_verinfo_dict(self, verinfo_d):
1322 for verinfo,value in verinfo_d.items():
1323 (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
1324 offsets_tuple) = verinfo
1325 output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
1328 def dump_servermap(self, servermap):
1329 print "SERVERMAP", servermap
1330 print "RECOVERABLE", [self.abbrev_verinfo(v)
1331 for v in servermap.recoverable_versions()]
1332 print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
1333 print "available", self.abbrev_verinfo_dict(servermap.shares_available())
1335 def do_download(self, servermap, version=None):
1337 version = servermap.best_recoverable_version()
1338 r = Retrieve(self._fn, servermap, version)
1339 c = consumer.MemoryConsumer()
1340 d = r.download(consumer=c)
1341 d.addCallback(lambda mc: "".join(mc.chunks))
1345 def test_basic(self):
1346 d = self.make_servermap()
1347 def _do_retrieve(servermap):
1348 self._smap = servermap
1349 #self.dump_servermap(servermap)
1350 self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
1351 return self.do_download(servermap)
1352 d.addCallback(_do_retrieve)
1353 def _retrieved(new_contents):
1354 self.failUnlessEqual(new_contents, self.CONTENTS)
1355 d.addCallback(_retrieved)
1356 # we should be able to re-use the same servermap, both with and
1357 # without updating it.
1358 d.addCallback(lambda res: self.do_download(self._smap))
1359 d.addCallback(_retrieved)
1360 d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1361 d.addCallback(lambda res: self.do_download(self._smap))
1362 d.addCallback(_retrieved)
1363 # clobbering the pubkey should make the servermap updater re-fetch it
1364 def _clobber_pubkey(res):
1365 self._fn._pubkey = None
1366 d.addCallback(_clobber_pubkey)
1367 d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
1368 d.addCallback(lambda res: self.do_download(self._smap))
1369 d.addCallback(_retrieved)
1372 def test_all_shares_vanished(self):
1373 d = self.make_servermap()
1374 def _remove_shares(servermap):
1375 for shares in self._storage._peers.values():
1377 d1 = self.shouldFail(NotEnoughSharesError,
1378 "test_all_shares_vanished",
1380 self.do_download, servermap)
1382 d.addCallback(_remove_shares)
1385 def test_no_servers(self):
1386 sb2 = make_storagebroker(num_peers=0)
1387 # if there are no servers, then a MODE_READ servermap should come
1389 d = self.make_servermap(sb=sb2)
1390 def _check_servermap(servermap):
1391 self.failUnlessEqual(servermap.best_recoverable_version(), None)
1392 self.failIf(servermap.recoverable_versions())
1393 self.failIf(servermap.unrecoverable_versions())
1394 self.failIf(servermap.all_peers())
1395 d.addCallback(_check_servermap)
1398 def test_no_servers_download(self):
1399 sb2 = make_storagebroker(num_peers=0)
1400 self._fn._storage_broker = sb2
1401 d = self.shouldFail(UnrecoverableFileError,
1402 "test_no_servers_download",
1403 "no recoverable versions",
1404 self._fn.download_best_version)
1406 # a failed download that occurs while we aren't connected to
1407 # anybody should not prevent a subsequent download from working.
1408 # This isn't quite the webapi-driven test that #463 wants, but it
1409 # should be close enough.
1410 self._fn._storage_broker = self._storage_broker
1411 return self._fn.download_best_version()
1412 def _retrieved(new_contents):
1413 self.failUnlessEqual(new_contents, self.CONTENTS)
1414 d.addCallback(_restore)
1415 d.addCallback(_retrieved)
1419 def _test_corrupt_all(self, offset, substring,
1420 should_succeed=False,
1422 failure_checker=None,
1423 fetch_privkey=False):
1424 d = defer.succeed(None)
1426 d.addCallback(corrupt, self._storage, offset)
1427 d.addCallback(lambda res: self.make_servermap())
1428 if not corrupt_early:
1429 d.addCallback(corrupt, self._storage, offset)
1430 def _do_retrieve(servermap):
1431 ver = servermap.best_recoverable_version()
1432 if ver is None and not should_succeed:
1433 # no recoverable versions == not succeeding. The problem
1434 # should be noted in the servermap's list of problems.
1436 allproblems = [str(f) for f in servermap.problems]
1437 self.failUnlessIn(substring, "".join(allproblems))
1440 d1 = self._fn.download_version(servermap, ver,
1442 d1.addCallback(lambda new_contents:
1443 self.failUnlessEqual(new_contents, self.CONTENTS))
1445 d1 = self.shouldFail(NotEnoughSharesError,
1446 "_corrupt_all(offset=%s)" % (offset,),
1448 self._fn.download_version, servermap,
1452 d1.addCallback(failure_checker)
1453 d1.addCallback(lambda res: servermap)
1455 d.addCallback(_do_retrieve)
1458 def test_corrupt_all_verbyte(self):
1459 # when the version byte is not 0 or 1, we hit an UnknownVersionError
1460 # error in unpack_share().
1461 d = self._test_corrupt_all(0, "UnknownVersionError")
1462 def _check_servermap(servermap):
1463 # and the dump should mention the problems
1465 dump = servermap.dump(s).getvalue()
1466 self.failUnless("30 PROBLEMS" in dump, dump)
1467 d.addCallback(_check_servermap)
    # Each of the following corrupts one fixed-offset field of the share
    # header (integer offsets index bytes within the serialized share; see
    # _test_corrupt_all). Corrupting any signed header field makes the
    # signature check fail.
    def test_corrupt_all_seqnum(self):
        # a corrupt sequence number will trigger a bad signature
        return self._test_corrupt_all(1, "signature is invalid")

    def test_corrupt_all_R(self):
        # a corrupt root hash will trigger a bad signature
        return self._test_corrupt_all(9, "signature is invalid")

    def test_corrupt_all_IV(self):
        # a corrupt salt/IV will trigger a bad signature
        return self._test_corrupt_all(41, "signature is invalid")

    def test_corrupt_all_k(self):
        # a corrupt 'k' will trigger a bad signature
        return self._test_corrupt_all(57, "signature is invalid")

    def test_corrupt_all_N(self):
        # a corrupt 'N' will trigger a bad signature
        return self._test_corrupt_all(58, "signature is invalid")

    def test_corrupt_all_segsize(self):
        # a corrupt segsize will trigger a bad signature
        return self._test_corrupt_all(59, "signature is invalid")

    def test_corrupt_all_datalen(self):
        # a corrupt data length will trigger a bad signature
        return self._test_corrupt_all(67, "signature is invalid")
    def test_corrupt_all_pubkey(self):
        # a corrupt pubkey won't match the URI's fingerprint. We need to
        # remove the pubkey from the filenode, or else it won't bother trying
        # to fetch it from the shares (and thus notice the corruption).
        self._fn._pubkey = None
        return self._test_corrupt_all("pubkey",
                                      "pubkey doesn't match fingerprint")

    def test_corrupt_all_sig(self):
        # a corrupt signature is a bad one
        # the signature runs from about [543:799], depending upon the length
        # of the pubkey
        return self._test_corrupt_all("signature", "signature is invalid")

    def test_corrupt_all_share_hash_chain_number(self):
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle the first byte, that will look like a bad hash number,
        # causing an IndexError
        return self._test_corrupt_all("share_hash_chain", "corrupt hashes")

    def test_corrupt_all_share_hash_chain_hash(self):
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle a few bytes in, that will look like a bad hash.
        return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")

    def test_corrupt_all_block_hash_tree(self):
        # a corrupt block hash tree is detected when validating blocks
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure")

    def test_corrupt_all_block(self):
        # corrupt block data fails validation against the block hash tree
        return self._test_corrupt_all("share_data", "block hash tree failure")

    def test_corrupt_all_encprivkey(self):
        # a corrupted privkey won't even be noticed by the reader, only by a
        # writer (or repairer), so the download should succeed
        return self._test_corrupt_all("enc_privkey", None, should_succeed=True)
1536 def test_corrupt_all_encprivkey_late(self):
1537 # this should work for the same reason as above, but we corrupt
1538 # after the servermap update to exercise the error handling
1540 # We need to remove the privkey from the node, or the retrieve
1541 # process won't know to update it.
1542 self._fn._privkey = None
1543 return self._test_corrupt_all("enc_privkey",
1544 None, # this shouldn't fail
1545 should_succeed=True,
1546 corrupt_early=False,
1550 def test_corrupt_all_seqnum_late(self):
1551 # corrupting the seqnum between mapupdate and retrieve should result
1552 # in NotEnoughSharesError, since each share will look invalid
1555 self.failUnless(f.check(NotEnoughSharesError))
1556 self.failUnless("uncoordinated write" in str(f))
1557 return self._test_corrupt_all(1, "ran out of peers",
1558 corrupt_early=False,
1559 failure_checker=_check)
1561 def test_corrupt_all_block_hash_tree_late(self):
1564 self.failUnless(f.check(NotEnoughSharesError))
1565 return self._test_corrupt_all("block_hash_tree",
1566 "block hash tree failure",
1567 corrupt_early=False,
1568 failure_checker=_check)
1571 def test_corrupt_all_block_late(self):
1574 self.failUnless(f.check(NotEnoughSharesError))
1575 return self._test_corrupt_all("share_data", "block hash tree failure",
1576 corrupt_early=False,
1577 failure_checker=_check)
1580 def test_basic_pubkey_at_end(self):
1581 # we corrupt the pubkey in all but the last 'k' shares, allowing the
1582 # download to succeed but forcing a bunch of retries first. Note that
1583 # this is rather pessimistic: our Retrieve process will throw away
1584 # the whole share if the pubkey is bad, even though the rest of the
1585 # share might be good.
1587 self._fn._pubkey = None
1588 k = self._fn.get_required_shares()
1589 N = self._fn.get_total_shares()
1590 d = defer.succeed(None)
1591 d.addCallback(corrupt, self._storage, "pubkey",
1592 shnums_to_corrupt=range(0, N-k))
1593 d.addCallback(lambda res: self.make_servermap())
1594 def _do_retrieve(servermap):
1595 self.failUnless(servermap.problems)
1596 self.failUnless("pubkey doesn't match fingerprint"
1597 in str(servermap.problems[0]))
1598 ver = servermap.best_recoverable_version()
1599 r = Retrieve(self._fn, servermap, ver)
1600 c = consumer.MemoryConsumer()
1601 return r.download(c)
1602 d.addCallback(_do_retrieve)
1603 d.addCallback(lambda mc: "".join(mc.chunks))
1604 d.addCallback(lambda new_contents:
1605 self.failUnlessEqual(new_contents, self.CONTENTS))
1609 def _test_corrupt_some(self, offset, mdmf=False):
1611 d = self.publish_mdmf()
1613 d = defer.succeed(None)
1614 d.addCallback(lambda ignored:
1615 corrupt(None, self._storage, offset, range(5)))
1616 d.addCallback(lambda ignored:
1617 self.make_servermap())
1618 def _do_retrieve(servermap):
1619 ver = servermap.best_recoverable_version()
1620 self.failUnless(ver)
1621 return self._fn.download_best_version()
1622 d.addCallback(_do_retrieve)
1623 d.addCallback(lambda new_contents:
1624 self.failUnlessEqual(new_contents, self.CONTENTS))
    def test_corrupt_some(self):
        # corrupt the data of first five shares (so the servermap thinks
        # they're good but retrieve marks them as bad), so that the
        # MODE_READ set of 6 will be insufficient, forcing node.download to
        # retry with more servers.
        # (SDMF flavor; see test_corrupt_some_mdmf for the MDMF variant.)
        return self._test_corrupt_some("share_data")
1636 def test_download_fails(self):
1637 d = corrupt(None, self._storage, "signature")
1638 d.addCallback(lambda ignored:
1639 self.shouldFail(UnrecoverableFileError, "test_download_anyway",
1640 "no recoverable versions",
1641 self._fn.download_best_version))
1646 def test_corrupt_mdmf_block_hash_tree(self):
1647 d = self.publish_mdmf()
1648 d.addCallback(lambda ignored:
1649 self._test_corrupt_all(("block_hash_tree", 12 * 32),
1650 "block hash tree failure",
1651 corrupt_early=False,
1652 should_succeed=False))
1656 def test_corrupt_mdmf_block_hash_tree_late(self):
1657 d = self.publish_mdmf()
1658 d.addCallback(lambda ignored:
1659 self._test_corrupt_all(("block_hash_tree", 12 * 32),
1660 "block hash tree failure",
1662 should_succeed=False))
1666 def test_corrupt_mdmf_share_data(self):
1667 d = self.publish_mdmf()
1668 d.addCallback(lambda ignored:
1669 # TODO: Find out what the block size is and corrupt a
1670 # specific block, rather than just guessing.
1671 self._test_corrupt_all(("share_data", 12 * 40),
1672 "block hash tree failure",
1674 should_succeed=False))
1678 def test_corrupt_some_mdmf(self):
1679 return self._test_corrupt_some(("share_data", 12 * 40),
1684 def check_good(self, r, where):
1685 self.failUnless(r.is_healthy(), where)
1688 def check_bad(self, r, where):
1689 self.failIf(r.is_healthy(), where)
1692 def check_expected_failure(self, r, expected_exception, substring, where):
1693 for (peerid, storage_index, shnum, f) in r.problems:
1694 if f.check(expected_exception):
1695 self.failUnless(substring in str(f),
1696 "%s: substring '%s' not in '%s'" %
1697 (where, substring, str(f)))
1699 self.fail("%s: didn't see expected exception %s in problems %s" %
1700 (where, expected_exception, r.problems))
1703 class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
1705 return self.publish_one()
1708 def test_check_good(self):
1709 d = self._fn.check(Monitor())
1710 d.addCallback(self.check_good, "test_check_good")
1713 def test_check_mdmf_good(self):
1714 d = self.publish_mdmf()
1715 d.addCallback(lambda ignored:
1716 self._fn.check(Monitor()))
1717 d.addCallback(self.check_good, "test_check_mdmf_good")
1720 def test_check_no_shares(self):
1721 for shares in self._storage._peers.values():
1723 d = self._fn.check(Monitor())
1724 d.addCallback(self.check_bad, "test_check_no_shares")
1727 def test_check_mdmf_no_shares(self):
1728 d = self.publish_mdmf()
1730 for share in self._storage._peers.values():
1732 d.addCallback(_then)
1733 d.addCallback(lambda ignored:
1734 self._fn.check(Monitor()))
1735 d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
1738 def test_check_not_enough_shares(self):
1739 for shares in self._storage._peers.values():
1740 for shnum in shares.keys():
1743 d = self._fn.check(Monitor())
1744 d.addCallback(self.check_bad, "test_check_not_enough_shares")
1747 def test_check_mdmf_not_enough_shares(self):
1748 d = self.publish_mdmf()
1750 for shares in self._storage._peers.values():
1751 for shnum in shares.keys():
1754 d.addCallback(_then)
1755 d.addCallback(lambda ignored:
1756 self._fn.check(Monitor()))
1757 d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")
1761 def test_check_all_bad_sig(self):
1762 d = corrupt(None, self._storage, 1) # bad sig
1763 d.addCallback(lambda ignored:
1764 self._fn.check(Monitor()))
1765 d.addCallback(self.check_bad, "test_check_all_bad_sig")
1768 def test_check_mdmf_all_bad_sig(self):
1769 d = self.publish_mdmf()
1770 d.addCallback(lambda ignored:
1771 corrupt(None, self._storage, 1))
1772 d.addCallback(lambda ignored:
1773 self._fn.check(Monitor()))
1774 d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
1777 def test_check_all_bad_blocks(self):
1778 d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1779 # the Checker won't notice this.. it doesn't look at actual data
1780 d.addCallback(lambda ignored:
1781 self._fn.check(Monitor()))
1782 d.addCallback(self.check_good, "test_check_all_bad_blocks")
1786 def test_check_mdmf_all_bad_blocks(self):
1787 d = self.publish_mdmf()
1788 d.addCallback(lambda ignored:
1789 corrupt(None, self._storage, "share_data"))
1790 d.addCallback(lambda ignored:
1791 self._fn.check(Monitor()))
1792 d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
1795 def test_verify_good(self):
1796 d = self._fn.check(Monitor(), verify=True)
1797 d.addCallback(self.check_good, "test_verify_good")
1800 def test_verify_all_bad_sig(self):
1801 d = corrupt(None, self._storage, 1) # bad sig
1802 d.addCallback(lambda ignored:
1803 self._fn.check(Monitor(), verify=True))
1804 d.addCallback(self.check_bad, "test_verify_all_bad_sig")
1807 def test_verify_one_bad_sig(self):
1808 d = corrupt(None, self._storage, 1, [9]) # bad sig
1809 d.addCallback(lambda ignored:
1810 self._fn.check(Monitor(), verify=True))
1811 d.addCallback(self.check_bad, "test_verify_one_bad_sig")
1814 def test_verify_one_bad_block(self):
1815 d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
1816 # the Verifier *will* notice this, since it examines every byte
1817 d.addCallback(lambda ignored:
1818 self._fn.check(Monitor(), verify=True))
1819 d.addCallback(self.check_bad, "test_verify_one_bad_block")
1820 d.addCallback(self.check_expected_failure,
1821 CorruptShareError, "block hash tree failure",
1822 "test_verify_one_bad_block")
1825 def test_verify_one_bad_sharehash(self):
1826 d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
1827 d.addCallback(lambda ignored:
1828 self._fn.check(Monitor(), verify=True))
1829 d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
1830 d.addCallback(self.check_expected_failure,
1831 CorruptShareError, "corrupt hashes",
1832 "test_verify_one_bad_sharehash")
1835 def test_verify_one_bad_encprivkey(self):
1836 d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1837 d.addCallback(lambda ignored:
1838 self._fn.check(Monitor(), verify=True))
1839 d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
1840 d.addCallback(self.check_expected_failure,
1841 CorruptShareError, "invalid privkey",
1842 "test_verify_one_bad_encprivkey")
1845 def test_verify_one_bad_encprivkey_uncheckable(self):
1846 d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
1847 readonly_fn = self._fn.get_readonly()
1848 # a read-only node has no way to validate the privkey
1849 d.addCallback(lambda ignored:
1850 readonly_fn.check(Monitor(), verify=True))
1851 d.addCallback(self.check_good,
1852 "test_verify_one_bad_encprivkey_uncheckable")
1856 def test_verify_mdmf_good(self):
1857 d = self.publish_mdmf()
1858 d.addCallback(lambda ignored:
1859 self._fn.check(Monitor(), verify=True))
1860 d.addCallback(self.check_good, "test_verify_mdmf_good")
1864 def test_verify_mdmf_one_bad_block(self):
1865 d = self.publish_mdmf()
1866 d.addCallback(lambda ignored:
1867 corrupt(None, self._storage, "share_data", [1]))
1868 d.addCallback(lambda ignored:
1869 self._fn.check(Monitor(), verify=True))
1870 # We should find one bad block here
1871 d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
1872 d.addCallback(self.check_expected_failure,
1873 CorruptShareError, "block hash tree failure",
1874 "test_verify_mdmf_one_bad_block")
1878 def test_verify_mdmf_bad_encprivkey(self):
1879 d = self.publish_mdmf()
1880 d.addCallback(lambda ignored:
1881 corrupt(None, self._storage, "enc_privkey", [0]))
1882 d.addCallback(lambda ignored:
1883 self._fn.check(Monitor(), verify=True))
1884 d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
1885 d.addCallback(self.check_expected_failure,
1886 CorruptShareError, "privkey",
1887 "test_verify_mdmf_bad_encprivkey")
1891 def test_verify_mdmf_bad_sig(self):
1892 d = self.publish_mdmf()
1893 d.addCallback(lambda ignored:
1894 corrupt(None, self._storage, 1, [1]))
1895 d.addCallback(lambda ignored:
1896 self._fn.check(Monitor(), verify=True))
1897 d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
def test_verify_mdmf_bad_encprivkey_uncheckable(self):
    """A read-only MDMF node cannot validate the privkey, so deep-verify
    through the readcap should report a privkey-corrupted file as good."""
    d = self.publish_mdmf()
    d.addCallback(lambda ignored:
                  corrupt(None, self._storage, "enc_privkey", [1]))
    d.addCallback(lambda ignored:
                  self._fn.get_readonly())
    d.addCallback(lambda fn:
                  fn.check(Monitor(), verify=True))
    d.addCallback(self.check_good,
                  "test_verify_mdmf_bad_encprivkey_uncheckable")
    # Fix: return the Deferred so trial waits for the async assertions.
    return d
class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):

    def get_shares(self, s):
        """Snapshot every share currently held by the FakeStorage `s`.

        Returns a dict mapping (peerid, shnum) -> share data, so tests can
        compare storage state before and after a repair.
        """
        all_shares = {} # maps (peerid, shnum) to share data
        for peerid in s._peers:
            shares = s._peers[peerid]
            for shnum in shares:
                data = shares[shnum]
                all_shares[ (peerid, shnum) ] = data
        # Fix: the accumulated dict was built but never returned, so every
        # caller (e.g. copy_shares) received None instead of the snapshot.
        return all_shares
def copy_shares(self, ignored=None):
    # Record a snapshot of every share currently in fake storage so a
    # later step can compare against it. Usable directly as a Deferred
    # callback (the callback argument is accepted and ignored).
    snapshot = self.get_shares(self._storage)
    self.old_shares.append(snapshot)
def test_repair_nop(self):
    """Repairing a healthy file should succeed and leave every share in
    place, at the next sequence number.

    NOTE(review): this extract has lost several original lines (marked
    below); reconstruct them before running.
    """
    self.old_shares = []
    d = self.publish_one()
    d.addCallback(self.copy_shares)
    d.addCallback(lambda res: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    def _check_results(rres):
        self.failUnless(IRepairResults.providedBy(rres))
        self.failUnless(rres.get_successful())
        # TODO: examine results
        # NOTE(review): lines that re-snapshot the shares appear to be
        # missing here (old_shares[1] is read below but never written
        # in the visible code).
        initial_shares = self.old_shares[0]
        new_shares = self.old_shares[1]
        # TODO: this really shouldn't change anything. When we implement
        # a "minimal-bandwidth" repairer", change this test to assert:
        #self.failUnlessEqual(new_shares, initial_shares)
        # all shares should be in the same place as before
        self.failUnlessEqual(set(initial_shares.keys()),
                             set(new_shares.keys()))
        # but they should all be at a newer seqnum. The IV will be
        # different, so the roothash will be too.
        for key in initial_shares:
            # NOTE(review): the opening of this tuple-unpack (version0,
            # seqnum0, root_hash0, IV0, ...) is missing from the extract.
            k0, N0, segsize0, datalen0,
             o0) = unpack_header(initial_shares[key])
            # NOTE(review): likewise the opening of the second unpack.
            k1, N1, segsize1, datalen1,
             o1) = unpack_header(new_shares[key])
            self.failUnlessEqual(version0, version1)
            self.failUnlessEqual(seqnum0+1, seqnum1)
            self.failUnlessEqual(k0, k1)
            self.failUnlessEqual(N0, N1)
            self.failUnlessEqual(segsize0, segsize1)
            self.failUnlessEqual(datalen0, datalen1)
    d.addCallback(_check_results)
    # NOTE(review): `return d` appears to be missing here.
def failIfSharesChanged(self, ignored=None):
    # Compare the two most recent copy_shares() snapshots and fail if
    # anything differs. Usable directly as a Deferred callback.
    previous = self.old_shares[-2]
    latest = self.old_shares[-1]
    self.failUnlessEqual(previous, latest)
def test_unrepairable_0shares(self):
    """With every share deleted, repair must report failure.

    NOTE(review): several lines are missing from this extract (the
    per-peer share deletion, the `_check` callback header, `return d`).
    """
    d = self.publish_one()
    def _delete_all_shares(ign):
        shares = self._storage._peers
        for peerid in shares:
            # NOTE(review): the statement clearing shares[peerid] is missing.
    d.addCallback(_delete_all_shares)
    d.addCallback(lambda ign: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    # NOTE(review): `def _check(crr):` header is missing here.
        self.failUnlessEqual(crr.get_successful(), False)
    d.addCallback(_check)
    # NOTE(review): `return d` appears to be missing.

def test_mdmf_unrepairable_0shares(self):
    """MDMF variant: with every share deleted, repair must report failure.

    NOTE(review): same missing lines as the SDMF version above.
    """
    d = self.publish_mdmf()
    def _delete_all_shares(ign):
        shares = self._storage._peers
        for peerid in shares:
            # NOTE(review): the statement clearing shares[peerid] is missing.
    d.addCallback(_delete_all_shares)
    d.addCallback(lambda ign: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    d.addCallback(lambda crr: self.failIf(crr.get_successful()))
    # NOTE(review): `return d` appears to be missing.

def test_unrepairable_1share(self):
    """With only one share left (fewer than k), repair must fail.

    NOTE(review): the `if shnum > 0:` guard inside the loop and the
    `_check` callback header appear to be missing from this extract.
    """
    d = self.publish_one()
    def _delete_all_shares(ign):
        shares = self._storage._peers
        for peerid in shares:
            for shnum in list(shares[peerid]):
                # NOTE(review): a guard keeping share 0 is missing here.
                del shares[peerid][shnum]
    d.addCallback(_delete_all_shares)
    d.addCallback(lambda ign: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    # NOTE(review): `def _check(crr):` header is missing here.
        self.failUnlessEqual(crr.get_successful(), False)
    d.addCallback(_check)
    # NOTE(review): `return d` appears to be missing.

def test_mdmf_unrepairable_1share(self):
    """MDMF variant of test_unrepairable_1share.

    NOTE(review): same missing lines as the SDMF version above.
    """
    d = self.publish_mdmf()
    def _delete_all_shares(ign):
        shares = self._storage._peers
        for peerid in shares:
            for shnum in list(shares[peerid]):
                # NOTE(review): a guard keeping share 0 is missing here.
                del shares[peerid][shnum]
    d.addCallback(_delete_all_shares)
    d.addCallback(lambda ign: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    # NOTE(review): `def _check(crr):` header is missing here.
        self.failUnlessEqual(crr.get_successful(), False)
    d.addCallback(_check)
    # NOTE(review): `return d` appears to be missing.

def test_repairable_5shares(self):
    """With five shares remaining (>= k), repair should succeed.

    NOTE(review): the share-retention guard inside the loop and the
    `_check` callback header appear to be missing from this extract.
    """
    d = self.publish_mdmf()
    def _delete_all_shares(ign):
        shares = self._storage._peers
        for peerid in shares:
            for shnum in list(shares[peerid]):
                # NOTE(review): a guard keeping five shares is missing here.
                del shares[peerid][shnum]
    d.addCallback(_delete_all_shares)
    d.addCallback(lambda ign: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    # NOTE(review): `def _check(crr):` header is missing here.
        self.failUnlessEqual(crr.get_successful(), True)
    d.addCallback(_check)
    # NOTE(review): `return d` appears to be missing.
def test_mdmf_repairable_5shares(self):
    """MDMF: delete shares down to a recoverable-but-unhealthy state, then
    confirm the checker sees that and repair succeeds.

    NOTE(review): the deletion guard, both callback headers (`_check`,
    `_check1`), their `return` lines, and `return d` are missing here.
    """
    d = self.publish_mdmf()
    def _delete_some_shares(ign):
        shares = self._storage._peers
        for peerid in shares:
            for shnum in list(shares[peerid]):
                # NOTE(review): a guard keeping some shares is missing here.
                del shares[peerid][shnum]
    d.addCallback(_delete_some_shares)
    d.addCallback(lambda ign: self._fn.check(Monitor()))
    # NOTE(review): `def _check(cr):` header is missing here.
        self.failIf(cr.is_healthy())
        self.failUnless(cr.is_recoverable())
        # NOTE(review): `return cr` appears to be missing here.
    d.addCallback(_check)
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    # NOTE(review): `def _check1(crr):` header is missing here.
        self.failUnlessEqual(crr.get_successful(), True)
    d.addCallback(_check1)
    # NOTE(review): `return d` appears to be missing.
def test_merge(self):
    """Multiple recoverable versions with identical highest seqnums must
    make repair() demand force=True; a forced repair then converges all
    shares on a single winning version.

    NOTE(review): several lines are missing from this extract (marked).
    """
    self.old_shares = []
    d = self.publish_multiple()
    # repair will refuse to merge multiple highest seqnums unless you
    # NOTE(review): the continuation of this comment is missing.
    d.addCallback(lambda res:
                  self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                      1:4,3:4,5:4,7:4,9:4}))
    d.addCallback(self.copy_shares)
    d.addCallback(lambda res: self._fn.check(Monitor()))
    def _try_repair(check_results):
        ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
        d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                             self._fn.repair, check_results)
        d2.addCallback(self.copy_shares)
        d2.addCallback(self.failIfSharesChanged)
        d2.addCallback(lambda res: check_results)
        # NOTE(review): `return d2` appears to be missing here.
    d.addCallback(_try_repair)
    d.addCallback(lambda check_results:
                  self._fn.repair(check_results, force=True))
    # this should give us 10 shares of the highest roothash
    def _check_repair_results(rres):
        self.failUnless(rres.get_successful())
        # NOTE(review): at least one line is missing here.
    d.addCallback(_check_repair_results)
    d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
    def _check_smap(smap):
        self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        self.failIf(smap.unrecoverable_versions())
        # now, which should have won?
        roothash_s4a = self.get_roothash_for(3)
        roothash_s4b = self.get_roothash_for(4)
        if roothash_s4b > roothash_s4a:
            expected_contents = self.CONTENTS[4]
        # NOTE(review): the `else:` line is missing before this branch.
            expected_contents = self.CONTENTS[3]
        new_versionid = smap.best_recoverable_version()
        self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
        d2 = self._fn.download_version(smap, new_versionid)
        d2.addCallback(self.failUnlessEqual, expected_contents)
        # NOTE(review): `return d2` appears to be missing here.
    d.addCallback(_check_smap)
    # NOTE(review): `return d` appears to be missing.
def test_non_merge(self):
    """A repair that does not need to merge (older v2 plus newer v3) must
    not be refused: it should ignore v2 and republish v3 as v5.

    NOTE(review): a few lines are missing from this extract (marked).
    """
    self.old_shares = []
    d = self.publish_multiple()
    # repair should not refuse a repair that doesn't need to merge. In
    # this case, we combine v2 with v3. The repair should ignore v2 and
    # copy v3 into a new v5.
    d.addCallback(lambda res:
                  self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                      1:3,3:3,5:3,7:3,9:3}))
    d.addCallback(lambda res: self._fn.check(Monitor()))
    d.addCallback(lambda check_results: self._fn.repair(check_results))
    # this should give us 10 shares of v3
    def _check_repair_results(rres):
        self.failUnless(rres.get_successful())
        # NOTE(review): at least one line is missing here.
    d.addCallback(_check_repair_results)
    d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
    def _check_smap(smap):
        self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        self.failIf(smap.unrecoverable_versions())
        # now, which should have won?
        expected_contents = self.CONTENTS[3]
        new_versionid = smap.best_recoverable_version()
        self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
        d2 = self._fn.download_version(smap, new_versionid)
        d2.addCallback(self.failUnlessEqual, expected_contents)
        # NOTE(review): `return d2` appears to be missing here.
    d.addCallback(_check_smap)
    # NOTE(review): `return d` appears to be missing.
def get_roothash_for(self, index):
    """Return the roothash of the first share found in the saved share set
    `index` (as recorded in self._copied_shares)."""
    # return the roothash for the first share we see in the saved set
    shares = self._copied_shares[index]
    for peerid in shares:
        for shnum in shares[peerid]:
            share = shares[peerid][shnum]
            (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
                      unpack_header(share)
            # Fix: the parsed roothash must be returned for the first
            # share seen; without this the method always returned None.
            return root_hash
def test_check_and_repair_readcap(self):
    """check_and_repair through a readcap should detect ill health but skip
    the repair (mutable-readcap repair is not implemented: ticket #625).

    NOTE(review): the share-deletion statement inside `_get_readcap` and
    the final `return d` are missing from this extract.
    """
    # we can't currently repair from a mutable readcap: #625
    self.old_shares = []
    d = self.publish_one()
    d.addCallback(self.copy_shares)
    def _get_readcap(res):
        self._fn3 = self._fn.get_readonly()
        # also delete some shares
        for peerid,shares in self._storage._peers.items():
            # NOTE(review): the statement deleting a share is missing here.
    d.addCallback(_get_readcap)
    d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
    def _check_results(crr):
        self.failUnless(ICheckAndRepairResults.providedBy(crr))
        # we should detect the unhealthy, but skip over mutable-readcap
        # repairs until #625 is fixed
        self.failIf(crr.get_pre_repair_results().is_healthy())
        self.failIf(crr.get_repair_attempted())
        self.failIf(crr.get_post_repair_results().is_healthy())
    d.addCallback(_check_results)
    # NOTE(review): `return d` appears to be missing.
class DevNullDictionary(dict):
    """A dict that silently discards every assignment.

    Installed as a nodemaker's node cache by tests that need several
    distinct node objects for the same cap: writes are dropped, so reads
    always behave like an empty dict and nothing is ever cached.
    """
    def __setitem__(self, key, value):
        # Fix: the method body was missing from this block; discarding the
        # write (doing nothing) is the entire point of this class.
        return
class MultipleEncodings(unittest.TestCase):
    # Tests that download survives a mix of shares produced with different
    # encoding parameters for the same file.
    # NOTE(review): the `def setUp(self):` header appears to be missing from
    # this extract; the following statements are clearly setUp's body.
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        # NOTE(review): the `_created` callback definition is missing here,
        # as is the final `return d`.
        d.addCallback(_created)
def _encode(self, k, n, data, version=SDMF_VERSION):
    """Publish `data` with k-of-n encoding and return the resulting
    peerid->shares dict (via a Deferred).

    NOTE(review): several lines are missing from this extract (marked).
    """
    # encode 'data' into a peerid->shares dict.
    # NOTE(review): the line binding `fn` (presumably `fn = self._fn`) is
    # missing here — `fn` is used below.
    # disable the nodecache, since for these tests we explicitly need
    # multiple nodes pointing at the same file
    self._nodemaker._node_cache = DevNullDictionary()
    fn2 = self._nodemaker.create_from_cap(fn.get_uri())
    # then we copy over other fields that are normally fetched from the
    # NOTE(review): the continuation of this comment is missing.
    fn2._pubkey = fn._pubkey
    fn2._privkey = fn._privkey
    fn2._encprivkey = fn._encprivkey
    # and set the encoding parameters to something completely different
    fn2._required_shares = k
    fn2._total_shares = n
    # NOTE(review): the line binding `s` (presumably `s = self._storage`)
    # is missing here — `s` is used below.
    s._peers = {} # clear existing storage
    p2 = Publish(fn2, self._storage_broker, None)
    uploadable = MutableData(data)
    d = p2.publish(uploadable)
    def _published(res):
        # NOTE(review): the callback body (collecting the shares) is
        # missing from this extract.
    d.addCallback(_published)
    # NOTE(review): `return d` appears to be missing.
def make_servermap(self, mode=MODE_READ, oldmap=None):
    """Build/refresh a ServerMap for self._fn in the given mode.

    NOTE(review): the `if oldmap is None:` guard, the remaining
    ServermapUpdater arguments, and the update/return lines are missing
    from this extract.
    """
        oldmap = ServerMap()
    smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
def test_multiple_encodings(self):
    """Mix shares from three different encodings of one file and confirm
    download still retrieves the first recoverable version.

    NOTE(review): many original lines are missing from this extract
    (the `_merge` header, the share-placement table comment, the
    which-branch `if`/`elif` lines, and `return d`); they are marked below.
    """
    # we encode the same file in two different ways (3-of-10 and 4-of-9),
    # then mix up the shares, to make sure that download survives seeing
    # a variety of encodings. This is actually kind of tricky to set up.
    contents1 = "Contents for encoding 1 (3-of-10) go here"
    contents2 = "Contents for encoding 2 (4-of-9) go here"
    contents3 = "Contents for encoding 3 (4-of-7) go here"
    # we make a retrieval object that doesn't know what encoding
    # NOTE(review): the continuation of this comment is missing.
    fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
    # now we upload a file through fn1, and grab its shares
    d = self._encode(3, 10, contents1)
    def _encoded_1(shares):
        self._shares1 = shares
    d.addCallback(_encoded_1)
    d.addCallback(lambda res: self._encode(4, 9, contents2))
    def _encoded_2(shares):
        self._shares2 = shares
    d.addCallback(_encoded_2)
    d.addCallback(lambda res: self._encode(4, 7, contents3))
    def _encoded_3(shares):
        self._shares3 = shares
    d.addCallback(_encoded_3)
    # NOTE(review): the `def _merge(res):` header is missing here; the
    # following lines are its body.
        log.msg("merging sharelists")
        # we merge the shares from the two sets, leaving each shnum in
        # its original location, but using a share from set1 or set2
        # according to the following sequence:
        # NOTE(review): the placement-table comment block is missing here.
        # so that neither form can be recovered until fetch [f], at which
        # point version-s1 (the 3-of-10 form) should be recoverable. If
        # the implementation latches on to the first version it sees,
        # then s2 will be recoverable at fetch [g].
        # Later, when we implement code that handles multiple versions,
        # we can use this framework to assert that all recoverable
        # versions are retrieved, and test that 'epsilon' does its job
        places = [2, 2, 3, 2, 1, 1, 1, 2]
        # NOTE(review): the `sharemap = {}` initialization appears missing.
        sb = self._storage_broker
        for peerid in sorted(sb.get_all_serverids()):
            for shnum in self._shares1.get(peerid, {}):
                if shnum < len(places):
                    which = places[shnum]
                # NOTE(review): the `else:` fallback branch is missing.
                self._storage._peers[peerid] = peers = {}
                in_1 = shnum in self._shares1[peerid]
                in_2 = shnum in self._shares2.get(peerid, {})
                in_3 = shnum in self._shares3.get(peerid, {})
                # NOTE(review): the `if which == 1:`/`if in_1:` guards are
                # missing before this assignment.
                    peers[shnum] = self._shares1[peerid][shnum]
                    sharemap[shnum] = peerid
                # NOTE(review): the `elif which == 2:` guards are missing.
                    peers[shnum] = self._shares2[peerid][shnum]
                    sharemap[shnum] = peerid
                # NOTE(review): the `elif which == 3:` guards are missing.
                    peers[shnum] = self._shares3[peerid][shnum]
                    sharemap[shnum] = peerid
        # we don't bother placing any other shares
        # now sort the sequence so that share 0 is returned first
        new_sequence = [sharemap[shnum]
                        for shnum in sorted(sharemap.keys())]
        self._storage._sequence = new_sequence
        log.msg("merge done")
    d.addCallback(_merge)
    d.addCallback(lambda res: fn3.download_best_version())
    def _retrieved(new_contents):
        # the current specified behavior is "first version recoverable"
        self.failUnlessEqual(new_contents, contents1)
    d.addCallback(_retrieved)
    # NOTE(review): `return d` appears to be missing.
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    # Tests for grids holding a mix of share versions of one mutable file.
    # NOTE(review): the `def setUp(self):` header appears to be missing
    # from this extract; the next line is clearly setUp's body.
        return self.publish_multiple()
def test_multiple_versions(self):
    """download_best_version must pick the newest recoverable version from
    a grid holding mixed share versions, and the servermap must report
    unrecoverable newer versions and merge-needed states correctly.

    NOTE(review): a comment line and the final `return d` are missing
    from this extract (marked below).
    """
    # if we see a mix of versions in the grid, download_best_version
    # should get the latest one
    self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
    d = self._fn.download_best_version()
    d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
    # and the checker should report problems
    d.addCallback(lambda res: self._fn.check(Monitor()))
    d.addCallback(self.check_bad, "test_multiple_versions")
    # but if everything is at version 2, that's what we should download
    d.addCallback(lambda res:
                  self._set_versions(dict([(i,2) for i in range(10)])))
    d.addCallback(lambda res: self._fn.download_best_version())
    d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
    # if exactly one share is at version 3, we should still get v2
    d.addCallback(lambda res:
                  self._set_versions({0:3}))
    d.addCallback(lambda res: self._fn.download_best_version())
    d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
    # but the servermap should see the unrecoverable version. This
    # depends upon the single newer share being queried early.
    d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
    def _check_smap(smap):
        self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
        newer = smap.unrecoverable_newer_versions()
        self.failUnlessEqual(len(newer), 1)
        verinfo, health = newer.items()[0]
        self.failUnlessEqual(verinfo[0], 4)
        self.failUnlessEqual(health, (1,3))
        self.failIf(smap.needs_merge())
    d.addCallback(_check_smap)
    # if we have a mix of two parallel versions (s4a and s4b), we could
    # NOTE(review): the continuation of this comment is missing.
    d.addCallback(lambda res:
                  self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                      1:4,3:4,5:4,7:4,9:4}))
    d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
    def _check_smap_mixed(smap):
        self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
        newer = smap.unrecoverable_newer_versions()
        self.failUnlessEqual(len(newer), 0)
        self.failUnless(smap.needs_merge())
    d.addCallback(_check_smap_mixed)
    d.addCallback(lambda res: self._fn.download_best_version())
    d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                              res == self.CONTENTS[4]))
    # NOTE(review): `return d` appears to be missing.
def test_replace(self):
    """modify() on a grid with mixed versions should replace every share
    (including the outlier) with a single newer recoverable version.

    NOTE(review): the final `return d` appears to be missing from this
    extract.
    """
    # if we see a mix of versions in the grid, we should be able to
    # replace them all with a newer version
    # if exactly one share is at version 3, we should download (and
    # replace) v2, and the result should be v4. Note that the index we
    # give to _set_versions is different than the sequence number.
    target = dict([(i,2) for i in range(10)]) # seqnum3
    target[0] = 3 # seqnum4
    self._set_versions(target)
    def _modify(oldversion, servermap, first_time):
        return oldversion + " modified"
    d = self._fn.modify(_modify)
    d.addCallback(lambda res: self._fn.download_best_version())
    expected = self.CONTENTS[2] + " modified"
    d.addCallback(lambda res: self.failUnlessEqual(res, expected))
    # and the servermap should indicate that the outlier was replaced too
    d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
    def _check_smap(smap):
        self.failUnlessEqual(smap.highest_seqnum(), 5)
        self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
        self.failUnlessEqual(len(smap.recoverable_versions()), 1)
    d.addCallback(_check_smap)
    # NOTE(review): `return d` appears to be missing.
class Utils(unittest.TestCase):
    def test_cache(self):
        """Exercise the response cache: adds, hits, misses, boundary reads,
        and joining of adjacent fragments.

        NOTE(review): the lines constructing the cache object `c`
        (presumably `c = ResponseCache()`) are missing from this extract,
        both at the top and before the fragment-joining section.
        """
        # xdata = base62.b2a(os.urandom(100))[:100]
        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
        c.add("v1", 1, 0, xdata)
        c.add("v1", 1, 2000, ydata)
        self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
        self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
        self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
        self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
        self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
        self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
        self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
        self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
        self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)
        # test joining fragments
        # NOTE(review): a fresh cache construction appears missing here.
        c.add("v1", 1, 0, xdata[:10])
        c.add("v1", 1, 10, xdata[10:20])
        self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
class Exceptions(unittest.TestCase):
    def test_repr(self):
        """repr() of the mutable-file exceptions should name the class."""
        needs_more = NeedMoreDataError(100, 50, 100)
        self.failUnless("NeedMoreDataError" in repr(needs_more),
                        repr(needs_more))
        uncoordinated = UncoordinatedWriteError()
        self.failUnless("UncoordinatedWriteError" in repr(uncoordinated),
                        repr(uncoordinated))
class SameKeyGenerator:
    """A key 'generator' that always hands back one fixed RSA keypair.

    Used to force a mutable file onto a predetermined storage index: the
    test precomputes the key, then installs this in place of the real
    generator so create_mutable_file() reuses it.
    """
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # keysize is accepted to match the real generator's signature,
        # but deliberately ignored.
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
class FirstServerGetsKilled:
    # Foolscap post-call notifier that marks a server wrapper as broken,
    # so subsequent calls to it fail.
    # NOTE(review): this extract appears to have lost the one-shot guard
    # lines; as shown, EVERY notified wrapper would be broken, not just
    # the first — confirm against the full source before relying on it.
    def notify(self, retval, wrapper, methname):
        wrapper.broken = True
class FirstServerGetsDeleted:
    # Foolscap post-call notifier that makes the first queried server
    # behave as if its share has been deleted on later queries.
    # NOTE(review): several lines are missing from this extract — the
    # `def __init__(self):` header, the one-shot guard in notify(), and
    # the faked 'deleted' return value.
        self.silenced = None
    def notify(self, retval, wrapper, methname):
        # NOTE(review): a guard selecting only the first call appears
        # to be missing before this comment.
        # this query will work, but later queries should think the share
        # NOTE(review): continuation of this comment is missing.
            self.silenced = wrapper
        if wrapper == self.silenced:
            assert methname == "slot_testv_and_readv_and_writev"
            # NOTE(review): the substituted return value is missing here.
class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    def test_publish_surprise(self):
        """Publishing with a stale servermap (every share updated since the
        mapupdate) must raise UncoordinatedWriteError.

        NOTE(review): several lines are missing from this extract —
        `self.set_up_grid()`, the `def _created(n):` header, the
        `self.old_map = smap` stash, the upload callable argument, and the
        `return d` lines.
        """
        self.basedir = "mutable/Problems/test_publish_surprise"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): `def _created(n):` header is missing; the following
        # lines are its body.
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                # NOTE(review): the stash statement is missing here.
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_publish_surprise", None,
                                          # NOTE(review): the callable
                                          # argument (n.upload,) is missing.
                                          MutableData("contents 2a"), self.old_map))
            # NOTE(review): `return d` appears to be missing here.
        d.addCallback(_created)
        # NOTE(review): `return d` appears to be missing here.

    def test_retrieve_surprise(self):
        """Retrieving an old version via a stale servermap, after the file
        changed, must fail with NotEnoughSharesError.

        NOTE(review): same family of missing lines as test_publish_surprise.
        """
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): `def _created(n):` header is missing; the following
        # lines are its body.
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_READ))
            def _got_smap1(smap):
                # stash the old state of the file
                # NOTE(review): the stash statement is missing here.
            d.addCallback(_got_smap1)
            # then modify the file, leaving the old map untouched
            d.addCallback(lambda res: log.msg("starting winning write"))
            d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
            # now attempt to retrieve the old version with the old servermap.
            # This will look like someone has changed the file since we
            # updated the servermap.
            d.addCallback(lambda res: n._cache._clear())
            d.addCallback(lambda res: log.msg("starting doomed read"))
            d.addCallback(lambda res:
                          self.shouldFail(NotEnoughSharesError,
                                          "test_retrieve_surprise",
                                          "ran out of peers: have 0 of 1",
                                          # NOTE(review): the callable
                                          # argument is missing here.
                                          self.old_map.best_recoverable_version(),
            # NOTE(review): the closing of this call and `return d` are
            # missing here.
        d.addCallback(_created)
        # NOTE(review): `return d` appears to be missing here.

    def test_unexpected_shares(self):
        """An upload using an old servermap, after shares migrated to a new
        server, must fail with UncoordinatedWriteError.

        NOTE(review): several lines are missing from this extract (marked).
        """
        # upload the file, take a servermap, shut down one of the servers,
        # upload it again (causing shares to appear on a new server), then
        # upload using the old servermap. The last upload should fail with an
        # UncoordinatedWriteError, because of the shares that didn't appear
        # NOTE(review): the continuation of this comment is missing.
        self.basedir = "mutable/Problems/test_unexpected_shares"
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): `def _created(n):` header is missing; the following
        # lines are its body.
            d = defer.succeed(None)
            d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
            def _got_smap1(smap):
                # stash the old state of the file
                # NOTE(review): the stash statement is missing here.
                # now shut down one of the servers
                peer0 = list(smap.make_sharemap()[0])[0]
                self.g.remove_server(peer0)
                # then modify the file, leaving the old map untouched
                log.msg("starting winning write")
                return n.overwrite(MutableData("contents 2"))
            d.addCallback(_got_smap1)
            # now attempt to modify the file with the old servermap. This
            # will look just like an uncoordinated write, in which every
            # single share got updated between our mapupdate and our publish
            d.addCallback(lambda res: log.msg("starting doomed write"))
            d.addCallback(lambda res:
                          self.shouldFail(UncoordinatedWriteError,
                                          "test_surprise", None,
                                          # NOTE(review): the callable
                                          # argument (n.upload,) is missing.
                                          MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)
        # NOTE(review): `return d` appears to be missing here.
def test_bad_server(self):
    """Creating and updating a mutable file must survive one (then two)
    broken storage servers by falling back to alternates.

    NOTE(review): several lines are missing from this extract —
    `self.set_up_grid()`, the `def _got_node(n):` header, parts of
    `_explain_error`, and the `return` lines.
    """
    # Break one server, then create the file: the initial publish should
    # complete with an alternate server. Breaking a second server should
    # not prevent an update from succeeding either.
    self.basedir = "mutable/Problems/test_bad_server"
    nm = self.g.clients[0].nodemaker
    # to make sure that one of the initial peers is broken, we have to
    # get creative. We create an RSA key and compute its storage-index.
    # Then we make a KeyGenerator that always returns that one key, and
    # use it to create the mutable file. This will get easier when we can
    # use #467 static-server-selection to disable permutation and force
    # the choice of server for share[0].
    d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
    def _got_key( (pubkey, privkey) ):
        nm.key_generator = SameKeyGenerator(pubkey, privkey)
        pubkey_s = pubkey.serialize()
        privkey_s = privkey.serialize()
        u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                    ssk_pubkey_fingerprint_hash(pubkey_s))
        self._storage_index = u.get_storage_index()
    d.addCallback(_got_key)
    def _break_peer0(res):
        si = self._storage_index
        servers = nm.storage_broker.get_servers_for_psi(si)
        self.g.break_server(servers[0].get_serverid())
        self.server1 = servers[1]
    d.addCallback(_break_peer0)
    # now "create" the file, using the pre-established key, and let the
    # initial publish finally happen
    d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
    # that ought to work
    # NOTE(review): `def _got_node(n):` header is missing; the following
    # lines are its body.
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break the second peer
        def _break_peer1(res):
            self.g.break_server(self.server1.get_serverid())
        d.addCallback(_break_peer1)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        def _explain_error(f):
            # NOTE(review): a line (likely `print f`) is missing here.
            if f.check(NotEnoughServersError):
                print "first_error:", f.value.first_error
            # NOTE(review): `return f` appears to be missing here.
        d.addErrback(_explain_error)
        # NOTE(review): `return d` appears to be missing here.
    d.addCallback(_got_node)
    # NOTE(review): `return d` appears to be missing here.

def test_bad_server_overlap(self):
    """Like test_bad_server, but with no spare servers: the publisher must
    re-use an already-used server instead of raising a spurious
    UncoordinatedWriteError.

    NOTE(review): `self.set_up_grid(...)`, the `def _created(n):` header,
    and the `return` lines are missing from this extract.
    """
    # like test_bad_server, but with no extra unused servers to fall back
    # upon. This means that we must re-use a server which we've already
    # used. If we don't remember the fact that we sent them one share
    # already, we'll mistakenly think we're experiencing an
    # UncoordinatedWriteError.
    # Break one server, then create the file: the initial publish should
    # complete with an alternate server. Breaking a second server should
    # not prevent an update from succeeding either.
    self.basedir = "mutable/Problems/test_bad_server_overlap"
    nm = self.g.clients[0].nodemaker
    sb = nm.storage_broker
    peerids = [s.get_serverid() for s in sb.get_connected_servers()]
    self.g.break_server(peerids[0])
    d = nm.create_mutable_file(MutableData("contents 1"))
    # NOTE(review): `def _created(n):` header is missing; the following
    # lines are its body.
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break one of the remaining servers
        def _break_second_server(res):
            self.g.break_server(peerids[1])
        d.addCallback(_break_second_server)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        # NOTE(review): `return d` appears to be missing here.
    d.addCallback(_created)
    # NOTE(review): `return d` appears to be missing here.
def test_publish_all_servers_bad(self):
    """With every connected server broken, publish must fail with
    NotEnoughServersError.

    NOTE(review): `self.set_up_grid()` and `return d` are missing from
    this extract.
    """
    # Break all servers: the publish should fail
    self.basedir = "mutable/Problems/test_publish_all_servers_bad"
    nm = self.g.clients[0].nodemaker
    for s in nm.storage_broker.get_connected_servers():
        s.get_rref().broken = True
    d = self.shouldFail(NotEnoughServersError,
                        "test_publish_all_servers_bad",
                        "ran out of good servers",
                        nm.create_mutable_file, MutableData("contents"))
    # NOTE(review): `return d` appears to be missing here.

def test_publish_no_servers(self):
    """With zero servers in the grid, publish must fail with
    NotEnoughServersError.

    NOTE(review): `return d` appears to be missing from this extract.
    """
    # no servers at all: the publish should fail
    self.basedir = "mutable/Problems/test_publish_no_servers"
    self.set_up_grid(num_servers=0)
    nm = self.g.clients[0].nodemaker
    d = self.shouldFail(NotEnoughServersError,
                        "test_publish_no_servers",
                        "Ran out of non-bad servers",
                        nm.create_mutable_file, MutableData("contents"))
    # NOTE(review): `return d` appears to be missing here.
def test_privkey_query_error(self):
    """A MODE_WRITE mapupdate whose follow-up privkey query hits a failing
    server should still succeed by fetching the privkey elsewhere
    (exercises _privkey_query_failed).

    NOTE(review): the `def _created(n):` header and `return d` are
    missing from this extract.
    """
    # when a servermap is updated with MODE_WRITE, it tries to get the
    # privkey. Something might go wrong during this query attempt.
    # Exercise the code in _privkey_query_failed which tries to handle
    # NOTE(review): the continuation of this comment is missing.
    self.basedir = "mutable/Problems/test_privkey_query_error"
    self.set_up_grid(num_servers=20)
    nm = self.g.clients[0].nodemaker
    nm._node_cache = DevNullDictionary() # disable the nodecache
    # we need some contents that are large enough to push the privkey out
    # of the early part of the file
    LARGE = "These are Larger contents" * 2000 # about 50KB
    LARGE_uploadable = MutableData(LARGE)
    d = nm.create_mutable_file(LARGE_uploadable)
    # NOTE(review): `def _created(n):` header is missing; the following
    # lines are its body.
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        # When a mapupdate is performed on a node that doesn't yet know
        # the privkey, a short read is sent to a batch of servers, to get
        # the verinfo and (hopefully, if the file is short enough) the
        # encprivkey. Our file is too large to let this first read
        # contain the encprivkey. Each non-encprivkey-bearing response
        # that arrives (until the node gets the encprivkey) will trigger
        # a second read to specifically read the encprivkey.
        # So, to exercise this case:
        # 1. notice which server gets a read() call first
        # 2. tell that server to start throwing errors
        killer = FirstServerGetsKilled()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = killer.notify
    d.addCallback(_created)
    # now we update a servermap from a new node (which doesn't have the
    # privkey yet, forcing it to use a separate privkey query). Note that
    # the map-update will succeed, since we'll just get a copy from one
    # of the other shares.
    d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
    # NOTE(review): `return d` appears to be missing here.

def test_privkey_query_missing(self):
    """Like test_privkey_query_error, but the first server's shares vanish
    before the second (privkey) query instead of raising.

    NOTE(review): the `def _created(n):` header and `return d` are
    missing from this extract.
    """
    # like test_privkey_query_error, but the shares are deleted by the
    # second query, instead of raising an exception.
    self.basedir = "mutable/Problems/test_privkey_query_missing"
    self.set_up_grid(num_servers=20)
    nm = self.g.clients[0].nodemaker
    LARGE = "These are Larger contents" * 2000 # about 50KiB
    LARGE_uploadable = MutableData(LARGE)
    nm._node_cache = DevNullDictionary() # disable the nodecache
    d = nm.create_mutable_file(LARGE_uploadable)
    # NOTE(review): `def _created(n):` header is missing; the following
    # lines are its body.
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        deleter = FirstServerGetsDeleted()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = deleter.notify
    d.addCallback(_created)
    d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
    # NOTE(review): `return d` appears to be missing here.
def test_block_and_hash_query_error(self):
    """A server that dies right after its prefix validates should exercise
    Retrieve's error handling for the block/hash fetch, and download must
    still produce the full contents.

    NOTE(review): the `def _created(node):` callback, one line before the
    download, and `return d` are missing from this extract.
    """
    # This tests for what happens when a query to a remote server
    # fails in either the hash validation step or the block getting
    # step (because of batching, this is the same actual query).
    # We need to have the storage server persist up until the point
    # that its prefix is validated, then suddenly die. This
    # exercises some exception handling code in Retrieve.
    self.basedir = "mutable/Problems/test_block_and_hash_query_error"
    self.set_up_grid(num_servers=20)
    nm = self.g.clients[0].nodemaker
    CONTENTS = "contents" * 2000
    CONTENTS_uploadable = MutableData(CONTENTS)
    d = nm.create_mutable_file(CONTENTS_uploadable)
    # NOTE(review): the `_created` callback that stashes self._node is
    # missing from this extract.
    d.addCallback(_created)
    d.addCallback(lambda ignored:
                  self._node.get_servermap(MODE_READ))
    def _then(servermap):
        # we have our servermap. Now we set up the servers like the
        # tests above -- the first one that gets a read call should
        # start throwing errors, but only after returning its prefix
        # for validation. Since we'll download without fetching the
        # private key, the next query to the remote server will be
        # for either a block and salt or for hashes, either of which
        # will exercise the error handling code.
        killer = FirstServerGetsKilled()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = killer.notify
        ver = servermap.best_recoverable_version()
        # NOTE(review): a line appears to be missing here.
        return self._node.download_version(servermap, ver)
    d.addCallback(_then)
    d.addCallback(lambda data:
                  self.failUnlessEqual(data, CONTENTS))
    # NOTE(review): `return d` appears to be missing here.
class FileHandle(unittest.TestCase):
    # Tests for MutableFileHandle, the IMutableUploadable-style wrapper
    # around filehandle-like objects (a StringIO here, a real file below).
        # setUp body: ~450KB of repeating test data wrapped in a StringIO.
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)

    def test_filehandle_read(self):
        # Reading the uploadable in fixed-size chunks should return the
        # underlying data chunk by chunk, in order.
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            # read() returns a list of strings; join before comparing.
            data = "".join(data)
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])

    def test_filehandle_get_size(self):
        # get_size() should report the full length of the wrapped data.
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)

    def test_filehandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))
        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])

    def test_filehandle_file(self):
        # Make sure that the MutableFileHandle works on a file as well
        # as a StringIO object, since in some cases it will be asked to
        self.basedir = self.mktemp()
        # necessary? What am I doing wrong here?
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        f = open(f_path, "r")
        uploadable = MutableFileHandle(f)
        data = uploadable.read(len(self.test_data))
        self.failUnlessEqual("".join(data), self.test_data)
        size = uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

    def test_close(self):
        # Make sure that the MutableFileHandle closes its handle when
        self.uploadable.close()
        self.failUnless(self.sio.closed)
class DataHandle(unittest.TestCase):
    # Tests for MutableData, the in-memory IMutableUploadable-style
    # wrapper; mirrors the FileHandle tests above.
        # setUp body: wrap ~450KB of repeating test data in a MutableData.
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)

    def test_datahandle_read(self):
        # Reading in fixed-size chunks should return the data in order.
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            # read() returns a list of strings; join before comparing.
            data = "".join(data)
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])

    def test_datahandle_get_size(self):
        # get_size() should report the full length of the wrapped data.
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)

    def test_datahandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))
        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
2912 class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
2915 GridTestMixin.setUp(self)
2916 self.basedir = self.mktemp()
2918 self.c = self.g.clients[0]
2919 self.nm = self.c.nodemaker
2920 self.data = "test data" * 100000 # about 900 KiB; MDMF
2921 self.small_data = "test data" * 10 # about 90 B; SDMF
    def do_upload_mdmf(self):
        # Create an MDMF mutable file from self.data and (in _then,
        # partially elided here) stash the resulting node.
        d = self.nm.create_mutable_file(MutableData(self.data),
                                        version=MDMF_VERSION)
            assert isinstance(n, MutableFileNode)
            assert n._protocol_version == MDMF_VERSION
        d.addCallback(_then)
    def do_upload_sdmf(self):
        # Create an SDMF mutable file from self.small_data (default
        # version) and sanity-check the resulting node.
        d = self.nm.create_mutable_file(MutableData(self.small_data))
            assert isinstance(n, MutableFileNode)
            assert n._protocol_version == SDMF_VERSION
        d.addCallback(_then)
    def do_upload_empty_sdmf(self):
        # Create a zero-length SDMF mutable file and stash the node on
        # self.sdmf_zero_length_node for the zero-length download test.
        d = self.nm.create_mutable_file(MutableData(""))
            assert isinstance(n, MutableFileNode)
            self.sdmf_zero_length_node = n
            assert n._protocol_version == SDMF_VERSION
        d.addCallback(_then)
    def do_upload(self):
        # Upload both an MDMF and an SDMF file; most tests below start
        # from this combined fixture.
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.do_upload_sdmf())
    def test_debug(self):
        # Exercise the "debug find-shares" CLI plumbing against an
        # uploaded MDMF file: it should print one share path per share
        # (i.e. the encoding parameter "n").
        d = self.do_upload_mdmf()
            fso = debug.FindSharesOptions()
            storage_index = base32.b2a(n.get_storage_index())
            fso.si_s = storage_index
            fso.nodedirs = [unicode(os.path.dirname(os.path.abspath(storedir)))
                            in self.iterate_servers()]
            # capture the command's output for inspection
            fso.stdout = StringIO()
            fso.stderr = StringIO()
            debug.find_shares(fso)
            sharefiles = fso.stdout.getvalue().splitlines()
            expected = self.nm.default_encoding_parameters["n"]
            self.failUnlessEqual(len(sharefiles), expected)
        d.addCallback(_debug)
    def test_get_sequence_number(self):
        # Freshly-created files (both MDMF and SDMF) start at sequence
        # number 1; a single overwrite bumps each to 2.
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 1))
        d.addCallback(lambda ignored:
            self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 1))
        # Now update. The sequence number in both cases should be 1 in
        def _do_update(ignored):
            # overwrite both files in parallel and wait for both
            new_data = MutableData("foo bar baz" * 100000)
            new_small_data = MutableData("foo bar baz" * 10)
            d1 = self.mdmf_node.overwrite(new_data)
            d2 = self.sdmf_node.overwrite(new_small_data)
            dl = gatherResults([d1, d2])
        d.addCallback(_do_update)
        d.addCallback(lambda ignored:
            self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 2))
        d.addCallback(lambda ignored:
            self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv:
            self.failUnlessEqual(bv.get_sequence_number(), 2))
    def test_version_extension_api(self):
        # We need to define an API by which an uploader can set the
        # extension parameters, and by which a downloader can retrieve
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
        def _got_version(version):
            hints = version.get_downloader_hints()
            # The hints carried in the cap should reflect the encoding
            # parameters used at upload time: k=3 and the MDMF segment
            # size (131073 here).
            self.failUnlessIn("k", hints)
            self.failUnlessEqual(hints['k'], 3)
            self.failUnlessIn('segsize', hints)
            self.failUnlessEqual(hints['segsize'], 131073)
        d.addCallback(_got_version)
    def test_extensions_from_cap(self):
        # If we initialize a mutable file with a cap that has extension
        # parameters in it and then grab the extension parameters using
        # our API, we should see that they're set correctly.
        d = self.do_upload_mdmf()
            mdmf_uri = self.mdmf_node.get_uri()
            # round-trip through a freshly-created node from the cap
            new_node = self.nm.create_from_cap(mdmf_uri)
            return new_node.get_best_mutable_version()
        d.addCallback(_then)
        def _got_version(version):
            hints = version.get_downloader_hints()
            self.failUnlessIn("k", hints)
            self.failUnlessEqual(hints["k"], 3)
            self.failUnlessIn("segsize", hints)
            self.failUnlessEqual(hints["segsize"], 131073)
        d.addCallback(_got_version)
    def test_extensions_from_upload(self):
        # If we create a new mutable file with some contents, we should
        # get back an MDMF cap with the right hints in place.
        contents = "foo bar baz" * 100000
        d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
        def _got_mutable_file(n):
            # both the write cap and the read-only cap should embed the
            # k parameter and the segment size
            rw_uri = n.get_uri()
            expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
            self.failUnlessIn(expected_k, rw_uri)
            # XXX: Get this more intelligently.
            self.failUnlessIn("131073", rw_uri)
            ro_uri = n.get_readonly_uri()
            self.failUnlessIn(expected_k, ro_uri)
            self.failUnlessIn("131073", ro_uri)
        d.addCallback(_got_mutable_file)
    def test_cap_after_upload(self):
        # If we create a new mutable file and upload things to it, and
        # it's an MDMF file, we should get an MDMF cap back from that
        # file and should be able to use that.
        # That's essentially what MDMF node is, so just check that.
        d = self.do_upload_mdmf()
            mdmf_uri = self.mdmf_node.get_uri()
            cap = uri.from_string(mdmf_uri)
            self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
            # the read-only URI must parse as the read-only cap class
            readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
            cap = uri.from_string(readonly_mdmf_uri)
            self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
        d.addCallback(_then)
    def test_mutable_version(self):
        # assert that getting parameters from the IMutableVersion object
        # gives us the same data as getting them from the filenode itself
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
        def _check_mdmf(bv):
            # 'n' is the MDMF filenode (binding elided in this view)
            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
            self.failIf(bv.is_readonly())
        d.addCallback(_check_mdmf)
        d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
        def _check_sdmf(bv):
            # same checks for the SDMF filenode
            self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
            self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
            self.failIf(bv.is_readonly())
        d.addCallback(_check_sdmf)
    def test_get_readonly_version(self):
        # The best readable version of either file type must report
        # itself as read-only.
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
        # Attempting to get a mutable version of a mutable file from a
        # filenode initialized with a readcap should return a readonly
        # version of that same node.
        d.addCallback(lambda ign: self.mdmf_node.get_readonly())
        d.addCallback(lambda ro: ro.get_best_mutable_version())
        d.addCallback(lambda v: self.failUnless(v.is_readonly()))
        d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
        d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
        d.addCallback(lambda ign: self.sdmf_node.get_readonly())
        d.addCallback(lambda ro: ro.get_best_mutable_version())
        d.addCallback(lambda v: self.failUnless(v.is_readonly()))
    def test_toplevel_overwrite(self):
        # overwrite() on the filenode itself should replace the whole
        # contents, for both MDMF and SDMF files.
        new_data = MutableData("foo bar baz" * 100000)
        new_small_data = MutableData("foo bar baz" * 10)
        d = self.do_upload()
        d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "foo bar baz" * 100000))
        d.addCallback(lambda ignored:
            self.sdmf_node.overwrite(new_small_data))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessEqual(data, "foo bar baz" * 10))
    def test_toplevel_modify(self):
        # modify() on the filenode should apply the modifier function to
        # the current contents, for both MDMF and SDMF files.
        d = self.do_upload()
        def modifier(old_contents, servermap, first_time):
            return old_contents + "modified"
        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        d.addCallback(lambda ignored:
            self.sdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
    def test_version_modify(self):
        # TODO: When we can publish multiple versions, alter this test
        # to modify a version other than the best usable version, then
        # test to see that the best recoverable version is that.
        # (For now this mirrors test_toplevel_modify.)
        d = self.do_upload()
        def modifier(old_contents, servermap, first_time):
            return old_contents + "modified"
        d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.mdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
        d.addCallback(lambda ignored:
            self.sdmf_node.modify(modifier))
        d.addCallback(lambda ignored:
            self.sdmf_node.download_best_version())
        d.addCallback(lambda data:
            self.failUnlessIn("modified", data))
    def test_download_version(self):
        # With two recoverable versions spread over the shares, each
        # download_version() call must return that version's plaintext.
        d = self.publish_multiple()
        # We want to have two recoverable versions on the grid.
        d.addCallback(lambda res:
            self._set_versions({0:0,2:0,4:0,6:0,8:0,
                                1:1,3:1,5:1,7:1,9:1}))
        # Now try to download each version. We should get the plaintext
        # associated with that version.
        d.addCallback(lambda ignored:
            self._fn.get_servermap(mode=MODE_READ))
        def _got_servermap(smap):
            versions = smap.recoverable_versions()
            assert len(versions) == 2
            self.servermap = smap
            self.version1, self.version2 = versions
            assert self.version1 != self.version2
            # element 0 of a version tuple is its sequence number;
            # seqnums are 1-based, CONTENTS indices 0-based
            self.version1_seqnum = self.version1[0]
            self.version2_seqnum = self.version2[0]
            self.version1_index = self.version1_seqnum - 1
            self.version2_index = self.version2_seqnum - 1
        d.addCallback(_got_servermap)
        d.addCallback(lambda ignored:
            self._fn.download_version(self.servermap, self.version1))
        d.addCallback(lambda results:
            self.failUnlessEqual(self.CONTENTS[self.version1_index],
        d.addCallback(lambda ignored:
            self._fn.download_version(self.servermap, self.version2))
        d.addCallback(lambda results:
            self.failUnlessEqual(self.CONTENTS[self.version2_index],
    def test_download_nonexistent_version(self):
        # Asking download_version for a version that is not in the
        # servermap should fail with UnrecoverableFileError.
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
        def _set_servermap(servermap):
            self.servermap = servermap
        d.addCallback(_set_servermap)
        d.addCallback(lambda ignored:
            self.shouldFail(UnrecoverableFileError, "nonexistent version",
                self.mdmf_node.download_version, self.servermap,
    def test_partial_read(self):
        # read only a few bytes at a time, and see that the results are
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        def _read_data(version):
            c = consumer.MemoryConsumer()
            d2 = defer.succeed(None)
            # issue sequential 10000-byte reads over the whole file;
            # i=i binds the loop variable at definition time
            for i in xrange(0, len(self.data), 10000):
                d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(self.data, "".join(c.chunks)))
        d.addCallback(_read_data)
    def _test_partial_read(self, offset, length):
        # Helper: upload MDMF data, read [offset:offset+length] via the
        # version's read() API, and compare with the same slice of
        # self.data, printing context on mismatch.
        d = self.do_upload_mdmf()
        d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
        c = consumer.MemoryConsumer()
        d.addCallback(lambda version:
            version.read(c, offset, length))
        expected = self.data[offset:offset+length]
        d.addCallback(lambda ignored: "".join(c.chunks))
        def _check(results):
            if results != expected:
                print "got: %s ... %s" % (results[:20], results[-20:])
                print "exp: %s ... %s" % (expected[:20], expected[-20:])
                self.fail("results != expected")
        d.addCallback(_check)
3264 def test_partial_read_starting_on_segment_boundary(self):
3265 return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3), 50)
3267 def test_partial_read_ending_one_byte_after_segment_boundary(self):
3268 return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3)-50, 51)
3270 def test_partial_read_zero_length_at_start(self):
3271 return self._test_partial_read(0, 0)
3273 def test_partial_read_zero_length_in_middle(self):
3274 return self._test_partial_read(50, 0)
3276 def test_partial_read_zero_length_at_segment_boundary(self):
3277 return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3), 0)
    # XXX factor these into a single upload after they pass
    # Trial treats a .todo attribute as an expected failure, so these
    # three stay red without failing the suite until zero-length reads
    # are fixed.
    _broken = "zero-length reads of mutable files don't work"
    test_partial_read_zero_length_at_start.todo = _broken
    test_partial_read_zero_length_in_middle.todo = _broken
    test_partial_read_zero_length_at_segment_boundary.todo = _broken
    def _test_read_and_download(self, node, expected):
        # Helper: both the version.read() path and the filenode's
        # download_best_version() must yield 'expected'.
        d = node.get_best_readable_version()
        def _read_data(version):
            c = consumer.MemoryConsumer()
            d2 = defer.succeed(None)
            d2.addCallback(lambda ignored: version.read(c))
            d2.addCallback(lambda ignored:
                self.failUnlessEqual(expected, "".join(c.chunks)))
        d.addCallback(_read_data)
        d.addCallback(lambda ignored: node.download_best_version())
        d.addCallback(lambda data: self.failUnlessEqual(expected, data))
    def test_read_and_download_mdmf(self):
        # read()/download round-trip for the large MDMF payload
        d = self.do_upload_mdmf()
        d.addCallback(self._test_read_and_download, self.data)
    def test_read_and_download_sdmf(self):
        # read()/download round-trip for the small SDMF payload
        d = self.do_upload_sdmf()
        d.addCallback(self._test_read_and_download, self.small_data)
    def test_read_and_download_sdmf_zero_length(self):
        # read()/download round-trip for an empty SDMF file
        d = self.do_upload_empty_sdmf()
        d.addCallback(self._test_read_and_download, "")
class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    # Tests for in-place update() of mutable files, mostly MDMF.
    timeout = 400 # these tests are too big, 120s is not enough on slow
        # setUp body: one MDMF-sized blob (~900 KiB) and one small SDMF
        # blob, uploaded via do_upload() before each test runs.
        GridTestMixin.setUp(self)
        self.basedir = self.mktemp()
        self.c = self.g.clients[0]
        self.nm = self.c.nodemaker
        self.data = "testdata " * 100000 # about 900 KiB; MDMF
        self.small_data = "test data" * 10 # about 90 B; SDMF
        return self.do_upload()
    def do_upload(self):
        # Upload one MDMF and one SDMF file with the default encoding,
        # then again with n=255/k=127, stashing all four nodes.
        d1 = self.nm.create_mutable_file(MutableData(self.data),
                                         version=MDMF_VERSION)
        d2 = self.nm.create_mutable_file(MutableData(self.small_data))
        dl = gatherResults([d1, d2])
        def _then((n1, n2)):
            assert isinstance(n1, MutableFileNode)
            assert isinstance(n2, MutableFileNode)
        dl.addCallback(_then)
        # Make SDMF and MDMF mutable file nodes that have 255 shares.
        def _make_max_shares(ign):
            # bump the nodemaker's encoding to the protocol maximum
            self.nm.default_encoding_parameters['n'] = 255
            self.nm.default_encoding_parameters['k'] = 127
            d1 = self.nm.create_mutable_file(MutableData(self.data),
                                             version=MDMF_VERSION)
                 self.nm.create_mutable_file(MutableData(self.small_data))
            return gatherResults([d1, d2])
        dl.addCallback(_make_max_shares)
        def _stash((n1, n2)):
            assert isinstance(n1, MutableFileNode)
            assert isinstance(n2, MutableFileNode)
            self.mdmf_max_shares_node = n1
            self.sdmf_max_shares_node = n2
        dl.addCallback(_stash)
    def _test_replace(self, offset, new_data):
        # Helper: update() 'new_data' at 'offset' on both the normal and
        # the max-shares MDMF nodes, then download and compare against a
        # locally spliced expectation.
        expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            d = node.get_best_mutable_version()
            d.addCallback(lambda mv:
                mv.update(MutableData(new_data), offset))
            # close around node.
            d.addCallback(lambda ignored, node=node:
                node.download_best_version())
            def _check(results):
                if results != expected:
                    print "got: %s ... %s" % (results[:20], results[-20:])
                    print "exp: %s ... %s" % (expected[:20], expected[-20:])
                    self.fail("results != expected")
            d.addCallback(_check)
    def test_append(self):
        # We should be able to append data to a mutable file and get
        return self._test_replace(len(self.data), "appended")
3384 def test_replace_middle(self):
3385 # We should be able to replace data in the middle of a mutable
3386 # file and get what we expect back.
3387 return self._test_replace(100, "replaced")
    def test_replace_beginning(self):
        # We should be able to replace data at the beginning of the file
        # without truncating the file
        return self._test_replace(0, "beginning")

    def test_replace_segstart1(self):
        # fencepost: replacement starting one byte into segment 2
        return self._test_replace(128*1024+1, "NNNN")

    def test_replace_zero_length_beginning(self):
        # zero-length update at offset 0 should be a no-op
        return self._test_replace(0, "")

    def test_replace_zero_length_middle(self):
        # zero-length update inside the first segment
        return self._test_replace(50, "")

    def test_replace_zero_length_segstart1(self):
        # zero-length update just past a segment boundary
        return self._test_replace(128*1024+1, "")
3406 def test_replace_and_extend(self):
3407 # We should be able to replace data in the middle of a mutable
3408 # file and extend that mutable file and get what we expect.
3409 return self._test_replace(100, "modified " * 100000)
    def _check_differences(self, got, expected):
        # displaying arbitrary file corruption is tricky for a
        # 1MB file of repeating data, so look for likely places
        # with problems and display them separately
        gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
        expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
        gotspans = ["%d:%d=%s" % (start,end,got[start:end])
                    for (start,end) in gotmods]
        expspans = ["%d:%d=%s" % (start,end,expected[start:end])
                    for (start,end) in expmods]
        #print "expecting: %s" % expspans
            # dump head/tail context for every mismatching segment
            print "differences:"
            for segnum in range(len(expected)//SEGSIZE):
                start = segnum * SEGSIZE
                end = (segnum+1) * SEGSIZE
                got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
                exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
                if got_ends != exp_ends:
                    print "expected[%d]: %s" % (start, exp_ends)
                    print "got [%d]: %s" % (start, got_ends)
            if expspans != gotspans:
                print "expected: %s" % expspans
                print "got : %s" % gotspans
            # leave the full payloads on disk for post-mortem diffing
            open("EXPECTED","wb").write(expected)
            open("GOT","wb").write(got)
            print "wrote data to EXPECTED and GOT"
            self.fail("didn't get expected data")
    def test_replace_locations(self):
        # exercise fencepost conditions
        expected = self.data
        # offsets straddling the first and second segment boundaries
        suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
        letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        d = defer.succeed(None)
        for offset in suspects:
            new_data = letters.next()*2 # "AA", then "BB", etc
            expected = expected[:offset]+new_data+expected[offset+2:]
            d.addCallback(lambda ign:
                self.mdmf_node.get_best_mutable_version())
            def _modify(mv, offset=offset, new_data=new_data):
                # close over 'offset','new_data'
                md = MutableData(new_data)
                return mv.update(md, offset)
            d.addCallback(_modify)
            d.addCallback(lambda ignored:
                self.mdmf_node.download_best_version())
            d.addCallback(self._check_differences, expected)
    def test_replace_locations_max_shares(self):
        # exercise fencepost conditions
        # same as test_replace_locations, but against the 255-share node
        expected = self.data
        suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
        letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
        d = defer.succeed(None)
        for offset in suspects:
            new_data = letters.next()*2 # "AA", then "BB", etc
            expected = expected[:offset]+new_data+expected[offset+2:]
            d.addCallback(lambda ign:
                self.mdmf_max_shares_node.get_best_mutable_version())
            def _modify(mv, offset=offset, new_data=new_data):
                # close over 'offset','new_data'
                md = MutableData(new_data)
                return mv.update(md, offset)
            d.addCallback(_modify)
            d.addCallback(lambda ignored:
                self.mdmf_max_shares_node.download_best_version())
            d.addCallback(self._check_differences, expected)
    def test_append_power_of_two(self):
        # If we attempt to extend a mutable file so that its segment
        # count crosses a power-of-two boundary, the update operation
        # should know how to reencode the file.
        # Note that the data populating self.mdmf_node is about 900 KiB
        # long -- this is 7 segments in the default segment size. So we
        # need to add 2 segments worth of data to push it over a
        # power-of-two boundary.
        segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
        new_data = self.data + (segment * 2)
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            d = node.get_best_mutable_version()
            d.addCallback(lambda mv:
                mv.update(MutableData(segment * 2), len(self.data)))
            # node=node binds the loop variable at definition time
            d.addCallback(lambda ignored, node=node:
                node.download_best_version())
            d.addCallback(lambda results:
                self.failUnlessEqual(results, new_data))
    def test_update_sdmf(self):
        # Running update on a single-segment file should still work.
        new_data = self.small_data + "appended"
        for node in (self.sdmf_node, self.sdmf_max_shares_node):
            d = node.get_best_mutable_version()
            d.addCallback(lambda mv:
                mv.update(MutableData("appended"), len(self.small_data)))
            # node=node binds the loop variable at definition time
            d.addCallback(lambda ignored, node=node:
                node.download_best_version())
            d.addCallback(lambda results:
                self.failUnlessEqual(results, new_data))
    def test_replace_in_last_segment(self):
        # The wrapper should know how to handle the tail segment
        replace_offset = len(self.data) - 100
        # expected plaintext: everything up to the offset, the
        # replacement text, then the untouched tail
        new_data = self.data[:replace_offset] + "replaced"
        rest_offset = replace_offset + len("replaced")
        new_data += self.data[rest_offset:]
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            d = node.get_best_mutable_version()
            d.addCallback(lambda mv:
                mv.update(MutableData("replaced"), replace_offset))
            d.addCallback(lambda ignored, node=node:
                node.download_best_version())
            d.addCallback(lambda results:
                self.failUnlessEqual(results, new_data))
    def test_multiple_segment_replace(self):
        # Replace a span covering two whole segments plus "replaced",
        # starting at the third segment.
        replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
        new_data = self.data[:replace_offset]
        new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
        new_data += 2 * new_segment
        new_data += "replaced"
        rest_offset = len(new_data)
        new_data += self.data[rest_offset:]
        for node in (self.mdmf_node, self.mdmf_max_shares_node):
            d = node.get_best_mutable_version()
            d.addCallback(lambda mv:
                mv.update(MutableData((2 * new_segment) + "replaced"),
            d.addCallback(lambda ignored, node=node:
                node.download_best_version())
            d.addCallback(lambda results:
                self.failUnlessEqual(results, new_data))
3561 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3562 sdmf_old_shares = {}
3563 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3564 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3565 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3566 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3567 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3568 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3569 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3570 sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3571 sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3572 sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
# Writecap for the file whose pre-generated shares are stored in
# sdmf_old_shares above (a 3-of-10 SDMF file written by an older
# version of Tahoe); copy_sdmf_shares() plants those shares under
# this cap's storage index.
sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
# Plaintext that downloading the old shares is expected to yield.
sdmf_old_contents = "This is a test file.\n"
def copy_sdmf_shares(self):
    """Plant the canned pre-generated SDMF shares into the grid.

    Short-circuits the normal upload path: each of the 10 base64-encoded
    shares in self.sdmf_old_shares is decoded and written directly into a
    distinct storage server's share directory for self.sdmf_old_cap's
    storage index, then we verify all 10 shares are discoverable.
    """
    # We'll basically be short-circuiting the upload process.
    servernums = self.g.servers_by_number.keys()
    assert len(servernums) == 10

    # Pair each canned share number with a server number, one share per
    # server.
    assignments = zip(self.sdmf_old_shares.keys(), servernums)
    # Get the storage index.
    cap = uri.from_string(self.sdmf_old_cap)
    si = cap.get_storage_index()

    # Now execute each assignment by writing the share data directly
    # into each server's on-disk share directory.
    for (share, servernum) in assignments:
        sharedata = base64.b64decode(self.sdmf_old_shares[share])
        storedir = self.get_serverdir(servernum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        # Share files are named after their share number.
        fileutil.write(os.path.join(storage_path, "%d" % share),
                       sharedata)

    # ...and verify that the shares are there.
    shares = self.find_uri_shares(self.sdmf_old_cap)
    assert len(shares) == 10
def test_new_downloader_can_read_old_shares(self):
    """Interoperability check: the current downloader must be able to
    read SDMF shares written by an old version of Tahoe.

    Sets up a 10-server grid, plants the canned old shares, then
    downloads the file and checks the recovered plaintext.
    """
    self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
    # The grid must exist before copy_sdmf_shares() can write into the
    # servers' share directories.
    self.set_up_grid()
    self.copy_sdmf_shares()
    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(self.sdmf_old_cap)
    d = n.download_best_version()
    d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
    # Return the Deferred so trial waits for the download to finish.
    return d
3608 class DifferentEncoding(unittest.TestCase):
def setUp(self):
    # Give each test an in-RAM fake storage backend and a nodemaker
    # wired to it, so no real network or disk is involved.
    self._storage = s = FakeStorage()
    self.nodemaker = make_nodemaker(s)
3613 def test_filenode(self):
3614 # create a file with 3-of-20, then modify it with a client configured
3615 # to do 3-of-10. #1510 tracks a failure here
3616 self.nodemaker.default_encoding_parameters["n"] = 20
3617 d = self.nodemaker.create_mutable_file("old contents")
3619 filecap = n.get_cap().to_string()
3620 del n # we want a new object, not the cached one
3621 self.nodemaker.default_encoding_parameters["n"] = 10
3622 n2 = self.nodemaker.create_from_cap(filecap)
3624 d.addCallback(_created)
3625 def modifier(old_contents, servermap, first_time):
3626 return "new contents"
3627 d.addCallback(lambda n: n.modify(modifier))