3 from cStringIO import StringIO
4 from twisted.trial import unittest
5 from twisted.internet import defer, reactor
6 from twisted.internet.interfaces import IConsumer
7 from zope.interface import implements
8 from allmydata import uri, client
9 from allmydata.nodemaker import NodeMaker
10 from allmydata.util import base32, consumer, fileutil, mathutil
11 from allmydata.util.hashutil import tagged_hash, ssk_writekey_hash, \
12 ssk_pubkey_fingerprint_hash
13 from allmydata.util.deferredutil import gatherResults
14 from allmydata.interfaces import IRepairResults, ICheckAndRepairResults, \
15 NotEnoughSharesError, SDMF_VERSION, MDMF_VERSION
16 from allmydata.monitor import Monitor
17 from allmydata.test.common import ShouldFailMixin
18 from allmydata.test.no_network import GridTestMixin
19 from foolscap.api import eventually, fireEventually
20 from foolscap.logging import log
21 from allmydata.storage_client import StorageFarmBroker
22 from allmydata.storage.common import storage_index_to_dir
24 from allmydata.mutable.filenode import MutableFileNode, BackoffAgent
25 from allmydata.mutable.common import ResponseCache, \
26 MODE_CHECK, MODE_ANYTHING, MODE_WRITE, MODE_READ, \
27 NeedMoreDataError, UnrecoverableFileError, UncoordinatedWriteError, \
28 NotEnoughServersError, CorruptShareError
29 from allmydata.mutable.retrieve import Retrieve
30 from allmydata.mutable.publish import Publish, MutableFileHandle, \
32 DEFAULT_MAX_SEGMENT_SIZE
33 from allmydata.mutable.servermap import ServerMap, ServermapUpdater
34 from allmydata.mutable.layout import unpack_header, MDMFSlotReadProxy
35 from allmydata.mutable.repairer import MustForceRepairError
37 import allmydata.test.common_util as testutil
38 from allmydata.test.common import TEST_RSA_KEY_SIZE
41 # this "FakeStorage" exists to put the share data in RAM and avoid using real
42 # network connections, both to speed up the tests and to reduce the amount of
43 # non-mutable.py code being exercised.
46 # this class replaces the collection of storage servers, allowing the
47 # tests to examine and manipulate the published shares. It also lets us
48 # control the order in which read queries are answered, to exercise more
# of the error-handling code in Retrieve.
51 # Note that we ignore the storage index: this FakeStorage instance can
52 # only be used for a single storage index.
57 # _sequence is used to cause the responses to occur in a specific
58 # order. If it is in use, then we will defer queries instead of
59 # answering them right away, accumulating the Deferreds in a dict. We
60 # don't know exactly how many queries we'll get, so exactly one
61 # second after the first query arrives, we will release them all (in
65 self._pending_timer = None
67 def read(self, peerid, storage_index):
68 shares = self._peers.get(peerid, {})
69 if self._sequence is None:
70 return defer.succeed(shares)
73 self._pending_timer = reactor.callLater(1.0, self._fire_readers)
74 self._pending[peerid] = (d, shares)
77 def _fire_readers(self):
78 self._pending_timer = None
79 pending = self._pending
81 for peerid in self._sequence:
83 d, shares = pending.pop(peerid)
84 eventually(d.callback, shares)
85 for (d, shares) in pending.values():
86 eventually(d.callback, shares)
88 def write(self, peerid, storage_index, shnum, offset, data):
89 if peerid not in self._peers:
90 self._peers[peerid] = {}
91 shares = self._peers[peerid]
93 f.write(shares.get(shnum, ""))
96 shares[shnum] = f.getvalue()
99 class FakeStorageServer:
100 def __init__(self, peerid, storage):
102 self.storage = storage
104 def callRemote(self, methname, *args, **kwargs):
107 meth = getattr(self, methname)
108 return meth(*args, **kwargs)
110 d.addCallback(lambda res: _call())
113 def callRemoteOnly(self, methname, *args, **kwargs):
115 d = self.callRemote(methname, *args, **kwargs)
116 d.addBoth(lambda ignore: None)
119 def advise_corrupt_share(self, share_type, storage_index, shnum, reason):
122 def slot_readv(self, storage_index, shnums, readv):
123 d = self.storage.read(self.peerid, storage_index)
127 if shnums and shnum not in shnums:
129 vector = response[shnum] = []
130 for (offset, length) in readv:
131 assert isinstance(offset, (int, long)), offset
132 assert isinstance(length, (int, long)), length
133 vector.append(shares[shnum][offset:offset+length])
138 def slot_testv_and_readv_and_writev(self, storage_index, secrets,
139 tw_vectors, read_vector):
140 # always-pass: parrot the test vectors back to them.
142 for shnum, (testv, writev, new_length) in tw_vectors.items():
143 for (offset, length, op, specimen) in testv:
144 assert op in ("le", "eq", "ge")
145 # TODO: this isn't right, the read is controlled by read_vector,
147 readv[shnum] = [ specimen
148 for (offset, length, op, specimen)
150 for (offset, data) in writev:
151 self.storage.write(self.peerid, storage_index, shnum,
153 answer = (True, readv)
154 return fireEventually(answer)
def flip_bit(original, byte_offset):
    # Return a copy of 'original' with the low-order bit of the byte at
    # 'byte_offset' inverted. The tests use this to corrupt share data.
    corrupted_byte = chr(ord(original[byte_offset]) ^ 0x01)
    return original[:byte_offset] + corrupted_byte + original[byte_offset+1:]
def add_two(original, byte_offset):
    # Flipping a single bit is not enough to corrupt a version byte,
    # because 1 is also a valid version number. XORing with 0x02 changes
    # the value by two for the valid version bytes (0 and 1), which
    # guarantees the result is an invalid version.
    corrupted_byte = chr(ord(original[byte_offset]) ^ 0x02)
    return original[:byte_offset] + corrupted_byte + original[byte_offset+1:]
169 def corrupt(res, s, offset, shnums_to_corrupt=None, offset_offset=0):
170 # if shnums_to_corrupt is None, corrupt all shares. Otherwise it is a
171 # list of shnums to corrupt.
173 for peerid in s._peers:
174 shares = s._peers[peerid]
176 if (shnums_to_corrupt is not None
177 and shnum not in shnums_to_corrupt):
180 # We're feeding the reader all of the share data, so it
181 # won't need to use the rref that we didn't provide, nor the
182 # storage index that we didn't provide. We do this because
183 # the reader will work for both MDMF and SDMF.
184 reader = MDMFSlotReadProxy(None, None, shnum, data)
185 # We need to get the offsets for the next part.
186 d = reader.get_verinfo()
187 def _do_corruption(verinfo, data, shnum):
193 k, n, prefix, o) = verinfo
194 if isinstance(offset, tuple):
195 offset1, offset2 = offset
199 if offset1 == "pubkey" and IV:
202 real_offset = o[offset1]
204 real_offset = offset1
205 real_offset = int(real_offset) + offset2 + offset_offset
206 assert isinstance(real_offset, int), offset
207 if offset1 == 0: # verbyte
211 shares[shnum] = f(data, real_offset)
212 d.addCallback(_do_corruption, data, shnum)
214 dl = defer.DeferredList(ds)
215 dl.addCallback(lambda ignored: res)
218 def make_storagebroker(s=None, num_peers=10):
221 peerids = [tagged_hash("peerid", "%d" % i)[:20]
222 for i in range(num_peers)]
223 storage_broker = StorageFarmBroker(None, True)
224 for peerid in peerids:
225 fss = FakeStorageServer(peerid, s)
226 storage_broker.test_add_rref(peerid, fss)
227 return storage_broker
229 def make_nodemaker(s=None, num_peers=10):
230 storage_broker = make_storagebroker(s, num_peers)
231 sh = client.SecretHolder("lease secret", "convergence secret")
232 keygen = client.KeyGenerator()
233 keygen.set_default_keysize(TEST_RSA_KEY_SIZE)
234 nodemaker = NodeMaker(storage_broker, sh, None,
236 {"k": 3, "n": 10}, keygen)
239 class Filenode(unittest.TestCase, testutil.ShouldFailMixin):
240 # this used to be in Publish, but we removed the limit. Some of
241 # these tests test whether the new code correctly allows files
242 # larger than the limit.
243 OLD_MAX_SEGMENT_SIZE = 3500000
245 self._storage = s = FakeStorage()
246 self.nodemaker = make_nodemaker(s)
248 def test_create(self):
249 d = self.nodemaker.create_mutable_file()
251 self.failUnless(isinstance(n, MutableFileNode))
252 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
253 sb = self.nodemaker.storage_broker
254 peer0 = sorted(sb.get_all_serverids())[0]
255 shnums = self._storage._peers[peer0].keys()
256 self.failUnlessEqual(len(shnums), 1)
257 d.addCallback(_created)
261 def test_create_mdmf(self):
262 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
264 self.failUnless(isinstance(n, MutableFileNode))
265 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
266 sb = self.nodemaker.storage_broker
267 peer0 = sorted(sb.get_all_serverids())[0]
268 shnums = self._storage._peers[peer0].keys()
269 self.failUnlessEqual(len(shnums), 1)
270 d.addCallback(_created)
273 def test_single_share(self):
274 # Make sure that we tolerate publishing a single share.
275 self.nodemaker.default_encoding_parameters['k'] = 1
276 self.nodemaker.default_encoding_parameters['happy'] = 1
277 self.nodemaker.default_encoding_parameters['n'] = 1
278 d = defer.succeed(None)
279 for v in (SDMF_VERSION, MDMF_VERSION):
280 d.addCallback(lambda ignored:
281 self.nodemaker.create_mutable_file(version=v))
283 self.failUnless(isinstance(n, MutableFileNode))
286 d.addCallback(_created)
287 d.addCallback(lambda n:
288 n.overwrite(MutableData("Contents" * 50000)))
289 d.addCallback(lambda ignored:
290 self._node.download_best_version())
291 d.addCallback(lambda contents:
292 self.failUnlessEqual(contents, "Contents" * 50000))
295 def test_max_shares(self):
296 self.nodemaker.default_encoding_parameters['n'] = 255
297 d = self.nodemaker.create_mutable_file(version=SDMF_VERSION)
299 self.failUnless(isinstance(n, MutableFileNode))
300 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
301 sb = self.nodemaker.storage_broker
302 num_shares = sum([len(self._storage._peers[x].keys()) for x \
303 in sb.get_all_serverids()])
304 self.failUnlessEqual(num_shares, 255)
307 d.addCallback(_created)
308 # Now we upload some contents
309 d.addCallback(lambda n:
310 n.overwrite(MutableData("contents" * 50000)))
311 # ...then download contents
312 d.addCallback(lambda ignored:
313 self._node.download_best_version())
314 # ...and check to make sure everything went okay.
315 d.addCallback(lambda contents:
316 self.failUnlessEqual("contents" * 50000, contents))
319 def test_max_shares_mdmf(self):
320 # Test how files behave when there are 255 shares.
321 self.nodemaker.default_encoding_parameters['n'] = 255
322 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
324 self.failUnless(isinstance(n, MutableFileNode))
325 self.failUnlessEqual(n.get_storage_index(), n._storage_index)
326 sb = self.nodemaker.storage_broker
327 num_shares = sum([len(self._storage._peers[x].keys()) for x \
328 in sb.get_all_serverids()])
329 self.failUnlessEqual(num_shares, 255)
332 d.addCallback(_created)
333 d.addCallback(lambda n:
334 n.overwrite(MutableData("contents" * 50000)))
335 d.addCallback(lambda ignored:
336 self._node.download_best_version())
337 d.addCallback(lambda contents:
338 self.failUnlessEqual(contents, "contents" * 50000))
341 def test_mdmf_filenode_cap(self):
342 # Test that an MDMF filenode, once created, returns an MDMF URI.
343 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
345 self.failUnless(isinstance(n, MutableFileNode))
347 self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
348 rcap = n.get_readcap()
349 self.failUnless(isinstance(rcap, uri.ReadonlyMDMFFileURI))
350 vcap = n.get_verify_cap()
351 self.failUnless(isinstance(vcap, uri.MDMFVerifierURI))
352 d.addCallback(_created)
356 def test_create_from_mdmf_writecap(self):
357 # Test that the nodemaker is capable of creating an MDMF
358 # filenode given an MDMF cap.
359 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
361 self.failUnless(isinstance(n, MutableFileNode))
363 self.failUnless(s.startswith("URI:MDMF"))
364 n2 = self.nodemaker.create_from_cap(s)
365 self.failUnless(isinstance(n2, MutableFileNode))
366 self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
367 self.failUnlessEqual(n.get_uri(), n2.get_uri())
368 d.addCallback(_created)
372 def test_create_from_mdmf_writecap_with_extensions(self):
373 # Test that the nodemaker is capable of creating an MDMF
374 # filenode when given a writecap with extension parameters in
376 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
378 self.failUnless(isinstance(n, MutableFileNode))
380 # We need to cheat a little and delete the nodemaker's
381 # cache, otherwise we'll get the same node instance back.
382 self.failUnlessIn(":3:131073", s)
383 n2 = self.nodemaker.create_from_cap(s)
385 self.failUnlessEqual(n2.get_storage_index(), n.get_storage_index())
386 self.failUnlessEqual(n.get_writekey(), n2.get_writekey())
387 hints = n2._downloader_hints
388 self.failUnlessEqual(hints['k'], 3)
389 self.failUnlessEqual(hints['segsize'], 131073)
390 d.addCallback(_created)
394 def test_create_from_mdmf_readcap(self):
395 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
397 self.failUnless(isinstance(n, MutableFileNode))
398 s = n.get_readonly_uri()
399 n2 = self.nodemaker.create_from_cap(s)
400 self.failUnless(isinstance(n2, MutableFileNode))
402 # Check that it's a readonly node
403 self.failUnless(n2.is_readonly())
404 d.addCallback(_created)
408 def test_create_from_mdmf_readcap_with_extensions(self):
409 # We should be able to create an MDMF filenode with the
410 # extension parameters without it breaking.
411 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
413 self.failUnless(isinstance(n, MutableFileNode))
414 s = n.get_readonly_uri()
415 self.failUnlessIn(":3:131073", s)
417 n2 = self.nodemaker.create_from_cap(s)
418 self.failUnless(isinstance(n2, MutableFileNode))
419 self.failUnless(n2.is_readonly())
420 self.failUnlessEqual(n.get_storage_index(), n2.get_storage_index())
421 hints = n2._downloader_hints
422 self.failUnlessEqual(hints["k"], 3)
423 self.failUnlessEqual(hints["segsize"], 131073)
424 d.addCallback(_created)
428 def test_internal_version_from_cap(self):
429 # MutableFileNodes and MutableFileVersions have an internal
430 # switch that tells them whether they're dealing with an SDMF or
431 # MDMF mutable file when they start doing stuff. We want to make
432 # sure that this is set appropriately given an MDMF cap.
433 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
435 self.uri = n.get_uri()
436 self.failUnlessEqual(n._protocol_version, MDMF_VERSION)
438 n2 = self.nodemaker.create_from_cap(self.uri)
439 self.failUnlessEqual(n2._protocol_version, MDMF_VERSION)
440 d.addCallback(_created)
444 def test_serialize(self):
445 n = MutableFileNode(None, None, {"k": 3, "n": 10}, None)
447 def _callback(*args, **kwargs):
448 self.failUnlessEqual(args, (4,) )
449 self.failUnlessEqual(kwargs, {"foo": 5})
452 d = n._do_serialized(_callback, 4, foo=5)
453 def _check_callback(res):
454 self.failUnlessEqual(res, 6)
455 self.failUnlessEqual(calls, [1])
456 d.addCallback(_check_callback)
459 raise ValueError("heya")
460 d.addCallback(lambda res:
461 self.shouldFail(ValueError, "_check_errback", "heya",
462 n._do_serialized, _errback))
465 def test_upload_and_download(self):
466 d = self.nodemaker.create_mutable_file()
468 d = defer.succeed(None)
469 d.addCallback(lambda res: n.get_servermap(MODE_READ))
470 d.addCallback(lambda smap: smap.dump(StringIO()))
471 d.addCallback(lambda sio:
472 self.failUnless("3-of-10" in sio.getvalue()))
473 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
474 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
475 d.addCallback(lambda res: n.download_best_version())
476 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
477 d.addCallback(lambda res: n.get_size_of_best_version())
478 d.addCallback(lambda size:
479 self.failUnlessEqual(size, len("contents 1")))
480 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
481 d.addCallback(lambda res: n.download_best_version())
482 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
483 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
484 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
485 d.addCallback(lambda res: n.download_best_version())
486 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
487 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
488 d.addCallback(lambda smap:
489 n.download_version(smap,
490 smap.best_recoverable_version()))
491 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
492 # test a file that is large enough to overcome the
493 # mapupdate-to-retrieve data caching (i.e. make the shares larger
494 # than the default readsize, which is 2000 bytes). A 15kB file
495 # will have 5kB shares.
496 d.addCallback(lambda res: n.overwrite(MutableData("large size file" * 1000)))
497 d.addCallback(lambda res: n.download_best_version())
498 d.addCallback(lambda res:
499 self.failUnlessEqual(res, "large size file" * 1000))
501 d.addCallback(_created)
505 def test_upload_and_download_mdmf(self):
506 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
508 d = defer.succeed(None)
509 d.addCallback(lambda ignored:
510 n.get_servermap(MODE_READ))
511 def _then(servermap):
512 dumped = servermap.dump(StringIO())
513 self.failUnlessIn("3-of-10", dumped.getvalue())
515 # Now overwrite the contents with some new contents. We want
516 # to make them big enough to force the file to be uploaded
517 # in more than one segment.
518 big_contents = "contents1" * 100000 # about 900 KiB
519 big_contents_uploadable = MutableData(big_contents)
520 d.addCallback(lambda ignored:
521 n.overwrite(big_contents_uploadable))
522 d.addCallback(lambda ignored:
523 n.download_best_version())
524 d.addCallback(lambda data:
525 self.failUnlessEqual(data, big_contents))
526 # Overwrite the contents again with some new contents. As
527 # before, they need to be big enough to force multiple
528 # segments, so that we make the downloader deal with
530 bigger_contents = "contents2" * 1000000 # about 9MiB
531 bigger_contents_uploadable = MutableData(bigger_contents)
532 d.addCallback(lambda ignored:
533 n.overwrite(bigger_contents_uploadable))
534 d.addCallback(lambda ignored:
535 n.download_best_version())
536 d.addCallback(lambda data:
537 self.failUnlessEqual(data, bigger_contents))
539 d.addCallback(_created)
543 def test_retrieve_pause(self):
544 # We should make sure that the retriever is able to pause
546 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
550 return node.overwrite(MutableData("contents1" * 100000))
551 d.addCallback(_created)
552 # Now we'll retrieve it into a pausing consumer.
553 d.addCallback(lambda ignored:
554 self.node.get_best_mutable_version())
555 def _got_version(version):
556 self.c = PausingConsumer()
557 return version.read(self.c)
558 d.addCallback(_got_version)
559 d.addCallback(lambda ignored:
560 self.failUnlessEqual(self.c.data, "contents1" * 100000))
564 def test_download_from_mdmf_cap(self):
565 # We should be able to download an MDMF file given its cap
566 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION)
568 self.uri = node.get_uri()
570 return node.overwrite(MutableData("contents1" * 100000))
572 node = self.nodemaker.create_from_cap(self.uri)
573 return node.download_best_version()
574 def _downloaded(data):
575 self.failUnlessEqual(data, "contents1" * 100000)
576 d.addCallback(_created)
578 d.addCallback(_downloaded)
582 def test_create_and_download_from_bare_mdmf_cap(self):
583 # MDMF caps have extension parameters on them by default. We
584 # need to make sure that they work without extension parameters.
585 contents = MutableData("contents" * 100000)
586 d = self.nodemaker.create_mutable_file(version=MDMF_VERSION,
591 self.failUnlessIn(":3:131073", uri)
592 # Now strip that off the end of the uri, then try creating
593 # and downloading the node again.
594 bare_uri = uri.replace(":3:131073", "")
595 assert ":3:131073" not in bare_uri
597 return self.nodemaker.create_from_cap(bare_uri)
598 d.addCallback(_created)
599 def _created_bare(node):
600 self.failUnlessEqual(node.get_writekey(),
601 self._created.get_writekey())
602 self.failUnlessEqual(node.get_readkey(),
603 self._created.get_readkey())
604 self.failUnlessEqual(node.get_storage_index(),
605 self._created.get_storage_index())
606 return node.download_best_version()
607 d.addCallback(_created_bare)
608 d.addCallback(lambda data:
609 self.failUnlessEqual(data, "contents" * 100000))
613 def test_mdmf_write_count(self):
614 # Publishing an MDMF file should only cause one write for each
615 # share that is to be published. Otherwise, we introduce
616 # undesirable semantics that are a regression from SDMF
617 upload = MutableData("MDMF" * 100000) # about 400 KiB
618 d = self.nodemaker.create_mutable_file(upload,
619 version=MDMF_VERSION)
620 def _check_server_write_counts(ignored):
621 sb = self.nodemaker.storage_broker
622 for server in sb.servers.itervalues():
623 self.failUnlessEqual(server.get_rref().queries, 1)
624 d.addCallback(_check_server_write_counts)
628 def test_create_with_initial_contents(self):
629 upload1 = MutableData("contents 1")
630 d = self.nodemaker.create_mutable_file(upload1)
632 d = n.download_best_version()
633 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
634 upload2 = MutableData("contents 2")
635 d.addCallback(lambda res: n.overwrite(upload2))
636 d.addCallback(lambda res: n.download_best_version())
637 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
639 d.addCallback(_created)
643 def test_create_mdmf_with_initial_contents(self):
644 initial_contents = "foobarbaz" * 131072 # 900KiB
645 initial_contents_uploadable = MutableData(initial_contents)
646 d = self.nodemaker.create_mutable_file(initial_contents_uploadable,
647 version=MDMF_VERSION)
649 d = n.download_best_version()
650 d.addCallback(lambda data:
651 self.failUnlessEqual(data, initial_contents))
652 uploadable2 = MutableData(initial_contents + "foobarbaz")
653 d.addCallback(lambda ignored:
654 n.overwrite(uploadable2))
655 d.addCallback(lambda ignored:
656 n.download_best_version())
657 d.addCallback(lambda data:
658 self.failUnlessEqual(data, initial_contents +
661 d.addCallback(_created)
665 def test_response_cache_memory_leak(self):
666 d = self.nodemaker.create_mutable_file("contents")
668 d = n.download_best_version()
669 d.addCallback(lambda res: self.failUnlessEqual(res, "contents"))
670 d.addCallback(lambda ign: self.failUnless(isinstance(n._cache, ResponseCache)))
672 def _check_cache(expected):
673 # The total size of cache entries should not increase on the second download;
674 # in fact the cache contents should be identical.
675 d2 = n.download_best_version()
676 d2.addCallback(lambda rep: self.failUnlessEqual(repr(n._cache.cache), expected))
678 d.addCallback(lambda ign: _check_cache(repr(n._cache.cache)))
680 d.addCallback(_created)
683 def test_create_with_initial_contents_function(self):
684 data = "initial contents"
685 def _make_contents(n):
686 self.failUnless(isinstance(n, MutableFileNode))
687 key = n.get_writekey()
688 self.failUnless(isinstance(key, str), key)
689 self.failUnlessEqual(len(key), 16) # AES key size
690 return MutableData(data)
691 d = self.nodemaker.create_mutable_file(_make_contents)
693 return n.download_best_version()
694 d.addCallback(_created)
695 d.addCallback(lambda data2: self.failUnlessEqual(data2, data))
699 def test_create_mdmf_with_initial_contents_function(self):
700 data = "initial contents" * 100000
701 def _make_contents(n):
702 self.failUnless(isinstance(n, MutableFileNode))
703 key = n.get_writekey()
704 self.failUnless(isinstance(key, str), key)
705 self.failUnlessEqual(len(key), 16)
706 return MutableData(data)
707 d = self.nodemaker.create_mutable_file(_make_contents,
708 version=MDMF_VERSION)
709 d.addCallback(lambda n:
710 n.download_best_version())
711 d.addCallback(lambda data2:
712 self.failUnlessEqual(data2, data))
716 def test_create_with_too_large_contents(self):
717 BIG = "a" * (self.OLD_MAX_SEGMENT_SIZE + 1)
718 BIG_uploadable = MutableData(BIG)
719 d = self.nodemaker.create_mutable_file(BIG_uploadable)
721 other_BIG_uploadable = MutableData(BIG)
722 d = n.overwrite(other_BIG_uploadable)
724 d.addCallback(_created)
727 def failUnlessCurrentSeqnumIs(self, n, expected_seqnum, which):
728 d = n.get_servermap(MODE_READ)
729 d.addCallback(lambda servermap: servermap.best_recoverable_version())
730 d.addCallback(lambda verinfo:
731 self.failUnlessEqual(verinfo[0], expected_seqnum, which))
734 def test_modify(self):
735 def _modifier(old_contents, servermap, first_time):
736 new_contents = old_contents + "line2"
738 def _non_modifier(old_contents, servermap, first_time):
740 def _none_modifier(old_contents, servermap, first_time):
742 def _error_modifier(old_contents, servermap, first_time):
743 raise ValueError("oops")
744 def _toobig_modifier(old_contents, servermap, first_time):
745 new_content = "b" * (self.OLD_MAX_SEGMENT_SIZE + 1)
748 def _ucw_error_modifier(old_contents, servermap, first_time):
749 # simulate an UncoordinatedWriteError once
752 raise UncoordinatedWriteError("simulated")
753 new_contents = old_contents + "line3"
755 def _ucw_error_non_modifier(old_contents, servermap, first_time):
756 # simulate an UncoordinatedWriteError once, and don't actually
757 # modify the contents on subsequent invocations
760 raise UncoordinatedWriteError("simulated")
763 initial_contents = "line1"
764 d = self.nodemaker.create_mutable_file(MutableData(initial_contents))
766 d = n.modify(_modifier)
767 d.addCallback(lambda res: n.download_best_version())
768 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
769 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
771 d.addCallback(lambda res: n.modify(_non_modifier))
772 d.addCallback(lambda res: n.download_best_version())
773 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
774 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "non"))
776 d.addCallback(lambda res: n.modify(_none_modifier))
777 d.addCallback(lambda res: n.download_best_version())
778 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
779 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "none"))
781 d.addCallback(lambda res:
782 self.shouldFail(ValueError, "error_modifier", None,
783 n.modify, _error_modifier))
784 d.addCallback(lambda res: n.download_best_version())
785 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
786 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "err"))
789 d.addCallback(lambda res: n.download_best_version())
790 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
791 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "big"))
793 d.addCallback(lambda res: n.modify(_ucw_error_modifier))
794 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
795 d.addCallback(lambda res: n.download_best_version())
796 d.addCallback(lambda res: self.failUnlessEqual(res,
798 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "ucw"))
800 def _reset_ucw_error_modifier(res):
803 d.addCallback(_reset_ucw_error_modifier)
805 # in practice, this n.modify call should publish twice: the first
806 # one gets a UCWE, the second does not. But our test jig (in
807 # which the modifier raises the UCWE) skips over the first one,
808 # so in this test there will be only one publish, and the seqnum
809 # will only be one larger than the previous test, not two (i.e. 4
811 d.addCallback(lambda res: n.modify(_ucw_error_non_modifier))
812 d.addCallback(lambda res: self.failUnlessEqual(len(calls), 2))
813 d.addCallback(lambda res: n.download_best_version())
814 d.addCallback(lambda res: self.failUnlessEqual(res,
816 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 4, "ucw"))
817 d.addCallback(lambda res: n.modify(_toobig_modifier))
819 d.addCallback(_created)
823 def test_modify_backoffer(self):
824 def _modifier(old_contents, servermap, first_time):
825 return old_contents + "line2"
827 def _ucw_error_modifier(old_contents, servermap, first_time):
828 # simulate an UncoordinatedWriteError once
831 raise UncoordinatedWriteError("simulated")
832 return old_contents + "line3"
833 def _always_ucw_error_modifier(old_contents, servermap, first_time):
834 raise UncoordinatedWriteError("simulated")
835 def _backoff_stopper(node, f):
837 def _backoff_pauser(node, f):
839 reactor.callLater(0.5, d.callback, None)
842 # the give-up-er will hit its maximum retry count quickly
843 giveuper = BackoffAgent()
844 giveuper._delay = 0.1
847 d = self.nodemaker.create_mutable_file(MutableData("line1"))
849 d = n.modify(_modifier)
850 d.addCallback(lambda res: n.download_best_version())
851 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
852 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "m"))
854 d.addCallback(lambda res:
855 self.shouldFail(UncoordinatedWriteError,
856 "_backoff_stopper", None,
857 n.modify, _ucw_error_modifier,
859 d.addCallback(lambda res: n.download_best_version())
860 d.addCallback(lambda res: self.failUnlessEqual(res, "line1line2"))
861 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 2, "stop"))
863 def _reset_ucw_error_modifier(res):
866 d.addCallback(_reset_ucw_error_modifier)
867 d.addCallback(lambda res: n.modify(_ucw_error_modifier,
869 d.addCallback(lambda res: n.download_best_version())
870 d.addCallback(lambda res: self.failUnlessEqual(res,
872 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "pause"))
874 d.addCallback(lambda res:
875 self.shouldFail(UncoordinatedWriteError,
877 n.modify, _always_ucw_error_modifier,
879 d.addCallback(lambda res: n.download_best_version())
880 d.addCallback(lambda res: self.failUnlessEqual(res,
882 d.addCallback(lambda res: self.failUnlessCurrentSeqnumIs(n, 3, "giveup"))
885 d.addCallback(_created)
888 def test_upload_and_download_full_size_keys(self):
889 self.nodemaker.key_generator = client.KeyGenerator()
890 d = self.nodemaker.create_mutable_file()
892 d = defer.succeed(None)
893 d.addCallback(lambda res: n.get_servermap(MODE_READ))
894 d.addCallback(lambda smap: smap.dump(StringIO()))
895 d.addCallback(lambda sio:
896 self.failUnless("3-of-10" in sio.getvalue()))
897 d.addCallback(lambda res: n.overwrite(MutableData("contents 1")))
898 d.addCallback(lambda res: self.failUnlessIdentical(res, None))
899 d.addCallback(lambda res: n.download_best_version())
900 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
901 d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
902 d.addCallback(lambda res: n.download_best_version())
903 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
904 d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
905 d.addCallback(lambda smap: n.upload(MutableData("contents 3"), smap))
906 d.addCallback(lambda res: n.download_best_version())
907 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
908 d.addCallback(lambda res: n.get_servermap(MODE_ANYTHING))
909 d.addCallback(lambda smap:
910 n.download_version(smap,
911 smap.best_recoverable_version()))
912 d.addCallback(lambda res: self.failUnlessEqual(res, "contents 3"))
914 d.addCallback(_created)
918 def test_size_after_servermap_update(self):
919 # a mutable file node should have something to say about how big
920 # it is after a servermap update is performed, since this tells
921 # us how large the best version of that mutable file is.
922 d = self.nodemaker.create_mutable_file()
925 return n.get_servermap(MODE_READ)
926 d.addCallback(_created)
927 d.addCallback(lambda ignored:
928 self.failUnlessEqual(self.n.get_size(), 0))
929 d.addCallback(lambda ignored:
930 self.n.overwrite(MutableData("foobarbaz")))
931 d.addCallback(lambda ignored:
932 self.failUnlessEqual(self.n.get_size(), 9))
933 d.addCallback(lambda ignored:
934 self.nodemaker.create_mutable_file(MutableData("foobarbaz")))
935 d.addCallback(_created)
936 d.addCallback(lambda ignored:
937 self.failUnlessEqual(self.n.get_size(), 9))
942 def publish_one(self):
943 # publish a file and create shares, which can then be manipulated
945 self.CONTENTS = "New contents go here" * 1000
946 self.uploadable = MutableData(self.CONTENTS)
947 self._storage = FakeStorage()
948 self._nodemaker = make_nodemaker(self._storage)
949 self._storage_broker = self._nodemaker.storage_broker
950 d = self._nodemaker.create_mutable_file(self.uploadable)
953 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
954 d.addCallback(_created)
957 def publish_mdmf(self):
958 # like publish_one, except that the result is guaranteed to be
960 # self.CONTENTS should have more than one segment.
961 self.CONTENTS = "This is an MDMF file" * 100000
962 self.uploadable = MutableData(self.CONTENTS)
963 self._storage = FakeStorage()
964 self._nodemaker = make_nodemaker(self._storage)
965 self._storage_broker = self._nodemaker.storage_broker
966 d = self._nodemaker.create_mutable_file(self.uploadable, version=MDMF_VERSION)
969 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
970 d.addCallback(_created)
974 def publish_sdmf(self):
975 # like publish_one, except that the result is guaranteed to be
977 self.CONTENTS = "This is an SDMF file" * 1000
978 self.uploadable = MutableData(self.CONTENTS)
979 self._storage = FakeStorage()
980 self._nodemaker = make_nodemaker(self._storage)
981 self._storage_broker = self._nodemaker.storage_broker
982 d = self._nodemaker.create_mutable_file(self.uploadable, version=SDMF_VERSION)
985 self._fn2 = self._nodemaker.create_from_cap(node.get_uri())
986 d.addCallback(_created)
990 def publish_multiple(self, version=0):
991 self.CONTENTS = ["Contents 0",
996 self.uploadables = [MutableData(d) for d in self.CONTENTS]
997 self._copied_shares = {}
998 self._storage = FakeStorage()
999 self._nodemaker = make_nodemaker(self._storage)
1000 d = self._nodemaker.create_mutable_file(self.uploadables[0], version=version) # seqnum=1
1003 # now create multiple versions of the same file, and accumulate
1004 # their shares, so we can mix and match them later.
1005 d = defer.succeed(None)
1006 d.addCallback(self._copy_shares, 0)
1007 d.addCallback(lambda res: node.overwrite(self.uploadables[1])) #s2
1008 d.addCallback(self._copy_shares, 1)
1009 d.addCallback(lambda res: node.overwrite(self.uploadables[2])) #s3
1010 d.addCallback(self._copy_shares, 2)
1011 d.addCallback(lambda res: node.overwrite(self.uploadables[3])) #s4a
1012 d.addCallback(self._copy_shares, 3)
1013 # now we replace all the shares with version s3, and upload a new
1014 # version to get s4b.
1015 rollback = dict([(i,2) for i in range(10)])
1016 d.addCallback(lambda res: self._set_versions(rollback))
1017 d.addCallback(lambda res: node.overwrite(self.uploadables[4])) #s4b
1018 d.addCallback(self._copy_shares, 4)
1019 # we leave the storage in state 4
1021 d.addCallback(_created)
1025 def _copy_shares(self, ignored, index):
1026 shares = self._storage._peers
1027 # we need a deep copy
1029 for peerid in shares:
1030 new_shares[peerid] = {}
1031 for shnum in shares[peerid]:
1032 new_shares[peerid][shnum] = shares[peerid][shnum]
1033 self._copied_shares[index] = new_shares
1035 def _set_versions(self, versionmap):
1036 # versionmap maps shnums to which version (0,1,2,3,4) we want the
1037 # share to be at. Any shnum which is left out of the map will stay at
1038 # its current version.
1039 shares = self._storage._peers
1040 oldshares = self._copied_shares
1041 for peerid in shares:
1042 for shnum in shares[peerid]:
1043 if shnum in versionmap:
1044 index = versionmap[shnum]
1045 shares[peerid][shnum] = oldshares[index][peerid][shnum]
class PausingConsumer:
    """IConsumer which pauses its producer after the first write and only
    resumes it much later, to exercise the downloader's pause/resume path.

    NOTE(review): the __init__ header/body and the first line of write()
    were elided in this copy; the reconstruction below (accumulating the
    written data in self.data) should be confirmed against upstream.
    """
    implements(IConsumer)

    def __init__(self):
        self.data = ""
        self.already_paused = False

    def registerProducer(self, producer, streaming):
        # remember the producer and start it immediately
        self.producer = producer
        self.producer.resumeProducing()

    def unregisterProducer(self):
        self.producer = None

    def _unpause(self, ignored):
        self.producer.resumeProducing()

    def write(self, data):
        self.data += data
        if not self.already_paused:
            # pause exactly once, and schedule the resume far enough in
            # the future that the download is clearly blocked on it
            self.producer.pauseProducing()
            self.already_paused = True
            reactor.callLater(15, self._unpause, None)
class Servermap(unittest.TestCase, PublishMixin):
    """ServermapUpdater tests against the in-RAM FakeStorage grid
    (one 3-of-10 SDMF file published by setUp)."""
    # NOTE(review): the "def setUp(self):" header for the next line was
    # elided in this copy.
        return self.publish_one()

    def make_servermap(self, mode=MODE_CHECK, fn=None, sb=None,
    # NOTE(review): line(s) elided in this copy (remaining parameter list,
    # presumably update_range=None, plus the fn/sb default guards).
            sb = self._storage_broker
        smu = ServermapUpdater(fn, sb, Monitor(),
                               ServerMap(), mode, update_range=update_range)
        # NOTE(review): line(s) elided in this copy (run the update and
        # return its Deferred).

    def update_servermap(self, oldmap, mode=MODE_CHECK):
        # refresh an existing servermap in the given mode
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
        # NOTE(review): line(s) elided in this copy (the "oldmap, mode)"
        # continuation and the update/return lines).

    def failUnlessOneRecoverable(self, sm, num_shares):
        # assert the map shows exactly one recoverable (3-of-10) version
        # with num_shares shares and no unrecoverable versions
        self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failIfEqual(best, None)
        self.failUnlessEqual(sm.recoverable_versions(), set([best]))
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available()[best], (num_shares, 3, 10))
        shnum, peerids = sm.make_sharemap().items()[0]
        peerid = list(peerids)[0]
        self.failUnlessEqual(sm.version_on_peer(peerid, shnum), best)
        self.failUnlessEqual(sm.version_on_peer(peerid, 666), None)
        # NOTE(review): a trailing "return sm" appears elided here; the
        # callback chains below depend on the map being passed along.

    def test_basic(self):
        d = defer.succeed(None)
        ms = self.make_servermap
        us = self.update_servermap

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        # this mode stops at k+epsilon, and epsilon=k, so 6 shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        # this mode stops at 'k' shares
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))

        # and can we re-use the same servermap? Note that these are sorted in
        # increasing order of number of servers queried, since once a server
        # gets into the servermap, we'll always ask it for an update.
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 3))
        d.addCallback(lambda sm: us(sm, mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        d.addCallback(lambda sm: us(sm, mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        d.addCallback(lambda sm: us(sm, mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        # NOTE(review): "return d" appears elided here.

    def test_fetch_privkey(self):
        d = defer.succeed(None)
        # use the sibling filenode (which hasn't been used yet), and make
        # sure it can fetch the privkey. The file is small, so the privkey
        # will be fetched on the first (query) pass.
        d.addCallback(lambda res: self.make_servermap(MODE_WRITE, self._fn2))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))

        # create a new file, which is large enough to knock the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 200 # about 5KB
        LARGE_uploadable = MutableData(LARGE)
        d.addCallback(lambda res: self._nodemaker.create_mutable_file(LARGE_uploadable))
        def _created(large_fn):
            large_fn2 = self._nodemaker.create_from_cap(large_fn.get_uri())
            return self.make_servermap(MODE_WRITE, large_fn2)
        d.addCallback(_created)
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 10))
        # NOTE(review): "return d" appears elided here.

    def test_mark_bad(self):
        d = defer.succeed(None)
        ms = self.make_servermap

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessOneRecoverable(sm, 6))
        # NOTE(review): the "def _made_map(sm):" header appears elided here.
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            self.failUnlessEqual(len(shares), 6)
            self._corrupted = set()
            # mark the first 5 shares as corrupt, then update the servermap.
            # The map should not have the marked shares it in any more, and
            # new shares should be found to replace the missing ones.
            for (shnum, peerid, timestamp) in shares:
            # NOTE(review): a guard line (presumably "if shnum < 5:",
            # matching the comment above) appears elided here.
                self._corrupted.add( (peerid, shnum) )
                sm.mark_bad_share(peerid, shnum, "")
            return self.update_servermap(sm, MODE_WRITE)
        d.addCallback(_made_map)
        # NOTE(review): the "def _check_map(sm):" header appears elided here.
            # this should find all 5 shares that weren't marked bad
            v = sm.best_recoverable_version()
            vm = sm.make_versionmap()
            shares = list(vm[v])
            for (peerid, shnum) in self._corrupted:
                peer_shares = sm.shares_on_peer(peerid)
                self.failIf(shnum in peer_shares,
                            "%d was in %s" % (shnum, peer_shares))
            self.failUnlessEqual(len(shares), 5)
        d.addCallback(_check_map)
        # NOTE(review): "return d" appears elided here.

    def failUnlessNoneRecoverable(self, sm):
        # assert the map shows no versions at all, recoverable or otherwise
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 0)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 0)

    def test_no_shares(self):
        self._storage._peers = {} # delete all shares
        ms = self.make_servermap
        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))

        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNoneRecoverable(sm))
        # NOTE(review): "return d" appears elided here.

    def failUnlessNotQuiteEnough(self, sm):
        # assert exactly one unrecoverable version, with only 2 of the 3
        # needed shares available
        self.failUnlessEqual(len(sm.recoverable_versions()), 0)
        self.failUnlessEqual(len(sm.unrecoverable_versions()), 1)
        best = sm.best_recoverable_version()
        self.failUnlessEqual(best, None)
        self.failUnlessEqual(len(sm.shares_available()), 1)
        self.failUnlessEqual(sm.shares_available().values()[0], (2,3,10) )
        # NOTE(review): a trailing "return sm" appears elided here; the
        # chain in test_not_quite_enough_shares depends on it.

    def test_not_quite_enough_shares(self):
        # NOTE(review): a binding line (presumably "s = self._storage")
        # appears elided here; "s" is used below.
        ms = self.make_servermap
        num_shares = len(s._peers)
        for peerid in s._peers:
            s._peers[peerid] = {}
        # NOTE(review): line(s) elided in this copy (re-installing exactly
        # two shares, per the assertion below).
        # now there ought to be only two shares left
        assert len([peerid for peerid in s._peers if s._peers[peerid]]) == 2

        d = defer.succeed(None)

        d.addCallback(lambda res: ms(mode=MODE_CHECK))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda sm:
                      self.failUnlessEqual(len(sm.make_sharemap()), 2))
        d.addCallback(lambda res: ms(mode=MODE_ANYTHING))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_WRITE))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        d.addCallback(lambda res: ms(mode=MODE_READ))
        d.addCallback(lambda sm: self.failUnlessNotQuiteEnough(sm))
        # NOTE(review): "return d" appears elided here.

    def test_servermapupdater_finds_mdmf_files(self):
        # setUp already published an MDMF file for us. We just need to
        # make sure that when we run the ServermapUpdater, the file is
        # reported to have one recoverable version.
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        # Calling make_servermap also updates the servermap in the mode
        # that we specify, so we just need to see what it says.
        def _check_servermap(sm):
            self.failUnlessEqual(len(sm.recoverable_versions()), 1)
        d.addCallback(_check_servermap)
        # NOTE(review): "return d" appears elided here.

    def test_fetch_update(self):
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_mdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_WRITE, update_range=(1, 2)))
        def _check_servermap(sm):
            # NOTE(review): line(s) elided in this copy.
            self.failUnlessEqual(len(sm.update_data), 10)
            # NOTE(review): line(s) elided in this copy.
            for data in sm.update_data.itervalues():
                self.failUnlessEqual(len(data), 1)
        d.addCallback(_check_servermap)
        # NOTE(review): "return d" appears elided here.

    def test_servermapupdater_finds_sdmf_files(self):
        d = defer.succeed(None)
        d.addCallback(lambda ignored:
            self.publish_sdmf())
        d.addCallback(lambda ignored:
            self.make_servermap(mode=MODE_CHECK))
        d.addCallback(lambda servermap:
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1))
        # NOTE(review): "return d" appears elided here.
class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
    """Publish-then-retrieve round-trip tests, including many share
    corruption scenarios driven through _test_corrupt_all/_some."""
    # NOTE(review): the "def setUp(self):" header for the next line was
    # elided in this copy.
        return self.publish_one()

    def make_servermap(self, mode=MODE_READ, oldmap=None, sb=None):
        # NOTE(review): an "if oldmap is None:" guard appears elided here.
            oldmap = ServerMap()
        # NOTE(review): an "if sb is None:" guard appears elided here.
            sb = self._storage_broker
        smu = ServermapUpdater(self._fn, sb, Monitor(), oldmap, mode)
        # NOTE(review): line(s) elided in this copy (run the update and
        # return its Deferred).

    def abbrev_verinfo(self, verinfo):
        # abbreviate a verinfo tuple for debug output: "seqnum-roothash4"
        # NOTE(review): line(s) elided in this copy (presumably a
        # None-guard for verinfo).
        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
         offsets_tuple) = verinfo
        return "%d-%s" % (seqnum, base32.b2a(root_hash)[:4])

    def abbrev_verinfo_dict(self, verinfo_d):
        # NOTE(review): an "output = {}" initialization appears elided here.
        for verinfo,value in verinfo_d.items():
            (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
             offsets_tuple) = verinfo
            output["%d-%s" % (seqnum, base32.b2a(root_hash)[:4])] = value
        # NOTE(review): a "return output" appears elided here.

    def dump_servermap(self, servermap):
        # debugging helper: print a human-readable summary of the map
        print "SERVERMAP", servermap
        print "RECOVERABLE", [self.abbrev_verinfo(v)
                              for v in servermap.recoverable_versions()]
        print "BEST", self.abbrev_verinfo(servermap.best_recoverable_version())
        print "available", self.abbrev_verinfo_dict(servermap.shares_available())

    def do_download(self, servermap, version=None):
        # NOTE(review): an "if version is None:" guard appears elided here.
            version = servermap.best_recoverable_version()
        r = Retrieve(self._fn, servermap, version)
        c = consumer.MemoryConsumer()
        d = r.download(consumer=c)
        d.addCallback(lambda mc: "".join(mc.chunks))
        # NOTE(review): a "return d" appears elided here.

    def test_basic(self):
        d = self.make_servermap()
        def _do_retrieve(servermap):
            self._smap = servermap
            #self.dump_servermap(servermap)
            self.failUnlessEqual(len(servermap.recoverable_versions()), 1)
            return self.do_download(servermap)
        d.addCallback(_do_retrieve)
        def _retrieved(new_contents):
            self.failUnlessEqual(new_contents, self.CONTENTS)
        d.addCallback(_retrieved)
        # we should be able to re-use the same servermap, both with and
        # without updating it.
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        # clobbering the pubkey should make the servermap updater re-fetch it
        def _clobber_pubkey(res):
            self._fn._pubkey = None
        d.addCallback(_clobber_pubkey)
        d.addCallback(lambda res: self.make_servermap(oldmap=self._smap))
        d.addCallback(lambda res: self.do_download(self._smap))
        d.addCallback(_retrieved)
        # NOTE(review): "return d" appears elided here.

    def test_all_shares_vanished(self):
        d = self.make_servermap()
        def _remove_shares(servermap):
            for shares in self._storage._peers.values():
            # NOTE(review): the loop body (clearing each share dict)
            # appears elided here.
            d1 = self.shouldFail(NotEnoughSharesError,
                                 "test_all_shares_vanished",
            # NOTE(review): the expected-substring argument appears elided.
                                 self.do_download, servermap)
            # NOTE(review): a "return d1" appears elided here.
        d.addCallback(_remove_shares)
        # NOTE(review): "return d" appears elided here.

    def test_no_servers(self):
        sb2 = make_storagebroker(num_peers=0)
        # if there are no servers, then a MODE_READ servermap should come
        # back empty
        d = self.make_servermap(sb=sb2)
        def _check_servermap(servermap):
            self.failUnlessEqual(servermap.best_recoverable_version(), None)
            self.failIf(servermap.recoverable_versions())
            self.failIf(servermap.unrecoverable_versions())
            self.failIf(servermap.all_peers())
        d.addCallback(_check_servermap)
        # NOTE(review): "return d" appears elided here.

    def test_no_servers_download(self):
        sb2 = make_storagebroker(num_peers=0)
        self._fn._storage_broker = sb2
        d = self.shouldFail(UnrecoverableFileError,
                            "test_no_servers_download",
                            "no recoverable versions",
                            self._fn.download_best_version)
        # NOTE(review): the "def _restore(res):" header appears elided here.
            # a failed download that occurs while we aren't connected to
            # anybody should not prevent a subsequent download from working.
            # This isn't quite the webapi-driven test that #463 wants, but it
            # should be close enough.
            self._fn._storage_broker = self._storage_broker
            return self._fn.download_best_version()
        def _retrieved(new_contents):
            self.failUnlessEqual(new_contents, self.CONTENTS)
        d.addCallback(_restore)
        d.addCallback(_retrieved)
        # NOTE(review): "return d" appears elided here.

    def _test_corrupt_all(self, offset, substring,
                          should_succeed=False,
                          # NOTE(review): a parameter (referenced below as
                          # corrupt_early) appears elided here.
                          failure_checker=None,
                          fetch_privkey=False):
        d = defer.succeed(None)
        # NOTE(review): an "if corrupt_early:" guard appears elided here.
        d.addCallback(corrupt, self._storage, offset)
        d.addCallback(lambda res: self.make_servermap())
        if not corrupt_early:
            d.addCallback(corrupt, self._storage, offset)
        def _do_retrieve(servermap):
            ver = servermap.best_recoverable_version()
            if ver is None and not should_succeed:
                # no recoverable versions == not succeeding. The problem
                # should be noted in the servermap's list of problems.
                # NOTE(review): line(s) elided in this copy.
                allproblems = [str(f) for f in servermap.problems]
                self.failUnlessIn(substring, "".join(allproblems))
                # NOTE(review): line(s) elided in this copy (early return
                # and the "if should_succeed:" branch header).
            d1 = self._fn.download_version(servermap, ver,
            # NOTE(review): argument continuation elided in this copy.
            d1.addCallback(lambda new_contents:
                           self.failUnlessEqual(new_contents, self.CONTENTS))
            # NOTE(review): an "else:" branch header appears elided here.
            d1 = self.shouldFail(NotEnoughSharesError,
                                 "_corrupt_all(offset=%s)" % (offset,),
            # NOTE(review): argument line(s) elided in this copy.
                                 self._fn.download_version, servermap,
            # NOTE(review): argument line(s) and an "if failure_checker:"
            # guard appear elided in this copy.
            d1.addCallback(failure_checker)
            d1.addCallback(lambda res: servermap)
            # NOTE(review): a "return d1" appears elided here.
        d.addCallback(_do_retrieve)
        # NOTE(review): "return d" appears elided here.

    def test_corrupt_all_verbyte(self):
        # when the version byte is not 0 or 1, we hit an UnknownVersionError
        # error in unpack_share().
        d = self._test_corrupt_all(0, "UnknownVersionError")
        def _check_servermap(servermap):
            # and the dump should mention the problems
            # NOTE(review): line(s) elided in this copy (the output buffer
            # "s" used below, presumably a StringIO).
            dump = servermap.dump(s).getvalue()
            self.failUnless("30 PROBLEMS" in dump, dump)
        d.addCallback(_check_servermap)
        # NOTE(review): "return d" appears elided here.

    def test_corrupt_all_seqnum(self):
        # a corrupt sequence number will trigger a bad signature
        return self._test_corrupt_all(1, "signature is invalid")

    def test_corrupt_all_R(self):
        # a corrupt root hash will trigger a bad signature
        return self._test_corrupt_all(9, "signature is invalid")

    def test_corrupt_all_IV(self):
        # a corrupt salt/IV will trigger a bad signature
        return self._test_corrupt_all(41, "signature is invalid")

    def test_corrupt_all_k(self):
        # a corrupt 'k' will trigger a bad signature
        return self._test_corrupt_all(57, "signature is invalid")

    def test_corrupt_all_N(self):
        # a corrupt 'N' will trigger a bad signature
        return self._test_corrupt_all(58, "signature is invalid")

    def test_corrupt_all_segsize(self):
        # a corrupt segsize will trigger a bad signature
        return self._test_corrupt_all(59, "signature is invalid")

    def test_corrupt_all_datalen(self):
        # a corrupt data length will trigger a bad signature
        return self._test_corrupt_all(67, "signature is invalid")

    def test_corrupt_all_pubkey(self):
        # a corrupt pubkey won't match the URI's fingerprint. We need to
        # remove the pubkey from the filenode, or else it won't bother trying
        # to update it
        self._fn._pubkey = None
        return self._test_corrupt_all("pubkey",
                                      "pubkey doesn't match fingerprint")

    def test_corrupt_all_sig(self):
        # a corrupt signature is a bad one
        # the signature runs from about [543:799], depending upon the length
        # of the pubkey
        return self._test_corrupt_all("signature", "signature is invalid")

    def test_corrupt_all_share_hash_chain_number(self):
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle the first byte, that will look like a bad hash number,
        # causing an IndexError
        return self._test_corrupt_all("share_hash_chain", "corrupt hashes")

    def test_corrupt_all_share_hash_chain_hash(self):
        # a corrupt share hash chain entry will show up as a bad hash. If we
        # mangle a few bytes in, that will look like a bad hash.
        return self._test_corrupt_all(("share_hash_chain",4), "corrupt hashes")

    def test_corrupt_all_block_hash_tree(self):
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure")

    def test_corrupt_all_block(self):
        return self._test_corrupt_all("share_data", "block hash tree failure")

    def test_corrupt_all_encprivkey(self):
        # a corrupted privkey won't even be noticed by the reader, only by a
        # writer
        return self._test_corrupt_all("enc_privkey", None, should_succeed=True)

    def test_corrupt_all_encprivkey_late(self):
        # this should work for the same reason as above, but we corrupt
        # after the servermap update to exercise the error handling
        # code
        # We need to remove the privkey from the node, or the retrieve
        # process won't know to update it.
        self._fn._privkey = None
        return self._test_corrupt_all("enc_privkey",
                                      None, # this shouldn't fail
                                      should_succeed=True,
                                      corrupt_early=False,
        # NOTE(review): a trailing argument line (presumably
        # "fetch_privkey=True)") appears elided here.

    def test_corrupt_all_seqnum_late(self):
        # corrupting the seqnum between mapupdate and retrieve should result
        # in NotEnoughSharesError, since each share will look invalid
        # NOTE(review): the "def _check(res):" header and failure
        # extraction line(s) appear elided here.
            self.failUnless(f.check(NotEnoughSharesError))
            self.failUnless("uncoordinated write" in str(f))
        return self._test_corrupt_all(1, "ran out of peers",
                                      corrupt_early=False,
                                      failure_checker=_check)

    def test_corrupt_all_block_hash_tree_late(self):
        # NOTE(review): the "def _check(res):" header and failure
        # extraction line(s) appear elided here.
            self.failUnless(f.check(NotEnoughSharesError))
        return self._test_corrupt_all("block_hash_tree",
                                      "block hash tree failure",
                                      corrupt_early=False,
                                      failure_checker=_check)

    def test_corrupt_all_block_late(self):
        # NOTE(review): the "def _check(res):" header and failure
        # extraction line(s) appear elided here.
            self.failUnless(f.check(NotEnoughSharesError))
        return self._test_corrupt_all("share_data", "block hash tree failure",
                                      corrupt_early=False,
                                      failure_checker=_check)

    def test_basic_pubkey_at_end(self):
        # we corrupt the pubkey in all but the last 'k' shares, allowing the
        # download to succeed but forcing a bunch of retries first. Note that
        # this is rather pessimistic: our Retrieve process will throw away
        # the whole share if the pubkey is bad, even though the rest of the
        # share might be good.
        self._fn._pubkey = None
        k = self._fn.get_required_shares()
        N = self._fn.get_total_shares()
        d = defer.succeed(None)
        d.addCallback(corrupt, self._storage, "pubkey",
                      shnums_to_corrupt=range(0, N-k))
        d.addCallback(lambda res: self.make_servermap())
        def _do_retrieve(servermap):
            self.failUnless(servermap.problems)
            self.failUnless("pubkey doesn't match fingerprint"
                            in str(servermap.problems[0]))
            ver = servermap.best_recoverable_version()
            r = Retrieve(self._fn, servermap, ver)
            c = consumer.MemoryConsumer()
            return r.download(c)
        d.addCallback(_do_retrieve)
        d.addCallback(lambda mc: "".join(mc.chunks))
        d.addCallback(lambda new_contents:
                      self.failUnlessEqual(new_contents, self.CONTENTS))
        # NOTE(review): "return d" appears elided here.

    def _test_corrupt_some(self, offset, mdmf=False):
        # NOTE(review): an "if mdmf:" guard appears elided here.
            d = self.publish_mdmf()
        # NOTE(review): an "else:" header appears elided here.
            d = defer.succeed(None)
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, offset, range(5)))
        d.addCallback(lambda ignored:
            self.make_servermap())
        def _do_retrieve(servermap):
            ver = servermap.best_recoverable_version()
            self.failUnless(ver)
            return self._fn.download_best_version()
        d.addCallback(_do_retrieve)
        d.addCallback(lambda new_contents:
                      self.failUnlessEqual(new_contents, self.CONTENTS))
        # NOTE(review): "return d" appears elided here.

    def test_corrupt_some(self):
        # corrupt the data of first five shares (so the servermap thinks
        # they're good but retrieve marks them as bad), so that the
        # MODE_READ set of 6 will be insufficient, forcing node.download to
        # retry with more servers.
        return self._test_corrupt_some("share_data")

    def test_download_fails(self):
        d = corrupt(None, self._storage, "signature")
        d.addCallback(lambda ignored:
            self.shouldFail(UnrecoverableFileError, "test_download_anyway",
                            "no recoverable versions",
                            self._fn.download_best_version))
        # NOTE(review): "return d" appears elided here.

    def test_corrupt_mdmf_block_hash_tree(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._test_corrupt_all(("block_hash_tree", 12 * 32),
                                   "block hash tree failure",
                                   corrupt_early=False,
                                   should_succeed=False))
        # NOTE(review): "return d" appears elided here.

    def test_corrupt_mdmf_block_hash_tree_late(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._test_corrupt_all(("block_hash_tree", 12 * 32),
                                   "block hash tree failure",
        # NOTE(review): an argument line (a corrupt_early setting)
        # appears elided here.
                                   should_succeed=False))
        # NOTE(review): "return d" appears elided here.

    def test_corrupt_mdmf_share_data(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            # TODO: Find out what the block size is and corrupt a
            # specific block, rather than just guessing.
            self._test_corrupt_all(("share_data", 12 * 40),
                                   "block hash tree failure",
        # NOTE(review): an argument line (a corrupt_early setting)
        # appears elided here.
                                   should_succeed=False))
        # NOTE(review): "return d" appears elided here.

    def test_corrupt_some_mdmf(self):
        return self._test_corrupt_some(("share_data", 12 * 40),
        # NOTE(review): a trailing argument line (presumably "mdmf=True)")
        # appears elided here.
1683 def check_good(self, r, where):
1684 self.failUnless(r.is_healthy(), where)
1687 def check_bad(self, r, where):
1688 self.failIf(r.is_healthy(), where)
1691 def check_expected_failure(self, r, expected_exception, substring, where):
1692 for (peerid, storage_index, shnum, f) in r.problems:
1693 if f.check(expected_exception):
1694 self.failUnless(substring in str(f),
1695 "%s: substring '%s' not in '%s'" %
1696 (where, substring, str(f)))
1698 self.fail("%s: didn't see expected exception %s in problems %s" %
1699 (where, expected_exception, r.problems))
class Checker(unittest.TestCase, CheckerMixin, PublishMixin):
    """File-checker and verifier tests for SDMF and MDMF mutable files."""
    # NOTE(review): the "def setUp(self):" header for the next line was
    # elided in this copy.
        return self.publish_one()

    def test_check_good(self):
        d = self._fn.check(Monitor())
        d.addCallback(self.check_good, "test_check_good")
        # NOTE(review): "return d" appears elided here.

    def test_check_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_good")
        # NOTE(review): "return d" appears elided here.

    def test_check_no_shares(self):
        for shares in self._storage._peers.values():
            # NOTE(review): the loop body (clearing each share dict)
            # appears elided here.
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_no_shares")
        # NOTE(review): "return d" appears elided here.

    def test_check_mdmf_no_shares(self):
        d = self.publish_mdmf()
        # NOTE(review): the "def _then(ignored):" header appears elided.
            for share in self._storage._peers.values():
                # NOTE(review): the loop body appears elided here.
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_no_shares")
        # NOTE(review): "return d" appears elided here.

    def test_check_not_enough_shares(self):
        for shares in self._storage._peers.values():
            for shnum in shares.keys():
                # NOTE(review): the share-deleting condition/body appears
                # elided here.
        d = self._fn.check(Monitor())
        d.addCallback(self.check_bad, "test_check_not_enough_shares")
        # NOTE(review): "return d" appears elided here.

    def test_check_mdmf_not_enough_shares(self):
        d = self.publish_mdmf()
        # NOTE(review): the "def _then(ignored):" header appears elided.
            for shares in self._storage._peers.values():
                for shnum in shares.keys():
                    # NOTE(review): deletion body appears elided here.
        d.addCallback(_then)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_not_enougH_shares")
        # NOTE(review): "return d" appears elided here. Note also the
        # "enougH" typo in the label above; it is only a message string.

    def test_check_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_all_bad_sig")
        # NOTE(review): "return d" appears elided here.

    def test_check_mdmf_all_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_check_mdmf_all_bad_sig")
        # NOTE(review): "return d" appears elided here.

    def test_check_all_bad_blocks(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Checker won't notice this.. it doesn't look at actual data
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_all_bad_blocks")
        # NOTE(review): "return d" appears elided here.

    def test_check_mdmf_all_bad_blocks(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data"))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor()))
        d.addCallback(self.check_good, "test_check_mdmf_all_bad_blocks")
        # NOTE(review): "return d" appears elided here.

    def test_verify_good(self):
        d = self._fn.check(Monitor(), verify=True)
        d.addCallback(self.check_good, "test_verify_good")
        # NOTE(review): "return d" appears elided here.

    def test_verify_all_bad_sig(self):
        d = corrupt(None, self._storage, 1) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_all_bad_sig")
        # NOTE(review): "return d" appears elided here.

    def test_verify_one_bad_sig(self):
        d = corrupt(None, self._storage, 1, [9]) # bad sig
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sig")
        # NOTE(review): "return d" appears elided here.

    def test_verify_one_bad_block(self):
        d = corrupt(None, self._storage, "share_data", [9]) # bad blocks
        # the Verifier *will* notice this, since it examines every byte
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_one_bad_block")
        # NOTE(review): "return d" appears elided here.

    def test_verify_one_bad_sharehash(self):
        d = corrupt(None, self._storage, "share_hash_chain", [9], 5)
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_sharehash")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "corrupt hashes",
                      "test_verify_one_bad_sharehash")
        # NOTE(review): "return d" appears elided here.

    def test_verify_one_bad_encprivkey(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_one_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "invalid privkey",
                      "test_verify_one_bad_encprivkey")
        # NOTE(review): "return d" appears elided here.

    def test_verify_one_bad_encprivkey_uncheckable(self):
        d = corrupt(None, self._storage, "enc_privkey", [9]) # bad privkey
        readonly_fn = self._fn.get_readonly()
        # a read-only node has no way to validate the privkey
        d.addCallback(lambda ignored:
            readonly_fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_one_bad_encprivkey_uncheckable")
        # NOTE(review): "return d" appears elided here.

    def test_verify_mdmf_good(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good, "test_verify_mdmf_good")
        # NOTE(review): "return d" appears elided here.

    def test_verify_mdmf_one_bad_block(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "share_data", [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        # We should find one bad block here
        d.addCallback(self.check_bad, "test_verify_mdmf_one_bad_block")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "block hash tree failure",
                      "test_verify_mdmf_one_bad_block")
        # NOTE(review): "return d" appears elided here.

    def test_verify_mdmf_bad_encprivkey(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [0]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_encprivkey")
        d.addCallback(self.check_expected_failure,
                      CorruptShareError, "privkey",
                      "test_verify_mdmf_bad_encprivkey")
        # NOTE(review): "return d" appears elided here.

    def test_verify_mdmf_bad_sig(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, 1, [1]))
        d.addCallback(lambda ignored:
            self._fn.check(Monitor(), verify=True))
        d.addCallback(self.check_bad, "test_verify_mdmf_bad_sig")
        # NOTE(review): "return d" appears elided here.

    def test_verify_mdmf_bad_encprivkey_uncheckable(self):
        d = self.publish_mdmf()
        d.addCallback(lambda ignored:
            corrupt(None, self._storage, "enc_privkey", [1]))
        d.addCallback(lambda ignored:
            self._fn.get_readonly())
        d.addCallback(lambda fn:
            fn.check(Monitor(), verify=True))
        d.addCallback(self.check_good,
                      "test_verify_mdmf_bad_encprivkey_uncheckable")
        # NOTE(review): "return d" appears elided here.
1913 class Repair(unittest.TestCase, PublishMixin, ShouldFailMixin):
1915 def get_shares(self, s):
1916 all_shares = {} # maps (peerid, shnum) to share data
1917 for peerid in s._peers:
1918 shares = s._peers[peerid]
1919 for shnum in shares:
1920 data = shares[shnum]
1921 all_shares[ (peerid, shnum) ] = data
1924 def copy_shares(self, ignored=None):
1925 self.old_shares.append(self.get_shares(self._storage))
    def test_repair_nop(self):
        # Repairing a healthy file should succeed: shares stay in the same
        # places but are re-published at seqnum+1.
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        def _check_results(rres):
            self.failUnless(IRepairResults.providedBy(rres))
            self.failUnless(rres.get_successful())
            # TODO: examine results
            # NOTE(review): a second snapshot call (self.copy_shares()) appears
            # to be missing here; old_shares[1] below depends on it -- confirm.
            initial_shares = self.old_shares[0]
            new_shares = self.old_shares[1]
            # TODO: this really shouldn't change anything. When we implement
            # a "minimal-bandwidth" repairer", change this test to assert:
            #self.failUnlessEqual(new_shares, initial_shares)
            # all shares should be in the same place as before
            self.failUnlessEqual(set(initial_shares.keys()),
                                 set(new_shares.keys()))
            # but they should all be at a newer seqnum. The IV will be
            # different, so the roothash will be too.
            for key in initial_shares:
                # NOTE(review): the opening of these unpack tuples (e.g.
                # "(version0, seqnum0, root_hash0, IV0,") appears truncated --
                # confirm against upstream.
                k0, N0, segsize0, datalen0,
                o0) = unpack_header(initial_shares[key])
                k1, N1, segsize1, datalen1,
                o1) = unpack_header(new_shares[key])
                self.failUnlessEqual(version0, version1)
                self.failUnlessEqual(seqnum0+1, seqnum1)
                self.failUnlessEqual(k0, k1)
                self.failUnlessEqual(N0, N1)
                self.failUnlessEqual(segsize0, segsize1)
                self.failUnlessEqual(datalen0, datalen1)
        d.addCallback(_check_results)
1973 def failIfSharesChanged(self, ignored=None):
1974 old_shares = self.old_shares[-2]
1975 current_shares = self.old_shares[-1]
1976 self.failUnlessEqual(old_shares, current_shares)
    def test_unrepairable_0shares(self):
        # With every share deleted, repair cannot possibly succeed.
        d = self.publish_one()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                # NOTE(review): the loop body (clearing shares[peerid])
                # appears to be missing here -- confirm against upstream.
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): the enclosing 'def _check(crr):' appears to be missing.
        self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)
    def test_mdmf_unrepairable_0shares(self):
        # Same as above, for an MDMF-format file.
        d = self.publish_mdmf()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                # NOTE(review): loop body appears to be missing -- confirm.
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        d.addCallback(lambda crr: self.failIf(crr.get_successful()))
    def test_unrepairable_1share(self):
        # With fewer than k shares remaining, repair should report failure.
        d = self.publish_one()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): a guard (likely 'if shnum > 0:') appears
                    # to be missing here, so that one share survives -- confirm.
                    del shares[peerid][shnum]
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): the enclosing 'def _check(crr):' appears to be missing.
        self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)
    def test_mdmf_unrepairable_1share(self):
        # Same as above, for an MDMF-format file.
        d = self.publish_mdmf()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): guard apparently missing here too -- confirm.
                    del shares[peerid][shnum]
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): the enclosing 'def _check(crr):' appears to be missing.
        self.failUnlessEqual(crr.get_successful(), False)
        d.addCallback(_check)
    def test_repairable_5shares(self):
        # With at least k shares remaining, repair should succeed.
        d = self.publish_mdmf()
        def _delete_all_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): a guard (likely 'if shnum > 4:') appears to
                    # be missing here, leaving five shares -- confirm.
                    del shares[peerid][shnum]
        d.addCallback(_delete_all_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): the enclosing 'def _check(crr):' appears to be missing.
        self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check)
    def test_mdmf_repairable_5shares(self):
        # MDMF variant: delete shares down to a recoverable-but-unhealthy
        # state, verify the checker agrees, then repair successfully.
        d = self.publish_mdmf()
        def _delete_some_shares(ign):
            shares = self._storage._peers
            for peerid in shares:
                for shnum in list(shares[peerid]):
                    # NOTE(review): deletion guard appears missing -- confirm.
                    del shares[peerid][shnum]
        d.addCallback(_delete_some_shares)
        d.addCallback(lambda ign: self._fn.check(Monitor()))
        # NOTE(review): 'def _check(cr):' appears to be missing here.
        self.failIf(cr.is_healthy())
        self.failUnless(cr.is_recoverable())
        d.addCallback(_check)
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # NOTE(review): 'def _check1(crr):' appears to be missing here.
        self.failUnlessEqual(crr.get_successful(), True)
        d.addCallback(_check1)
    def test_merge(self):
        # Two recoverable versions share the highest seqnum: repair must be
        # forced, and afterwards exactly one version should remain.
        self.old_shares = []
        d = self.publish_multiple()
        # repair will refuse to merge multiple highest seqnums unless you
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(self.copy_shares)
        d.addCallback(lambda res: self._fn.check(Monitor()))
        def _try_repair(check_results):
            # Unforced repair must raise MustForceRepairError and leave the
            # shares untouched.
            ex = "There were multiple recoverable versions with identical seqnums, so force=True must be passed to the repair() operation"
            d2 = self.shouldFail(MustForceRepairError, "test_merge", ex,
                                 self._fn.repair, check_results)
            d2.addCallback(self.copy_shares)
            d2.addCallback(self.failIfSharesChanged)
            d2.addCallback(lambda res: check_results)
            # NOTE(review): a 'return d2' appears to be missing here -- confirm.
        d.addCallback(_try_repair)
        d.addCallback(lambda check_results:
                      self._fn.repair(check_results, force=True))
        # this should give us 10 shares of the highest roothash
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            roothash_s4a = self.get_roothash_for(3)
            roothash_s4b = self.get_roothash_for(4)
            if roothash_s4b > roothash_s4a:
                expected_contents = self.CONTENTS[4]
                # NOTE(review): an 'else:' branch appears to have been lost
                # here; the next assignment should be its alternative -- confirm.
                expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            # NOTE(review): a 'return d2' appears to be missing here -- confirm.
        d.addCallback(_check_smap)
    def test_non_merge(self):
        # A repair that does not need to merge (one clear winner among the
        # versions) should proceed without force=True.
        self.old_shares = []
        d = self.publish_multiple()
        # repair should not refuse a repair that doesn't need to merge. In
        # this case, we combine v2 with v3. The repair should ignore v2 and
        # copy v3 into a new v5.
        d.addCallback(lambda res:
                      self._set_versions({0:2,2:2,4:2,6:2,8:2,
                                          1:3,3:3,5:3,7:3,9:3}))
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(lambda check_results: self._fn.repair(check_results))
        # this should give us 10 shares of v3
        def _check_repair_results(rres):
            self.failUnless(rres.get_successful())
        d.addCallback(_check_repair_results)
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
            self.failIf(smap.unrecoverable_versions())
            # now, which should have won?
            expected_contents = self.CONTENTS[3]
            new_versionid = smap.best_recoverable_version()
            self.failUnlessEqual(new_versionid[0], 5) # seqnum 5
            d2 = self._fn.download_version(smap, new_versionid)
            d2.addCallback(self.failUnlessEqual, expected_contents)
            # NOTE(review): a 'return d2' appears to be missing here -- confirm.
        d.addCallback(_check_smap)
2151 def get_roothash_for(self, index):
2152 # return the roothash for the first share we see in the saved set
2153 shares = self._copied_shares[index]
2154 for peerid in shares:
2155 for shnum in shares[peerid]:
2156 share = shares[peerid][shnum]
2157 (version, seqnum, root_hash, IV, k, N, segsize, datalen, o) = \
2158 unpack_header(share)
    def test_check_and_repair_readcap(self):
        # we can't currently repair from a mutable readcap: #625
        self.old_shares = []
        d = self.publish_one()
        d.addCallback(self.copy_shares)
        def _get_readcap(res):
            self._fn3 = self._fn.get_readonly()
            # also delete some shares
            for peerid,shares in self._storage._peers.items():
                # NOTE(review): the loop body (dropping a share, e.g.
                # 'shares.pop(0, None)') appears to be missing -- confirm.
        d.addCallback(_get_readcap)
        d.addCallback(lambda res: self._fn3.check_and_repair(Monitor()))
        def _check_results(crr):
            self.failUnless(ICheckAndRepairResults.providedBy(crr))
            # we should detect the unhealthy, but skip over mutable-readcap
            # repairs until #625 is fixed
            self.failIf(crr.get_pre_repair_results().is_healthy())
            self.failIf(crr.get_repair_attempted())
            self.failIf(crr.get_post_repair_results().is_healthy())
        d.addCallback(_check_results)
class DevNullDictionary(dict):
    """A dict that silently discards every item assignment.

    Used by tests to replace caches (e.g. the nodemaker's node cache) so
    that each create_from_cap() call yields a distinct node object.
    """
    def __setitem__(self, key, value):
        # Intentionally drop the item so nothing is ever stored.
        return
class MultipleEncodings(unittest.TestCase):
    # NOTE(review): a 'def setUp(self):' header appears to be missing above
    # the following lines -- confirm against upstream.
        self.CONTENTS = "New contents go here"
        self.uploadable = MutableData(self.CONTENTS)
        self._storage = FakeStorage()
        self._nodemaker = make_nodemaker(self._storage, num_peers=20)
        self._storage_broker = self._nodemaker.storage_broker
        d = self._nodemaker.create_mutable_file(self.uploadable)
        # NOTE(review): the 'def _created(node): self._fn = node' callback
        # definition appears to be missing here -- confirm.
        d.addCallback(_created)

    def _encode(self, k, n, data, version=SDMF_VERSION):
        # encode 'data' into a peerid->shares dict.
        # NOTE(review): an assignment binding 'fn' (likely 'fn = self._fn')
        # appears to be missing here -- confirm.
        # disable the nodecache, since for these tests we explicitly need
        # multiple nodes pointing at the same file
        self._nodemaker._node_cache = DevNullDictionary()
        fn2 = self._nodemaker.create_from_cap(fn.get_uri())
        # then we copy over other fields that are normally fetched from the
        fn2._pubkey = fn._pubkey
        fn2._privkey = fn._privkey
        fn2._encprivkey = fn._encprivkey
        # and set the encoding parameters to something completely different
        fn2._required_shares = k
        fn2._total_shares = n
        # NOTE(review): the binding of 's' (likely 's = self._storage') appears
        # to be missing here -- confirm.
        s._peers = {} # clear existing storage
        p2 = Publish(fn2, self._storage_broker, None)
        uploadable = MutableData(data)
        d = p2.publish(uploadable)
        def _published(res):
            # NOTE(review): body appears to be missing (likely returns a copy
            # of s._peers) -- confirm.
        d.addCallback(_published)

    def make_servermap(self, mode=MODE_READ, oldmap=None):
        # NOTE(review): an 'if oldmap is None:' guard appears to be missing
        # above this line -- confirm.
        oldmap = ServerMap()
        smu = ServermapUpdater(self._fn, self._storage_broker, Monitor(),
        # NOTE(review): the remainder of this call and the method body appear
        # to be missing -- confirm against upstream.
    def test_multiple_encodings(self):
        # we encode the same file in two different ways (3-of-10 and 4-of-9),
        # then mix up the shares, to make sure that download survives seeing
        # a variety of encodings. This is actually kind of tricky to set up.
        contents1 = "Contents for encoding 1 (3-of-10) go here"
        contents2 = "Contents for encoding 2 (4-of-9) go here"
        contents3 = "Contents for encoding 3 (4-of-7) go here"
        # we make a retrieval object that doesn't know what encoding
        fn3 = self._nodemaker.create_from_cap(self._fn.get_uri())
        # now we upload a file through fn1, and grab its shares
        d = self._encode(3, 10, contents1)
        def _encoded_1(shares):
            self._shares1 = shares
        d.addCallback(_encoded_1)
        d.addCallback(lambda res: self._encode(4, 9, contents2))
        def _encoded_2(shares):
            self._shares2 = shares
        d.addCallback(_encoded_2)
        d.addCallback(lambda res: self._encode(4, 7, contents3))
        def _encoded_3(shares):
            self._shares3 = shares
        d.addCallback(_encoded_3)
        # NOTE(review): the 'def _merge(res):' header (and likely a
        # 'sharemap = {}' binding) appear to be missing below -- confirm.
        log.msg("merging sharelists")
        # we merge the shares from the two sets, leaving each shnum in
        # its original location, but using a share from set1 or set2
        # according to the following sequence:
        # so that neither form can be recovered until fetch [f], at which
        # point version-s1 (the 3-of-10 form) should be recoverable. If
        # the implementation latches on to the first version it sees,
        # then s2 will be recoverable at fetch [g].
        # Later, when we implement code that handles multiple versions,
        # we can use this framework to assert that all recoverable
        # versions are retrieved, and test that 'epsilon' does its job
        places = [2, 2, 3, 2, 1, 1, 1, 2]
        sb = self._storage_broker
        for peerid in sorted(sb.get_all_serverids()):
            for shnum in self._shares1.get(peerid, {}):
                if shnum < len(places):
                    which = places[shnum]
                self._storage._peers[peerid] = peers = {}
                in_1 = shnum in self._shares1[peerid]
                in_2 = shnum in self._shares2.get(peerid, {})
                in_3 = shnum in self._shares3.get(peerid, {})
                # NOTE(review): the 'if which == 1/2/3 and in_N:' guards that
                # select between the next three pairs appear to be missing.
                    peers[shnum] = self._shares1[peerid][shnum]
                    sharemap[shnum] = peerid
                    peers[shnum] = self._shares2[peerid][shnum]
                    sharemap[shnum] = peerid
                    peers[shnum] = self._shares3[peerid][shnum]
                    sharemap[shnum] = peerid
            # we don't bother placing any other shares
        # now sort the sequence so that share 0 is returned first
        new_sequence = [sharemap[shnum]
                        for shnum in sorted(sharemap.keys())]
        self._storage._sequence = new_sequence
        log.msg("merge done")
        d.addCallback(_merge)
        d.addCallback(lambda res: fn3.download_best_version())
        def _retrieved(new_contents):
            # the current specified behavior is "first version recoverable"
            self.failUnlessEqual(new_contents, contents1)
        d.addCallback(_retrieved)
class MultipleVersions(unittest.TestCase, PublishMixin, CheckerMixin):
    # NOTE(review): a 'def setUp(self):' header appears to be missing above
    # this return -- confirm against upstream.
        return self.publish_multiple()

    def test_multiple_versions(self):
        # if we see a mix of versions in the grid, download_best_version
        # should get the latest one
        self._set_versions(dict([(i,2) for i in (0,2,4,6,8)]))
        d = self._fn.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[4]))
        # and the checker should report problems
        d.addCallback(lambda res: self._fn.check(Monitor()))
        d.addCallback(self.check_bad, "test_multiple_versions")
        # but if everything is at version 2, that's what we should download
        d.addCallback(lambda res:
                      self._set_versions(dict([(i,2) for i in range(10)])))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # if exactly one share is at version 3, we should still get v2
        d.addCallback(lambda res:
                      self._set_versions({0:3}))
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, self.CONTENTS[2]))
        # but the servermap should see the unrecoverable version. This
        # depends upon the single newer share being queried early.
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 1)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 1)
            verinfo, health = newer.items()[0]
            self.failUnlessEqual(verinfo[0], 4)
            self.failUnlessEqual(health, (1,3))
            self.failIf(smap.needs_merge())
        d.addCallback(_check_smap)
        # if we have a mix of two parallel versions (s4a and s4b), we could
        d.addCallback(lambda res:
                      self._set_versions({0:3,2:3,4:3,6:3,8:3,
                                          1:4,3:4,5:4,7:4,9:4}))
        d.addCallback(lambda res: self._fn.get_servermap(MODE_READ))
        def _check_smap_mixed(smap):
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            newer = smap.unrecoverable_newer_versions()
            self.failUnlessEqual(len(newer), 0)
            self.failUnless(smap.needs_merge())
        d.addCallback(_check_smap_mixed)
        d.addCallback(lambda res: self._fn.download_best_version())
        d.addCallback(lambda res: self.failUnless(res == self.CONTENTS[3] or
                                                  res == self.CONTENTS[4]))
        # NOTE(review): a 'return d' appears to be missing here -- confirm.
    def test_replace(self):
        # if we see a mix of versions in the grid, we should be able to
        # replace them all with a newer version
        # if exactly one share is at version 3, we should download (and
        # replace) v2, and the result should be v4. Note that the index we
        # give to _set_versions is different than the sequence number.
        target = dict([(i,2) for i in range(10)]) # seqnum3
        target[0] = 3 # seqnum4
        self._set_versions(target)

        def _modify(oldversion, servermap, first_time):
            # modify() callback: append a marker to whatever version we read
            return oldversion + " modified"
        d = self._fn.modify(_modify)
        d.addCallback(lambda res: self._fn.download_best_version())
        expected = self.CONTENTS[2] + " modified"
        d.addCallback(lambda res: self.failUnlessEqual(res, expected))
        # and the servermap should indicate that the outlier was replaced too
        d.addCallback(lambda res: self._fn.get_servermap(MODE_CHECK))
        def _check_smap(smap):
            self.failUnlessEqual(smap.highest_seqnum(), 5)
            self.failUnlessEqual(len(smap.unrecoverable_versions()), 0)
            self.failUnlessEqual(len(smap.recoverable_versions()), 1)
        d.addCallback(_check_smap)
        # NOTE(review): a 'return d' appears to be missing here -- confirm.
class Utils(unittest.TestCase):
    def test_cache(self):
        """Exercise ResponseCache: hits, misses, boundaries, and joins.

        The cache was used without ever being constructed ('c' was unbound);
        restore the ResponseCache() instantiations (ResponseCache is imported
        at the top of this file).
        """
        c = ResponseCache()
        # xdata = base62.b2a(os.urandom(100))[:100]
        xdata = "1Ex4mdMaDyOl9YnGBM3I4xaBF97j8OQAg1K3RBR01F2PwTP4HohB3XpACuku8Xj4aTQjqJIR1f36mEj3BCNjXaJmPBEZnnHL0U9l"
        ydata = "4DCUQXvkEPnnr9Lufikq5t21JsnzZKhzxKBhLhrBB6iIcBOWRuT4UweDhjuKJUre8A4wOObJnl3Kiqmlj4vjSLSqUGAkUD87Y3vs"
        c.add("v1", 1, 0, xdata)
        c.add("v1", 1, 2000, ydata)
        # wrong verinfo / shnum must miss
        self.failUnlessEqual(c.read("v2", 1, 10, 11), None)
        self.failUnlessEqual(c.read("v1", 2, 10, 11), None)
        # reads fully inside a cached span hit
        self.failUnlessEqual(c.read("v1", 1, 0, 10), xdata[:10])
        self.failUnlessEqual(c.read("v1", 1, 90, 10), xdata[90:])
        # reads outside or straddling a span miss
        self.failUnlessEqual(c.read("v1", 1, 300, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 2050, 5), ydata[50:55])
        self.failUnlessEqual(c.read("v1", 1, 0, 101), None)
        self.failUnlessEqual(c.read("v1", 1, 99, 1), xdata[99:100])
        self.failUnlessEqual(c.read("v1", 1, 100, 1), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 9), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 10), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 11), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 15), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 19), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 20), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 21), None)
        self.failUnlessEqual(c.read("v1", 1, 1990, 25), None)
        self.failUnlessEqual(c.read("v1", 1, 1999, 25), None)

        # test joining fragments
        c = ResponseCache()
        c.add("v1", 1, 0, xdata[:10])
        c.add("v1", 1, 10, xdata[10:20])
        self.failUnlessEqual(c.read("v1", 1, 0, 20), xdata[:20])
class Exceptions(unittest.TestCase):
    """Sanity-check the repr() of mutable-file exception types."""
    def test_repr(self):
        # Each exception's repr must mention its class name.
        for exc in (NeedMoreDataError(100, 50, 100),
                    UncoordinatedWriteError()):
            self.failUnless(type(exc).__name__ in repr(exc), repr(exc))
class SameKeyGenerator:
    """A key generator that always hands back one fixed keypair.

    Stands in for the nodemaker's key generator so tests can predict the
    storage index of a newly created mutable file.
    """
    def __init__(self, pubkey, privkey):
        self.pubkey = pubkey
        self.privkey = privkey

    def generate(self, keysize=None):
        # 'keysize' is accepted for interface compatibility but ignored.
        keypair = (self.pubkey, self.privkey)
        return defer.succeed(keypair)
class FirstServerGetsKilled:
    # post_call_notifier hook: marks a server wrapper as broken after it
    # completes a call.
    # NOTE(review): upstream guards this with a one-shot 'done' flag so only
    # the FIRST server is broken; that __init__/guard appears to be missing.
    def notify(self, retval, wrapper, methname):
        wrapper.broken = True
        # NOTE(review): a 'return retval' appears to be missing -- confirm.

class FirstServerGetsDeleted:
    # post_call_notifier hook: after the first server answers a read, its
    # shares should appear to have vanished for all later queries.
    # NOTE(review): a 'def __init__(self):' header appears to be missing
    # above this line -- confirm.
        self.silenced = None
    def notify(self, retval, wrapper, methname):
        # this query will work, but later queries should think the share
        # NOTE(review): the guard selecting the first read call (and the
        # branch structure around these lines) appears to be missing.
        self.silenced = wrapper
        if wrapper == self.silenced:
            assert methname == "slot_testv_and_readv_and_writev"
            # NOTE(review): the body returning empty results appears missing.
class Problems(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
    def test_publish_surprise(self):
        # Writing with a stale servermap must fail with UncoordinatedWriteError.
        self.basedir = "mutable/Problems/test_publish_surprise"
        # NOTE(review): a 'self.set_up_grid()' call appears to be missing here.
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): a 'def _created(n):' wrapper appears to be missing
        # around the remainder of this method -- confirm.
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        def _got_smap1(smap):
            # stash the old state of the file
            # NOTE(review): 'self.old_map = smap' appears to be missing here.
        d.addCallback(_got_smap1)
        # then modify the file, leaving the old map untouched
        d.addCallback(lambda res: log.msg("starting winning write"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # now attempt to modify the file with the old servermap. This
        # will look just like an uncoordinated write, in which every
        # single share got updated between our mapupdate and our publish
        d.addCallback(lambda res: log.msg("starting doomed write"))
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "test_publish_surprise", None,
                                      # NOTE(review): the callable under test
                                      # (likely n.upload) appears to be missing.
                                      MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)

    def test_retrieve_surprise(self):
        # Reading an old version via a stale servermap must fail with
        # NotEnoughSharesError once the file has moved on.
        self.basedir = "mutable/Problems/test_retrieve_surprise"
        # NOTE(review): a 'self.set_up_grid()' call appears to be missing here.
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): a 'def _created(n):' wrapper appears to be missing.
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_READ))
        def _got_smap1(smap):
            # stash the old state of the file
            # NOTE(review): 'self.old_map = smap' appears to be missing here.
        d.addCallback(_got_smap1)
        # then modify the file, leaving the old map untouched
        d.addCallback(lambda res: log.msg("starting winning write"))
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # now attempt to retrieve the old version with the old servermap.
        # This will look like someone has changed the file since we
        # updated the servermap.
        d.addCallback(lambda res: n._cache._clear())
        d.addCallback(lambda res: log.msg("starting doomed read"))
        d.addCallback(lambda res:
                      self.shouldFail(NotEnoughSharesError,
                                      "test_retrieve_surprise",
                                      "ran out of peers: have 0 of 1",
                                      # NOTE(review): the callable under test
                                      # (likely n.download_version) and closing
                                      # arguments appear to be missing around
                                      # the next line.
                                      self.old_map.best_recoverable_version(),
        d.addCallback(_created)
    def test_unexpected_shares(self):
        # upload the file, take a servermap, shut down one of the servers,
        # upload it again (causing shares to appear on a new server), then
        # upload using the old servermap. The last upload should fail with an
        # UncoordinatedWriteError, because of the shares that didn't appear
        self.basedir = "mutable/Problems/test_unexpected_shares"
        # NOTE(review): a 'self.set_up_grid()' call appears to be missing here.
        nm = self.g.clients[0].nodemaker
        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): a 'def _created(n):' wrapper appears to be missing
        # around the remainder of this method -- confirm.
        d = defer.succeed(None)
        d.addCallback(lambda res: n.get_servermap(MODE_WRITE))
        def _got_smap1(smap):
            # stash the old state of the file
            # NOTE(review): 'self.old_map = smap' appears to be missing here.
            # now shut down one of the servers
            peer0 = list(smap.make_sharemap()[0])[0]
            self.g.remove_server(peer0)
            # then modify the file, leaving the old map untouched
            log.msg("starting winning write")
            return n.overwrite(MutableData("contents 2"))
        d.addCallback(_got_smap1)
        # now attempt to modify the file with the old servermap. This
        # will look just like an uncoordinated write, in which every
        # single share got updated between our mapupdate and our publish
        d.addCallback(lambda res: log.msg("starting doomed write"))
        d.addCallback(lambda res:
                      self.shouldFail(UncoordinatedWriteError,
                                      "test_surprise", None,
                                      # NOTE(review): the callable under test
                                      # (likely n.upload) appears to be missing.
                                      MutableData("contents 2a"), self.old_map))
        d.addCallback(_created)
    def test_bad_server(self):
        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server"
        # NOTE(review): a 'self.set_up_grid()' call appears to be missing here.
        nm = self.g.clients[0].nodemaker

        # to make sure that one of the initial peers is broken, we have to
        # get creative. We create an RSA key and compute its storage-index.
        # Then we make a KeyGenerator that always returns that one key, and
        # use it to create the mutable file. This will get easier when we can
        # use #467 static-server-selection to disable permutation and force
        # the choice of server for share[0].
        d = nm.key_generator.generate(TEST_RSA_KEY_SIZE)
        def _got_key( (pubkey, privkey) ):
            nm.key_generator = SameKeyGenerator(pubkey, privkey)
            pubkey_s = pubkey.serialize()
            privkey_s = privkey.serialize()
            u = uri.WriteableSSKFileURI(ssk_writekey_hash(privkey_s),
                                        ssk_pubkey_fingerprint_hash(pubkey_s))
            self._storage_index = u.get_storage_index()
        d.addCallback(_got_key)
        def _break_peer0(res):
            si = self._storage_index
            servers = nm.storage_broker.get_servers_for_psi(si)
            self.g.break_server(servers[0].get_serverid())
            self.server1 = servers[1]
        d.addCallback(_break_peer0)
        # now "create" the file, using the pre-established key, and let the
        # initial publish finally happen
        d.addCallback(lambda res: nm.create_mutable_file(MutableData("contents 1")))
        # that ought to work
        # NOTE(review): a 'def _got_node(n):' wrapper appears to be missing
        # around the remainder of this method -- confirm.
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break the second peer
        def _break_peer1(res):
            self.g.break_server(self.server1.get_serverid())
        d.addCallback(_break_peer1)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        def _explain_error(f):
            # Debug aid: surface the first underlying error on failure.
            if f.check(NotEnoughServersError):
                print "first_error:", f.value.first_error
            # NOTE(review): a 'return f' (re-raise) appears to be missing here.
        d.addErrback(_explain_error)
        d.addCallback(_got_node)
    def test_bad_server_overlap(self):
        # like test_bad_server, but with no extra unused servers to fall back
        # upon. This means that we must re-use a server which we've already
        # used. If we don't remember the fact that we sent them one share
        # already, we'll mistakenly think we're experiencing an
        # UncoordinatedWriteError.

        # Break one server, then create the file: the initial publish should
        # complete with an alternate server. Breaking a second server should
        # not prevent an update from succeeding either.
        self.basedir = "mutable/Problems/test_bad_server_overlap"
        # NOTE(review): a 'self.set_up_grid()' call appears to be missing here.
        nm = self.g.clients[0].nodemaker
        sb = nm.storage_broker

        peerids = [s.get_serverid() for s in sb.get_connected_servers()]
        self.g.break_server(peerids[0])

        d = nm.create_mutable_file(MutableData("contents 1"))
        # NOTE(review): a 'def _created(n):' wrapper appears to be missing
        # around the remainder of this method -- confirm.
        d = n.download_best_version()
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 1"))
        # now break one of the remaining servers
        def _break_second_server(res):
            self.g.break_server(peerids[1])
        d.addCallback(_break_second_server)
        d.addCallback(lambda res: n.overwrite(MutableData("contents 2")))
        # that ought to work too
        d.addCallback(lambda res: n.download_best_version())
        d.addCallback(lambda res: self.failUnlessEqual(res, "contents 2"))
        d.addCallback(_created)
    def test_publish_all_servers_bad(self):
        # Break all servers: the publish should fail
        self.basedir = "mutable/Problems/test_publish_all_servers_bad"
        # NOTE(review): a 'self.set_up_grid()' call appears to be missing here.
        nm = self.g.clients[0].nodemaker
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().broken = True

        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_all_servers_bad",
                            "ran out of good servers",
                            nm.create_mutable_file, MutableData("contents"))
        # NOTE(review): a 'return d' appears to be missing here -- confirm.

    def test_publish_no_servers(self):
        # no servers at all: the publish should fail
        self.basedir = "mutable/Problems/test_publish_no_servers"
        self.set_up_grid(num_servers=0)
        nm = self.g.clients[0].nodemaker

        d = self.shouldFail(NotEnoughServersError,
                            "test_publish_no_servers",
                            "Ran out of non-bad servers",
                            nm.create_mutable_file, MutableData("contents"))
        # NOTE(review): a 'return d' appears to be missing here -- confirm.
    def test_privkey_query_error(self):
        # when a servermap is updated with MODE_WRITE, it tries to get the
        # privkey. Something might go wrong during this query attempt.
        # Exercise the code in _privkey_query_failed which tries to handle
        self.basedir = "mutable/Problems/test_privkey_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        nm._node_cache = DevNullDictionary() # disable the nodecache

        # we need some contents that are large enough to push the privkey out
        # of the early part of the file
        LARGE = "These are Larger contents" * 2000 # about 50KB
        LARGE_uploadable = MutableData(LARGE)
        d = nm.create_mutable_file(LARGE_uploadable)
        # NOTE(review): a 'def _created(n):' header appears to be missing
        # above the following lines -- confirm.
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)

        # When a mapupdate is performed on a node that doesn't yet know
        # the privkey, a short read is sent to a batch of servers, to get
        # the verinfo and (hopefully, if the file is short enough) the
        # encprivkey. Our file is too large to let this first read
        # contain the encprivkey. Each non-encprivkey-bearing response
        # that arrives (until the node gets the encprivkey) will trigger
        # a second read to specifically read the encprivkey.
        # So, to exercise this case:
        # 1. notice which server gets a read() call first
        # 2. tell that server to start throwing errors
        killer = FirstServerGetsKilled()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = killer.notify
        d.addCallback(_created)

        # now we update a servermap from a new node (which doesn't have the
        # privkey yet, forcing it to use a separate privkey query). Note that
        # the map-update will succeed, since we'll just get a copy from one
        # of the other shares.
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
        # NOTE(review): a 'return d' appears to be missing here -- confirm.

    def test_privkey_query_missing(self):
        # like test_privkey_query_error, but the shares are deleted by the
        # second query, instead of raising an exception.
        self.basedir = "mutable/Problems/test_privkey_query_missing"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        LARGE = "These are Larger contents" * 2000 # about 50KiB
        LARGE_uploadable = MutableData(LARGE)
        nm._node_cache = DevNullDictionary() # disable the nodecache

        d = nm.create_mutable_file(LARGE_uploadable)
        # NOTE(review): a 'def _created(n):' header appears to be missing
        # above the following lines -- confirm.
        self.uri = n.get_uri()
        self.n2 = nm.create_from_cap(self.uri)
        deleter = FirstServerGetsDeleted()
        for s in nm.storage_broker.get_connected_servers():
            s.get_rref().post_call_notifier = deleter.notify
        d.addCallback(_created)
        d.addCallback(lambda res: self.n2.get_servermap(MODE_WRITE))
        # NOTE(review): a 'return d' appears to be missing here -- confirm.
    def test_block_and_hash_query_error(self):
        # This tests for what happens when a query to a remote server
        # fails in either the hash validation step or the block getting
        # step (because of batching, this is the same actual query).
        # We need to have the storage server persist up until the point
        # that its prefix is validated, then suddenly die. This
        # exercises some exception handling code in Retrieve.
        self.basedir = "mutable/Problems/test_block_and_hash_query_error"
        self.set_up_grid(num_servers=20)
        nm = self.g.clients[0].nodemaker
        CONTENTS = "contents" * 2000
        CONTENTS_uploadable = MutableData(CONTENTS)
        d = nm.create_mutable_file(CONTENTS_uploadable)
        # NOTE(review): a 'def _created(node): self._node = node' callback
        # definition appears to be missing here -- confirm.
        d.addCallback(_created)
        d.addCallback(lambda ignored:
            self._node.get_servermap(MODE_READ))
        def _then(servermap):
            # we have our servermap. Now we set up the servers like the
            # tests above -- the first one that gets a read call should
            # start throwing errors, but only after returning its prefix
            # for validation. Since we'll download without fetching the
            # private key, the next query to the remote server will be
            # for either a block and salt or for hashes, either of which
            # will exercise the error handling code.
            killer = FirstServerGetsKilled()
            for s in nm.storage_broker.get_connected_servers():
                s.get_rref().post_call_notifier = killer.notify
            ver = servermap.best_recoverable_version()
            return self._node.download_version(servermap, ver)
        d.addCallback(_then)
        d.addCallback(lambda data:
            self.failUnlessEqual(data, CONTENTS))
        # NOTE(review): a 'return d' appears to be missing here -- confirm.
class FileHandle(unittest.TestCase):
    """Unit tests for MutableFileHandle, the uploadable wrapper around a
    file-like object.

    NOTE(review): the 'def setUp' line, the 'chunk_size'/'start'
    assignments, and the 'f.close()' between write and re-open were
    missing from this block and have been restored.
    """
    def setUp(self):
        # ~450 KiB of data wrapped in a StringIO
        self.test_data = "Test Data" * 50000
        self.sio = StringIO(self.test_data)
        self.uploadable = MutableFileHandle(self.sio)


    def test_filehandle_read(self):
        # Reading in fixed-size chunks should return consecutive slices
        # of the underlying data.
        self.basedir = "mutable/FileHandle/test_filehandle_read"
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            # read() returns a list of strings; flatten it
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])


    def test_filehandle_get_size(self):
        self.basedir = "mutable/FileHandle/test_filehandle_get_size"
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)


    def test_filehandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])


    def test_filehandle_file(self):
        # Make sure that the MutableFileHandle works on a file as well
        # as a StringIO object, since in some cases it will be asked to
        # deal with files.
        self.basedir = self.mktemp()
        # necessary? What am I doing wrong here?
        os.mkdir(self.basedir)
        f_path = os.path.join(self.basedir, "test_file")
        f = open(f_path, "w")
        f.write(self.test_data)
        f.close()
        f = open(f_path, "r")

        uploadable = MutableFileHandle(f)

        data = uploadable.read(len(self.test_data))
        self.failUnlessEqual("".join(data), self.test_data)
        size = uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))


    def test_close(self):
        # Make sure that the MutableFileHandle closes its handle when
        # told to do so.
        self.uploadable.close()
        self.failUnless(self.sio.closed)
class DataHandle(unittest.TestCase):
    """Unit tests for MutableData, the in-memory uploadable.

    NOTE(review): the 'def setUp' line and the 'chunk_size'/'start'
    assignments were missing from this block and have been restored.
    """
    def setUp(self):
        self.test_data = "Test Data" * 50000
        self.uploadable = MutableData(self.test_data)


    def test_datahandle_read(self):
        # Reading in fixed-size chunks should return consecutive slices
        # of the underlying data.
        chunk_size = 10
        for i in xrange(0, len(self.test_data), chunk_size):
            data = self.uploadable.read(chunk_size)
            # read() returns a list of strings; flatten it
            data = "".join(data)
            start = i
            end = i + chunk_size
            self.failUnlessEqual(data, self.test_data[start:end])


    def test_datahandle_get_size(self):
        actual_size = len(self.test_data)
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, actual_size)


    def test_datahandle_get_size_out_of_order(self):
        # We should be able to call get_size whenever we want without
        # disturbing the location of the seek pointer.
        chunk_size = 100
        data = self.uploadable.read(chunk_size)
        self.failUnlessEqual("".join(data), self.test_data[:chunk_size])

        # Now get the size.
        size = self.uploadable.get_size()
        self.failUnlessEqual(size, len(self.test_data))

        # Now get more data. We should be right where we left off.
        more_data = self.uploadable.read(chunk_size)
        start = chunk_size
        end = chunk_size * 2
        self.failUnlessEqual("".join(more_data), self.test_data[start:end])
class Version(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin, \
              PublishMixin):
    """Tests for the version-oriented mutable-file APIs (MDMF and SDMF):
    readable/mutable version objects, downloader hints, partial reads,
    and version-specific downloads.

    NOTE(review): the class-statement continuation and the 'def setUp'
    line were missing from this block. 'PublishMixin' is inferred from
    the use of publish_multiple()/_set_versions()/self._fn in
    test_download_version below -- confirm against upstream.
    """
    def setUp(self):
        GridTestMixin.setUp(self)
        self.basedir = self.mktemp()
        # NOTE(review): the grid-setup call was missing; restored here.
        self.set_up_grid()
        self.c = self.g.clients[0]
        self.nm = self.c.nodemaker
        self.data = "test data" * 100000 # about 900 KiB; MDMF
        self.small_data = "test data" * 10 # about 90 B; SDMF
2923 def do_upload_mdmf(self):
2924 d = self.nm.create_mutable_file(MutableData(self.data),
2925 version=MDMF_VERSION)
2927 assert isinstance(n, MutableFileNode)
2928 assert n._protocol_version == MDMF_VERSION
2931 d.addCallback(_then)
2934 def do_upload_sdmf(self):
2935 d = self.nm.create_mutable_file(MutableData(self.small_data))
2937 assert isinstance(n, MutableFileNode)
2938 assert n._protocol_version == SDMF_VERSION
2941 d.addCallback(_then)
2944 def do_upload_empty_sdmf(self):
2945 d = self.nm.create_mutable_file(MutableData(""))
2947 assert isinstance(n, MutableFileNode)
2948 self.sdmf_zero_length_node = n
2949 assert n._protocol_version == SDMF_VERSION
2951 d.addCallback(_then)
2954 def do_upload(self):
2955 d = self.do_upload_mdmf()
2956 d.addCallback(lambda ign: self.do_upload_sdmf())
2959 def test_get_sequence_number(self):
2960 d = self.do_upload()
2961 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
2962 d.addCallback(lambda bv:
2963 self.failUnlessEqual(bv.get_sequence_number(), 1))
2964 d.addCallback(lambda ignored:
2965 self.sdmf_node.get_best_readable_version())
2966 d.addCallback(lambda bv:
2967 self.failUnlessEqual(bv.get_sequence_number(), 1))
2968 # Now update. The sequence number in both cases should be 1 in
2970 def _do_update(ignored):
2971 new_data = MutableData("foo bar baz" * 100000)
2972 new_small_data = MutableData("foo bar baz" * 10)
2973 d1 = self.mdmf_node.overwrite(new_data)
2974 d2 = self.sdmf_node.overwrite(new_small_data)
2975 dl = gatherResults([d1, d2])
2977 d.addCallback(_do_update)
2978 d.addCallback(lambda ignored:
2979 self.mdmf_node.get_best_readable_version())
2980 d.addCallback(lambda bv:
2981 self.failUnlessEqual(bv.get_sequence_number(), 2))
2982 d.addCallback(lambda ignored:
2983 self.sdmf_node.get_best_readable_version())
2984 d.addCallback(lambda bv:
2985 self.failUnlessEqual(bv.get_sequence_number(), 2))
2989 def test_version_extension_api(self):
2990 # We need to define an API by which an uploader can set the
2991 # extension parameters, and by which a downloader can retrieve
2993 d = self.do_upload_mdmf()
2994 d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
2995 def _got_version(version):
2996 hints = version.get_downloader_hints()
2997 # Should be empty at this point.
2998 self.failUnlessIn("k", hints)
2999 self.failUnlessEqual(hints['k'], 3)
3000 self.failUnlessIn('segsize', hints)
3001 self.failUnlessEqual(hints['segsize'], 131073)
3002 d.addCallback(_got_version)
3006 def test_extensions_from_cap(self):
3007 # If we initialize a mutable file with a cap that has extension
3008 # parameters in it and then grab the extension parameters using
3009 # our API, we should see that they're set correctly.
3010 d = self.do_upload_mdmf()
3012 mdmf_uri = self.mdmf_node.get_uri()
3013 new_node = self.nm.create_from_cap(mdmf_uri)
3014 return new_node.get_best_mutable_version()
3015 d.addCallback(_then)
3016 def _got_version(version):
3017 hints = version.get_downloader_hints()
3018 self.failUnlessIn("k", hints)
3019 self.failUnlessEqual(hints["k"], 3)
3020 self.failUnlessIn("segsize", hints)
3021 self.failUnlessEqual(hints["segsize"], 131073)
3022 d.addCallback(_got_version)
3026 def test_extensions_from_upload(self):
3027 # If we create a new mutable file with some contents, we should
3028 # get back an MDMF cap with the right hints in place.
3029 contents = "foo bar baz" * 100000
3030 d = self.nm.create_mutable_file(contents, version=MDMF_VERSION)
3031 def _got_mutable_file(n):
3032 rw_uri = n.get_uri()
3033 expected_k = str(self.c.DEFAULT_ENCODING_PARAMETERS['k'])
3034 self.failUnlessIn(expected_k, rw_uri)
3035 # XXX: Get this more intelligently.
3036 self.failUnlessIn("131073", rw_uri)
3038 ro_uri = n.get_readonly_uri()
3039 self.failUnlessIn(expected_k, ro_uri)
3040 self.failUnlessIn("131073", ro_uri)
3041 d.addCallback(_got_mutable_file)
3045 def test_cap_after_upload(self):
3046 # If we create a new mutable file and upload things to it, and
3047 # it's an MDMF file, we should get an MDMF cap back from that
3048 # file and should be able to use that.
3049 # That's essentially what MDMF node is, so just check that.
3050 d = self.do_upload_mdmf()
3052 mdmf_uri = self.mdmf_node.get_uri()
3053 cap = uri.from_string(mdmf_uri)
3054 self.failUnless(isinstance(cap, uri.WriteableMDMFFileURI))
3055 readonly_mdmf_uri = self.mdmf_node.get_readonly_uri()
3056 cap = uri.from_string(readonly_mdmf_uri)
3057 self.failUnless(isinstance(cap, uri.ReadonlyMDMFFileURI))
3058 d.addCallback(_then)
3061 def test_mutable_version(self):
3062 # assert that getting parameters from the IMutableVersion object
3063 # gives us the same data as getting them from the filenode itself
3064 d = self.do_upload()
3065 d.addCallback(lambda ign: self.mdmf_node.get_best_mutable_version())
3066 def _check_mdmf(bv):
3068 self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3069 self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3070 self.failIf(bv.is_readonly())
3071 d.addCallback(_check_mdmf)
3072 d.addCallback(lambda ign: self.sdmf_node.get_best_mutable_version())
3073 def _check_sdmf(bv):
3075 self.failUnlessEqual(bv.get_writekey(), n.get_writekey())
3076 self.failUnlessEqual(bv.get_storage_index(), n.get_storage_index())
3077 self.failIf(bv.is_readonly())
3078 d.addCallback(_check_sdmf)
3082 def test_get_readonly_version(self):
3083 d = self.do_upload()
3084 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3085 d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3087 # Attempting to get a mutable version of a mutable file from a
3088 # filenode initialized with a readcap should return a readonly
3089 # version of that same node.
3090 d.addCallback(lambda ign: self.mdmf_node.get_readonly())
3091 d.addCallback(lambda ro: ro.get_best_mutable_version())
3092 d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3094 d.addCallback(lambda ign: self.sdmf_node.get_best_readable_version())
3095 d.addCallback(lambda bv: self.failUnless(bv.is_readonly()))
3097 d.addCallback(lambda ign: self.sdmf_node.get_readonly())
3098 d.addCallback(lambda ro: ro.get_best_mutable_version())
3099 d.addCallback(lambda v: self.failUnless(v.is_readonly()))
3103 def test_toplevel_overwrite(self):
3104 new_data = MutableData("foo bar baz" * 100000)
3105 new_small_data = MutableData("foo bar baz" * 10)
3106 d = self.do_upload()
3107 d.addCallback(lambda ign: self.mdmf_node.overwrite(new_data))
3108 d.addCallback(lambda ignored:
3109 self.mdmf_node.download_best_version())
3110 d.addCallback(lambda data:
3111 self.failUnlessEqual(data, "foo bar baz" * 100000))
3112 d.addCallback(lambda ignored:
3113 self.sdmf_node.overwrite(new_small_data))
3114 d.addCallback(lambda ignored:
3115 self.sdmf_node.download_best_version())
3116 d.addCallback(lambda data:
3117 self.failUnlessEqual(data, "foo bar baz" * 10))
3121 def test_toplevel_modify(self):
3122 d = self.do_upload()
3123 def modifier(old_contents, servermap, first_time):
3124 return old_contents + "modified"
3125 d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3126 d.addCallback(lambda ignored:
3127 self.mdmf_node.download_best_version())
3128 d.addCallback(lambda data:
3129 self.failUnlessIn("modified", data))
3130 d.addCallback(lambda ignored:
3131 self.sdmf_node.modify(modifier))
3132 d.addCallback(lambda ignored:
3133 self.sdmf_node.download_best_version())
3134 d.addCallback(lambda data:
3135 self.failUnlessIn("modified", data))
3139 def test_version_modify(self):
3140 # TODO: When we can publish multiple versions, alter this test
3141 # to modify a version other than the best usable version, then
3142 # test to see that the best recoverable version is that.
3143 d = self.do_upload()
3144 def modifier(old_contents, servermap, first_time):
3145 return old_contents + "modified"
3146 d.addCallback(lambda ign: self.mdmf_node.modify(modifier))
3147 d.addCallback(lambda ignored:
3148 self.mdmf_node.download_best_version())
3149 d.addCallback(lambda data:
3150 self.failUnlessIn("modified", data))
3151 d.addCallback(lambda ignored:
3152 self.sdmf_node.modify(modifier))
3153 d.addCallback(lambda ignored:
3154 self.sdmf_node.download_best_version())
3155 d.addCallback(lambda data:
3156 self.failUnlessIn("modified", data))
3160 def test_download_version(self):
3161 d = self.publish_multiple()
3162 # We want to have two recoverable versions on the grid.
3163 d.addCallback(lambda res:
3164 self._set_versions({0:0,2:0,4:0,6:0,8:0,
3165 1:1,3:1,5:1,7:1,9:1}))
3166 # Now try to download each version. We should get the plaintext
3167 # associated with that version.
3168 d.addCallback(lambda ignored:
3169 self._fn.get_servermap(mode=MODE_READ))
3170 def _got_servermap(smap):
3171 versions = smap.recoverable_versions()
3172 assert len(versions) == 2
3174 self.servermap = smap
3175 self.version1, self.version2 = versions
3176 assert self.version1 != self.version2
3178 self.version1_seqnum = self.version1[0]
3179 self.version2_seqnum = self.version2[0]
3180 self.version1_index = self.version1_seqnum - 1
3181 self.version2_index = self.version2_seqnum - 1
3183 d.addCallback(_got_servermap)
3184 d.addCallback(lambda ignored:
3185 self._fn.download_version(self.servermap, self.version1))
3186 d.addCallback(lambda results:
3187 self.failUnlessEqual(self.CONTENTS[self.version1_index],
3189 d.addCallback(lambda ignored:
3190 self._fn.download_version(self.servermap, self.version2))
3191 d.addCallback(lambda results:
3192 self.failUnlessEqual(self.CONTENTS[self.version2_index],
3197 def test_download_nonexistent_version(self):
3198 d = self.do_upload_mdmf()
3199 d.addCallback(lambda ign: self.mdmf_node.get_servermap(mode=MODE_WRITE))
3200 def _set_servermap(servermap):
3201 self.servermap = servermap
3202 d.addCallback(_set_servermap)
3203 d.addCallback(lambda ignored:
3204 self.shouldFail(UnrecoverableFileError, "nonexistent version",
3206 self.mdmf_node.download_version, self.servermap,
3211 def test_partial_read(self):
3212 # read only a few bytes at a time, and see that the results are
3214 d = self.do_upload_mdmf()
3215 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3216 def _read_data(version):
3217 c = consumer.MemoryConsumer()
3218 d2 = defer.succeed(None)
3219 for i in xrange(0, len(self.data), 10000):
3220 d2.addCallback(lambda ignored, i=i: version.read(c, i, 10000))
3221 d2.addCallback(lambda ignored:
3222 self.failUnlessEqual(self.data, "".join(c.chunks)))
3224 d.addCallback(_read_data)
3228 def _test_partial_read(self, offset, length):
3229 d = self.do_upload_mdmf()
3230 d.addCallback(lambda ign: self.mdmf_node.get_best_readable_version())
3231 c = consumer.MemoryConsumer()
3232 d.addCallback(lambda version:
3233 version.read(c, offset, length))
3234 expected = self.data[offset:offset+length]
3235 d.addCallback(lambda ignored: "".join(c.chunks))
3236 def _check(results):
3237 if results != expected:
3239 print "got: %s ... %s" % (results[:20], results[-20:])
3240 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3241 self.fail("results != expected")
3242 d.addCallback(_check)
3245 def test_partial_read_starting_on_segment_boundary(self):
3246 return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3), 50)
3248 def test_partial_read_ending_one_byte_after_segment_boundary(self):
3249 return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3)-50, 51)
3251 def test_partial_read_zero_length_at_start(self):
3252 return self._test_partial_read(0, 0)
3254 def test_partial_read_zero_length_in_middle(self):
3255 return self._test_partial_read(50, 0)
3257 def test_partial_read_zero_length_at_segment_boundary(self):
3258 return self._test_partial_read(mathutil.next_multiple(128 * 1024, 3), 0)
3260 # XXX factor these into a single upload after they pass
3261 _broken = "zero-length reads of mutable files don't work"
3262 test_partial_read_zero_length_at_start.todo = _broken
3263 test_partial_read_zero_length_in_middle.todo = _broken
3264 test_partial_read_zero_length_at_segment_boundary.todo = _broken
3266 def _test_read_and_download(self, node, expected):
3267 d = node.get_best_readable_version()
3268 def _read_data(version):
3269 c = consumer.MemoryConsumer()
3270 d2 = defer.succeed(None)
3271 d2.addCallback(lambda ignored: version.read(c))
3272 d2.addCallback(lambda ignored:
3273 self.failUnlessEqual(expected, "".join(c.chunks)))
3275 d.addCallback(_read_data)
3276 d.addCallback(lambda ignored: node.download_best_version())
3277 d.addCallback(lambda data: self.failUnlessEqual(expected, data))
3280 def test_read_and_download_mdmf(self):
3281 d = self.do_upload_mdmf()
3282 d.addCallback(self._test_read_and_download, self.data)
3285 def test_read_and_download_sdmf(self):
3286 d = self.do_upload_sdmf()
3287 d.addCallback(self._test_read_and_download, self.small_data)
3290 def test_read_and_download_sdmf_zero_length(self):
3291 d = self.do_upload_empty_sdmf()
3292 d.addCallback(self._test_read_and_download, "")
3296 class Update(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3297 timeout = 400 # these tests are too big, 120s is not enough on slow
3300 GridTestMixin.setUp(self)
3301 self.basedir = self.mktemp()
3303 self.c = self.g.clients[0]
3304 self.nm = self.c.nodemaker
3305 self.data = "testdata " * 100000 # about 900 KiB; MDMF
3306 self.small_data = "test data" * 10 # about 90 B; SDMF
3307 return self.do_upload()
3310 def do_upload(self):
3311 d1 = self.nm.create_mutable_file(MutableData(self.data),
3312 version=MDMF_VERSION)
3313 d2 = self.nm.create_mutable_file(MutableData(self.small_data))
3314 dl = gatherResults([d1, d2])
3315 def _then((n1, n2)):
3316 assert isinstance(n1, MutableFileNode)
3317 assert isinstance(n2, MutableFileNode)
3321 dl.addCallback(_then)
3322 # Make SDMF and MDMF mutable file nodes that have 255 shares.
3323 def _make_max_shares(ign):
3324 self.nm.default_encoding_parameters['n'] = 255
3325 self.nm.default_encoding_parameters['k'] = 127
3326 d1 = self.nm.create_mutable_file(MutableData(self.data),
3327 version=MDMF_VERSION)
3329 self.nm.create_mutable_file(MutableData(self.small_data))
3330 return gatherResults([d1, d2])
3331 dl.addCallback(_make_max_shares)
3332 def _stash((n1, n2)):
3333 assert isinstance(n1, MutableFileNode)
3334 assert isinstance(n2, MutableFileNode)
3336 self.mdmf_max_shares_node = n1
3337 self.sdmf_max_shares_node = n2
3338 dl.addCallback(_stash)
3342 def _test_replace(self, offset, new_data):
3343 expected = self.data[:offset]+new_data+self.data[offset+len(new_data):]
3344 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3345 d = node.get_best_mutable_version()
3346 d.addCallback(lambda mv:
3347 mv.update(MutableData(new_data), offset))
3348 # close around node.
3349 d.addCallback(lambda ignored, node=node:
3350 node.download_best_version())
3351 def _check(results):
3352 if results != expected:
3354 print "got: %s ... %s" % (results[:20], results[-20:])
3355 print "exp: %s ... %s" % (expected[:20], expected[-20:])
3356 self.fail("results != expected")
3357 d.addCallback(_check)
3360 def test_append(self):
3361 # We should be able to append data to a mutable file and get
3363 return self._test_replace(len(self.data), "appended")
3365 def test_replace_middle(self):
3366 # We should be able to replace data in the middle of a mutable
3367 # file and get what we expect back.
3368 return self._test_replace(100, "replaced")
3370 def test_replace_beginning(self):
3371 # We should be able to replace data at the beginning of the file
3372 # without truncating the file
3373 return self._test_replace(0, "beginning")
3375 def test_replace_segstart1(self):
3376 return self._test_replace(128*1024+1, "NNNN")
3378 def test_replace_zero_length_beginning(self):
3379 return self._test_replace(0, "")
3381 def test_replace_zero_length_middle(self):
3382 return self._test_replace(50, "")
3384 def test_replace_zero_length_segstart1(self):
3385 return self._test_replace(128*1024+1, "")
3387 def test_replace_and_extend(self):
3388 # We should be able to replace data in the middle of a mutable
3389 # file and extend that mutable file and get what we expect.
3390 return self._test_replace(100, "modified " * 100000)
3393 def _check_differences(self, got, expected):
3394 # displaying arbitrary file corruption is tricky for a
3395 # 1MB file of repeating data,, so look for likely places
3396 # with problems and display them separately
3397 gotmods = [mo.span() for mo in re.finditer('([A-Z]+)', got)]
3398 expmods = [mo.span() for mo in re.finditer('([A-Z]+)', expected)]
3399 gotspans = ["%d:%d=%s" % (start,end,got[start:end])
3400 for (start,end) in gotmods]
3401 expspans = ["%d:%d=%s" % (start,end,expected[start:end])
3402 for (start,end) in expmods]
3403 #print "expecting: %s" % expspans
3407 print "differences:"
3408 for segnum in range(len(expected)//SEGSIZE):
3409 start = segnum * SEGSIZE
3410 end = (segnum+1) * SEGSIZE
3411 got_ends = "%s .. %s" % (got[start:start+20], got[end-20:end])
3412 exp_ends = "%s .. %s" % (expected[start:start+20], expected[end-20:end])
3413 if got_ends != exp_ends:
3414 print "expected[%d]: %s" % (start, exp_ends)
3415 print "got [%d]: %s" % (start, got_ends)
3416 if expspans != gotspans:
3417 print "expected: %s" % expspans
3418 print "got : %s" % gotspans
3419 open("EXPECTED","wb").write(expected)
3420 open("GOT","wb").write(got)
3421 print "wrote data to EXPECTED and GOT"
3422 self.fail("didn't get expected data")
3425 def test_replace_locations(self):
3426 # exercise fencepost conditions
3427 expected = self.data
3429 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3430 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3431 d = defer.succeed(None)
3432 for offset in suspects:
3433 new_data = letters.next()*2 # "AA", then "BB", etc
3434 expected = expected[:offset]+new_data+expected[offset+2:]
3435 d.addCallback(lambda ign:
3436 self.mdmf_node.get_best_mutable_version())
3437 def _modify(mv, offset=offset, new_data=new_data):
3438 # close over 'offset','new_data'
3439 md = MutableData(new_data)
3440 return mv.update(md, offset)
3441 d.addCallback(_modify)
3442 d.addCallback(lambda ignored:
3443 self.mdmf_node.download_best_version())
3444 d.addCallback(self._check_differences, expected)
3447 def test_replace_locations_max_shares(self):
3448 # exercise fencepost conditions
3449 expected = self.data
3451 suspects = range(SEGSIZE-3, SEGSIZE+1)+range(2*SEGSIZE-3, 2*SEGSIZE+1)
3452 letters = iter("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
3453 d = defer.succeed(None)
3454 for offset in suspects:
3455 new_data = letters.next()*2 # "AA", then "BB", etc
3456 expected = expected[:offset]+new_data+expected[offset+2:]
3457 d.addCallback(lambda ign:
3458 self.mdmf_max_shares_node.get_best_mutable_version())
3459 def _modify(mv, offset=offset, new_data=new_data):
3460 # close over 'offset','new_data'
3461 md = MutableData(new_data)
3462 return mv.update(md, offset)
3463 d.addCallback(_modify)
3464 d.addCallback(lambda ignored:
3465 self.mdmf_max_shares_node.download_best_version())
3466 d.addCallback(self._check_differences, expected)
3470 def test_append_power_of_two(self):
3471 # If we attempt to extend a mutable file so that its segment
3472 # count crosses a power-of-two boundary, the update operation
3473 # should know how to reencode the file.
3475 # Note that the data populating self.mdmf_node is about 900 KiB
3476 # long -- this is 7 segments in the default segment size. So we
3477 # need to add 2 segments worth of data to push it over a
3478 # power-of-two boundary.
3479 segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3480 new_data = self.data + (segment * 2)
3481 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3482 d = node.get_best_mutable_version()
3483 d.addCallback(lambda mv:
3484 mv.update(MutableData(segment * 2), len(self.data)))
3485 d.addCallback(lambda ignored, node=node:
3486 node.download_best_version())
3487 d.addCallback(lambda results:
3488 self.failUnlessEqual(results, new_data))
3492 def test_update_sdmf(self):
3493 # Running update on a single-segment file should still work.
3494 new_data = self.small_data + "appended"
3495 for node in (self.sdmf_node, self.sdmf_max_shares_node):
3496 d = node.get_best_mutable_version()
3497 d.addCallback(lambda mv:
3498 mv.update(MutableData("appended"), len(self.small_data)))
3499 d.addCallback(lambda ignored, node=node:
3500 node.download_best_version())
3501 d.addCallback(lambda results:
3502 self.failUnlessEqual(results, new_data))
3505 def test_replace_in_last_segment(self):
3506 # The wrapper should know how to handle the tail segment
3508 replace_offset = len(self.data) - 100
3509 new_data = self.data[:replace_offset] + "replaced"
3510 rest_offset = replace_offset + len("replaced")
3511 new_data += self.data[rest_offset:]
3512 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3513 d = node.get_best_mutable_version()
3514 d.addCallback(lambda mv:
3515 mv.update(MutableData("replaced"), replace_offset))
3516 d.addCallback(lambda ignored, node=node:
3517 node.download_best_version())
3518 d.addCallback(lambda results:
3519 self.failUnlessEqual(results, new_data))
3523 def test_multiple_segment_replace(self):
3524 replace_offset = 2 * DEFAULT_MAX_SEGMENT_SIZE
3525 new_data = self.data[:replace_offset]
3526 new_segment = "a" * DEFAULT_MAX_SEGMENT_SIZE
3527 new_data += 2 * new_segment
3528 new_data += "replaced"
3529 rest_offset = len(new_data)
3530 new_data += self.data[rest_offset:]
3531 for node in (self.mdmf_node, self.mdmf_max_shares_node):
3532 d = node.get_best_mutable_version()
3533 d.addCallback(lambda mv:
3534 mv.update(MutableData((2 * new_segment) + "replaced"),
3536 d.addCallback(lambda ignored, node=node:
3537 node.download_best_version())
3538 d.addCallback(lambda results:
3539 self.failUnlessEqual(results, new_data))
3542 class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixin):
3543 sdmf_old_shares = {}
3544 sdmf_old_shares[0] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAQ/EX4eC/1+hGOQ/h4EiKUkqxdsfzdcPlDvd11SGWZ0VHsUclZChTzuBAU2zLTXm+cG8IFhO50ly6Ey/DB44NtMKVaVzO0nU8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3545 sdmf_old_shares[1] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAhgcAb/adrQFrhlrRNoRpvjDuxmFebA4F0qCyqWssm61AAP7FHJWQoU87gQFNsy015vnBvCBYTudJcuhMvwweODbTD8Rfh4L/X6EY5D+HgSIpSSrF2x/N1w+UO93XVIZZnRUeePDXEwhqYDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3546 sdmf_old_shares[2] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewASoSCFpVj4utEE+eVFM146xfgC6DX39GaQ2zT3YKsWX3GiLwKtGffwqV7IlZIcBEVqMfTXSTZsY+dZm1MxxCZH0Zd33VY0yggDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3547 sdmf_old_shares[3] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcABOOLy8EETxh7h7/z9d62EiPu9CNpRrCOLxUhn+JUS+DuAAd8jdiCodW233N1acXhZGnulDKR3hiNsMdEIsijRPemewARoi8CrRn38KleyJWSHARFajH010k2bGPnWZtTMcQmR9GhIIWlWPi60QT55UUzXjrF+ALoNff0ZpDbNPdgqxZfcSNSplrHqtsDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3548 sdmf_old_shares[4] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwAUMA7/aVz7Mb1em0eks+biC8ZuVUhuAEkTVOAF4YulIjE8JlfW0dS1XKk62u0586QxiN38NTsluUDx8EAPTL66yRsfb1f3rRIDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3549 sdmf_old_shares[5] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAoIM8M4XulprmLd4gGMobS2Bv9CmwB5LpK/ySHE1QWjdwATPCZX1tHUtVypOtrtOfOkMYjd/DU7JblA8fBAD0y+uskwDv9pXPsxvV6bR6Sz5uILxm5VSG4ASRNU4AXhi6UiMUKZHBmcmEgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3550 sdmf_old_shares[6] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAWDSFSPvKzcFzRcuRlVgKUf0HBce1MCF8SwpUbPPEyfVJty4xLZ7DvNU/Eh/R6BarsVAagVXdp+GtEu0+fok7nilT4LchmHo8DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3551 sdmf_old_shares[7] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgACtTh7+7gs/l5w1lOkgbF6w7rkXLNslK7L2KYF4SPFLUcAA6dlE140Fc7FgB77PeM5Phv+bypQEYtyfLQHxd+OxlG3AAlyHZU7RfTJjbHu1gjabWZsTu+7nAeRVG6/ZSd4iMQ1ZgAVbcuMS2ew7zVPxIf0egWq7FQGoFV3afhrRLtPn6JO54oNIVI+8rNwXNFy5GVWApR/QcFx7UwIXxLClRs88TJ9UtLnNF4/mM0DE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3552 sdmf_old_shares[8] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAYmqKY7A9vQChuYa17fYSyKerIb3682jxiIneQvCMWCK5WcuI4PMeIsUAj8yxdxHvV+a9vtSCEsDVvymrrooDKX1GK98t37yoDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3553 sdmf_old_shares[9] = "VGFob2UgbXV0YWJsZSBjb250YWluZXIgdjEKdQlEA47ESLbTdKdpLJXCpBxd5OH239tl5hvAiz1dvGdE5rIOpf8cbfxbPcwNF+Y5dM92uBVbmV6KAAAAAAAAB/wAAAAAAAAJ0AAAAAFOWSw7jSx7WXzaMpdleJYXwYsRCV82jNA5oex9m2YhXSnb2POh+vvC1LE1NAfRc9GOb2zQG84Xdsx1Jub2brEeKkyt0sRIttN0p2kslcKkHF3k4fbf22XmAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABamJprL6ecrsOoFKdrXUmWveLq8nzEGDOjFnyK9detI3noX3uyK2MwSnFdAfyN0tuAwoAAAAAAAAAFQAAAAAAAAAVAAABjwAAAo8AAAMXAAADNwAAAAAAAAM+AAAAAAAAB/wwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAwggEIAoIBAQC1IkainlJF12IBXBQdpRK1zXB7a26vuEYqRmQM09YjC6sQjCs0F2ICk8n9m/2Kw4l16eIEboB2Au9pODCE+u/dEAakEFh4qidTMn61rbGUbsLK8xzuWNW22ezzz9/nPia0HDrulXt51/FYtfnnAuD1RJGXJv/8tDllE9FL/18TzlH4WuB6Fp8FTgv7QdbZAfWJHDGFIpVCJr1XxOCsSZNFJIqGwZnD2lsChiWw5OJDbKd8otqN1hIbfHyMyfMOJ/BzRzvZXaUt4Dv5nf93EmQDWClxShRwpuX/NkZ5B2K9OFonFTbOCexm/MjMAdCBqebKKaiHFkiknUCn9eJQpZ5bAgERgV50VKj+AVTDfgTpqfO2vfo4wrufi6ZBb8QV7hllhUFBjYogQ9C96dnS7skv0s+cqFuUjwMILr5/rsbEmEMGvl0T0ytyAbtlXuowEFVj/YORNknM4yjY72YUtEPTlMpk0Cis7aIgTvu5qWMPER26PMApZuRqiwRsGIkaJIvOVOTHHjFYe3/YzdMkc7OZtqRMfQLtwVl2/zKQQV8b/a9vaT6q3mRLRd4P3esaAFe/+7sR/t+9tmB+a8kxtKM6kmaVQJMbXJZ4aoHGfeLX0m35Rcvu2Bmph7QfSDjk/eaE3q55zYSoGWShmlhlw4Kwg84sMuhmcVhLvo0LovR8bKmbdgABUSzNKiMx0E91q51/WH6ASL0fDEOLef9oxuyBX5F5cpoABojmWkDX3k3FKfgNHIeptE3lxB8HHzxDfSD250psyfNCAAwGsKbMxbmI2NpdTozZ3SICrySwgGkatA1gsDOJmOnTzgAXVnLiODzHiLFAI/MsXcR71fmvb7UghLA1b8pq66KAyl+aopjsD29AKG5hrXt9hLIp6shvfrzaPGIid5C8IxYIrjgBj1YohGgDE0Wua7Lx6Bnad5n91qmHAnwSEJE5YIhQM634omd6cq9Wk4seJCUIn+ucoknrpxp0IR9QMxpKSMRHRUg2K8ZegnY3YqFunRZKCfsq9ufQEKgjZN12AFqi551KPBdn4/3V5HK6xTv0P4robSsE/BvuIfByvRf/W7ZrDx+CFC4EEcsBOACOZCrkhhqd5TkYKbe9RA+vs56+9N5qZGurkxcoKviiyEncxvTuShD65DK/6x6kMDMgQ
v/EdZDI3x9GtHTnRBYXwDGnPJ19w+q2zC3e2XarbxTGYQIPEC5mYx0gAA0sbjf018NGfwBhl6SB54iGsa8uLvR3jHv6OSRJgwxL6j7P0Ts4Hv2EtO12P0Lv21pwi3JC1O/WviSrKCvrQD5lMHL9Uym3hwFi2zu0mqwZvxOAbGy7kfOPXkLYKOHTZLthzKj3PsdjeceWBfYIvPGKYcd6wDr36d1aXSYS4IWeApTS2AQ2lu0DUcgSefAvsA8NkgOklvJY1cjTMSg6j6cxQo48Bvl8RAWGLbr4h2S/8KwDGxwLsSv0Gop/gnFc3GzCsmL0EkEyHHWkCA8YRXCghfW80KLDV495ff7yF5oiwK56GniqowZ3RG9Jxp5MXoJQgsLV1VMQFMAmsY69yz8eoxRH3wl9L0dMyndLulhWWzNwPMQ2I0yAWdzA/pksVmwTJTFenB3MHCiWc5rEwJ3yofe6NZZnZQrYyL9r1TNnVwfTwRUiykPiLSk4x9Mi6DX7RamDAxc8u3gDVfjPsTOTagBOEGUWlGAL54KE/E6sgCQ5DEAt12chk8AxbjBFLPgV+/idrzS0lZHOL+IVBI9D0i3Bq1yZcSIqcjZB0M3IbxbPm4gLAYOWEiTUN2ecsEHHg9nt6rhgffVoqSbCCFPbpC0xf7WOC3+BQORIZECOCC7cUAciXq3xn+GuxpFE40RWRJeKAK7bBQ21X89ABIXlQFkFddZ9kRvlZ2Pnl0oeF+2pjnZu0Yc2czNfZEQF2P7BKIdLrgMgxG89snxAY8qAYTCKyQw6xTG87wkjDcpy1wzsZLP3WsOuO7cAm7b27xU0jRKq8Cw4d1hDoyRG+RdS53F8RFJzVMaNNYgxU2tfRwUvXpTRXiOheeRVvh25+YGVnjakUXjx/dSDnOw4ETHGHD+7styDkeSfc3BdSZxswzc6OehgMI+xsCxeeRym15QUm9hxvg8X7Bfz/0WulgFwgzrm11TVynZYOmvyHpiZKoqQyQyKahIrfhwuchCr7lMsZ4a+umIkNkKxCLZnI+T7jd+eGFMgKItjz3kTTxRl3IhaJG3LbPmwRUJynMxQKdMi4Uf0qy0U7+i8hIJ9m50QXc+3tw2bwDSbx22XYJ9Wf14gxx5G5SPTb1JVCbhe4fxNt91xIxCow2zk62tzbYfRe6dfmDmgYHkv2PIEtMJZK8iKLDjFfu2ZUxsKT2A5g1q17og6o9MeXeuFS3mzJXJYFQZd+3UzlFR9qwkFkby9mg5y4XSeMvRLOHPt/H/r5SpEqBE6a9MadZYt61FBV152CUEzd43ihXtrAa0XH9HdsiySBcWI1SpM3mv9rRP0DiLjMUzHw/K1D8TE2f07zW4t/9kvE11tFj/NpICixQAAAAA="
3554 sdmf_old_cap = "URI:SSK:gmjgofw6gan57gwpsow6gtrz3e:5adm6fayxmu3e4lkmfvt6lkkfix34ai2wop2ioqr4bgvvhiol3kq"
3555 sdmf_old_contents = "This is a test file.\n"
def copy_sdmf_shares(self):
    """Place the canned, known-good SDMF shares directly into the grid.

    This short-circuits the normal upload process: each of the ten
    old-format shares (base64-encoded in self.sdmf_old_shares) is
    decoded and written straight into a distinct storage server's
    share directory, then we verify all ten shares are findable.
    """
    # We'll basically be short-circuiting the upload process.
    servernums = self.g.servers_by_number.keys()
    assert len(servernums) == 10

    # Pair each share number with a storage server number.
    assignments = zip(self.sdmf_old_shares.keys(), servernums)
    # Get the storage index, which names the on-disk share directory.
    cap = uri.from_string(self.sdmf_old_cap)
    si = cap.get_storage_index()

    # Now execute each assignment by writing the storage.
    for (share, servernum) in assignments:
        sharedata = base64.b64decode(self.sdmf_old_shares[share])
        storedir = self.get_serverdir(servernum)
        storage_path = os.path.join(storedir, "shares",
                                    storage_index_to_dir(si))
        fileutil.make_dirs(storage_path)
        # Share files are named by their share number.
        # (restored: the `sharedata` argument line was dropped in the
        # mangled paste)
        fileutil.write(os.path.join(storage_path, "%d" % share),
                       sharedata)
    # ...and verify that the shares are there.
    shares = self.find_uri_shares(self.sdmf_old_cap)
    assert len(shares) == 10
def test_new_downloader_can_read_old_shares(self):
    """A current client must be able to download a mutable file from
    shares written by an old (pre-refactoring) SDMF writer.

    Returns the download Deferred so trial waits for completion.
    """
    self.basedir = "mutable/Interoperability/new_downloader_can_read_old_shares"
    # (restored: this set_up_grid() call was dropped in the mangled
    # paste; without it there are no storage servers to copy into)
    self.set_up_grid()
    self.copy_sdmf_shares()
    nm = self.g.clients[0].nodemaker
    n = nm.create_from_cap(self.sdmf_old_cap)
    d = n.download_best_version()
    d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
    # (restored: returning the Deferred was dropped in the mangled
    # paste; trial needs it to wait for the async download)
    return d
3589 class DifferentEncoding(unittest.TestCase):
def setUp(self):
    """Create a fresh FakeStorage-backed nodemaker for each test.

    (restored: the `def setUp(self):` line was dropped in the mangled
    paste, leaving these two statements orphaned under the class.)
    """
    self._storage = s = FakeStorage()
    self.nodemaker = make_nodemaker(s)
3594 def test_filenode(self):
3595 # create a file with 3-of-20, then modify it with a client configured
3596 # to do 3-of-10. #1510 tracks a failure here
3597 self.nodemaker.default_encoding_parameters["n"] = 20
3598 d = self.nodemaker.create_mutable_file("old contents")
3600 filecap = n.get_cap().to_string()
3601 del n # we want a new object, not the cached one
3602 self.nodemaker.default_encoding_parameters["n"] = 10
3603 n2 = self.nodemaker.create_from_cap(filecap)
3605 d.addCallback(_created)
3606 def modifier(old_contents, servermap, first_time):
3607 return "new contents"
3608 d.addCallback(lambda n: n.modify(modifier))