From: Brian Warner
Date: Wed, 6 Aug 2008 06:12:39 +0000 (-0700)
Subject: mutable: start adding Repair tests, fix a simple bug
X-Git-Url: https://git.rkrishnan.org/?a=commitdiff_plain;h=dd6ec73efa30d6e2d946d2b5c19445c8061134ca;p=tahoe-lafs%2Ftahoe-lafs.git

mutable: start adding Repair tests, fix a simple bug
---

diff --git a/src/allmydata/mutable/publish.py b/src/allmydata/mutable/publish.py
index af02e631..15df0919 100644
--- a/src/allmydata/mutable/publish.py
+++ b/src/allmydata/mutable/publish.py
@@ -11,7 +11,7 @@ from allmydata import hashtree, codec, storage
 from pycryptopp.cipher.aes import AES
 from foolscap.eventual import eventually
 
-from common import MODE_WRITE, DictOfSets, \
+from common import MODE_WRITE, MODE_CHECK, DictOfSets, \
      UncoordinatedWriteError, NotEnoughServersError
 from servermap import ServerMap
 from layout import pack_prefix, pack_share, unpack_header, pack_checkstring, \
@@ -153,7 +153,7 @@ class Publish:
         # servermap was updated in MODE_WRITE, so we can depend upon the
         # peerlist computed by that process instead of computing our own.
         if self._servermap:
-            assert self._servermap.last_update_mode == MODE_WRITE
+            assert self._servermap.last_update_mode in (MODE_WRITE, MODE_CHECK)
             # we will push a version that is one larger than anything present
             # in the grid, according to the servermap.
             self._new_seqnum = self._servermap.highest_seqnum() + 1
diff --git a/src/allmydata/test/test_mutable.py b/src/allmydata/test/test_mutable.py
index 3fb2a61b..d4bc5a33 100644
--- a/src/allmydata/test/test_mutable.py
+++ b/src/allmydata/test/test_mutable.py
@@ -12,7 +12,7 @@ from allmydata.util.idlib import shortnodeid_b2a
 from allmydata.util.hashutil import tagged_hash
 from allmydata.util.fileutil import make_dirs
 from allmydata.interfaces import IURI, IMutableFileURI, IUploadable, \
-     FileTooLargeError
+     FileTooLargeError, IRepairResults
 from foolscap.eventual import eventually, fireEventually
 from foolscap.logging import log
 import sha
@@ -1244,6 +1244,66 @@ class Checker(unittest.TestCase, CheckerMixin):
                       "test_verify_one_bad_encprivkey_uncheckable")
         return d
 
+class Repair(unittest.TestCase, CheckerMixin):
+    def setUp(self):
+        # publish a file and create shares, which can then be manipulated
+        # later.
+        self.CONTENTS = "New contents go here" * 1000
+        num_peers = 20
+        self._client = FakeClient(num_peers)
+        self._storage = self._client._storage
+        d = self._client.create_mutable_file(self.CONTENTS)
+        def _created(node):
+            self._fn = node
+        d.addCallback(_created)
+        return d
+
+    def get_shares(self, s):
+        all_shares = {} # maps (peerid, shnum) to share data
+        for peerid in s._peers:
+            shares = s._peers[peerid]
+            for shnum in shares:
+                data = shares[shnum]
+                all_shares[ (peerid, shnum) ] = data
+        return all_shares
+
+    def test_repair_nop(self):
+        initial_shares = self.get_shares(self._storage)
+
+        d = self._fn.check()
+        d.addCallback(self._fn.repair)
+        def _check_results(rres):
+            self.failUnless(IRepairResults.providedBy(rres))
+            # TODO: examine results
+
+            new_shares = self.get_shares(self._storage)
+            # all shares should be in the same place as before
+            self.failUnlessEqual(set(initial_shares.keys()),
+                                 set(new_shares.keys()))
+            # but they should all be at a newer seqnum. The IV will be
+            # different, so the roothash will be too.
+            for key in initial_shares:
+                (version0,
+                 seqnum0,
+                 root_hash0,
+                 IV0,
+                 k0, N0, segsize0, datalen0,
+                 o0) = unpack_header(initial_shares[key])
+                (version1,
+                 seqnum1,
+                 root_hash1,
+                 IV1,
+                 k1, N1, segsize1, datalen1,
+                 o1) = unpack_header(new_shares[key])
+                self.failUnlessEqual(version0, version1)
+                self.failUnlessEqual(seqnum0+1, seqnum1)
+                self.failUnlessEqual(k0, k1)
+                self.failUnlessEqual(N0, N1)
+                self.failUnlessEqual(segsize0, segsize1)
+                self.failUnlessEqual(datalen0, datalen1)
+        d.addCallback(_check_results)
+        return d
+
 class MultipleEncodings(unittest.TestCase):
     def setUp(self):
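Note on the change: relaxing the assertion in Publish lets a servermap whose last update was MODE_CHECK (as produced by the checker) drive a republish, which is what repair does. The new test exercises exactly that path through the public mutable-file API: check() fires with checker results, which are handed straight to repair(). Below is a minimal sketch of that same flow, assuming only the calls used in the test above; the repair_flow name and its arguments are illustrative, not part of the codebase.

    from allmydata.interfaces import IRepairResults

    def repair_flow(client, contents):
        # Publish a mutable file, then run check() followed by repair(),
        # mirroring the sequence in Repair.test_repair_nop above.
        d = client.create_mutable_file(contents)
        def _created(node):
            d2 = node.check()            # fires with the checker results
            d2.addCallback(node.repair)  # repair() consumes those results
            def _verify(rres):
                # the repairer is expected to return an IRepairResults provider
                assert IRepairResults.providedBy(rres)
                return rres
            d2.addCallback(_verify)
            return d2
        d.addCallback(_created)
        return d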