From: Brian Warner
Date: Sat, 27 Aug 2011 22:50:31 +0000 (-0700)
Subject: SDMF: update filenode with correct k/N after Retrieve. Fixes #1510.
X-Git-Url: https://git.rkrishnan.org/frontends/listings/pb1server.py?a=commitdiff_plain;h=370e6f271e40945bfc3061c014f1c873b092bb50;p=tahoe-lafs%2Ftahoe-lafs.git

SDMF: update filenode with correct k/N after Retrieve. Fixes #1510.

Without this, we get a regression when modifying a mutable file that was
created with more shares (larger N) than our current tahoe.cfg. The
modification attempt creates new versions of the (0,1,..,newN-1) shares, but
leaves the old versions of the (newN,..,oldN-1) shares alone (and throws an
assertion error in SDMFSlotWriteProxy.finish_publishing in the process). The
mixed versions that result (some shares with e.g. N=10, some with N=20, such
that both versions are recoverable) cause problems for the Publish code,
even before MDMF landed. Might be related to refs #1390 and refs #1042.
---

diff --git a/src/allmydata/mutable/layout.py b/src/allmydata/mutable/layout.py
index e3835a19..93d0c8f1 100644
--- a/src/allmydata/mutable/layout.py
+++ b/src/allmydata/mutable/layout.py
@@ -499,7 +499,7 @@ class SDMFSlotWriteProxy:
         """
         for k in ["sharedata", "encprivkey", "signature", "verification_key",
                   "share_hash_chain", "block_hash_tree"]:
-            assert k in self._share_pieces, (k, self._share_pieces.keys())
+            assert k in self._share_pieces, (self.shnum, k, self._share_pieces.keys())
         # This is the only method that actually writes something to the
         # remote server.
         # First, we need to pack the share into data that we can write
diff --git a/src/allmydata/mutable/retrieve.py b/src/allmydata/mutable/retrieve.py
index 595da73e..b1ec761e 100644
--- a/src/allmydata/mutable/retrieve.py
+++ b/src/allmydata/mutable/retrieve.py
@@ -1080,6 +1080,12 @@ class Retrieve:
         self._status.timings['total'] = now - self._started
         self._status.timings['fetch'] = now - self._started_fetching
 
+        # remember the encoding parameters, use them again next time
+        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
+         offsets_tuple) = self.verinfo
+        self._node._populate_required_shares(k)
+        self._node._populate_total_shares(N)
+
         if self._verify:
             ret = list(self._bad_shares)
             self.log("done verifying, found %d bad shares" % len(ret))
diff --git a/src/allmydata/test/test_mutable.py b/src/allmydata/test/test_mutable.py
index e6eea1ee..628f918f 100644
--- a/src/allmydata/test/test_mutable.py
+++ b/src/allmydata/test/test_mutable.py
@@ -3577,3 +3577,25 @@ class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixi
         d = n.download_best_version()
         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
         return d
+
+class DifferentEncoding(unittest.TestCase):
+    def setUp(self):
+        self._storage = s = FakeStorage()
+        self.nodemaker = make_nodemaker(s)
+
+    def test_filenode(self):
+        # create a file with 3-of-20, then modify it with a client configured
+        # to do 3-of-10. #1510 tracks a failure here
+        self.nodemaker.default_encoding_parameters["n"] = 20
+        d = self.nodemaker.create_mutable_file("old contents")
+        def _created(n):
+            filecap = n.get_cap().to_string()
+            del n # we want a new object, not the cached one
+            self.nodemaker.default_encoding_parameters["n"] = 10
+            n2 = self.nodemaker.create_from_cap(filecap)
+            return n2
+        d.addCallback(_created)
+        def modifier(old_contents, servermap, first_time):
+            return "new contents"
+        d.addCallback(lambda n: n.modify(modifier))
+        return d
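
For readers tracing the retrieve.py hunk: the k and N written back into the
filenode come out of self.verinfo. A standalone sketch of that unpacking
follows; the field order is taken from the hunk above, but the per-field
comments are inferred from the SDMF design and are not part of this patch:

def encoding_params_from_verinfo(verinfo):
    # Field order matches the unpacking in Retrieve._done() above.
    (seqnum,        # sequence number of this version of the file
     root_hash,     # root of the share hash tree
     IV,            # SDMF encryption initialization vector
     segsize,       # segment size in bytes
     datalength,    # total length of the plaintext
     k,             # shares required to recover the file
     N,             # total shares published
     prefix,        # packed header fields covered by the signature
     offsets_tuple) = verinfo
    return (k, N)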
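
A companion sketch of the property the retrieve.py change establishes,
reusing FakeStorage and make_nodemaker from test_mutable.py. This is an
illustration, not part of the commit, and it assumes get_total_shares() is
the public accessor paired with the _populate_total_shares() setter used
above:

from twisted.trial import unittest

from allmydata.test.test_mutable import FakeStorage, make_nodemaker

class EncodingParamsAfterRetrieve(unittest.TestCase):
    def test_n_refreshed_after_download(self):
        # create a 3-of-20 file, then read it with a 3-of-10 client
        nodemaker = make_nodemaker(FakeStorage())
        nodemaker.default_encoding_parameters["n"] = 20
        d = nodemaker.create_mutable_file("old contents")
        def _created(n):
            filecap = n.get_cap().to_string()
            del n # we want a new object, not the cached one
            nodemaker.default_encoding_parameters["n"] = 10
            n2 = nodemaker.create_from_cap(filecap)
            d2 = n2.download_best_version()
            # Retrieve._done() should have pushed the file's real N=20
            # back into the node, overriding the tahoe.cfg default of 10.
            d2.addCallback(lambda ignored:
                           self.failUnlessEqual(n2.get_total_shares(), 20))
            return d2
        d.addCallback(_created)
        return d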