SDMF: update filenode with correct k/N after Retrieve. Fixes #1510.
author    Brian Warner <warner@lothar.com>
          Sat, 27 Aug 2011 22:50:31 +0000 (15:50 -0700)
committer Brian Warner <warner@lothar.com>
          Sat, 27 Aug 2011 22:50:31 +0000 (15:50 -0700)
Without this, we get a regression when modifying a mutable file that was
created with more shares (larger N) than our current tahoe.cfg. The
modification attempt creates new versions of the (0,1,..,newN-1) shares, but
leaves the old versions of the (newN,..,oldN-1) shares alone (and throws an
assertion error in SDMFSlotWriteProxy.finish_publishing in the process).

The mixed versions that result (some shares with e.g. N=10, some with N=20,
such that both versions are recoverable) cause problems for the Publish code,
even before MDMF landed. Might be related to refs #1390 and refs #1042.

src/allmydata/mutable/layout.py
src/allmydata/mutable/retrieve.py
src/allmydata/test/test_mutable.py

src/allmydata/mutable/layout.py
index e3835a1949e85fa3d5222700c4c22c6a5d1392d9..93d0c8f15c2b055715d04465e7797e67aab8252d 100644 (file)
@@ -499,7 +499,7 @@ class SDMFSlotWriteProxy:
         """
         for k in ["sharedata", "encprivkey", "signature", "verification_key",
                   "share_hash_chain", "block_hash_tree"]:
-            assert k in self._share_pieces, (k, self._share_pieces.keys())
+            assert k in self._share_pieces, (self.shnum, k, self._share_pieces.keys())
         # This is the only method that actually writes something to the
         # remote server.
         # First, we need to pack the share into data that we can write
src/allmydata/mutable/retrieve.py
index 595da73e43775a78c038f20a1f92237850a550dc..b1ec761efa0f7165c224c15dbe510564417b4aa8 100644 (file)
@@ -1080,6 +1080,12 @@ class Retrieve:
         self._status.timings['total'] = now - self._started
         self._status.timings['fetch'] = now - self._started_fetching
 
+        # remember the encoding parameters, use them again next time
+        (seqnum, root_hash, IV, segsize, datalength, k, N, prefix,
+         offsets_tuple) = self.verinfo
+        self._node._populate_required_shares(k)
+        self._node._populate_total_shares(N)
+
         if self._verify:
             ret = list(self._bad_shares)
             self.log("done verifying, found %d bad shares" % len(ret))
src/allmydata/test/test_mutable.py
index e6eea1eebbe9918b593911e67330003eea1efb45..628f918fc715c205f8e7203dfc2340f27cad480a 100644 (file)
@@ -3577,3 +3577,25 @@ class Interoperability(GridTestMixin, unittest.TestCase, testutil.ShouldFailMixi
         d = n.download_best_version()
         d.addCallback(self.failUnlessEqual, self.sdmf_old_contents)
         return d
+
+class DifferentEncoding(unittest.TestCase):
+    def setUp(self):
+        self._storage = s = FakeStorage()
+        self.nodemaker = make_nodemaker(s)
+
+    def test_filenode(self):
+        # create a file with 3-of-20, then modify it with a client configured
+        # to do 3-of-10. #1510 tracks a failure here
+        self.nodemaker.default_encoding_parameters["n"] = 20
+        d = self.nodemaker.create_mutable_file("old contents")
+        def _created(n):
+            filecap = n.get_cap().to_string()
+            del n # we want a new object, not the cached one
+            self.nodemaker.default_encoding_parameters["n"] = 10
+            n2 = self.nodemaker.create_from_cap(filecap)
+            return n2
+        d.addCallback(_created)
+        def modifier(old_contents, servermap, first_time):
+            return "new contents"
+        d.addCallback(lambda n: n.modify(modifier))
+        return d
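The new test reproduces the regression end to end: it creates a file at
3-of-20, rebuilds the node from its cap under a 3-of-10 configuration, and
then runs a modify; before this fix, that modify tripped the
finish_publishing assertion described in the commit message.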