]> git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/commitdiff
Retrieve: remove the initial prefix-is-still-good check
authorBrian Warner <warner@lothar.com>
Mon, 5 Sep 2011 08:26:33 +0000 (01:26 -0700)
committerBrian Warner <warner@lothar.com>
Fri, 9 Sep 2011 18:11:50 +0000 (11:11 -0700)
This check needs to be done with each fetch from the storage server, to
detect when someone has changed the share (i.e. our servermap goes stale).
Doing it just once at the beginning of retrieve isn't enough: a write might
occur after the first segment but before the second, etc.

_try_to_validate_prefix() was not removed: it will be used by the future
check-with-each-fetch code.

test_mutable.Roundtrip.test_corrupt_all_seqnum_late was disabled, since it
fails until this check is brought back. (the corruption it applies only
touches the prefix, not the block data, so the check-less retrieve actually
tolerates it). Don't forget to re-enable it once the check is brought back.

src/allmydata/mutable/retrieve.py
src/allmydata/test/test_mutable.py

index 8d35707c7fe44876d0eba98417ffcb31544af023..cfa252958e69a0801f69bc874d2f06f3637f71bc 100644 (file)
@@ -502,67 +502,21 @@ class Retrieve:
         """
         assert self._active_readers, "No more active readers"
 
-        ds = []
         new_readers = set(self._active_readers) - self._validated_readers
         self.log('validating %d newly-added active readers' % len(new_readers))
 
         for reader in new_readers:
-            # We force a remote read here -- otherwise, we are relying
-            # on cached data that we already verified as valid, and we
-            # won't detect an uncoordinated write that has occurred
-            # since the last servermap update.
-            d = reader.get_prefix(force_remote=True)
-            d.addCallback(self._try_to_validate_prefix, reader)
-            ds.append(d)
-        dl = defer.DeferredList(ds, consumeErrors=True)
-        def _check_results(results):
-            # Each result in results will be of the form (success, msg).
-            # We don't care about msg, but success will tell us whether
-            # or not the checkstring validated. If it didn't, we need to
-            # remove the offending (peer,share) from our active readers,
-            # and ensure that active readers is again populated.
-            bad_readers = []
-            for i, result in enumerate(results):
-                if not result[0]:
-                    reader = self._active_readers[i]
-                    f = result[1]
-                    assert isinstance(f, failure.Failure)
-
-                    self.log("The reader %s failed to "
-                             "properly validate: %s" % \
-                             (reader, str(f.value)))
-                    bad_readers.append((reader, f))
-                else:
-                    reader = self._active_readers[i]
-                    self.log("the reader %s checks out, so we'll use it" % \
-                             reader)
-                    self._validated_readers.add(reader)
-                    # Each time we validate a reader, we check to see if
-                    # we need the private key. If we do, we politely ask
-                    # for it and then continue computing. If we find
-                    # that we haven't gotten it at the end of
-                    # segment decoding, then we'll take more drastic
-                    # measures.
-                    if self._need_privkey and not self._node.is_readonly():
-                        d = reader.get_encprivkey()
-                        d.addCallback(self._try_to_validate_privkey, reader)
-            if bad_readers:
-                # We do them all at once, or else we screw up list indexing.
-                for (reader, f) in bad_readers:
-                    self._mark_bad_share(reader, f)
-                if self._verify:
-                    if len(self._active_readers) >= self._required_shares:
-                        return self._download_current_segment()
-                    else:
-                        return self._failed()
-                else:
-                    return self._add_active_peers()
-            else:
-                return self._download_current_segment()
-            # The next step will assert that it has enough active
-            # readers to fetch shares; we just need to remove it.
-        dl.addCallback(_check_results)
-        return dl
+            self._validated_readers.add(reader)
+            # Each time we validate a reader, we check to see if we need the
+            # private key. If we do, we politely ask for it and then continue
+            # computing. If we find that we haven't gotten it at the end of
+            # segment decoding, then we'll take more drastic measures.
+            if self._need_privkey and not self._node.is_readonly():
+                d = reader.get_encprivkey()
+                d.addCallback(self._try_to_validate_privkey, reader)
+                # XXX: don't just drop the Deferred. We need error-reporting
+                # but not flow-control here.
+        return self._download_current_segment()
 
 
     def _try_to_validate_prefix(self, prefix, reader):
index 14b62ee2eae13da976608c296fe3c92638671b47..69ce131b77b3350fc5dea6c9d9f161325ce39667 100644 (file)
@@ -1550,7 +1550,10 @@ class Roundtrip(unittest.TestCase, testutil.ShouldFailMixin, PublishMixin):
                                       fetch_privkey=True)
 
 
-    def test_corrupt_all_seqnum_late(self):
+    # disabled until retrieve tests checkstring on each blockfetch. I didn't
+    # just use a .todo because the failing-but-ignored test emits about 30kB
+    # of noise.
+    def OFF_test_corrupt_all_seqnum_late(self):
         # corrupting the seqnum between mapupdate and retrieve should result
         # in NotEnoughSharesError, since each share will look invalid
         def _check(res):