git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/commitdiff
Remove support for [storage]debug_discard option.
author Daira Hopwood <daira@jacaranda.org>
Fri, 10 Jul 2015 03:04:58 +0000 (04:04 +0100)
committer Daira Hopwood <daira@jacaranda.org>
Tue, 4 Aug 2015 16:55:23 +0000 (17:55 +0100)
Signed-off-by: Daira Hopwood <daira@jacaranda.org>
src/allmydata/client.py
src/allmydata/storage/server.py
src/allmydata/test/check_memory.py
src/allmydata/test/test_client.py
src/allmydata/test/test_storage.py

index bb6dce23096d4d0af7b89973af4e08394a0a460b..6e9cadc73bf3c2e3321b3cde1923475c96cb8586 100644 (file)
@@ -272,8 +272,8 @@ class Client(node.Node, pollmixin.PollMixin):
             raise
         if reserved is None:
             reserved = 0
-        discard = self.get_config("storage", "debug_discard", False,
-                                  boolean=True)
+        if self.get_config("storage", "debug_discard", False, boolean=True):
+            raise OldConfigOptionError("[storage]debug_discard = True is no longer supported.")
 
         expire = self.get_config("storage", "expire.enabled", False, boolean=True)
         if expire:
@@ -299,7 +299,6 @@ class Client(node.Node, pollmixin.PollMixin):
 
         ss = StorageServer(storedir, self.nodeid,
                            reserved_space=reserved,
-                           discard_storage=discard,
                            readonly_storage=readonly,
                            stats_provider=self.stats_provider,
                            expiration_enabled=expire,
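
For illustration, a minimal sketch of what a node whose tahoe.cfg still sets debug_discard = true under [storage] now sees at startup. The basedir name is a placeholder and the import location of OldConfigOptionError is an assumption; the hunk above only shows the exception being raised.

    from allmydata.client import Client
    from allmydata.node import OldConfigOptionError  # assumed import path

    try:
        # hypothetical basedir whose tahoe.cfg still contains:
        #   [storage]
        #   enabled = true
        #   debug_discard = true
        Client("example-basedir")
    except OldConfigOptionError as e:
        print(e)  # [storage]debug_discard = True is no longer supported.
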
index 1de4b22fe948fe8bcf5397c37a7ce772abbebffe..9e7ea15bb25675e8c1f6810d8e58203cce10e111 100644 (file)
@@ -39,7 +39,7 @@ class StorageServer(service.MultiService, Referenceable):
     LeaseCheckerClass = LeaseCheckingCrawler
 
     def __init__(self, storedir, nodeid, reserved_space=0,
-                 discard_storage=False, readonly_storage=False,
+                 readonly_storage=False,
                  stats_provider=None,
                  expiration_enabled=False,
                  expiration_mode="age",
@@ -58,7 +58,6 @@ class StorageServer(service.MultiService, Referenceable):
         self.corruption_advisory_dir = os.path.join(storedir,
                                                     "corruption-advisories")
         self.reserved_space = int(reserved_space)
-        self.no_storage = discard_storage
         self.readonly_storage = readonly_storage
         self.stats_provider = stats_provider
         if self.stats_provider:
@@ -298,8 +297,6 @@ class StorageServer(service.MultiService, Referenceable):
                 # ok! we need to create the new share file.
                 bw = BucketWriter(self, incominghome, finalhome,
                                   max_space_per_bucket, lease_info, canary)
-                if self.no_storage:
-                    bw.throw_out_all_data = True
                 bucketwriters[shnum] = bw
                 self._active_writers[bw] = 1
                 if limited:
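
A minimal sketch of constructing the storage server with the narrowed signature above. The storage directory is a placeholder; the 20-byte nodeid follows the convention used in the tests further down.

    from allmydata.storage.server import StorageServer

    ss = StorageServer("/tmp/example-storedir",  # placeholder storage directory
                       "\x00" * 20,              # 20-byte nodeid, as in the tests
                       reserved_space=0,
                       readonly_storage=False)
    # Passing discard_storage=True now fails with
    # TypeError: __init__() got an unexpected keyword argument 'discard_storage'
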
index 1b727017ffa9420c9f89c41db2e9fce82f617c80..43231b4f1794915448e7df910fe09c64c9b6f5b8 100644 (file)
@@ -192,17 +192,13 @@ class SystemFramework(pollmixin.PollMixin):
                     "shares.happy = 1\n"
                     "[storage]\n"
                     % (self.introducer_furl,))
-            # the only tests for which we want the internal nodes to actually
-            # retain shares are the ones where somebody's going to download
-            # them.
-            if self.mode in ("download", "download-GET", "download-GET-slow"):
-                # retain shares
-                pass
-            else:
-                # for these tests, we tell the storage servers to pretend to
-                # accept shares, but really just throw them out, since we're
-                # only testing upload and not download.
-                f.write("debug_discard = true\n")
+
+            # We used to set the [storage]debug_discard option to discard
+            # shares when they would not be needed, i.e. when self.mode not in
+            # ("download", "download-GET", "download-GET-slow").
+            # But debug_discard is no longer supported. It should be OK to
+            # retain the shares anyway.
+
             if self.mode in ("receive",):
                 # for this mode, the client-under-test gets all the shares,
                 # so our internal nodes can refuse requests
@@ -253,8 +249,7 @@ this file are ignored.
         else:
             # don't accept any shares
             f.write("readonly = true\n")
-            ## also, if we do receive any shares, throw them away
-            #f.write("debug_discard = true")
+
         if self.mode == "upload-self":
             pass
         f.close()
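
A sketch of what the harness's storage stanza amounts to after this change, assuming the read-only flag seen in the hunks above is the only remaining per-mode difference; the surrounding [client] settings are unchanged and omitted here.

    def storage_section(accept_shares):
        # With debug_discard gone, an internal node either stores shares for
        # real or refuses them outright via readonly; there is no
        # silently-discard-them mode any more.
        lines = ["[storage]\n"]
        if not accept_shares:
            lines.append("readonly = true\n")
        return "".join(lines)
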
index dfd9b8de7f954ddb0baa4371953d8b51fd5dbdd5..3d1d1f91af8a549abee3249c89b639004e09e1b4 100644 (file)
@@ -247,6 +247,16 @@ class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
                         "port = tcp:0:interface=127.0.0.1\n"))
         self.failUnlessRaises(NeedRootcapLookupScheme, client.Client, basedir)
 
+    def test_debug_discard_true_unsupported(self):
+        basedir = "client.Basic.test_debug_discard_true_unsupported"
+        os.mkdir(basedir)
+        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
+                       BASECONFIG + \
+                       "[storage]\n" + \
+                       "enabled = true\n" + \
+                       "debug_discard = true\n")
+        self.failUnlessRaises(OldConfigOptionError, client.Client, basedir)
+
     def _permute(self, sb, key):
         return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]
 
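
As a usage note, the new test can be exercised on its own with Twisted Trial, the runner used for the allmydata test suite, e.g.:

    trial allmydata.test.test_client.Basic.test_debug_discard_true_unsupported
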
index 3e7cee519de541ebcaf451691f7902dd7a34508e..2058648349cc4f6f1ea0135ec49b9ac3f9089899 100644 (file)
@@ -712,28 +712,9 @@ class Server(unittest.TestCase):
             # But if there are stats, readonly_storage means disk_avail=0
             self.failUnlessEqual(stats["storage_server.disk_avail"], 0)
 
-    def test_discard(self):
-        # discard is really only used for other tests, but we test it anyways
-        workdir = self.workdir("test_discard")
-        ss = StorageServer(workdir, "\x00" * 20, discard_storage=True)
-        ss.setServiceParent(self.sparent)
-
-        already,writers = self.allocate(ss, "vid", [0,1,2], 75)
-        self.failUnlessEqual(already, set())
-        self.failUnlessEqual(set(writers.keys()), set([0,1,2]))
-        for i,wb in writers.items():
-            wb.remote_write(0, "%25d" % i)
-            wb.remote_close()
-        # since we discard the data, the shares should be present but sparse.
-        # Since we write with some seeks, the data we read back will be all
-        # zeros.
-        b = ss.remote_get_buckets("vid")
-        self.failUnlessEqual(set(b.keys()), set([0,1,2]))
-        self.failUnlessEqual(b[0].remote_read(0, 25), "\x00" * 25)
-
     def test_advise_corruption(self):
         workdir = self.workdir("test_advise_corruption")
-        ss = StorageServer(workdir, "\x00" * 20, discard_storage=True)
+        ss = StorageServer(workdir, "\x00" * 20)
         ss.setServiceParent(self.sparent)
 
         si0_s = base32.b2a("si0")