Remove support for the [storage]debug_discard option.
author: David-Sarah Hopwood <david-sarah@jacaranda.org>
Sat, 17 Nov 2012 19:46:20 +0000 (19:46 +0000)
committer: Daira Hopwood <daira@jacaranda.org>
Tue, 8 Apr 2014 23:26:04 +0000 (00:26 +0100)
(BucketWriter.throw_out_all_data is kept because it might still be useful.)

Signed-off-by: David-Sarah Hopwood <david-sarah@jacaranda.org>
src/allmydata/client.py
src/allmydata/storage/server.py
src/allmydata/test/check_memory.py
src/allmydata/test/test_client.py
src/allmydata/test/test_storage.py

index 217a7b47448f145153095f87611c61483032480d..27f146da946c440c4b137821ae546545b2a9cc87 100644 (file)
@@ -267,8 +267,8 @@ class Client(node.Node, pollmixin.PollMixin):
             raise
         if reserved is None:
             reserved = 0
-        discard = self.get_config("storage", "debug_discard", False,
-                                  boolean=True)
+        if self.get_config("storage", "debug_discard", False, boolean=True):
+            raise OldConfigOptionError("[storage]debug_discard = True is no longer supported.")
 
         expire = self.get_config("storage", "expire.enabled", False, boolean=True)
         if expire:
@@ -294,7 +294,6 @@ class Client(node.Node, pollmixin.PollMixin):
 
         ss = StorageServer(storedir, self.nodeid,
                            reserved_space=reserved,
-                           discard_storage=discard,
                            readonly_storage=readonly,
                            stats_provider=self.stats_provider,
                            expiration_enabled=expire,
index 9c0397c0bffeaba894e1891d3e1b41a9f67f5db0..bd1b47b8703760f96a91a2b140ed079d153ab613 100644 (file)
@@ -39,7 +39,7 @@ class StorageServer(service.MultiService, Referenceable):
     LeaseCheckerClass = LeaseCheckingCrawler
 
     def __init__(self, storedir, nodeid, reserved_space=0,
-                 discard_storage=False, readonly_storage=False,
+                 readonly_storage=False,
                  stats_provider=None,
                  expiration_enabled=False,
                  expiration_mode="age",
@@ -58,7 +58,6 @@ class StorageServer(service.MultiService, Referenceable):
         self.corruption_advisory_dir = os.path.join(storedir,
                                                     "corruption-advisories")
         self.reserved_space = int(reserved_space)
-        self.no_storage = discard_storage
         self.readonly_storage = readonly_storage
         self.stats_provider = stats_provider
         if self.stats_provider:
@@ -297,8 +296,6 @@ class StorageServer(service.MultiService, Referenceable):
                 # ok! we need to create the new share file.
                 bw = BucketWriter(self, incominghome, finalhome,
                                   max_space_per_bucket, lease_info, canary)
-                if self.no_storage:
-                    bw.throw_out_all_data = True
                 bucketwriters[shnum] = bw
                 self._active_writers[bw] = 1
                 if limited:
index b9d79014e4a0316564454b65b7cdee5f18c3ee5d..e540f85ec4b034604e68ff18b3146bebe96d5228 100644 (file)
@@ -188,17 +188,13 @@ class SystemFramework(pollmixin.PollMixin):
                     "shares.happy = 1\n"
                     "[storage]\n"
                     % (self.introducer_furl,))
-            # the only tests for which we want the internal nodes to actually
-            # retain shares are the ones where somebody's going to download
-            # them.
-            if self.mode in ("download", "download-GET", "download-GET-slow"):
-                # retain shares
-                pass
-            else:
-                # for these tests, we tell the storage servers to pretend to
-                # accept shares, but really just throw them out, since we're
-                # only testing upload and not download.
-                f.write("debug_discard = true\n")
+
+            # We used to set the [storage]debug_discard option to discard
+            # shares when they would not be needed, i.e. when self.mode was
+            # not in ("download", "download-GET", "download-GET-slow").
+            # But debug_discard is no longer supported. It should be OK to
+            # retain the shares anyway.
+
             if self.mode in ("receive",):
                 # for this mode, the client-under-test gets all the shares,
                 # so our internal nodes can refuse requests
@@ -249,8 +245,7 @@ this file are ignored.
         else:
             # don't accept any shares
             f.write("readonly = true\n")
-            ## also, if we do receive any shares, throw them away
-            #f.write("debug_discard = true")
+
         if self.mode == "upload-self":
             pass
         f.close()
index 796971b236140ed73e3872212ce90a72eb8c8e22..5b6183c1845cafa40608f2be020580aaebd5c6b8 100644 (file)
@@ -150,6 +150,16 @@ class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
                            "reserved_space = bogus\n")
         self.failUnlessRaises(ValueError, client.Client, basedir)
 
+    def test_debug_discard_true_unsupported(self):
+        basedir = "client.Basic.test_debug_discard_true_unsupported"
+        os.mkdir(basedir)
+        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
+                       BASECONFIG + \
+                       "[storage]\n" + \
+                       "enabled = true\n" + \
+                       "debug_discard = true\n")
+        self.failUnlessRaises(OldConfigOptionError, client.Client, basedir)
+
     def _permute(self, sb, key):
         return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]
 
index 5c5e2c7c02bae554f3a818885bcd2eab2e2d9a80..82bbc716d43bae91335310b0444bef02519ca4ae 100644 (file)
@@ -703,28 +703,9 @@ class Server(unittest.TestCase):
             # But if there are stats, readonly_storage means disk_avail=0
             self.failUnlessEqual(stats["storage_server.disk_avail"], 0)
 
-    def test_discard(self):
-        # discard is really only used for other tests, but we test it anyways
-        workdir = self.workdir("test_discard")
-        ss = StorageServer(workdir, "\x00" * 20, discard_storage=True)
-        ss.setServiceParent(self.sparent)
-
-        already,writers = self.allocate(ss, "vid", [0,1,2], 75)
-        self.failUnlessEqual(already, set())
-        self.failUnlessEqual(set(writers.keys()), set([0,1,2]))
-        for i,wb in writers.items():
-            wb.remote_write(0, "%25d" % i)
-            wb.remote_close()
-        # since we discard the data, the shares should be present but sparse.
-        # Since we write with some seeks, the data we read back will be all
-        # zeros.
-        b = ss.remote_get_buckets("vid")
-        self.failUnlessEqual(set(b.keys()), set([0,1,2]))
-        self.failUnlessEqual(b[0].remote_read(0, 25), "\x00" * 25)
-
     def test_advise_corruption(self):
         workdir = self.workdir("test_advise_corruption")
-        ss = StorageServer(workdir, "\x00" * 20, discard_storage=True)
+        ss = StorageServer(workdir, "\x00" * 20)
         ss.setServiceParent(self.sparent)
 
         si0_s = base32.b2a("si0")