]> git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/commitdiff
Sun May 13 09:15:50 BST 2012 Brian Warner <warner@lothar.com>
authorDaira Hopwood <daira@jacaranda.org>
Thu, 5 Sep 2013 17:02:21 +0000 (18:02 +0100)
committerDaira Hopwood <daira@jacaranda.org>
Thu, 5 Sep 2013 17:02:21 +0000 (18:02 +0100)
  * Doc updates and cosmetic fixes for #1115 patch.

  Removes the caveat from webapi.rst about count-good-share-hosts being wrong.

  This series should close #1115.

docs/frontends/webapi.rst
src/allmydata/immutable/filenode.py
src/allmydata/test/test_checker.py

index 49541a7c794c1a49a2b73988000ff8eb09d247bf..fb99c5be9175f1b2c08f5bb007392714ad9d969c 100644 (file)
@@ -1342,10 +1342,8 @@ mainly intended for developers.
     count-shares-good: the number of good shares that were found
     count-shares-needed: 'k', the number of shares required for recovery
     count-shares-expected: 'N', the number of total shares generated
-    count-good-share-hosts: this was intended to be the number of distinct
-                            storage servers with good shares. It is currently
-                            (as of Tahoe-LAFS v1.8.0) computed incorrectly;
-                            see ticket #1115.
+    count-good-share-hosts: the number of distinct storage servers with good
+                            shares
     count-wrong-shares: for mutable files, the number of shares for
                         versions other than the 'best' one (highest
                         sequence number, highest roothash). These are
index b338ea7d9d35e79a3d1cc331d77f60a4e8f94a5c..c7fa82f6a4193bff7bdb702b3807f49ec7953fc5 100644 (file)
@@ -124,8 +124,8 @@ class CiphertextFileNode:
                     servers_responding = sorted(servers_responding)
                     prr.data['servers-responding'] = servers_responding
                     prr.data['count-shares-good'] = len(sm)
-                    prr.data['count-good-share-hosts'] = len(reduce(set.union, 
-                                                       sm.itervalues(), set()))
+                    good_hosts = len(reduce(set.union, sm.itervalues(), set()))
+                    prr.data['count-good-share-hosts'] = good_hosts
                     is_healthy = bool(len(sm) >= verifycap.total_shares)
                     is_recoverable = bool(len(sm) >= verifycap.needed_shares)
                     prr.set_healthy(is_healthy)
index 8efe4d8f47abeb270fd04ace0c03fef924348e5e..dd2bfb0521822f4728d14af38ac885a4e7a6b024 100644 (file)
@@ -288,7 +288,7 @@ class BalancingAct(GridTestMixin, unittest.TestCase):
         self.g.add_server(server_number, ss)
 
     def add_server_with_share(self, server_number, uri, share_number=None,
-                               readonly=False):
+                              readonly=False):
         self.add_server(server_number, readonly)
         if share_number is not None:
             self.copy_share_to_server(uri, share_number, server_number)
@@ -321,13 +321,14 @@ class BalancingAct(GridTestMixin, unittest.TestCase):
         assert len(self.g.servers_by_number) < len(letters), \
             "This little printing function is only meant for < 26 servers"
         shares_chart = {}
-        names = dict(zip([ss.my_nodeid for _,ss in 
-                          self.g.servers_by_number.iteritems()], letters))
+        names = dict(zip([ss.my_nodeid
+                          for _,ss in self.g.servers_by_number.iteritems()],
+                         letters))
         for shnum, serverid, _ in self.find_uri_shares(uri):
             shares_chart.setdefault(shnum, []).append(names[serverid])
         return shares_chart
 
-    def test_1115(self):
+    def test_good_share_hosts(self):
         self.basedir = "checker/BalancingAct/1115"
         self.set_up_grid(num_servers=1)
         c0 = self.g.clients[0]
@@ -353,17 +354,18 @@ class BalancingAct(GridTestMixin, unittest.TestCase):
             #print self._pretty_shares_chart(self.uri)
         for i in range(1,5):
             d.addCallback(add_three, i)
-    
+
         def _check_and_repair(_):
             return self.imm.check_and_repair(Monitor())
         def _check_counts(crr, shares_good, good_share_hosts):
             p_crr = crr.get_post_repair_results().data
             #print self._pretty_shares_chart(self.uri)
-            self.failUnless(p_crr['count-shares-good'] == shares_good)
-            self.failUnless(p_crr['count-good-share-hosts'] == good_share_hosts)
+            self.failUnlessEqual(p_crr['count-shares-good'], shares_good)
+            self.failUnlessEqual(p_crr['count-good-share-hosts'],
+                                 good_share_hosts)
 
         """
-        Initial sharemap: 
+        Initial sharemap:
             0:[A] 1:[A] 2:[A] 3:[A,B,C,D,E]
           4 good shares, but 5 good hosts
         After deleting all instances of share #3 and repairing:
@@ -375,8 +377,8 @@ class BalancingAct(GridTestMixin, unittest.TestCase):
         d.addCallback(lambda _: self.delete_shares_numbered(self.uri, [3]))
         d.addCallback(_check_and_repair)
         d.addCallback(_check_counts, 4, 5)
-        d.addCallback(lambda _: [self.g.break_server(sid) for sid 
-                                 in self.g.get_all_serverids()])
+        d.addCallback(lambda _: [self.g.break_server(sid)
+                                 for sid in self.g.get_all_serverids()])
         d.addCallback(_check_and_repair)
         d.addCallback(_check_counts, 0, 0)
         return d