tahoe-lafs/tahoe-lafs.git - commitdiff (git.rkrishnan.org)
introducer: remove remaining bits of 'push-to-myself' flags. The uploading/downloadin...
author    Brian Warner <warner@allmydata.com>
          Tue, 5 Feb 2008 21:16:01 +0000 (14:16 -0700)
committer Brian Warner <warner@allmydata.com>
          Tue, 5 Feb 2008 21:16:01 +0000 (14:16 -0700)
src/allmydata/client.py
src/allmydata/introducer.py
src/allmydata/mutable.py
src/allmydata/test/check_memory.py
src/allmydata/test/test_helper.py
src/allmydata/test/test_upload.py
src/allmydata/upload.py

diff --git a/src/allmydata/client.py b/src/allmydata/client.py
index 3efc6bb10d12eb75c4f0f1b17ebdbcb073efc090..38fd4c75b0ce53c4b6b8e7e09e51c2b30ac87f96 100644
@@ -146,9 +146,7 @@ class Client(node.Node, testutil.PollMixin):
 
 
     def init_options(self):
-        self.push_to_ourselves = None
-        if self.get_config("push_to_ourselves") is not None:
-            self.push_to_ourselves = True
+        pass
 
     def init_web(self, webport):
         self.log("init_web(webport=%s)", args=(webport,))
@@ -213,9 +211,6 @@ class Client(node.Node, testutil.PollMixin):
         assert isinstance(key, str)
         return self.introducer_client.get_permuted_peers(service_name, key)
 
-    def get_push_to_ourselves(self):
-        return self.push_to_ourselves
-
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS
         p = self.introducer_client.encoding_parameters # a tuple
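
The flag removed above was a presence-style config file: if BASEDIR/push_to_ourselves existed, get_config() returned something non-None and init_options() treated the option as enabled. A minimal sketch of that idiom, assuming a hypothetical read_node_flag() helper rather than Tahoe's actual get_config():

    import os

    def read_node_flag(basedir, name):
        """Return the contents of BASEDIR/<name>, or None if the file is absent.

        Presence-style flags like push_to_ourselves are enabled simply by
        creating the file; the deleted code only checked 'is not None'.
        """
        path = os.path.join(basedir, name)
        if not os.path.exists(path):
            return None
        with open(path) as f:
            return f.read().strip()

    # the deleted init_options() boiled down to:
    #   push_to_ourselves = read_node_flag(basedir, "push_to_ourselves") is not None
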
diff --git a/src/allmydata/introducer.py b/src/allmydata/introducer.py
index 1bdae85a0ae62c6a119d5ccb0173f1d7be251984..c8f70720d11b63695e9ace36c499c65a2b413a91 100644
@@ -314,38 +314,18 @@ class IntroducerClient(service.Service, Referenceable):
 
     def get_permuted_peers(self, service_name, key):
         """Return an ordered list of (peerid, rref) tuples."""
-        # TODO: flags like add-myself-at-beginning and remove-myself? maybe
-        # not.
 
         results = []
         for (c_peerid, c_service_name, rref) in self._connections:
             assert isinstance(c_peerid, str)
             if c_service_name != service_name:
                 continue
-            #if not include_myself and peerid == self.nodeid:
-            #    self.log("get_permuted_peers: removing myself from the list")
-            #    continue
             permuted = sha.new(key + c_peerid).digest()
             results.append((permuted, c_peerid, rref))
 
         results.sort(lambda a,b: cmp(a[0], b[0]))
         return [ (r[1], r[2]) for r in results ]
 
-    def _TODO__add_ourselves(self, partial_peerlist, peerlist):
-        # moved here from mutable.Publish
-        my_peerid = self._node._client.nodeid
-        for (permutedid, peerid, conn) in partial_peerlist:
-            if peerid == my_peerid:
-                # we're already in there
-                return partial_peerlist
-        for (permutedid, peerid, conn) in peerlist:
-            if peerid == self._node._client.nodeid:
-                # found it
-                partial_peerlist.append( (permutedid, peerid, conn) )
-                return partial_peerlist
-        self.log("we aren't in our own peerlist??", level=log.WEIRD)
-        return partial_peerlist
-
 
 
     def remote_set_encoding_parameters(self, parameters):
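
The surviving get_permuted_peers() above now embodies the whole peer-ordering policy: every connected storage server is ranked by SHA-1(key + peerid), and the local node gets no special treatment. A standalone sketch of that ranking, using hashlib instead of the long-deprecated sha module (illustrative only, not the Tahoe code path):

    import hashlib

    def permuted_order(key, peerids):
        """Order peerids by SHA-1(key + peerid), as get_permuted_peers() does.

        'key' is the storage index; every node computes the same ordering,
        and the uploading/downloading node is not special.
        """
        scored = sorted((hashlib.sha1(key + peerid).digest(), peerid)
                        for peerid in peerids)
        return [peerid for (_digest, peerid) in scored]

    # example with three fake 20-byte peerids:
    peers = [b"\x01" * 20, b"\x02" * 20, b"\x03" * 20]
    print(permuted_order(b"example-storage-index", peers))
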
diff --git a/src/allmydata/mutable.py b/src/allmydata/mutable.py
index f58f54f5db41cc908fdff18d05d9ec34315d3c4b..6a588ab95b5d91fff0a21e3dd1a87127357f24e3 100644
@@ -303,7 +303,6 @@ class Retrieve:
         n = self._node
         full_peerlist = n._client.get_permuted_peers("storage",
                                                      self._storage_index)
-        # TODO: include_myself=True
 
         # _peerlist is a list of (peerid,conn) tuples for peers that are
         # worth talking too. This starts with the first numqueries in the
@@ -503,7 +502,6 @@ class Retrieve:
             # we might be able to get some more peers from the list
             peers = self._node._client.get_permuted_peers("storage",
                                                           self._storage_index)
-            # TODO: include_myself=True
             self._peerlist = [p for p in islice(peers, search_distance)]
             self._peerlist_limit = search_distance
             self.log("added peers, peerlist=%d, peerlist_limit=%d"
@@ -778,20 +776,22 @@ class Publish:
 
         storage_index = self._storage_index
 
-        # we need to include ourselves in the list for two reasons. The most
-        # important is so that any shares which already exist on our own
-        # server get updated. The second is to ensure that we leave a share
-        # on our own server, so we're more likely to have the signing key
-        # around later. This way, even if all the servers die and the
-        # directory contents are unrecoverable, at least we can still push
-        # out a new copy with brand-new contents. TODO: it would be nice if
-        # the share we use for ourselves didn't count against the N total..
-        # maybe use N+1 if we find ourselves in the permuted list?
+        # In 0.7.0, we went through extra work to make sure that we included
+        # ourselves in the peerlist, mainly to match Retrieve (which did the
+        # same thing). With the post-0.7.0 Introducer refactoring, we got rid
+        # of the include-myself flags and standardized on the
+        # uploading/downloading node not being special.
+
+        # One nice feature of the old approach was that by putting a share on
+        # the local storage server, we're more likely to be able to retrieve
+        # a copy of the encrypted private key (even if all the old servers
+        # have gone away), so we can regenerate new shares even if we can't
+        # retrieve the old contents. This need will eventually go away when
+        # we switch to DSA-based mutable files (which store the private key
+        # in the URI).
 
         peerlist = self._node._client.get_permuted_peers("storage",
                                                          storage_index)
-        # make sure our local server is in the list
-        # TODO: include_myself_at_beginning=True
 
         current_share_peers = DictOfSets()
         reachable_peers = {}
@@ -818,11 +818,13 @@ class Publish:
                       total_shares, reachable_peers,
                       current_share_peers)
         # TODO: add an errback to, probably to ignore that peer
+
         # TODO: if we can't get a privkey from these servers, consider
-        # looking farther afield. Make sure we include ourselves in the
-        # initial list, because of the 0.7.0 behavior that causes us to
-        # create our initial directory before we've connected to anyone
-        # but ourselves.
+        # looking farther afield. Be aware of the old 0.7.0 behavior that
+        # caused us to create our initial directory before we'd connected to
+        # anyone but ourselves: those old directories may not be
+        # retrievable if our own server is no longer in the early part of
+        # the permuted peerlist.
         return d
 
     def _do_query(self, ss, peerid, storage_index):
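
The reworked comment above points out the recovery hazard: a 0.7.0-era directory whose only copy of the encrypted private key sits on our own server becomes hard to recover once that server is no longer near the front of the permuted list. Reusing the ordering from the sketch after the introducer.py diff, here is a small illustrative check of where a given peer lands (not Tahoe code; 'search_distance' mirrors the Retrieve logic above):

    import hashlib

    def permuted_rank(storage_index, peerids, my_peerid):
        """Return my_peerid's position in the permuted ordering, or None."""
        scored = sorted((hashlib.sha1(storage_index + pid).digest(), pid)
                        for pid in peerids)
        for rank, (_digest, pid) in enumerate(scored):
            if pid == my_peerid:
                return rank
        return None  # our own server isn't even connected

    # if the rank is beyond the search_distance actually queried, a privkey
    # held only on our own server won't be found by this search.
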
diff --git a/src/allmydata/test/check_memory.py b/src/allmydata/test/check_memory.py
index 53977d91d1035fa2c6b5c99a0c33e2a5e59f2efd..6987bceaf7affc94748d95e5e9441e9316c8b332 100644
@@ -242,17 +242,15 @@ this file are ignored.
             pass
         else:
             # don't accept any shares
-            f = open(os.path.join(clientdir, "sizelimit"), "w")
-            f.write("0\n")
+            f = open(os.path.join(clientdir, "readonly_storage"), "w")
+            f.write("true\n")
             f.close()
             ## also, if we do receive any shares, throw them away
             #f = open(os.path.join(clientdir, "debug_no_storage"), "w")
             #f.write("no_storage\n")
             #f.close()
         if self.mode == "upload-self":
-            f = open(os.path.join(clientdir, "push_to_ourselves"), "w")
-            f.write("push_to_ourselves\n")
-            f.close()
+            pass
         self.keepalive_file = os.path.join(clientdir,
                                            "suicide_prevention_hotline")
         # now start updating the mtime.
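
The keepalive file above is refreshed periodically so that the spawned client can notice if the harness goes away (the filename suggests the node shuts itself down once the mtime goes stale). A plain-loop sketch of the mtime refresh; the real harness presumably schedules this from the reactor, and 'rounds' is only here so the example terminates:

    import os, time

    def refresh_keepalive(path, interval=60.0, rounds=3):
        """Touch 'path' every 'interval' seconds so its mtime stays fresh."""
        for _ in range(rounds):
            os.utime(path, None)   # reset atime/mtime to "now"
            time.sleep(interval)
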
diff --git a/src/allmydata/test/test_helper.py b/src/allmydata/test/test_helper.py
index 7ce1293b4e01e9074cf0063f3e0176db58c98921..0d80c60f5302e700a225f06cc2ec14bb951ea4b1 100644
@@ -39,8 +39,6 @@ class FakeClient(service.MultiService):
                                    }
     def log(self, *args, **kwargs):
         return log.msg(*args, **kwargs)
-    def get_push_to_ourselves(self):
-        return True
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS
     def get_permuted_peers(self, service_name, storage_index):
diff --git a/src/allmydata/test/test_upload.py b/src/allmydata/test/test_upload.py
index 0bda732b0beeb7b1f7fc578cc4a97b283095c9a6..88670c21ba97a289cd62ac6aae84cbd247c969b1 100644
@@ -144,8 +144,6 @@ class FakeClient:
                   for fakeid in range(self.num_servers) ]
         self.last_peers = [p[1] for p in peers]
         return peers
-    def get_push_to_ourselves(self):
-        return None
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS
 
diff --git a/src/allmydata/upload.py b/src/allmydata/upload.py
index 78fa8cb53054f350039efb0ea8712d0d9d1cd45b..22b85c6eeb2b8567efd6976e2c72d43a5a56ef40 100644
@@ -54,7 +54,6 @@ class PeerTracker:
         self._storageserver = storage_server # to an RIStorageServer
         self.buckets = {} # k: shareid, v: IRemoteBucketWriter
         self.sharesize = sharesize
-        #print "PeerTracker", peerid, sharesize
         as = storage.allocated_size(sharesize,
                                     num_segments,
                                     num_share_hashes,
@@ -75,7 +74,6 @@ class PeerTracker:
                    idlib.b2a(self.storage_index)[:6]))
 
     def query(self, sharenums):
-        #print " query", self.peerid, len(sharenums)
         d = self._storageserver.callRemote("allocate_buckets",
                                            self.storage_index,
                                            self.renew_secret,
@@ -115,8 +113,7 @@ class Tahoe2PeerSelector:
 
     def get_shareholders(self, client,
                          storage_index, share_size, block_size,
-                         num_segments, total_shares, shares_of_happiness,
-                         push_to_ourselves):
+                         num_segments, total_shares, shares_of_happiness):
         """
         @return: a set of PeerTracker instances that have agreed to hold some
                  shares for us
@@ -134,7 +131,6 @@ class Tahoe2PeerSelector:
         self.preexisting_shares = {} # sharenum -> PeerTracker holding the share
 
         peers = client.get_permuted_peers("storage", storage_index)
-        # TODO: push_to_ourselves
         if not peers:
             raise encode.NotEnoughPeersError("client gave us zero peers")
 
@@ -608,11 +604,10 @@ class CHKUploader:
         block_size = encoder.get_param("block_size")
         num_segments = encoder.get_param("num_segments")
         k,desired,n = encoder.get_param("share_counts")
-        push_to_ourselves = self._client.get_push_to_ourselves()
 
-        gs = peer_selector.get_shareholders
-        d = gs(self._client, storage_index, share_size, block_size,
-               num_segments, n, desired, push_to_ourselves)
+        d = peer_selector.get_shareholders(self._client, storage_index,
+                                           share_size, block_size,
+                                           num_segments, n, desired)
         return d
 
     def set_shareholders(self, used_peers, encoder):
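
With push_to_ourselves gone, CHKUploader passes only the encoding parameters and the selector walks the permuted peer list the same way on every node. As a rough illustration of the selection idea (this is not Tahoe2PeerSelector, which additionally calls allocate_buckets remotely, tracks pre-existing shares, and enforces shares_of_happiness), here is a toy round-robin assignment of share numbers over the permuted peers:

    def assign_shares(permuted_peerids, total_shares):
        """Toy sketch: spread sharenums 0..total_shares-1 over the peer list."""
        if not permuted_peerids:
            raise ValueError("client gave us zero peers")
        assignments = {}   # peerid -> set of sharenums
        for sharenum in range(total_shares):
            peerid = permuted_peerids[sharenum % len(permuted_peerids)]
            assignments.setdefault(peerid, set()).add(sharenum)
        return assignments

    # e.g. assign_shares(permuted_order(storage_index, peerids), total_shares)
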