From d98fde952c95487d06002ddd9b129c15a1ce1556 Mon Sep 17 00:00:00 2001
From: Brian Warner <warner@allmydata.com>
Date: Tue, 5 Feb 2008 14:16:01 -0700
Subject: [PATCH] introducer: remove remaining bits of 'push-to-myself' flags.
 The uploading/downloading node is no longer special.

---
 src/allmydata/client.py            |  7 +-----
 src/allmydata/introducer.py        | 20 -----------------
 src/allmydata/mutable.py           | 36 ++++++++++++++++--------------
 src/allmydata/test/check_memory.py |  8 +++----
 src/allmydata/test/test_helper.py  |  2 --
 src/allmydata/test/test_upload.py  |  2 --
 src/allmydata/upload.py            | 13 ++++-------
 7 files changed, 27 insertions(+), 61 deletions(-)

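For context: the flag this patch removes was one of Tahoe's presence-style
config files, where merely creating the file in the node's basedir turned the
option on (the init_options code below tests "is not None", not the file's
contents). A minimal sketch of that convention; get_config here is a
stand-in for node.Node.get_config, and the basedir handling is illustrative:

    import os

    def get_config(basedir, name):
        # presence-style option: return the file's contents if it exists,
        # otherwise None; callers only check "is not None"
        fn = os.path.join(basedir, name)
        try:
            return open(fn, "r").read().strip()
        except EnvironmentError:
            return None

    # before this patch:
    #   push_to_ourselves = get_config(basedir, "push_to_ourselves") is not None
    # after this patch: the option is simply gone
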
diff --git a/src/allmydata/client.py b/src/allmydata/client.py
index 3efc6bb1..38fd4c75 100644
--- a/src/allmydata/client.py
+++ b/src/allmydata/client.py
@@ -146,9 +146,7 @@ class Client(node.Node, testutil.PollMixin):
 
 
     def init_options(self):
-        self.push_to_ourselves = None
-        if self.get_config("push_to_ourselves") is not None:
-            self.push_to_ourselves = True
+        pass
 
     def init_web(self, webport):
         self.log("init_web(webport=%s)", args=(webport,))
@@ -213,9 +211,6 @@ class Client(node.Node, testutil.PollMixin):
         assert isinstance(key, str)
         return self.introducer_client.get_permuted_peers(service_name, key)
 
-    def get_push_to_ourselves(self):
-        return self.push_to_ourselves
-
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS
         p = self.introducer_client.encoding_parameters # a tuple
diff --git a/src/allmydata/introducer.py b/src/allmydata/introducer.py
index 1bdae85a..c8f70720 100644
--- a/src/allmydata/introducer.py
+++ b/src/allmydata/introducer.py
@@ -314,38 +314,18 @@ class IntroducerClient(service.Service, Referenceable):
 
     def get_permuted_peers(self, service_name, key):
         """Return an ordered list of (peerid, rref) tuples."""
-        # TODO: flags like add-myself-at-beginning and remove-myself? maybe
-        # not.
 
         results = []
         for (c_peerid, c_service_name, rref) in self._connections:
             assert isinstance(c_peerid, str)
             if c_service_name != service_name:
                 continue
-            #if not include_myself and peerid == self.nodeid:
-            #    self.log("get_permuted_peers: removing myself from the list")
-            #    continue
             permuted = sha.new(key + c_peerid).digest()
             results.append((permuted, c_peerid, rref))
 
         results.sort(lambda a,b: cmp(a[0], b[0]))
         return [ (r[1], r[2]) for r in results ]
 
-    def _TODO__add_ourselves(self, partial_peerlist, peerlist):
-        # moved here from mutable.Publish
-        my_peerid = self._node._client.nodeid
-        for (permutedid, peerid, conn) in partial_peerlist:
-            if peerid == my_peerid:
-                # we're already in there
-                return partial_peerlist
-        for (permutedid, peerid, conn) in peerlist:
-            if peerid == self._node._client.nodeid:
-                # found it
-                partial_peerlist.append( (permutedid, peerid, conn) )
-                return partial_peerlist
-        self.log("we aren't in our own peerlist??", level=log.WEIRD)
-        return partial_peerlist
-
 
 
     def remote_set_encoding_parameters(self, parameters):
diff --git a/src/allmydata/mutable.py b/src/allmydata/mutable.py
index f58f54f5..6a588ab9 100644
--- a/src/allmydata/mutable.py
+++ b/src/allmydata/mutable.py
@@ -303,7 +303,6 @@ class Retrieve:
         n = self._node
         full_peerlist = n._client.get_permuted_peers("storage",
                                                      self._storage_index)
-        # TODO: include_myself=True
 
         # _peerlist is a list of (peerid,conn) tuples for peers that are
         # worth talking to. This starts with the first numqueries in the
@@ -503,7 +502,6 @@ class Retrieve:
             # we might be able to get some more peers from the list
             peers = self._node._client.get_permuted_peers("storage",
                                                           self._storage_index)
-            # TODO: include_myself=True
             self._peerlist = [p for p in islice(peers, search_distance)]
             self._peerlist_limit = search_distance
             self.log("added peers, peerlist=%d, peerlist_limit=%d"
@@ -778,20 +776,22 @@ class Publish:
 
         storage_index = self._storage_index
 
-        # we need to include ourselves in the list for two reasons. The most
-        # important is so that any shares which already exist on our own
-        # server get updated. The second is to ensure that we leave a share
-        # on our own server, so we're more likely to have the signing key
-        # around later. This way, even if all the servers die and the
-        # directory contents are unrecoverable, at least we can still push
-        # out a new copy with brand-new contents. TODO: it would be nice if
-        # the share we use for ourselves didn't count against the N total..
-        # maybe use N+1 if we find ourselves in the permuted list?
+        # In 0.7.0, we went through extra work to make sure that we included
+        # ourselves in the peerlist, mainly to match Retrieve (which did the
+        # same thing). With the post-0.7.0 Introducer refactoring, we got rid
+        # of the include-myself flags and standardized on the
+        # uploading/downloading node not being special.
+
+        # One nice feature of the old approach was that by putting a share
+        # on the local storage server, we were more likely to be able to
+        # retrieve a copy of the encrypted private key (even if all the old
+        # servers had gone away), so we could regenerate new shares even if
+        # we couldn't retrieve the old contents. This need will eventually
+        # go away when we switch to DSA-based mutable files (which store
+        # the private key in the URI).
 
         peerlist = self._node._client.get_permuted_peers("storage",
                                                          storage_index)
-        # make sure our local server is in the list
-        # TODO: include_myself_at_beginning=True
 
         current_share_peers = DictOfSets()
         reachable_peers = {}
@@ -818,11 +818,13 @@ class Publish:
                       total_shares, reachable_peers,
                       current_share_peers)
         # TODO: add an errback, probably to ignore that peer
+
         # TODO: if we can't get a privkey from these servers, consider
-        # looking farther afield. Make sure we include ourselves in the
-        # initial list, because of the 0.7.0 behavior that causes us to
-        # create our initial directory before we've connected to anyone
-        # but ourselves.
+        # looking farther afield. Be aware of the old 0.7.0 behavior that
+        # caused us to create our initial directory before we'd connected to
+        # anyone but ourselves: those old directories may not be
+        # retrievable if our own server is no longer in the early part of
+        # the permuted peerlist.
         return d
 
     def _do_query(self, ss, peerid, storage_index):
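On the first TODO above ("add an errback"): one plausible shape for it is a
per-peer errback that swallows the failure, so a dead peer just contributes
no shares instead of aborting the whole publish. A standalone sketch;
query_with_ignore and its ignore-everything policy are assumptions, not
part of this patch:

    from twisted.internet import defer

    def query_with_ignore(do_query, peerid):
        # do_query(peerid) returns a Deferred, in the spirit of
        # Publish._do_query's callRemote
        d = do_query(peerid)
        def _ignore(f):
            # this peer contributes nothing; the publish continues
            return None
        d.addErrback(_ignore)
        return d

    # usage:
    #   d = query_with_ignore(lambda p: defer.fail(RuntimeError("gone")), "p1")
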
diff --git a/src/allmydata/test/check_memory.py b/src/allmydata/test/check_memory.py
index 53977d91..6987bcea 100644
--- a/src/allmydata/test/check_memory.py
+++ b/src/allmydata/test/check_memory.py
@@ -242,17 +242,15 @@ this file are ignored.
             pass
         else:
             # don't accept any shares
-            f = open(os.path.join(clientdir, "sizelimit"), "w")
-            f.write("0\n")
+            f = open(os.path.join(clientdir, "readonly_storage"), "w")
+            f.write("true\n")
             f.close()
             ## also, if we do receive any shares, throw them away
             #f = open(os.path.join(clientdir, "debug_no_storage"), "w")
             #f.write("no_storage\n")
             #f.close()
         if self.mode == "upload-self":
-            f = open(os.path.join(clientdir, "push_to_ourselves"), "w")
-            f.write("push_to_ourselves\n")
-            f.close()
+            pass
         self.keepalive_file = os.path.join(clientdir,
                                            "suicide_prevention_hotline")
         # now start updating the mtime.
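The sizelimit-to-readonly_storage swap above changes how the test client is
told to refuse shares. Reproducing it outside the harness is just a matter
of writing the same file; a sketch mirroring exactly what the test does
(whether the node honors the contents or merely the file's presence is not
something this patch specifies):

    import os

    def refuse_new_shares(clientdir):
        # mirrors check_memory.py: a readonly_storage file in the node's
        # basedir tells the storage server to accept no new shares
        f = open(os.path.join(clientdir, "readonly_storage"), "w")
        f.write("true\n")
        f.close()
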
diff --git a/src/allmydata/test/test_helper.py b/src/allmydata/test/test_helper.py
index 7ce1293b..0d80c60f 100644
--- a/src/allmydata/test/test_helper.py
+++ b/src/allmydata/test/test_helper.py
@@ -39,8 +39,6 @@ class FakeClient(service.MultiService):
                                    }
     def log(self, *args, **kwargs):
         return log.msg(*args, **kwargs)
-    def get_push_to_ourselves(self):
-        return True
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS
     def get_permuted_peers(self, service_name, storage_index):
diff --git a/src/allmydata/test/test_upload.py b/src/allmydata/test/test_upload.py
index 0bda732b..88670c21 100644
--- a/src/allmydata/test/test_upload.py
+++ b/src/allmydata/test/test_upload.py
@@ -144,8 +144,6 @@ class FakeClient:
                   for fakeid in range(self.num_servers) ]
         self.last_peers = [p[1] for p in peers]
         return peers
-    def get_push_to_ourselves(self):
-        return None
     def get_encoding_parameters(self):
         return self.DEFAULT_ENCODING_PARAMETERS
 
diff --git a/src/allmydata/upload.py b/src/allmydata/upload.py
index 78fa8cb5..22b85c6e 100644
--- a/src/allmydata/upload.py
+++ b/src/allmydata/upload.py
@@ -54,7 +54,6 @@ class PeerTracker:
         self._storageserver = storage_server # to an RIStorageServer
         self.buckets = {} # k: shareid, v: IRemoteBucketWriter
         self.sharesize = sharesize
-        #print "PeerTracker", peerid, sharesize
         as = storage.allocated_size(sharesize,
                                     num_segments,
                                     num_share_hashes,
@@ -75,7 +74,6 @@ class PeerTracker:
                    idlib.b2a(self.storage_index)[:6]))
 
     def query(self, sharenums):
-        #print " query", self.peerid, len(sharenums)
         d = self._storageserver.callRemote("allocate_buckets",
                                            self.storage_index,
                                            self.renew_secret,
@@ -115,8 +113,7 @@ class Tahoe2PeerSelector:
 
     def get_shareholders(self, client,
                          storage_index, share_size, block_size,
-                         num_segments, total_shares, shares_of_happiness,
-                         push_to_ourselves):
+                         num_segments, total_shares, shares_of_happiness):
         """
         @return: a set of PeerTracker instances that have agreed to hold some
                  shares for us
@@ -134,7 +131,6 @@ class Tahoe2PeerSelector:
         self.preexisting_shares = {} # sharenum -> PeerTracker holding the share
 
         peers = client.get_permuted_peers("storage", storage_index)
-        # TODO: push_to_ourselves
         if not peers:
             raise encode.NotEnoughPeersError("client gave us zero peers")
 
@@ -608,11 +604,10 @@ class CHKUploader:
         block_size = encoder.get_param("block_size")
         num_segments = encoder.get_param("num_segments")
         k,desired,n = encoder.get_param("share_counts")
-        push_to_ourselves = self._client.get_push_to_ourselves()
 
-        gs = peer_selector.get_shareholders
-        d = gs(self._client, storage_index, share_size, block_size,
-               num_segments, n, desired, push_to_ourselves)
+        d = peer_selector.get_shareholders(self._client, storage_index,
+                                           share_size, block_size,
+                                           num_segments, n, desired)
         return d
 
     def set_shareholders(self, used_peers, encoder):
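With get_push_to_ourselves() gone, the client object handed to CHKUploader
and Tahoe2PeerSelector needs only the two methods the test fakes still
provide. A minimal fake in the spirit of the updated tests; the parameter
keys and values here are made up for illustration:

    class MinimalFakeClient:
        # the post-patch peer-selection surface: permuted peers plus
        # encoding parameters, and nothing node-specific
        DEFAULT_ENCODING_PARAMETERS = {"k": 3, "happy": 7, "n": 10,
                                       "max_segment_size": 128*1024}
        def get_permuted_peers(self, service_name, storage_index):
            return []  # (peerid, rref) tuples, permuted by storage_index
        def get_encoding_parameters(self):
            return self.DEFAULT_ENCODING_PARAMETERS
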
-- 
2.45.2