git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/commitdiff
helper #609: uploading client should ignore old helper's UploadResults, which were...
author Brian Warner <warner@allmydata.com>
Mon, 9 Feb 2009 21:45:43 +0000 (14:45 -0700)
committer Brian Warner <warner@allmydata.com>
Mon, 9 Feb 2009 21:45:43 +0000 (14:45 -0700)
src/allmydata/immutable/offloaded.py
src/allmydata/immutable/upload.py

index 7ba3943e5bf28c9b2c66185e52d46e08e754c543..775b71e6540a073c5e38f0a56448a806c278c55a 100644 (file)
@@ -10,7 +10,7 @@ from allmydata import interfaces, storage, uri
 from allmydata.immutable import upload
 from allmydata.immutable.layout import ReadBucketProxy
 from allmydata.util.assertutil import precondition
-from allmydata.util import idlib, log, observer, fileutil, hashutil
+from allmydata.util import idlib, log, observer, fileutil, hashutil, dictutil
 
 
 class NotEnoughWritersError(Exception):
@@ -33,7 +33,7 @@ class CHKCheckerAndUEBFetcher:
         self._peer_getter = peer_getter
         self._found_shares = set()
         self._storage_index = storage_index
-        self._sharemap = {}
+        self._sharemap = dictutil.DictOfSets()
         self._readers = set()
         self._ueb_hash = None
         self._ueb_data = None
@@ -69,9 +69,7 @@ class CHKCheckerAndUEBFetcher:
                  level=log.NOISY)
         self._found_shares.update(buckets.keys())
         for k in buckets:
-            if k not in self._sharemap:
-                self._sharemap[k] = []
-            self._sharemap[k].append(peerid)
+            self._sharemap.add(k, peerid)
         self._readers.update( [ (bucket, peerid)
                                 for bucket in buckets.values() ] )
 
@@ -632,11 +630,7 @@ class Helper(Referenceable, service.MultiService):
                 (sharemap, ueb_data, ueb_hash) = res
                 self.log("found file in grid", level=log.NOISY, parent=lp)
                 results.uri_extension_hash = ueb_hash
-                results.sharemap = {}
-                for shnum, peerids in sharemap.items():
-                    peers_s = ",".join(["[%s]" % idlib.shortnodeid_b2a(peerid)
-                                        for peerid in peerids])
-                    results.sharemap[shnum] = "Found on " + peers_s
+                results.sharemap = sharemap
                 results.uri_extension_data = ueb_data
                 results.preexisting_shares = len(sharemap)
                 results.pushed_shares = 0
index 17155f44b0b8704ab9cce78f6742d532849fd5fb..28edda49aadbd22d70db73ef84ff47d94049cf0f 100644 (file)
@@ -993,8 +993,26 @@ class AssistedUploader:
         self._upload_status.set_results(upload_results)
         return upload_results
 
+    def _convert_old_upload_results(self, upload_results):
+        # pre-1.3.0 helpers return upload results which contain a mapping
+        # from shnum to a single human-readable string, containing things
+        # like "Found on [x],[y],[z]" (for healthy files that were already in
+        # the grid), "Found on [x]" (for files that needed upload but which
+        # discovered pre-existing shares), and "Placed on [x]" (for newly
+        # uploaded shares). The 1.3.0 helper returns a mapping from shnum to
+        # set of binary serverid strings.
+
+        # the old results are too hard to deal with (they don't even contain
+        # as much information as the new results, since the nodeids are
+        # abbreviated), so if we detect old results, just clobber them.
+
+        sharemap = upload_results.sharemap
+        if str in [type(v) for v in sharemap.values()]:
+            upload_results.sharemap = None
+
     def _build_verifycap(self, upload_results):
         self.log("upload finished, building readcap")
+        self._convert_old_upload_results(upload_results)
         self._upload_status.set_status("Building Readcap")
         r = upload_results
         assert r.uri_extension_data["needed_shares"] == self._needed_shares