from allmydata.immutable import upload
from allmydata.immutable.layout import ReadBucketProxy
from allmydata.util.assertutil import precondition
-from allmydata.util import idlib, log, observer, fileutil, hashutil
+from allmydata.util import idlib, log, observer, fileutil, hashutil, dictutil
class NotEnoughWritersError(Exception):
self._peer_getter = peer_getter
self._found_shares = set()
self._storage_index = storage_index
- self._sharemap = {}
+ self._sharemap = dictutil.DictOfSets()
self._readers = set()
self._ueb_hash = None
self._ueb_data = None
level=log.NOISY)
self._found_shares.update(buckets.keys())
for k in buckets:
- if k not in self._sharemap:
- self._sharemap[k] = []
- self._sharemap[k].append(peerid)
+ self._sharemap.add(k, peerid)
self._readers.update( [ (bucket, peerid)
for bucket in buckets.values() ] )
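
For context: dictutil.DictOfSets, now imported above from allmydata.util, is a dict subclass whose add() method accumulates values into a per-key set, so the single add() call replaces the removed three-line bookkeeping. A minimal sketch with made-up shnum/peerid values (real peerids are binary nodeids):

    from allmydata.util import dictutil

    sharemap = dictutil.DictOfSets()
    sharemap.add(0, "peerid-A")   # creates the set for shnum 0
    sharemap.add(0, "peerid-B")   # adds to the existing set
    sharemap.add(7, "peerid-A")
    # sharemap now equals {0: set(["peerid-A", "peerid-B"]), 7: set(["peerid-A"])}
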
(sharemap, ueb_data, ueb_hash) = res
self.log("found file in grid", level=log.NOISY, parent=lp)
results.uri_extension_hash = ueb_hash
- results.sharemap = {}
- for shnum, peerids in sharemap.items():
- peers_s = ",".join(["[%s]" % idlib.shortnodeid_b2a(peerid)
- for peerid in peerids])
- results.sharemap[shnum] = "Found on " + peers_s
+ results.sharemap = sharemap
results.uri_extension_data = ueb_data
results.preexisting_shares = len(sharemap)
results.pushed_shares = 0
self._upload_status.set_results(upload_results)
return upload_results
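
With this hunk, results.sharemap carries the checker's mapping directly (shnum -> set of binary serverids) instead of pre-rendered "Found on [...]" strings, leaving any human-readable rendering to whatever consumes the results. Illustrative shapes only; the ids below are fake:

    # old, pre-1.3.0 shape: shnum -> human-readable string
    old_sharemap = {0: "Found on [xgru5adv],[rvsry4kn]",
                    1: "Found on [xgru5adv]"}

    # new shape: shnum -> set of serverids (fake placeholder values)
    new_sharemap = {0: set(["serverid-A", "serverid-B"]),
                    1: set(["serverid-A"])}
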
+ def _convert_old_upload_results(self, upload_results):
+ # pre-1.3.0 helpers return upload results which contain a mapping
+ # from shnum to a single human-readable string, containing things
+ # like "Found on [x],[y],[z]" (for healthy files that were already in
+ # the grid), "Found on [x]" (for files that needed upload but which
+ # discovered pre-existing shares), and "Placed on [x]" (for newly
+ # uploaded shares). The 1.3.0 helper returns a mapping from shnum to
+ # a set of binary serverid strings.
+
+ # the old results are too hard to deal with (they don't even contain
+ # as much information as the new results, since the nodeids are
+ # abbreviated), so if we detect old results, just clobber them.
+
+ sharemap = upload_results.sharemap
+ if str in [type(v) for v in sharemap.values()]:
+ upload_results.sharemap = None
+
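
The conversion above is just a type sniff: if any value in the sharemap is a string, the whole mapping is assumed to be old-style helper output and clobbered to None; set-valued maps pass through untouched. A self-contained sketch of the same test (looks_old_style and the sample values are invented for illustration):

    def looks_old_style(sharemap):
        # same check as _convert_old_upload_results: any str value
        # means pre-1.3.0 results
        return str in [type(v) for v in sharemap.values()]

    assert looks_old_style({0: "Found on [xgru5adv]"})
    assert not looks_old_style({0: set(["binary-serverid"])})
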
def _build_verifycap(self, upload_results):
self.log("upload finished, building readcap")
+ self._convert_old_upload_results(upload_results)
self._upload_status.set_status("Building Readcap")
r = upload_results
assert r.uri_extension_data["needed_shares"] == self._needed_shares