From a4068dd1e0f952cd5e2fe4fce65f2f554bd9e9e7 Mon Sep 17 00:00:00 2001 From: Brian Warner <warner@lothar.com> Date: Sat, 26 Feb 2011 19:11:53 -0700 Subject: [PATCH] immutable/downloader/fetcher.py: fix diversity bug in server-response handling When blocks terminate (either COMPLETE or CORRUPT/DEAD/BADSEGNUM), the _shares_from_server dict was being popped incorrectly (using shnum as the index instead of serverid). I'm still thinking through the consequences of this bug. It was probably benign and really hard to detect. I think it would cause us to incorrectly believe that we're pulling too many shares from a server, and thus prefer a different server rather than asking for a second share from the first server. The diversity code is intended to spread out the number of shares simultaneously being requested from each server, but with this bug, it might have been spreading out the total number of shares requested overall, rather than just the number requested simultaneously. (note that SegmentFetcher is scoped to a single segment, so the effect doesn't last very long). --- src/allmydata/immutable/downloader/fetcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/allmydata/immutable/downloader/fetcher.py b/src/allmydata/immutable/downloader/fetcher.py index 84552279..f45ffd96 100644 --- a/src/allmydata/immutable/downloader/fetcher.py +++ b/src/allmydata/immutable/downloader/fetcher.py @@ -236,7 +236,7 @@ class SegmentFetcher: # from all our tracking lists. if state in (COMPLETE, CORRUPT, DEAD, BADSEGNUM): self._share_observers.pop(share, None) - self._shares_from_server.discard(shnum, share) + self._shares_from_server.discard(share._server.get_serverid(), share) if self._active_share_map.get(shnum) is share: del self._active_share_map[shnum] self._overdue_share_map.discard(shnum, share) -- 2.45.2