3 from base64 import b32decode
4 from zope.interface import implements
5 from twisted.application import service
6 from foolscap import Referenceable
7 from allmydata.interfaces import InsufficientVersionError
8 from allmydata.introducer.interfaces import RIIntroducerSubscriberClient, \
10 from allmydata.util import log, idlib
11 from allmydata.util.rrefutil import get_versioned_remote_reference
12 from allmydata.introducer.common import make_index
15 class RemoteServiceConnector:
16 """I hold information about a peer service that we want to connect to. If
17 we are connected, I hold the RemoteReference, the peer's address, and the
18 peer's version information. I remember information about when we were
19 last connected to the peer too, even if we aren't currently connected.
21 @ivar announcement_time: when we first heard about this service
22 @ivar last_connect_time: when we last established a connection
23 @ivar last_loss_time: when we last lost a connection
25 @ivar version: the peer's version, from the most recent announcement
26 @ivar oldest_supported: the peer's oldest supported version, same
27 @ivar nickname: the peer's self-reported nickname, same
29 @ivar rref: the RemoteReference, if connected, otherwise None
30 @ivar remote_host: the IAddress, if connected, otherwise None
34 "storage": { "http://allmydata.org/tahoe/protocols/storage/v1" :
35 { "maximum-immutable-share-size": 2**32 },
36 "application-version": "unknown: no get_version()",
    def __init__(self, announcement, tub, ic):
        # `announcement` is the 6-tuple received from the introducer;
        # `tub` is our foolscap Tub; `ic` is the owning IntroducerClient.
        self._announcement = announcement
        (furl, service_name, ri_name, nickname, ver, oldest) = announcement
        # the FURL embeds the peer's base32-encoded tubid: pb://<tubid>@...
        m = re.match(r'pb://(\w+)@', furl)
        self._nodeid = b32decode(m.group(1).upper())
        # short printable form, used in log messages
        self._nodeid_s = idlib.shortnodeid_b2a(self._nodeid)
        self.service_name = service_name
        self.log("attempting to connect to %s" % self._nodeid_s)
        self.announcement_time = time.time()
        self.last_loss_time = None
        self.remote_host = None
        self.last_connect_time = None
        self.oldest_supported = oldest
        self.nickname = nickname
        # NOTE(review): other methods read self._tub, self._ic, self._furl,
        # and the class docstring promises self.rref/self.version; those
        # assignments are not visible in this copy — confirm they are
        # initialized here in the full file.
65 def log(self, *args, **kwargs):
66 return self._ic.log(*args, **kwargs)
68 def startConnecting(self):
69 self._reconnector = self._tub.connectTo(self._furl, self._got_service)
71 def stopConnecting(self):
72 self._reconnector.stopConnecting()
74 def _got_service(self, rref):
75 self.log("got connection to %s, getting versions" % self._nodeid_s)
77 default = self.VERSION_DEFAULTS.get(self.service_name, {})
78 d = get_versioned_remote_reference(rref, default)
79 d.addCallback(self._got_versioned_service)
    def _got_versioned_service(self, rref):
        # `rref` is a versioned wrapper around the RemoteReference; record
        # connection metadata and hand the reference to our IntroducerClient.
        self.log("connected to %s, version %s" % (self._nodeid_s, rref.version))
        self.last_connect_time = time.time()
        # reach through the wrapper to the raw broker to learn the peer's
        # network address
        self.remote_host = rref.rref.tracker.broker.transport.getPeer()
        # NOTE(review): the class docstring says self.rref is set while
        # connected; that assignment is not visible in this copy — confirm.
        self._ic.add_connection(self._nodeid, self.service_name, rref)
        # ask to be told when this connection goes away
        rref.notifyOnDisconnect(self._lost, rref)
    def _lost(self, rref):
        # called by foolscap when the connection drops; undo what
        # _got_versioned_service recorded and notify the IntroducerClient
        self.log("lost connection to %s" % self._nodeid_s)
        self.last_loss_time = time.time()
        self.remote_host = None
        self._ic.remove_connection(self._nodeid, self.service_name, rref)
        # NOTE(review): this reset() looks out of place inside _lost — it
        # may belong to a separate reset() method (used by add_connection's
        # "reset the timers" logic) whose `def` line is not visible in this
        # copy. Confirm against the full file.
        self._reconnector.reset()
105 class IntroducerClient(service.Service, Referenceable):
106 implements(RIIntroducerSubscriberClient, IIntroducerClient)
    def __init__(self, tub, introducer_furl,
                 nickname, my_version, oldest_supported):
        # NOTE(review): self._tub is read by startService/_new_announcement
        # but its assignment is not visible in this copy — confirm
        # `self._tub = tub` exists here in the full file.
        self.introducer_furl = introducer_furl
        # nickname is stored UTF-8 encoded; announcements carry bytes
        self._nickname = nickname.encode("utf-8")
        self._my_version = my_version
        self._oldest_supported = oldest_supported
        # announcements we have asked the introducer to publish for us
        self._published_announcements = set()
        # remote reference to the introducer, None while disconnected
        self._publisher = None
        self._connected = False
        self._subscribed_service_names = set()
        self._subscriptions = set() # requests we've actually sent
        self._received_announcements = set()
        # TODO: this set will grow without bound, until the node is restarted
        # we only accept one announcement per (peerid+service_name) pair.
        # This ensures that an upgraded host replaces their previous
        # announcement. It also means that each peer must have their own Tub
        # (no sharing), which is slightly weird but consistent with the rest
        # of the Tahoe codebase.
        self._connectors = {} # k: (peerid+svcname), v: RemoteServiceConnector
        # self._connections is a set of (peerid, service_name, rref) tuples
        self._connections = set()
        self.counter = 0 # incremented each time we change state, for tests
        self.encoding_parameters = None
139 def startService(self):
140 service.Service.startService(self)
141 self._introducer_error = None
142 rc = self._tub.connectTo(self.introducer_furl, self._got_introducer)
143 self._introducer_reconnector = rc
144 def connect_failed(failure):
145 self.log("Initial Introducer connection failed: perhaps it's down",
146 level=log.WEIRD, failure=failure, umid="c5MqUQ")
147 d = self._tub.getReference(self.introducer_furl)
148 d.addErrback(connect_failed)
150 def _got_introducer(self, publisher):
151 self.log("connected to introducer, getting versions")
152 default = { "http://allmydata.org/tahoe/protocols/introducer/v1":
154 "application-version": "unknown: no get_version()",
156 d = get_versioned_remote_reference(publisher, default)
157 d.addCallback(self._got_versioned_introducer)
158 d.addErrback(self._got_error)
160 def _got_error(self, f):
161 # TODO: for the introducer, perhaps this should halt the application
162 self._introducer_error = f # polled by tests
164 def _got_versioned_introducer(self, publisher):
165 self.log("got introducer version: %s" % (publisher.version,))
166 # we require a V1 introducer
167 needed = "http://allmydata.org/tahoe/protocols/introducer/v1"
168 if needed not in publisher.version:
169 raise InsufficientVersionError(needed, publisher.version)
170 self._connected = True
171 self._publisher = publisher
172 publisher.notifyOnDisconnect(self._disconnected)
173 self._maybe_publish()
174 self._maybe_subscribe()
176 def _disconnected(self):
177 self.log("bummer, we've lost our connection to the introducer")
178 self._connected = False
179 self._publisher = None
180 self._subscriptions.clear()
182 def stopService(self):
183 service.Service.stopService(self)
184 self._introducer_reconnector.stopConnecting()
185 for rsc in self._connectors.itervalues():
188 def log(self, *args, **kwargs):
189 if "facility" not in kwargs:
190 kwargs["facility"] = "tahoe.introducer"
191 return log.msg(*args, **kwargs)
194 def publish(self, furl, service_name, remoteinterface_name):
195 ann = (furl, service_name, remoteinterface_name,
196 self._nickname, self._my_version, self._oldest_supported)
197 self._published_announcements.add(ann)
198 self._maybe_publish()
200 def subscribe_to(self, service_name):
201 self._subscribed_service_names.add(service_name)
202 self._maybe_subscribe()
204 def _maybe_subscribe(self):
205 if not self._publisher:
206 self.log("want to subscribe, but no introducer yet",
209 for service_name in self._subscribed_service_names:
210 if service_name not in self._subscriptions:
211 # there is a race here, but the subscription desk ignores
212 # duplicate requests.
213 self._subscriptions.add(service_name)
214 d = self._publisher.callRemote("subscribe", self, service_name)
215 d.addErrback(log.err, facility="tahoe.introducer",
216 level=log.WEIRD, umid="2uMScQ")
218 def _maybe_publish(self):
219 if not self._publisher:
220 self.log("want to publish, but no introducer yet", level=log.NOISY)
222 # this re-publishes everything. The Introducer ignores duplicates
223 for ann in self._published_announcements:
224 d = self._publisher.callRemote("publish", ann)
225 d.addErrback(log.err, facility="tahoe.introducer",
226 level=log.WEIRD, umid="xs9pVQ")
230 def remote_announce(self, announcements):
231 for ann in announcements:
232 self.log("received %d announcements" % len(announcements))
233 (furl, service_name, ri_name, nickname, ver, oldest) = ann
234 if service_name not in self._subscribed_service_names:
235 self.log("announcement for a service we don't care about [%s]"
236 % (service_name,), level=log.UNUSUAL, umid="dIpGNA")
238 if ann in self._received_announcements:
239 self.log("ignoring old announcement: %s" % (ann,),
242 self.log("new announcement[%s]: %s" % (service_name, ann))
243 self._received_announcements.add(ann)
244 self._new_announcement(ann)
246 def _new_announcement(self, announcement):
247 # this will only be called for new announcements
248 index = make_index(announcement)
249 if index in self._connectors:
250 self.log("replacing earlier announcement", level=log.NOISY)
251 self._connectors[index].stopConnecting()
252 rsc = RemoteServiceConnector(announcement, self._tub, self)
253 self._connectors[index] = rsc
254 rsc.startConnecting()
256 def add_connection(self, nodeid, service_name, rref):
257 self._connections.add( (nodeid, service_name, rref) )
259 # when one connection is established, reset the timers on all others,
260 # to trigger a reconnection attempt in one second. This is intended
261 # to accelerate server connections when we've been offline for a
262 # while. The goal is to avoid hanging out for a long time with
263 # connections to only a subset of the servers, which would increase
264 # the chances that we'll put shares in weird places (and not update
265 # existing shares of mutable files). See #374 for more details.
266 for rsc in self._connectors.values():
269 def remove_connection(self, nodeid, service_name, rref):
270 self._connections.discard( (nodeid, service_name, rref) )
274 def get_all_connections(self):
275 return frozenset(self._connections)
277 def get_all_connectors(self):
278 return self._connectors.copy()
280 def get_all_peerids(self):
281 return frozenset([peerid
282 for (peerid, service_name, rref)
283 in self._connections])
285 def get_nickname_for_peerid(self, peerid):
286 for k in self._connectors:
287 (peerid0, svcname0) = k
288 if peerid0 == peerid:
289 rsc = self._connectors[k]
293 def get_all_connections_for(self, service_name):
295 for c in self._connections
296 if c[1] == service_name])
298 def get_peers(self, service_name):
299 """Return a set of (peerid, versioned-rref) tuples."""
300 return frozenset([(pid, r) for (pid, servname, r) in self._connections if servname == servname])
302 def get_permuted_peers(self, service_name, key):
303 """Return an ordered list of (peerid, versioned-rref) tuples."""
305 servers = self.get_peers(service_name)
307 return sorted(servers, key=lambda x: sha.new(key+x[0]).digest())
309 def remote_set_encoding_parameters(self, parameters):
310 self.encoding_parameters = parameters
312 def connected_to_introducer(self):
313 return self._connected
    def debug_disconnect_from_peerid(self, victim_nodeid):
        # for unit tests: locate and sever all connections to the given
        # peerid (the tail of this comment was truncated in this copy).
        # NOTE(review): _got_versioned_service reaches the broker via
        # rref.rref.tracker, but here it is rref.tracker — confirm which
        # wrapper level is stored in self._connections.
        for (nodeid, service_name, rref) in self._connections:
            if nodeid == victim_nodeid:
                rref.tracker.broker.transport.loseConnection()