"""
I contain the client-side code which speaks to storage servers, in particular
the foolscap-based server implemented in src/allmydata/storage/*.py .
"""

# roadmap:
#
# 1: implement StorageFarmBroker (i.e. "storage broker"), change Client to
# create it, change uploader/servermap to get rrefs from it. ServerFarm calls
# IntroducerClient.subscribe_to . ServerFarm hides descriptors, passes rrefs
# to clients. webapi status pages call broker.get_info_about_serverid.
#
# 2: move get_info methods to the descriptor, webapi status pages call
# broker.get_descriptor_for_serverid().get_info
#
# 3?later?: store descriptors in UploadResults/etc instead of serverids,
# webapi status pages call descriptor.get_info and don't use storage_broker
# or Client
#
# 4: enable static config: tahoe.cfg can add descriptors. Make the introducer
# optional. This closes #467
#
# 5: implement NativeStorageClient, pass it to Tahoe2PeerSelector and other
# clients. Clients stop doing callRemote(), use NativeStorageClient methods
# instead (which might do something else, i.e. http or whatever). The
# introducer and tahoe.cfg only create NativeStorageClients for now.
#
# 6: implement other sorts of IStorageClient classes: S3, etc


import re, time
from zope.interface import implements
from foolscap.api import eventually
from allmydata.interfaces import IStorageBroker, IServer
from allmydata.util import log, base32
from allmydata.util.assertutil import precondition
from allmydata.util.rrefutil import add_version_to_remote_reference
from allmydata.util.hashutil import sha1

# who is responsible for de-duplication?
#  both?
#  IC remembers the unpacked announcements it receives, to provide for late
#  subscribers and to remove duplicates

# if a client subscribes after startup, will they receive old announcements?
#  yes

# who will be responsible for signature checking?
#  make it be IntroducerClient, so they can push the filter outwards and
#  reduce inbound network traffic

# what should the interface between StorageFarmBroker and IntroducerClient
# look like?
#  don't pass signatures: only pass validated blessed-objects

class StorageFarmBroker:
    """I live on the client, and know about storage servers. For each server
    that is participating in a grid, I either maintain a connection to it or
    remember enough information to establish a connection to it on demand.
    I'm also responsible for subscribing to the IntroducerClient to find out
    about new servers as they are announced by the Introducer.
    """
    implements(IStorageBroker)
    def __init__(self, tub, permute_peers):
        self.tub = tub
        assert permute_peers # False not implemented yet
        self.permute_peers = permute_peers
        # self.servers maps serverid -> IServer, and keeps track of all the
        # storage servers that we've heard about. Each descriptor manages its
        # own Reconnector, and will give us a RemoteReference when we ask it
        # for one.
        self.servers = {}
        self.introducer_client = None

    # these two are used in unit tests
    def test_add_rref(self, serverid, rref, ann):
        s = NativeStorageServer(serverid, ann.copy())
        s.rref = rref
        self.servers[serverid] = s

    def test_add_server(self, serverid, s):
        self.servers[serverid] = s

    def use_introducer(self, introducer_client):
        self.introducer_client = ic = introducer_client
        ic.subscribe_to("storage", self._got_announcement)

    def _got_announcement(self, key_s, ann):
        if key_s is not None:
            precondition(isinstance(key_s, str), key_s)
            precondition(key_s.startswith("v0-"), key_s)
        assert ann["service-name"] == "storage"
        s = NativeStorageServer(key_s, ann)
        serverid = s.get_serverid()
        old = self.servers.get(serverid)
        if old:
            if old.get_announcement() == ann:
                return # duplicate
            # replacement
            del self.servers[serverid]
            old.stop_connecting()
            # now we forget about the old one and start using the new one
        self.servers[serverid] = s
        s.start_connecting(self.tub, self._trigger_connections)
        # each descriptor manages its own Reconnector, and each time we need
        # servers, we'll ask it whether it's connected or not.
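        #
        # Illustrative example (not part of the original code), assuming a
        # broker that has already subscribed via use_introducer():
        #
        #   broker._got_announcement(key_s, ann)   # new server: descriptor created
        #   broker._got_announcement(key_s, ann)   # identical announcement: ignored
        #   broker._got_announcement(key_s, ann2)  # changed announcement (same tubid):
        #                                          # old descriptor dropped, new one used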

    def _trigger_connections(self):
        # when one connection is established, reset the timers on all others,
        # to trigger a reconnection attempt in one second. This is intended
        # to accelerate server connections when we've been offline for a
        # while. The goal is to avoid hanging out for a long time with
        # connections to only a subset of the servers, which would increase
        # the chances that we'll put shares in weird places (and not update
        # existing shares of mutable files). See #374 for more details.
        for dsc in self.servers.values():
            dsc.try_to_connect()

    def get_servers_for_psi(self, peer_selection_index):
        # return a list of server objects (IServers)
        assert self.permute_peers == True
        def _permuted(server):
            seed = server.get_permutation_seed()
            return sha1(peer_selection_index + seed).digest()
        return sorted(self.get_connected_servers(), key=_permuted)
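        # Illustrative note (not part of the original code): the permuted
        # order depends on both the peer_selection_index (typically a file's
        # storage index) and each server's permutation seed, so different
        # files see different orderings of the same connected-server set.
        # For example:
        #
        #   order_a = broker.get_servers_for_psi(si_a)
        #   order_b = broker.get_servers_for_psi(si_b)
        #
        # order_a and order_b contain the same servers, usually in different
        # orders, since the sort key is sha1(peer_selection_index + seed).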

    def get_all_serverids(self):
        return frozenset(self.servers.keys())

    def get_connected_servers(self):
        return frozenset([s for s in self.servers.values() if s.get_rref()])

    def get_known_servers(self):
        return frozenset(self.servers.values())

    def get_nickname_for_serverid(self, serverid):
        if serverid in self.servers:
            return self.servers[serverid].get_nickname()
        return None

class NativeStorageServer:
    """I hold information about a storage server that we want to connect to.
    If we are connected, I hold the RemoteReference, their host address, and
    their version information. I remember information about when we were
    last connected, even if we aren't currently connected.

    @ivar announcement_time: when we first heard about this service
    @ivar last_connect_time: when we last established a connection
    @ivar last_loss_time: when we last lost a connection

    @ivar version: the server's versiondict, from the most recent announcement
    @ivar nickname: the server's self-reported nickname (unicode), also from
        the most recent announcement

    @ivar rref: the RemoteReference, if connected, otherwise None
    @ivar remote_host: the IAddress, if connected, otherwise None
    """
    implements(IServer)

    VERSION_DEFAULTS = {
        "http://allmydata.org/tahoe/protocols/storage/v1" :
        { "maximum-immutable-share-size": 2**32,
          "tolerates-immutable-read-overrun": False,
          "delete-mutable-shares-with-zero-length-writev": False,
          },
        "application-version": "unknown: no get_version()",
        }

    def __init__(self, key_s, ann, min_shares=1):
        self.key_s = key_s
        self.announcement = ann
        self.min_shares = min_shares

        assert "anonymous-storage-FURL" in ann, ann
        furl = str(ann["anonymous-storage-FURL"])
        m = re.match(r'pb://(\w+)@', furl)
        assert m, furl
        tubid_s = m.group(1).lower()
        self._tubid = base32.a2b(tubid_s)
        assert "permutation-seed-base32" in ann, ann
        ps = base32.a2b(str(ann["permutation-seed-base32"]))
        self._permutation_seed = ps

        if key_s:
            self._long_description = key_s
            if key_s.startswith("v0-"):
                # remove v0- prefix from abbreviated name
                self._short_description = key_s[3:3+8]
            else:
                self._short_description = key_s[:8]
        else:
            self._long_description = tubid_s
            self._short_description = tubid_s[:8]

        self.announcement_time = time.time()
        self.last_connect_time = None
        self.last_loss_time = None
        self.remote_host = None
        self.rref = None
        self._reconnector = None
        self._trigger_cb = None

    # Special methods used by copy.copy() and copy.deepcopy(). When those are
    # used in allmydata.immutable.filenode to copy CheckResults during
    # repair, we want them to treat the IServer instances as singletons, and
    # not attempt to duplicate them.
    def __copy__(self):
        return self
    def __deepcopy__(self, memodict):
        return self
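    # Illustrative example (not part of the original code):
    #
    #   import copy
    #   s = NativeStorageServer(key_s, ann)
    #   copy.copy(s) is s              # True: the same instance is returned
    #   copy.deepcopy([s, s])[0] is s  # True: identity survives deepcopy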

    def __repr__(self):
        return "<NativeStorageServer for %s>" % self.get_name()
    def get_serverid(self):
        return self._tubid # XXX replace with self.key_s
    def get_permutation_seed(self):
        return self._permutation_seed
    def get_version(self):
        if self.rref:
            return self.rref.version
        return None
    def get_name(self): # keep methodname short
        # TODO: decide who adds [] in the short description. It should
        # probably be the output side, not here.
        return self._short_description
    def get_longname(self):
        return self._long_description
    def get_lease_seed(self):
        return self._tubid
    def get_foolscap_write_enabler_seed(self):
        return self._tubid

    def get_nickname(self):
        return self.announcement["nickname"].decode("utf-8")
    def get_announcement(self):
        return self.announcement
    def get_remote_host(self):
        return self.remote_host
    def get_last_connect_time(self):
        return self.last_connect_time
    def get_last_loss_time(self):
        return self.last_loss_time
    def get_announcement_time(self):
        return self.announcement_time

    def start_connecting(self, tub, trigger_cb):
        furl = str(self.announcement["anonymous-storage-FURL"])
        self._trigger_cb = trigger_cb
        self._reconnector = tub.connectTo(furl, self._got_connection)

    def _got_connection(self, rref):
        lp = log.msg(format="got connection to %(name)s, getting versions",
                     name=self.get_name(),
                     facility="tahoe.storage_broker", umid="coUECQ")
        if self._trigger_cb:
            eventually(self._trigger_cb)
        default = self.VERSION_DEFAULTS
        d = add_version_to_remote_reference(rref, default)
        d.addCallback(self._got_versioned_service, lp)
        d.addErrback(log.err, format="storageclient._got_connection",
                     name=self.get_name(), umid="Sdq3pg")

    def _got_versioned_service(self, rref, lp):
        log.msg(format="%(name)s provided version info %(version)s",
                name=self.get_name(), version=rref.version,
                facility="tahoe.storage_broker", umid="SWmJYg",
                level=log.NOISY, parent=lp)

        self.last_connect_time = time.time()
        self.remote_host = rref.getPeer()
        self.rref = rref
        rref.notifyOnDisconnect(self._lost)

    def get_rref(self):
        return self.rref

    def _lost(self):
        log.msg(format="lost connection to %(name)s", name=self.get_name(),
                facility="tahoe.storage_broker", umid="zbRllw")
        self.last_loss_time = time.time()
        self.rref = None
        self.remote_host = None

    def stop_connecting(self):
        # used when this descriptor has been superseded by another
        self._reconnector.stopConnecting()

    def try_to_connect(self):
        # used when the broker wants us to hurry up
        self._reconnector.reset()

class UnknownServerTypeError(Exception):
    pass
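
# The function below is an illustrative sketch and not part of the original
# module: it shows how a caller holding a StorageFarmBroker (the `broker`
# argument is a hypothetical name) might use the permuted server list that
# get_servers_for_psi() computes for a given peer selection index.

def _example_select_servers(broker, peer_selection_index, count):
    """Illustrative only: return the first `count` connected servers in the
    permuted order that the broker computes for `peer_selection_index`."""
    # get_servers_for_psi() returns connected IServer objects sorted by
    # sha1(peer_selection_index + permutation_seed); the prefix of that list
    # is the usual per-file server selection.
    return broker.get_servers_for_psi(peer_selection_index)[:count]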