import os, stat, time, weakref
from allmydata import node

from zope.interface import implements
from twisted.internet import reactor, defer
from twisted.application import service
from twisted.application.internet import TimerService
from pycryptopp.publickey import rsa

import allmydata
from allmydata.storage.server import StorageServer
from allmydata import storage_client
from allmydata.immutable.upload import Uploader
from allmydata.immutable.offloaded import Helper
from allmydata.control import ControlServer
from allmydata.introducer.client import IntroducerClient
from allmydata.util import hashutil, base32, pollmixin, log, keyutil
from allmydata.util.encodingutil import get_filesystem_encoding
from allmydata.util.abbreviate import parse_abbreviated_size
from allmydata.util.time_format import parse_duration, parse_date
from allmydata.stats import StatsProvider
from allmydata.history import History
from allmydata.interfaces import IStatsProducer, SDMF_VERSION, MDMF_VERSION
from allmydata.nodemaker import NodeMaker
from allmydata.blacklist import Blacklist
from allmydata.node import OldConfigOptionError


KiB = 1024

def _make_secret():
    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"

class SecretHolder:
    def __init__(self, lease_secret, convergence_secret):
        self._lease_secret = lease_secret
        self._convergence_secret = convergence_secret

    def get_renewal_secret(self):
        return hashutil.my_renewal_secret_hash(self._lease_secret)

    def get_cancel_secret(self):
        return hashutil.my_cancel_secret_hash(self._lease_secret)

    def get_convergence_secret(self):
        return self._convergence_secret
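
# Illustrative note (added, not in the original module): a SecretHolder is
# built once, in Client.init_secrets(), from two values stored under
# private/, e.g.
#
#   holder = SecretHolder(base32.a2b(lease_s), base32.a2b(convergence_s))
#   holder.get_renewal_secret()   # a hash of the lease secret, never the secret itself
#
# so the raw lease secret stays inside this object; servers only ever see
# the derived renewal/cancel values.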
53 """I create RSA keys for mutable files. Each call to generate() returns a
54 single keypair. The keysize is specified first by the keysize= argument
55 to generate(), then with a default set by set_default_keysize(), then
56 with a built-in default of 2048 bits."""
59 self.default_keysize = 2048
61 def set_remote_generator(self, keygen):
63 def set_default_keysize(self, keysize):
64 """Call this to override the size of the RSA keys created for new
65 mutable files which don't otherwise specify a size. This will affect
66 all subsequent calls to generate() without a keysize= argument. The
67 default size is 2048 bits. Test cases should call this method once
68 during setup, to cause me to create smaller keys, so the unit tests
70 self.default_keysize = keysize
72 def generate(self, keysize=None):
73 """I return a Deferred that fires with a (verifyingkey, signingkey)
74 pair. I accept a keysize in bits (2048 bit keys are standard, smaller
75 keys are used for testing). If you do not provide a keysize, I will
76 use my default, which is set by a call to set_default_keysize(). If
77 set_default_keysize() has never been called, I will create 2048 bit
79 keysize = keysize or self.default_keysize
81 d = self._remote.callRemote('get_rsa_key_pair', keysize)
82 def make_key_objs((verifying_key, signing_key)):
83 v = rsa.create_verifying_key_from_string(verifying_key)
84 s = rsa.create_signing_key_from_string(signing_key)
86 d.addCallback(make_key_objs)
89 # RSA key generation for a 2048 bit key takes between 0.8 and 3.2
91 signer = rsa.generate(keysize)
92 verifier = signer.get_verifying_key()
93 return defer.succeed( (verifier, signer) )
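
# Illustrative usage sketch (added, not part of the original module):
# generate() always returns a Deferred, whether the keypair comes from a
# remote key-generator service or is computed locally, e.g.
#
#   kg = KeyGenerator()
#   kg.set_default_keysize(1024)   # e.g. tests pick a smaller size for speed
#   d = kg.generate()
#   d.addCallback(lambda (verifier, signer): use_keys(verifier, signer))
#
# 'use_keys' is a placeholder, not a real function in this codebase.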

class Terminator(service.Service):
    def __init__(self):
        self._clients = weakref.WeakKeyDictionary()
    def register(self, c):
        self._clients[c] = None
    def stopService(self):
        for c in self._clients:
            c.stop()
        return service.Service.stopService(self)
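
# Note (added): Terminator holds only weak references to the objects
# registered with it, so registering for shutdown does not keep an otherwise
# finished operation alive; when the client service stops, each still-live
# registrant has stop() called on it before the base stopService runs.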


class Client(node.Node, pollmixin.PollMixin):
    implements(IStatsProducer)

    PORTNUMFILE = "client.port"
    STOREDIR = "storage"
    SUICIDE_PREVENTION_HOTLINE_FILE = "suicide_prevention_hotline"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # these are the default encoding parameters. 'needed' ("k") is the number
    # of shares required to reconstruct a file. 'desired' ("happy") means
    # that we will abort an upload unless we can allocate space for at least
    # this many. 'total' ("n") is the total number of shares created by
    # encoding. If everybody has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {"k": 3,
                                   "happy": 7,
                                   "n": 10,
                                   "max_segment_size": 128*KiB,
                                   }

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        self.started_timestamp = time.time()
        self.logSource="Client"
        self.DEFAULT_ENCODING_PARAMETERS = self.DEFAULT_ENCODING_PARAMETERS.copy()
        self.init_introducer_client()
        self.init_stats_provider()
        self.init_secrets()
        self.init_storage()
        self.init_control()
        self.helper = None
        if self.get_config("helper", "enabled", False, boolean=True):
            self.init_helper()
        self._key_generator = KeyGenerator()
        key_gen_furl = self.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            self.init_key_gen(key_gen_furl)
        self.init_client()
        # ControlServer and Helper are attached after Tub startup
        self.init_ftp_server()
        self.init_sftp_server()
        self.init_drop_uploader()
        hotline_file = os.path.join(self.basedir,
                                    self.SUICIDE_PREVENTION_HOTLINE_FILE)
        if os.path.exists(hotline_file):
            age = time.time() - os.stat(hotline_file)[stat.ST_MTIME]
            self.log("hotline file noticed (%ds old), starting timer" % age)
            hotline = TimerService(1.0, self._check_hotline, hotline_file)
            hotline.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = self.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport) # strports string

    def init_introducer_client(self):
        self.introducer_furl = self.get_config("client", "introducer.furl")
        ic = IntroducerClient(self.tub, self.introducer_furl,
                              self.nickname,
                              str(allmydata.__full_version__),
                              str(self.OLDEST_SUPPORTED_VERSION),
                              self.get_app_versions())
        self.introducer_client = ic
        # hold off on starting the IntroducerClient until our tub has been
        # started, so we'll have a useful address on our RemoteReference, so
        # that the introducer's status page will show us.
        d = self.when_tub_ready()
        def _start_introducer_client(res):
            ic.setServiceParent(self)
        d.addCallback(_start_introducer_client)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="URyI5w")

    def init_stats_provider(self):
        gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.add_service(self.stats_provider)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return { 'node.uptime': time.time() - self.started_timestamp }

    def init_secrets(self):
        lease_s = self.get_or_create_private_config("secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.get_or_create_private_config('convergence',
                                                          _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def _maybe_create_node_key(self):
        # we only create the key once. On all subsequent runs, we re-use the
        # existing key
        def _make_key():
            sk_vs,vk_vs = keyutil.make_keypair()
            return sk_vs+"\n"

        # for a while (between releases, before 1.10) this was known as
        # server.privkey, but now it lives in node.privkey. This fallback can
        # be removed after 1.10 is released.
        sk_vs = self.get_private_config("server.privkey", None)
        if not sk_vs:
            sk_vs = self.get_or_create_private_config("node.privkey", _make_key)
        sk,vk_vs = keyutil.parse_privkey(sk_vs.strip())
        self.write_config("node.pubkey", vk_vs+"\n")
        self._server_key = sk
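
    # Note (added): the private key string lives under private/node.privkey
    # and the matching public key is written to node.pubkey in the node's
    # base directory; the parsed signing key itself is held only in memory
    # as self._server_key.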

    def _init_permutation_seed(self, ss):
        seed = self.get_config_from_file("permutation-seed")
        if not seed:
            have_shares = ss.have_shares()
            if have_shares:
                # if the server has shares but not a recorded
                # permutation-seed, then it has been around since pre-#466
                # days, and the clients who uploaded those shares used our
                # TubID as a permutation-seed. We should keep using that same
                # seed to keep the shares in the same place in the permuted
                # ring, so those clients don't have to perform excessive
                # searches.
                seed = base32.b2a(self.nodeid)
            else:
                # otherwise, we're free to use the more natural seed of our
                # pubkey-based serverid
                vk_bytes = self._server_key.get_verifying_key_bytes()
                seed = base32.b2a(vk_bytes)
            self.write_config("permutation-seed", seed+"\n")
        return seed.strip()

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.get_config("storage", "enabled", True, boolean=True):
            return
        readonly = self.get_config("storage", "readonly", False, boolean=True)

        self._maybe_create_node_key()

        storedir = os.path.join(self.basedir, self.STOREDIR)

        data = self.get_config("storage", "reserved_space", None)
        reserved = None
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s"
                    % data)
        if reserved is None:
            reserved = 0
        discard = self.get_config("storage", "debug_discard", False,
                                  boolean=True)

        expire = self.get_config("storage", "expire.enabled", False, boolean=True)
        if expire:
            mode = self.get_config("storage", "expire.mode") # require a mode
        else:
            mode = self.get_config("storage", "expire.mode", "age")

        o_l_d = self.get_config("storage", "expire.override_lease_duration", None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir, self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        self.add_service(ss)

        d = self.when_tub_ready()
        # we can't do registerReference until the Tub is ready
        def _publish(res):
            furl_file = os.path.join(self.basedir, "private", "storage.furl").encode(get_filesystem_encoding())
            furl = self.tub.registerReference(ss, furlFile=furl_file)
            ann = {"anonymous-storage-FURL": furl,
                   "permutation-seed-base32": self._init_permutation_seed(ss),
                   }
            self.introducer_client.publish("storage", ann, self._server_key)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="aLGBKw")
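
    # Illustrative config (added): the options read above live in the
    # [storage] section of tahoe.cfg; the values shown are examples, not
    # defaults:
    #
    #   [storage]
    #   enabled = true
    #   readonly = false
    #   reserved_space = 1G
    #   expire.enabled = true
    #   expire.mode = age
    #
    # reserved_space accepts abbreviated sizes (parse_abbreviated_size), and
    # expire.mode may also be "cutoff-date", in which case
    # expire.cutoff_date is required.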

    def init_client(self):
        helper_furl = self.get_config("client", "helper.furl", None)
        DEP = self.DEFAULT_ENCODING_PARAMETERS
        DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(self.get_config("client", "shares.happy", DEP["happy"]))

        self.init_client_storage_broker()
        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        self.add_service(Uploader(helper_furl, self.stats_provider,
                                  self.history))
        self.init_blacklist()
        self.init_nodemaker()
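
    # Illustrative config (added): the encoding overrides read above come
    # from the [client] section of tahoe.cfg; values are examples only:
    #
    #   [client]
    #   introducer.furl = pb://...
    #   shares.needed = 3
    #   shares.happy = 7
    #   shares.total = 10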

    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        sb = storage_client.StorageFarmBroker(self.tub, permute_peers=True)
        self.storage_broker = sb

        # load static server specifications from tahoe.cfg, if any.
        # Not quite ready yet.
        #if self.config.has_section("client-server-selection"):
        #    server_params = {} # maps serverid to dict of parameters
        #    for (name, value) in self.config.items("client-server-selection"):
        #        pieces = name.split(".")
        #        if pieces[0] == "server":
        #            serverid = pieces[1]
        #            if serverid not in server_params:
        #                server_params[serverid] = {}
        #            server_params[serverid][pieces[2]] = value
        #    for serverid, params in server_params.items():
        #        server_type = params.pop("type")
        #        if server_type == "tahoe-foolscap":
        #            s = storage_client.NativeStorageClient(*params)
        #        else:
        #            msg = ("unrecognized server type '%s' in "
        #                   "tahoe.cfg [client-server-selection]server.%s.type"
        #                   % (server_type, serverid))
        #            raise storage_client.UnknownServerTypeError(msg)
        #        sb.add_server(s.serverid, s)

        # check to see if we're supposed to use the introducer too
        if self.get_config("client-server-selection", "use_introducer",
                           default=True, boolean=True):
            sb.use_introducer(self.introducer_client)

    def get_storage_broker(self):
        return self.storage_broker

    def init_blacklist(self):
        fn = os.path.join(self.basedir, "access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        default = self.get_config("client", "mutable.format", default="SDMF")
        if default.upper() == "MDMF":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION
        self.nodemaker = NodeMaker(self.storage_broker,
                                   self._secret_holder,
                                   self.get_history(),
                                   self.getServiceNamed("uploader"),
                                   self.terminator,
                                   self.get_encoding_parameters(),
                                   self.mutable_file_default,
                                   self._key_generator,
                                   self.blacklist)
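
    # Note (added): [client]mutable.format selects the default format for
    # newly created mutable files; "MDMF" picks the newer segmented mutable
    # format, and anything else falls back to SDMF.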

    def get_history(self):
        return self.history

    def init_control(self):
        d = self.when_tub_ready()
        def _publish(res):
            c = ControlServer()
            c.setServiceParent(self)
            control_url = self.tub.registerReference(c)
            self.write_private_config("control.furl", control_url + "\n")
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="d3tNXA")

    def init_helper(self):
        d = self.when_tub_ready()
        def _publish(self):
            self.helper = Helper(os.path.join(self.basedir, "helper"),
                                 self.storage_broker, self._secret_holder,
                                 self.stats_provider, self.history)
            # TODO: this is confusing. BASEDIR/private/helper.furl is created
            # by the helper. BASEDIR/helper.furl is consumed by the client
            # who wants to use the helper. I like having the filename be the
            # same, since that makes 'cp' work smoothly, but the difference
            # between config inputs and generated outputs is hard to see.
            helper_furlfile = os.path.join(self.basedir,
                                           "private", "helper.furl").encode(get_filesystem_encoding())
            self.tub.registerReference(self.helper, furlFile=helper_furlfile)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="K0mW5w")

    def init_key_gen(self, key_gen_furl):
        d = self.when_tub_ready()
        def _subscribe(self):
            self.tub.connectTo(key_gen_furl, self._got_key_generator)
        d.addCallback(_subscribe)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="z9DMzw")

    def _got_key_generator(self, key_generator):
        self._key_generator.set_remote_generator(key_generator)
        key_generator.notifyOnDisconnect(self._lost_key_generator)

    def _lost_key_generator(self):
        self._key_generator.set_remote_generator(None)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport,))

        from allmydata.webish import WebishServer
        nodeurl_path = os.path.join(self.basedir, "node.url")
        staticdir = self.get_config("node", "web.static", "public_html")
        staticdir = os.path.expanduser(staticdir)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        self.add_service(ws)

    def init_ftp_server(self):
        if self.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("ftpd", "accounts.file", None)
            accounturl = self.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("sftpd", "accounts.file", None)
            accounturl = self.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.get_config("sftpd", "port", "8022")
            pubkey_file = self.get_config("sftpd", "host_pubkey_file")
            privkey_file = self.get_config("sftpd", "host_privkey_file")

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl,
                                 sftp_portstr, pubkey_file, privkey_file)
            s.setServiceParent(self)
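
    # Illustrative config (added): the SFTP frontend reads its settings from
    # the [sftpd] section of tahoe.cfg; the paths below are examples, not
    # defaults:
    #
    #   [sftpd]
    #   enabled = true
    #   port = 8022
    #   host_pubkey_file = private/ssh_host_rsa_key.pub
    #   host_privkey_file = private/ssh_host_rsa_key
    #   accounts.file = private/accounts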

    def init_drop_uploader(self):
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            if self.get_config("drop_upload", "upload.dircap", None):
                raise OldConfigOptionError("The [drop_upload]upload.dircap option is no longer supported; please "
                                           "put the cap in a 'private/drop_upload_dircap' file, and delete this option.")

            upload_dircap = self.get_or_create_private_config("drop_upload_dircap")
            local_dir_utf8 = self.get_config("drop_upload", "local.directory")

            try:
                from allmydata.frontends import drop_upload
                s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
                s.setServiceParent(self)
                s.startService()
            except Exception, e:
                self.log("couldn't start drop-uploader: %r", args=(e,))

    def _check_hotline(self, hotline_file):
        if os.path.exists(hotline_file):
            mtime = os.stat(hotline_file)[stat.ST_MTIME]
            if mtime > time.time() - 120.0:
                return
            else:
                self.log("hotline file too old, shutting down")
        else:
            self.log("hotline file missing, shutting down")
        reactor.stop()

    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS

    def connected_to_introducer(self):
        if self.introducer_client:
            return self.introducer_client.connected_to_introducer()
        return False

    def get_renewal_secret(self): # this will go away
        return self._secret_holder.get_renewal_secret()

    def get_cancel_secret(self):
        return self._secret_holder.get_cancel_secret()

    def debug_wait_for_client_connections(self, num_clients):
        """Return a Deferred that fires (with None) when we have connections
        to the given number of peers. Useful for tests that set up a
        temporary test network and need to know when it is safe to proceed
        with an upload or download."""
        def _check():
            return len(self.storage_broker.get_connected_servers()) >= num_clients
        d = self.poll(_check, 0.5)
        d.addCallback(lambda res: None)
        return d

    # these four methods are the primitives for creating filenodes and
    # dirnodes. The first takes a URI and produces a filenode or (new-style)
    # dirnode. The other three create brand-new filenodes/dirnodes.

    def create_node_from_uri(self, write_uri, read_uri=None, deep_immutable=False, name="<unknown name>"):
        # This returns synchronously.
        # Note that it does *not* validate the write_uri and read_uri; instead we
        # may get an opaque node if there were any problems.
        return self.nodemaker.create_from_cap(write_uri, read_uri, deep_immutable=deep_immutable, name=name)

    def create_dirnode(self, initial_children={}, version=None):
        d = self.nodemaker.create_new_mutable_directory(initial_children, version=version)
        return d

    def create_immutable_dirnode(self, children, convergence=None):
        return self.nodemaker.create_immutable_directory(children, convergence)

    def create_mutable_file(self, contents=None, keysize=None, version=None):
        return self.nodemaker.create_mutable_file(contents, keysize,
                                                  version=version)

    def upload(self, uploadable):
        uploader = self.getServiceNamed("uploader")
        return uploader.upload(uploadable)