import os, stat, time, weakref
from allmydata import node

from zope.interface import implements
from twisted.internet import reactor, defer
from twisted.application import service
from twisted.application.internet import TimerService
from pycryptopp.publickey import rsa

import allmydata
from allmydata.storage.server import StorageServer
from allmydata import storage_client
from allmydata.immutable.upload import Uploader
from allmydata.immutable.offloaded import Helper
from allmydata.control import ControlServer
from allmydata.introducer.client import IntroducerClient
from allmydata.util import hashutil, base32, pollmixin, log, keyutil
from allmydata.util.encodingutil import get_filesystem_encoding
from allmydata.util.abbreviate import parse_abbreviated_size
from allmydata.util.time_format import parse_duration, parse_date
from allmydata.stats import StatsProvider
from allmydata.history import History
from allmydata.interfaces import IStatsProducer, SDMF_VERSION, MDMF_VERSION
from allmydata.nodemaker import NodeMaker
from allmydata.blacklist import Blacklist
from allmydata.node import OldConfigOptionError

KiB = 1024

def _make_secret():
    return base32.b2a(os.urandom(hashutil.CRYPTO_VAL_SIZE)) + "\n"

class SecretHolder:
    def __init__(self, lease_secret, convergence_secret):
        self._lease_secret = lease_secret
        self._convergence_secret = convergence_secret

    def get_renewal_secret(self):
        return hashutil.my_renewal_secret_hash(self._lease_secret)

    def get_cancel_secret(self):
        return hashutil.my_cancel_secret_hash(self._lease_secret)

    def get_convergence_secret(self):
        return self._convergence_secret
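
# How SecretHolder fits together (a hedged, illustrative sketch; the real
# wiring is in Client.init_secrets() below): the lease secret and the
# convergence secret are random values stored under BASEDIR/private/, and the
# per-lease renewal and cancel secrets are both derived from the single lease
# secret.
#
#     holder = SecretHolder(base32.a2b(lease_s), base32.a2b(convergence_s))
#     renew_secret = holder.get_renewal_secret()
#     cancel_secret = holder.get_cancel_secret()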
53 """I create RSA keys for mutable files. Each call to generate() returns a
54 single keypair. The keysize is specified first by the keysize= argument
55 to generate(), then with a default set by set_default_keysize(), then
56 with a built-in default of 2048 bits."""
59 self.default_keysize = 2048
61 def set_remote_generator(self, keygen):
63 def set_default_keysize(self, keysize):
64 """Call this to override the size of the RSA keys created for new
65 mutable files which don't otherwise specify a size. This will affect
66 all subsequent calls to generate() without a keysize= argument. The
67 default size is 2048 bits. Test cases should call this method once
68 during setup, to cause me to create smaller keys, so the unit tests
70 self.default_keysize = keysize
72 def generate(self, keysize=None):
73 """I return a Deferred that fires with a (verifyingkey, signingkey)
74 pair. I accept a keysize in bits (2048 bit keys are standard, smaller
75 keys are used for testing). If you do not provide a keysize, I will
76 use my default, which is set by a call to set_default_keysize(). If
77 set_default_keysize() has never been called, I will create 2048 bit
79 keysize = keysize or self.default_keysize
81 d = self._remote.callRemote('get_rsa_key_pair', keysize)
82 def make_key_objs((verifying_key, signing_key)):
83 v = rsa.create_verifying_key_from_string(verifying_key)
84 s = rsa.create_signing_key_from_string(signing_key)
86 d.addCallback(make_key_objs)
89 # RSA key generation for a 2048 bit key takes between 0.8 and 3.2
91 signer = rsa.generate(keysize)
92 verifier = signer.get_verifying_key()
93 return defer.succeed( (verifier, signer) )
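
# Hedged usage sketch for KeyGenerator (names and the key size are
# illustrative, not part of this module): unit tests typically shrink the
# default keysize so RSA generation doesn't dominate the test run, then wait
# on the Deferred.
#
#     keygen = KeyGenerator()
#     keygen.set_default_keysize(522)   # illustrative small size for tests
#     d = keygen.generate()             # fires with (verifying_key, signing_key)
#     d.addCallback(lambda (vk, sk): use_keys(vk, sk))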

class Terminator(service.Service):
    def __init__(self):
        self._clients = weakref.WeakKeyDictionary()
    def register(self, c):
        self._clients[c] = None
    def stopService(self):
        for c in self._clients:
            c.stop()
        return service.Service.stopService(self)

class Client(node.Node, pollmixin.PollMixin):
    implements(IStatsProducer)

    PORTNUMFILE = "client.port"
    STOREDIR = 'storage'
    NODETYPE = "client"
    SUICIDE_PREVENTION_HOTLINE_FILE = "suicide_prevention_hotline"

    # This means that if a storage server treats me as though I were a
    # 1.0.0 storage client, it will work as they expect.
    OLDEST_SUPPORTED_VERSION = "1.0.0"

    # This is a dict of encoding parameters. 'k' (needed) is the number of
    # shares required to reconstruct a file. 'happy' means that we will abort
    # an upload unless we can allocate space for at least this many. 'n'
    # (total) is the total number of shares created by encoding. If everybody
    # has room then this is how many we will upload.
    DEFAULT_ENCODING_PARAMETERS = {"k": 3,
                                   "happy": 7,
                                   "n": 10,
                                   "max_segment_size": 128*KiB,
                                   }

    def __init__(self, basedir="."):
        node.Node.__init__(self, basedir)
        self.started_timestamp = time.time()
        self.logSource="Client"
        self.DEFAULT_ENCODING_PARAMETERS = self.DEFAULT_ENCODING_PARAMETERS.copy()
        self.init_introducer_client()
        self.init_stats_provider()
        self.init_secrets()
        self.init_storage()
        self.init_control()
        self.helper = None
        if self.get_config("helper", "enabled", False, boolean=True):
            self.init_helper()
        self._key_generator = KeyGenerator()
        key_gen_furl = self.get_config("client", "key_generator.furl", None)
        if key_gen_furl:
            self.init_key_gen(key_gen_furl)
        self.init_client()
        # ControlServer and Helper are attached after Tub startup
        self.init_ftp_server()
        self.init_sftp_server()
        self.init_drop_uploader()

        hotline_file = os.path.join(self.basedir,
                                    self.SUICIDE_PREVENTION_HOTLINE_FILE)
        if os.path.exists(hotline_file):
            age = time.time() - os.stat(hotline_file)[stat.ST_MTIME]
            self.log("hotline file noticed (%ds old), starting timer" % age)
            hotline = TimerService(1.0, self._check_hotline, hotline_file)
            hotline.setServiceParent(self)

        # this needs to happen last, so it can use getServiceNamed() to
        # acquire references to StorageServer and other web-statusable things
        webport = self.get_config("node", "web.port", None)
        if webport:
            self.init_web(webport) # strports string
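
    # The "suicide prevention hotline" above is a watchdog used by some test
    # harnesses: as long as something keeps touching
    # BASEDIR/suicide_prevention_hotline, the node stays up; once the file
    # goes stale (see _check_hotline below, which allows roughly 120 seconds),
    # the node shuts itself down. A hedged sketch of how a harness might keep
    # a node alive:
    #
    #     while still_testing():
    #         os.utime(hotline_file, None)   # refresh the mtime
    #         time.sleep(30)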

    def init_introducer_client(self):
        self.introducer_furl = self.get_config("client", "introducer.furl")
        ic = IntroducerClient(self.tub, self.introducer_furl,
                              self.nickname,
                              str(allmydata.__full_version__),
                              str(self.OLDEST_SUPPORTED_VERSION),
                              self.get_app_versions())
        self.introducer_client = ic
        # hold off on starting the IntroducerClient until our tub has been
        # started, so we'll have a useful address on our RemoteReference, so
        # that the introducer's status page will show us.
        d = self.when_tub_ready()
        def _start_introducer_client(res):
            ic.setServiceParent(self)
        d.addCallback(_start_introducer_client)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="URyI5w")
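
    # introducer.furl is read without a default above, so it must be present
    # in tahoe.cfg. It is a foolscap FURL pointing at the grid's introducer; a
    # hedged example of the general shape (tub-id, host, port, and swissnum
    # are illustrative):
    #
    #     [client]
    #     introducer.furl = pb://tubid@example.net:44801/introducer-swissnum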

    def init_stats_provider(self):
        gatherer_furl = self.get_config("client", "stats_gatherer.furl", None)
        self.stats_provider = StatsProvider(self, gatherer_furl)
        self.add_service(self.stats_provider)
        self.stats_provider.register_producer(self)

    def get_stats(self):
        return { 'node.uptime': time.time() - self.started_timestamp }

    def init_secrets(self):
        lease_s = self.get_or_create_private_config("secret", _make_secret)
        lease_secret = base32.a2b(lease_s)
        convergence_s = self.get_or_create_private_config('convergence',
                                                          _make_secret)
        self.convergence = base32.a2b(convergence_s)
        self._secret_holder = SecretHolder(lease_secret, self.convergence)

    def _maybe_create_node_key(self):
        # we only create the key once. On all subsequent runs, we re-use the
        # existing key
        def _make_key():
            sk_vs,vk_vs = keyutil.make_keypair()
            return sk_vs+"\n"
        # for a while (between releases, before 1.10) this was known as
        # server.privkey, but now it lives in node.privkey. This fallback can
        # be removed after 1.10 is released.
        sk_vs = self.get_private_config("server.privkey", None)
        if not sk_vs:
            sk_vs = self.get_or_create_private_config("node.privkey", _make_key)
        sk,vk_vs = keyutil.parse_privkey(sk_vs.strip())
        self.write_config("node.pubkey", vk_vs+"\n")
        self._server_key = sk
        self.node_key_s = vk_vs

    def _init_permutation_seed(self, ss):
        seed = self.get_config_from_file("permutation-seed")
        if not seed:
            have_shares = ss.have_shares()
            if have_shares:
                # if the server has shares but not a recorded
                # permutation-seed, then it has been around since pre-#466
                # days, and the clients who uploaded those shares used our
                # TubID as a permutation-seed. We should keep using that same
                # seed to keep the shares in the same place in the permuted
                # ring, so those clients don't have to perform excessive
                # searches.
                seed = base32.b2a(self.nodeid)
            else:
                # otherwise, we're free to use the more natural seed of our
                # pubkey-based serverid
                vk_bytes = self._server_key.get_verifying_key_bytes()
                seed = base32.b2a(vk_bytes)
            self.write_config("permutation-seed", seed+"\n")
        return seed.strip()

    def init_storage(self):
        # should we run a storage server (and publish it for others to use)?
        if not self.get_config("storage", "enabled", True, boolean=True):
            return
        readonly = self.get_config("storage", "readonly", False, boolean=True)

        self._maybe_create_node_key()

        storedir = os.path.join(self.basedir, self.STOREDIR)

        data = self.get_config("storage", "reserved_space", None)
        reserved = None
        try:
            reserved = parse_abbreviated_size(data)
        except ValueError:
            log.msg("[storage]reserved_space= contains unparseable value %s"
                    % data)
        if reserved is None:
            reserved = 0
        discard = self.get_config("storage", "debug_discard", False,
                                  boolean=True)

        expire = self.get_config("storage", "expire.enabled", False, boolean=True)
        if expire:
            mode = self.get_config("storage", "expire.mode") # require a mode
        else:
            mode = self.get_config("storage", "expire.mode", "age")

        o_l_d = self.get_config("storage", "expire.override_lease_duration", None)
        if o_l_d is not None:
            o_l_d = parse_duration(o_l_d)

        cutoff_date = None
        if mode == "cutoff-date":
            cutoff_date = self.get_config("storage", "expire.cutoff_date")
            cutoff_date = parse_date(cutoff_date)

        sharetypes = []
        if self.get_config("storage", "expire.immutable", True, boolean=True):
            sharetypes.append("immutable")
        if self.get_config("storage", "expire.mutable", True, boolean=True):
            sharetypes.append("mutable")
        expiration_sharetypes = tuple(sharetypes)

        ss = StorageServer(storedir, self.nodeid,
                           reserved_space=reserved,
                           discard_storage=discard,
                           readonly_storage=readonly,
                           stats_provider=self.stats_provider,
                           expiration_enabled=expire,
                           expiration_mode=mode,
                           expiration_override_lease_duration=o_l_d,
                           expiration_cutoff_date=cutoff_date,
                           expiration_sharetypes=expiration_sharetypes)
        self.add_service(ss)

        d = self.when_tub_ready()
        # we can't do registerReference until the Tub is ready
        def _publish(res):
            furl_file = os.path.join(self.basedir, "private", "storage.furl").encode(get_filesystem_encoding())
            furl = self.tub.registerReference(ss, furlFile=furl_file)
            ann = {"anonymous-storage-FURL": furl,
                   "permutation-seed-base32": self._init_permutation_seed(ss),
                   }
            self.introducer_client.publish("storage", ann, self._server_key)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="aLGBKw")
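
    # A hedged example of the [storage] options read above, as they might
    # appear in tahoe.cfg (all values are illustrative):
    #
    #     [storage]
    #     enabled = true
    #     readonly = false
    #     reserved_space = 10G
    #     expire.enabled = true
    #     expire.mode = age
    #     expire.override_lease_duration = 60 days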

    def init_client(self):
        helper_furl = self.get_config("client", "helper.furl", None)
        DEP = self.DEFAULT_ENCODING_PARAMETERS
        DEP["k"] = int(self.get_config("client", "shares.needed", DEP["k"]))
        DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
        DEP["happy"] = int(self.get_config("client", "shares.happy", DEP["happy"]))

        self.init_client_storage_broker()
        self.history = History(self.stats_provider)
        self.terminator = Terminator()
        self.terminator.setServiceParent(self)
        self.add_service(Uploader(helper_furl, self.stats_provider,
                                  self.history))
        self.init_blacklist()
        self.init_nodemaker()

    def init_client_storage_broker(self):
        # create a StorageFarmBroker object, for use by Uploader/Downloader
        # (and everybody else who wants to use storage servers)
        sb = storage_client.StorageFarmBroker(self.tub, permute_peers=True)
        self.storage_broker = sb

        # load static server specifications from tahoe.cfg, if any.
        # Not quite ready yet.
        #if self.config.has_section("client-server-selection"):
        #    server_params = {} # maps serverid to dict of parameters
        #    for (name, value) in self.config.items("client-server-selection"):
        #        pieces = name.split(".")
        #        if pieces[0] == "server":
        #            serverid = pieces[1]
        #            if serverid not in server_params:
        #                server_params[serverid] = {}
        #            server_params[serverid][pieces[2]] = value
        #    for serverid, params in server_params.items():
        #        server_type = params.pop("type")
        #        if server_type == "tahoe-foolscap":
        #            s = storage_client.NativeStorageClient(*params)
        #        else:
        #            msg = ("unrecognized server type '%s' in "
        #                   "tahoe.cfg [client-server-selection]server.%s.type"
        #                   % (server_type, serverid))
        #            raise storage_client.UnknownServerTypeError(msg)
        #        sb.add_server(s.serverid, s)

        # check to see if we're supposed to use the introducer too
        if self.get_config("client-server-selection", "use_introducer",
                           default=True, boolean=True):
            sb.use_introducer(self.introducer_client)

    def get_storage_broker(self):
        return self.storage_broker

    def init_blacklist(self):
        fn = os.path.join(self.basedir, "access.blacklist")
        self.blacklist = Blacklist(fn)

    def init_nodemaker(self):
        default = self.get_config("client", "mutable.format", default="SDMF")
        if default.upper() == "MDMF":
            self.mutable_file_default = MDMF_VERSION
        else:
            self.mutable_file_default = SDMF_VERSION
        self.nodemaker = NodeMaker(self.storage_broker,
                                   self._secret_holder,
                                   self.get_history(),
                                   self.getServiceNamed("uploader"),
                                   self.terminator,
                                   self.get_encoding_parameters(),
                                   self.mutable_file_default,
                                   self._key_generator,
                                   self.blacklist)

    def get_history(self):
        return self.history

    def init_control(self):
        d = self.when_tub_ready()
        def _publish(res):
            c = ControlServer()
            c.setServiceParent(self)
            control_url = self.tub.registerReference(c)
            self.write_private_config("control.furl", control_url + "\n")
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="d3tNXA")

    def init_helper(self):
        d = self.when_tub_ready()
        def _publish(res):
            self.helper = Helper(os.path.join(self.basedir, "helper"),
                                 self.storage_broker, self._secret_holder,
                                 self.stats_provider, self.history)
            # TODO: this is confusing. BASEDIR/private/helper.furl is created
            # by the helper. BASEDIR/helper.furl is consumed by the client
            # who wants to use the helper. I like having the filename be the
            # same, since that makes 'cp' work smoothly, but the difference
            # between config inputs and generated outputs is hard to see.
            helper_furlfile = os.path.join(self.basedir,
                                           "private", "helper.furl").encode(get_filesystem_encoding())
            self.tub.registerReference(self.helper, furlFile=helper_furlfile)
        d.addCallback(_publish)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="K0mW5w")

    def init_key_gen(self, key_gen_furl):
        d = self.when_tub_ready()
        def _subscribe(self):
            self.tub.connectTo(key_gen_furl, self._got_key_generator)
        d.addCallback(_subscribe)
        d.addErrback(log.err, facility="tahoe.init",
                     level=log.BAD, umid="z9DMzw")

    def _got_key_generator(self, key_generator):
        self._key_generator.set_remote_generator(key_generator)
        key_generator.notifyOnDisconnect(self._lost_key_generator)

    def _lost_key_generator(self):
        self._key_generator.set_remote_generator(None)

    def set_default_mutable_keysize(self, keysize):
        self._key_generator.set_default_keysize(keysize)

    def init_web(self, webport):
        self.log("init_web(webport=%s)", args=(webport,))

        from allmydata.webish import WebishServer
        nodeurl_path = os.path.join(self.basedir, "node.url")
        staticdir = self.get_config("node", "web.static", "public_html")
        staticdir = os.path.expanduser(staticdir)
        ws = WebishServer(self, webport, nodeurl_path, staticdir)
        self.add_service(ws)
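
    # web.port is a Twisted strports description (as the comment in __init__
    # notes), so it can name either a bare port or a full endpoint. A hedged
    # example with illustrative values:
    #
    #     [node]
    #     web.port = tcp:3456:interface=127.0.0.1
    #     web.static = public_html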

    def init_ftp_server(self):
        if self.get_config("ftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("ftpd", "accounts.file", None)
            accounturl = self.get_config("ftpd", "accounts.url", None)
            ftp_portstr = self.get_config("ftpd", "port", "8021")

            from allmydata.frontends import ftpd
            s = ftpd.FTPServer(self, accountfile, accounturl, ftp_portstr)
            s.setServiceParent(self)

    def init_sftp_server(self):
        if self.get_config("sftpd", "enabled", False, boolean=True):
            accountfile = self.get_config("sftpd", "accounts.file", None)
            accounturl = self.get_config("sftpd", "accounts.url", None)
            sftp_portstr = self.get_config("sftpd", "port", "8022")
            pubkey_file = self.get_config("sftpd", "host_pubkey_file")
            privkey_file = self.get_config("sftpd", "host_privkey_file")

            from allmydata.frontends import sftpd
            s = sftpd.SFTPServer(self, accountfile, accounturl,
                                 sftp_portstr, pubkey_file, privkey_file)
            s.setServiceParent(self)
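
    # A hedged example [sftpd] block matching the options read above (paths
    # and port are illustrative); the [ftpd] section handled by
    # init_ftp_server() follows the same pattern minus the host key files:
    #
    #     [sftpd]
    #     enabled = true
    #     port = tcp:8022
    #     host_pubkey_file = private/ssh_host_rsa_key.pub
    #     host_privkey_file = private/ssh_host_rsa_key
    #     accounts.file = private/accounts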

    def init_drop_uploader(self):
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            if self.get_config("drop_upload", "upload.dircap", None):
                raise OldConfigOptionError("The [drop_upload]upload.dircap option is no longer supported; please "
                                           "put the cap in a 'private/drop_upload_dircap' file, and delete this option.")

            upload_dircap = self.get_or_create_private_config("drop_upload_dircap")
            local_dir_utf8 = self.get_config("drop_upload", "local.directory")

            try:
                from allmydata.frontends import drop_upload
                s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
                s.setServiceParent(self)
                s.startService()
            except Exception, e:
                self.log("couldn't start drop-uploader: %r", args=(e,))
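
    # A hedged sketch of the drop-upload configuration read above (the local
    # directory is illustrative); the destination dircap lives in
    # BASEDIR/private/drop_upload_dircap rather than in tahoe.cfg:
    #
    #     [drop_upload]
    #     enabled = true
    #     local.directory = ~/drop-uploads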

    def _check_hotline(self, hotline_file):
        if os.path.exists(hotline_file):
            mtime = os.stat(hotline_file)[stat.ST_MTIME]
            if mtime > time.time() - 120.0:
                return
            else:
                self.log("hotline file too old, shutting down")
        else:
            self.log("hotline file missing, shutting down")
        reactor.stop()

    def get_encoding_parameters(self):
        return self.DEFAULT_ENCODING_PARAMETERS

    def connected_to_introducer(self):
        if self.introducer_client:
            return self.introducer_client.connected_to_introducer()
        return False

    def get_renewal_secret(self): # this will go away
        return self._secret_holder.get_renewal_secret()

    def get_cancel_secret(self):
        return self._secret_holder.get_cancel_secret()

    def debug_wait_for_client_connections(self, num_clients):
        """Return a Deferred that fires (with None) when we have connections
        to the given number of peers. Useful for tests that set up a
        temporary test network and need to know when it is safe to proceed
        with an upload or download."""
        def _check():
            return len(self.storage_broker.get_connected_servers()) >= num_clients
        d = self.poll(_check, 0.5)
        d.addCallback(lambda res: None)
        return d
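
    # Hedged test-harness sketch for the helper above (the surrounding
    # harness code is illustrative):
    #
    #     d = client.debug_wait_for_client_connections(num_clients=5)
    #     d.addCallback(lambda ign: begin_upload())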

    # these four methods are the primitives for creating filenodes and
    # dirnodes. The first takes a URI and produces a filenode or (new-style)
    # dirnode. The other three create brand-new filenodes/dirnodes.

    def create_node_from_uri(self, write_uri, read_uri=None, deep_immutable=False, name="<unknown name>"):
        # This returns synchronously.
        # Note that it does *not* validate the write_uri and read_uri; instead we
        # may get an opaque node if there were any problems.
        return self.nodemaker.create_from_cap(write_uri, read_uri, deep_immutable=deep_immutable, name=name)

    def create_dirnode(self, initial_children={}, version=None):
        d = self.nodemaker.create_new_mutable_directory(initial_children, version=version)
        return d

    def create_immutable_dirnode(self, children, convergence=None):
        return self.nodemaker.create_immutable_directory(children, convergence)

    def create_mutable_file(self, contents=None, keysize=None, version=None):
        return self.nodemaker.create_mutable_file(contents, keysize,
                                                  version=version)

    def upload(self, uploadable):
        uploader = self.getServiceNamed("uploader")
        return uploader.upload(uploadable)
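
# A hedged usage sketch of the filenode/dirnode primitives above ('client',
# 'cap_str', and 'uploadable' are illustrative placeholders):
#
#     filenode = client.create_node_from_uri(cap_str)       # synchronous
#     d1 = client.create_dirnode()                          # Deferred -> dirnode
#     d2 = client.create_mutable_file("initial contents")   # Deferred -> filenode
#     d3 = client.upload(uploadable)                        # uploadable: an IUploadable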