Add simple auth-token to get JSON data
diff --git a/src/allmydata/client.py b/src/allmydata/client.py
index 41840e85c6d613de7acbeb4c7328eee46c6a9d75..12ebb148aa2672f0f9d988991110956b69a24969 100644
--- a/src/allmydata/client.py
+++ b/src/allmydata/client.py
@@ -1,5 +1,6 @@
 import os, stat, time, weakref
 from allmydata import node
+from base64 import urlsafe_b64encode
 
 from zope.interface import implements
 from twisted.internet import reactor, defer
@@ -129,7 +130,9 @@ class Client(node.Node, pollmixin.PollMixin):
                                    }
 
     def __init__(self, basedir="."):
         node.Node.__init__(self, basedir)
+        self.connected_enough_d = defer.Deferred()
         self.started_timestamp = time.time()
         self.logSource="Client"
         self.encoding_params = self.DEFAULT_ENCODING_PARAMETERS.copy()
@@ -150,7 +153,7 @@ class Client(node.Node, pollmixin.PollMixin):
         # ControlServer and Helper are attached after Tub startup
         self.init_ftp_server()
         self.init_sftp_server()
-        self.init_drop_uploader()
+        self.init_magic_folder()
 
         # If the node sees an exit_trigger file, it will poll every second to see
         # whether the file still exists, and what its mtime is. If the file does not
@@ -332,6 +335,9 @@ class Client(node.Node, pollmixin.PollMixin):
         DEP["n"] = int(self.get_config("client", "shares.total", DEP["n"]))
         DEP["happy"] = int(self.get_config("client", "shares.happy", DEP["happy"]))
 
+        # for the CLI to authenticate to local JSON endpoints
+        self._auth_token = self._create_or_read_auth_token()
+
         self.init_client_storage_broker()
         self.history = History(self.stats_provider)
         self.terminator = Terminator()
@@ -341,12 +347,44 @@ class Client(node.Node, pollmixin.PollMixin):
         self.init_blacklist()
         self.init_nodemaker()
 
+    def get_auth_token(self):
+        """
+        Return the local authentication token: random data stored in
+        'private/api_auth_token' which callers must echo back with
+        their API calls.
+
+        Currently only the '/magic' URI (magic-folder status) requires
+        the token; other endpoints are invited to adopt it as well.
+        """
+        return self._auth_token
+
+    def _create_or_read_auth_token(self):
+        """
+        This returns the current auth-token data, possibly creating it and
+        writing 'private/api_auth_token' in the process.
+        """
+        fname = os.path.join(self.basedir, 'private', 'api_auth_token')
+        try:
+            with open(fname, 'rb') as f:
+                data = f.read()
+        except (OSError, IOError):
+            log.msg("Creating '%s'." % (fname,))
+            with open(fname, 'wb') as f:
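+                # 32 random bytes -> a 44-character URL-safe base64 string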
+                data = urlsafe_b64encode(os.urandom(32))
+                f.write(data)
+        return data
+
     def init_client_storage_broker(self):
         # create a StorageFarmBroker object, for use by Uploader/Downloader
         # (and everybody else who wants to use storage servers)
         ps = self.get_config("client", "peers.preferred", "").split(",")
         preferred_peers = tuple([p.strip() for p in ps if p != ""])
-        sb = storage_client.StorageFarmBroker(self.tub, permute_peers=True, preferred_peers=preferred_peers)
+
+        connection_threshold = min(self.encoding_params["k"],
+                                   self.encoding_params["happy"] + 1)
+
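+        # With the default encoding parameters (k=3, happy=7), the
+        # threshold above is min(3, 7+1) == 3. In the call below, the
+        # second positional argument (True) is permute_peers, which the
+        # old code passed by keyword.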
+        sb = storage_client.StorageFarmBroker(self.tub, True, connection_threshold,
+                                              self.connected_enough_d, preferred_peers=preferred_peers)
         self.storage_broker = sb
 
         # load static server specifications from tahoe.cfg, if any.
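
To make the token's intended use concrete, here is a minimal sketch of a
local client for the magic-folder status endpoint. Only the token's
location ('private/api_auth_token') and the '/magic' URI come from this
patch; the default web port (3456) and the 'token' POST parameter are
illustrative assumptions.

    import os
    import urllib
    import urllib2

    def read_api_auth_token(basedir):
        # read the token written by _create_or_read_auth_token() above
        with open(os.path.join(basedir, 'private', 'api_auth_token'), 'rb') as f:
            return f.read().strip()

    def get_magic_folder_status(basedir, baseurl="http://127.0.0.1:3456"):
        # hypothetical call shape: echo the token back as a POST parameter
        body = urllib.urlencode({"token": read_api_auth_token(basedir)})
        return urllib2.urlopen(baseurl + "/magic", body).read()
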
@@ -488,22 +526,32 @@ class Client(node.Node, pollmixin.PollMixin):
                                  sftp_portstr, pubkey_file, privkey_file)
             s.setServiceParent(self)
 
-    def init_drop_uploader(self):
+    def init_magic_folder(self):
+        #print "init_magic_folder"
         if self.get_config("drop_upload", "enabled", False, boolean=True):
-            if self.get_config("drop_upload", "upload.dircap", None):
-                raise OldConfigOptionError("The [drop_upload]upload.dircap option is no longer supported; please "
-                                           "put the cap in a 'private/drop_upload_dircap' file, and delete this option.")
-
-            upload_dircap = self.get_or_create_private_config("drop_upload_dircap")
-            local_dir_utf8 = self.get_config("drop_upload", "local.directory")
-
-            try:
-                from allmydata.frontends import drop_upload
-                s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
-                s.setServiceParent(self)
-                s.startService()
-            except Exception, e:
-                self.log("couldn't start drop-uploader: %r", args=(e,))
+            raise OldConfigOptionError("The [drop_upload] section must be renamed to [magic_folder].\n"
+                                       "See docs/frontends/magic-folder.rst for more information.")
+
+        if self.get_config("magic_folder", "enabled", False, boolean=True):
+            upload_dircap = self.get_private_config("magic_folder_dircap")
+            collective_dircap = self.get_private_config("collective_dircap")
+
+            local_dir_config = self.get_config("magic_folder", "local.directory").decode("utf-8")
+            local_dir = abspath_expanduser_unicode(local_dir_config, base=self.basedir)
+
+            dbfile = os.path.join(self.basedir, "private", "magicfolderdb.sqlite")
+            dbfile = abspath_expanduser_unicode(dbfile)
+
+            from allmydata.frontends import magic_folder
+            # get_config() returns a string when the option is set in
+            # tahoe.cfg, so parse it as octal; the default is 077
+            umask = int(self.get_config("magic_folder", "download.umask", "077"), 8)
+            s = magic_folder.MagicFolder(self, upload_dircap, collective_dircap, local_dir, dbfile, umask)
+            self._magic_folder = s
+            s.setServiceParent(self)
+            s.startService()
+
+            # start processing the upload queue when we've connected to enough servers
+            self.connected_enough_d.addCallback(lambda ign: s.ready())
 
     def _check_exit_trigger(self, exit_trigger_file):
         if os.path.exists(exit_trigger_file):
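
The last two lines of init_magic_folder are a small Deferred gate: the
MagicFolder service starts immediately, but its upload queue only begins
draining once the storage broker fires connected_enough_d. A
stripped-down sketch of the pattern (QueueService is an illustrative
stand-in for MagicFolder, not part of this patch):

    from twisted.internet import defer

    class QueueService(object):
        # started eagerly, activated lazily
        def __init__(self):
            self.processing = False

        def ready(self):
            # begin draining the upload queue
            self.processing = True

    connected_enough_d = defer.Deferred()
    s = QueueService()
    connected_enough_d.addCallback(lambda ign: s.ready())

    # ...later, once enough storage servers have connected:
    connected_enough_d.callback(None)
    assert s.processing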