Delete redundant is_ready attribute from MagicFolder.
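MagicFolder itself no longer tracks readiness; the Uploader's own is_ready flag
(still set in Uploader.__init__ below) is the only one left. A minimal sketch of
how a caller might drive the service after this change, assuming mf names an
already-constructed MagicFolder, that the method carrying the "ready is used to
signal us..." docstring below is MagicFolder.ready(), and that it returns the
Deferred it builds:

    mf.startService()
    d = mf.ready()    # kicks off uploader and downloader scanning
    # the readiness flag now lives only on the Uploader
    d.addCallback(lambda ign: mf.uploader.is_ready)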
diff --git a/src/allmydata/frontends/magic_folder.py b/src/allmydata/frontends/magic_folder.py
index 82780a02f658d462157b2ce550340ec5f9b9b239..353839aaa84e67dd76bf8642bf42c99c8d734923 100644
--- a/src/allmydata/frontends/magic_folder.py
+++ b/src/allmydata/frontends/magic_folder.py
@@ -1,7 +1,6 @@
 
 import sys, os
 import os.path
-import shutil
 from collections import deque
 import time
 
@@ -14,7 +13,7 @@ from allmydata.util import fileutil
 from allmydata.interfaces import IDirectoryNode
 from allmydata.util import log
 from allmydata.util.fileutil import precondition_abspath, get_pathinfo, ConflictError
-from allmydata.util.assertutil import precondition
+from allmydata.util.assertutil import precondition, _assert
 from allmydata.util.deferredutil import HookMixin
 from allmydata.util.encodingutil import listdir_filepath, to_filepath, \
      extend_filepath, unicode_from_filepath, unicode_segments_from, \
@@ -60,10 +59,11 @@ class MagicFolder(service.MultiService):
         self._client = client
         self._db = db
 
-        self.is_ready = False
+        upload_dirnode = self._client.create_node_from_uri(upload_dircap)
+        collective_dirnode = self._client.create_node_from_uri(collective_dircap)
 
-        self.uploader = Uploader(client, local_path_u, db, upload_dircap, pending_delay, clock)
-        self.downloader = Downloader(client, local_path_u, db, collective_dircap, clock)
+        self.uploader = Uploader(client, local_path_u, db, upload_dirnode, pending_delay, clock)
+        self.downloader = Downloader(client, local_path_u, db, collective_dirnode, upload_dirnode.get_readonly_uri(), clock)
 
     def startService(self):
         # TODO: why is this being called more than once?
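The two dirnodes are now created once here in MagicFolder.__init__ and handed to
both queues; the Downloader additionally receives the read-only URI of the upload
DMD so that it can recognize its own directory during collective scans (see the
_scan_remote_collective hunk further down).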
@@ -77,7 +77,6 @@ class MagicFolder(service.MultiService):
         """ready is used to signal us to start
         processing the upload and download items...
         """
-        self.is_ready = True
         d = self.uploader.start_scanning()
         d2 = self.downloader.start_scanning()
         d.addCallback(lambda ign: d2)
@@ -121,6 +120,7 @@ class QueueMixin(HookMixin):
         self._turn_delay = 0
 
     def _get_filepath(self, relpath_u):
+        self._log("_get_filepath(%r)" % (relpath_u,))
         return extend_filepath(self._local_filepath, relpath_u.split(u"/"))
 
     def _get_relpath(self, filepath):
@@ -174,18 +174,19 @@ class QueueMixin(HookMixin):
 
 
 class Uploader(QueueMixin):
-    def __init__(self, client, local_path_u, db, upload_dircap, pending_delay, clock):
+    def __init__(self, client, local_path_u, db, upload_dirnode, pending_delay, clock):
         QueueMixin.__init__(self, client, local_path_u, db, 'uploader', clock)
 
         self.is_ready = False
 
-        # TODO: allow a path rather than a cap URI.
-        self._upload_dirnode = self._client.create_node_from_uri(upload_dircap)
-        if not IDirectoryNode.providedBy(self._upload_dirnode):
-            raise AssertionError("The URI in 'private/magic_folder_dircap' does not refer to a directory.")
-        if self._upload_dirnode.is_unknown() or self._upload_dirnode.is_readonly():
-            raise AssertionError("The URI in 'private/magic_folder_dircap' is not a writecap to a directory.")
+        if not IDirectoryNode.providedBy(upload_dirnode):
+            raise AssertionError("The URI in '%s' does not refer to a directory."
+                                 % os.path.join('private', 'magic_folder_dircap'))
+        if upload_dirnode.is_unknown() or upload_dirnode.is_readonly():
+            raise AssertionError("The URI in '%s' is not a writecap to a directory."
+                                 % os.path.join('private', 'magic_folder_dircap'))
 
+        self._upload_dirnode = upload_dirnode
         self._inotify = get_inotify_module()
         self._notifier = self._inotify.INotify()
 
@@ -253,7 +254,7 @@ class Uploader(QueueMixin):
 
         d = defer.succeed(None)
         for child in children:
-            assert isinstance(child, unicode), child
+            _assert(isinstance(child, unicode), child=child)
             d.addCallback(lambda ign, child=child:
                           ("%s/%s" % (reldir_u, child) if reldir_u else child))
             def _add_pending(relpath_u):
@@ -296,6 +297,7 @@ class Uploader(QueueMixin):
         if relpath_u is None:
             return
         precondition(isinstance(relpath_u, unicode), relpath_u)
+        precondition(not relpath_u.endswith(u'/'), relpath_u)
 
         d = defer.succeed(None)
 
@@ -350,9 +352,12 @@ class Uploader(QueueMixin):
                 self.warn("WARNING: cannot upload symlink %s" % quote_filepath(fp))
                 return None
             elif pathinfo.isdir:
-                self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)
+                if not getattr(self._notifier, 'recursive_includes_new_subdirectories', False):
+                    self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)
+
                 uploadable = Data("", self._client.convergence)
                 encoded_path_u += magicpath.path2magic(u"/")
+                self._log("encoded_path_u = %r" % (encoded_path_u,))
                 upload_d = self._upload_dirnode.add_file(encoded_path_u, uploadable, metadata={"version":0}, overwrite=True)
                 def _succeeded(ign):
                     self._log("created subdirectory %r" % (relpath_u,))
@@ -405,7 +410,7 @@ class Uploader(QueueMixin):
             return res
         def _failed(f):
             self._count('objects_failed')
-            self._log("%r while processing %r" % (f, relpath_u))
+            self._log("%s while processing %r" % (f, relpath_u))
             return f
         d.addCallbacks(_succeeded, _failed)
         return d
@@ -481,20 +486,31 @@ class WriteFileMixin(object):
         fileutil.rename_no_overwrite(replacement_path_u, conflict_path_u)
         return conflict_path_u
 
+    def _rename_deleted_file(self, abspath_u):
+        self._log('renaming deleted file to backup: %s' % (abspath_u,))
+        try:
+            fileutil.rename_no_overwrite(abspath_u, abspath_u + u'.backup')
+        except IOError:
+            # XXX is this the correct error?
+            self._log("Already gone: '%s'" % (abspath_u,))
+        return abspath_u
+
 
 class Downloader(QueueMixin, WriteFileMixin):
     REMOTE_SCAN_INTERVAL = 3  # facilitates tests
 
-    def __init__(self, client, local_path_u, db, collective_dircap, clock):
+    def __init__(self, client, local_path_u, db, collective_dirnode, upload_readonly_dircap, clock):
         QueueMixin.__init__(self, client, local_path_u, db, 'downloader', clock)
 
-        # TODO: allow a path rather than a cap URI.
-        self._collective_dirnode = self._client.create_node_from_uri(collective_dircap)
+        if not IDirectoryNode.providedBy(collective_dirnode):
+            raise AssertionError("The URI in '%s' does not refer to a directory."
+                                 % os.path.join('private', 'collective_dircap'))
+        if collective_dirnode.is_unknown() or not collective_dirnode.is_readonly():
+            raise AssertionError("The URI in '%s' is not a readonly cap to a directory."
+                                 % os.path.join('private', 'collective_dircap'))
 
-        if not IDirectoryNode.providedBy(self._collective_dirnode):
-            raise AssertionError("The URI in 'private/collective_dircap' does not refer to a directory.")
-        if self._collective_dirnode.is_unknown() or not self._collective_dirnode.is_readonly():
-            raise AssertionError("The URI in 'private/collective_dircap' is not a readonly cap to a directory.")
+        self._collective_dirnode = collective_dirnode
+        self._upload_readonly_dircap = upload_readonly_dircap
 
         self._turn_delay = self.REMOTE_SCAN_INTERVAL
         self._download_scan_batch = {} # path -> [(filenode, metadata)]
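The validation moved into Downloader.__init__ here mirrors, but inverts, the
Uploader's: the collective dircap must be a read-only directory cap, while the
upload DMD checked in Uploader.__init__ must be a writecap.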
@@ -505,6 +521,7 @@ class Downloader(QueueMixin, WriteFileMixin):
         self._log("all files %s" % files)
 
         d = self._scan_remote_collective()
+        d.addBoth(self._logcb, "after _scan_remote_collective 0")
         self._turn_deque()
         return d
 
@@ -599,24 +616,22 @@ class Downloader(QueueMixin, WriteFileMixin):
         self._log("_scan_remote_collective")
         self._download_scan_batch = {} # XXX
 
-        if self._collective_dirnode is None:
-            return
-        collective_dirmap_d = self._collective_dirnode.list()
-        def do_list(result):
-            others = [x for x in result.keys()]
-            return result, others
-        collective_dirmap_d.addCallback(do_list)
-        def scan_collective(result):
-            d = defer.succeed(None)
-            collective_dirmap, others_list = result
-            for dir_name in others_list:
-                d.addCallback(lambda x, dir_name=dir_name: self._scan_remote(dir_name, collective_dirmap[dir_name][0]))
-                # XXX todo add errback
-            return d
-        collective_dirmap_d.addCallback(scan_collective)
-        collective_dirmap_d.addCallback(self._filter_scan_batch)
-        collective_dirmap_d.addCallback(self._add_batch_to_download_queue)
-        return collective_dirmap_d
+        d = self._collective_dirnode.list()
+        def scan_collective(dirmap):
+            d2 = defer.succeed(None)
+            for dir_name in dirmap:
+                (dirnode, metadata) = dirmap[dir_name]
+                if dirnode.get_readonly_uri() != self._upload_readonly_dircap:
+                    d2.addCallback(lambda ign, dir_name=dir_name, dirnode=dirnode: self._scan_remote(dir_name, dirnode))
+                    def _err(f, dir_name=dir_name):
+                        self._log("failed to scan DMD for client %r: %s" % (dir_name, f))
+                        # XXX what should we do to make this failure more visible to users?
+                    d2.addErrback(_err)
+            return d2
+        d.addCallback(scan_collective)
+        d.addCallback(self._filter_scan_batch)
+        d.addCallback(self._add_batch_to_download_queue)
+        return d
 
     def _add_batch_to_download_queue(self, result):
         self._log("result = %r" % (result,))
@@ -645,7 +660,7 @@ class Downloader(QueueMixin, WriteFileMixin):
 
     def _when_queue_is_empty(self):
         d = task.deferLater(self._clock, self._turn_delay, self._scan_remote_collective)
-        d.addBoth(self._logcb, "after _scan_remote_collective")
+        d.addBoth(self._logcb, "after _scan_remote_collective 1")
         d.addCallback(lambda ign: self._turn_deque())
         return d
 
@@ -697,14 +712,17 @@ class Downloader(QueueMixin, WriteFileMixin):
                 #local_last_uploaded_uri = ...
 
             if relpath_u.endswith(u"/"):
-                self._log("mkdir(%r)" % (abspath_u,))
-                d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
-                d.addCallback(lambda ign: abspath_u)
+                if metadata.get('deleted', False):
+                    self._log("rmdir(%r) ignored" % (abspath_u,))
+                else:
+                    self._log("mkdir(%r)" % (abspath_u,))
+                    d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
+                    d.addCallback(lambda ign: abspath_u)
             else:
-                d.addCallback(lambda ign: file_node.download_best_version())
                 if metadata.get('deleted', False):
-                    d.addCallback(lambda result: self._unlink_deleted_file(abspath_u, result))
+                    d.addCallback(lambda ign: self._rename_deleted_file(abspath_u))
                 else:
+                    d.addCallback(lambda ign: file_node.download_best_version())
                     d.addCallback(lambda contents: self._write_downloaded_file(abspath_u, contents,
                                                                                is_conflict=is_conflict))
 
@@ -719,11 +737,3 @@ class Downloader(QueueMixin, WriteFileMixin):
             return None
         d.addErrback(trap_conflicts)
         return d
-
-    def _unlink_deleted_file(self, abspath_u, result):
-        try:
-            self._log('unlinking: %s' % (abspath_u,))
-            shutil.move(abspath_u, abspath_u + '.backup')
-        except IOError:
-            self._log("Already gone: '%s'" % (abspath_u,))
-        return abspath_u
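Taken together, the last two hunks change what a remote deletion does locally:
nothing is downloaded any more (the old _unlink_deleted_file helper, which
fetched the file before moving it aside, is gone), a deleted file entry simply
has its local copy renamed by _rename_deleted_file (for example foo/bar.txt
becomes foo/bar.txt.backup), and a deleted directory entry is logged and left in
place rather than removed.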