from allmydata.interfaces import IDirectoryNode
from allmydata.util import log
from allmydata.util.fileutil import precondition_abspath, get_pathinfo, ConflictError
-from allmydata.util.assertutil import precondition
+from allmydata.util.assertutil import precondition, _assert
from allmydata.util.deferredutil import HookMixin
from allmydata.util.encodingutil import listdir_filepath, to_filepath, \
extend_filepath, unicode_from_filepath, unicode_segments_from, \
self._client = client
self._db = db
- self.is_ready = False
+ upload_dirnode = self._client.create_node_from_uri(upload_dircap)
+ collective_dirnode = self._client.create_node_from_uri(collective_dircap)
- self.uploader = Uploader(client, local_path_u, db, upload_dircap, pending_delay, clock)
- self.downloader = Downloader(client, local_path_u, db, collective_dircap, clock)
+ self.uploader = Uploader(client, local_path_u, db, upload_dirnode, pending_delay, clock)
+ self.downloader = Downloader(client, local_path_u, db, collective_dirnode, upload_dirnode.get_readonly_uri(), clock)
def startService(self):
# TODO: why is this being called more than once?
"""ready is used to signal us to start
processing the upload and download items...
"""
- self.is_ready = True
d = self.uploader.start_scanning()
d2 = self.downloader.start_scanning()
d.addCallback(lambda ign: d2)
self._deque = deque()
self._lazy_tail = defer.succeed(None)
- self._pending = set()
self._stopped = False
self._turn_delay = 0
def _get_filepath(self, relpath_u):
    """Map a magic-folder relative path (unicode, '/'-separated) onto a
    FilePath under our local magic-folder directory.

    relpath_u -- unicode relative path; split on u"/" and appended,
    segment by segment, to self._local_filepath via extend_filepath.
    """
    self._log("_get_filepath(%r)" % (relpath_u,))
    return extend_filepath(self._local_filepath, relpath_u.split(u"/"))
def _get_relpath(self, filepath):
class Uploader(QueueMixin):
- def __init__(self, client, local_path_u, db, upload_dircap, pending_delay, clock):
+ def __init__(self, client, local_path_u, db, upload_dirnode, pending_delay, clock):
QueueMixin.__init__(self, client, local_path_u, db, 'uploader', clock)
self.is_ready = False
- # TODO: allow a path rather than a cap URI.
- self._upload_dirnode = self._client.create_node_from_uri(upload_dircap)
- if not IDirectoryNode.providedBy(self._upload_dirnode):
- raise AssertionError("The URI in 'private/magic_folder_dircap' does not refer to a directory.")
- if self._upload_dirnode.is_unknown() or self._upload_dirnode.is_readonly():
- raise AssertionError("The URI in 'private/magic_folder_dircap' is not a writecap to a directory.")
+ if not IDirectoryNode.providedBy(upload_dirnode):
+ raise AssertionError("The URI in '%s' does not refer to a directory."
+ % os.path.join('private', 'magic_folder_dircap'))
+ if upload_dirnode.is_unknown() or upload_dirnode.is_readonly():
+ raise AssertionError("The URI in '%s' is not a writecap to a directory."
+ % os.path.join('private', 'magic_folder_dircap'))
+ self._upload_dirnode = upload_dirnode
self._inotify = get_inotify_module()
self._notifier = self._inotify.INotify()
+ self._pending = set()
if hasattr(self._notifier, 'set_pending_delay'):
self._notifier.set_pending_delay(pending_delay)
- # TODO: what about IN_MOVE_SELF, IN_MOVED_FROM, or IN_UNMOUNT?
+ # TODO: what about IN_MOVE_SELF and IN_UNMOUNT?
#
self.mask = ( self._inotify.IN_CREATE
| self._inotify.IN_CLOSE_WRITE
d = defer.succeed(None)
for child in children:
- assert isinstance(child, unicode), child
+ _assert(isinstance(child, unicode), child=child)
d.addCallback(lambda ign, child=child:
("%s/%s" % (reldir_u, child) if reldir_u else child))
def _add_pending(relpath_u):
return defer.succeed(None)
def _process(self, relpath_u):
+ # Uploader
self._log("_process(%r)" % (relpath_u,))
if relpath_u is None:
return
precondition(isinstance(relpath_u, unicode), relpath_u)
+ precondition(not relpath_u.endswith(u'/'), relpath_u)
d = defer.succeed(None)
fp = self._get_filepath(relpath_u)
pathinfo = get_pathinfo(unicode_from_filepath(fp))
- self._log("pending = %r, about to remove %r" % (self._pending, relpath_u))
+ self._log("about to remove %r from pending set %r" %
+ (relpath_u, self._pending))
self._pending.remove(relpath_u)
encoded_path_u = magicpath.path2magic(relpath_u)
self.warn("WARNING: cannot upload symlink %s" % quote_filepath(fp))
return None
elif pathinfo.isdir:
- self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)
+ if not getattr(self._notifier, 'recursive_includes_new_subdirectories', False):
+ self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)
+
uploadable = Data("", self._client.convergence)
encoded_path_u += magicpath.path2magic(u"/")
+ self._log("encoded_path_u = %r" % (encoded_path_u,))
upload_d = self._upload_dirnode.add_file(encoded_path_u, uploadable, metadata={"version":0}, overwrite=True)
def _succeeded(ign):
self._log("created subdirectory %r" % (relpath_u,))
return res
def _failed(f):
self._count('objects_failed')
- self._log("%r while processing %r" % (f, relpath_u))
+ self._log("%s while processing %r" % (f, relpath_u))
return f
d.addCallbacks(_succeeded, _failed)
return d
class Downloader(QueueMixin, WriteFileMixin):
REMOTE_SCAN_INTERVAL = 3 # facilitates tests
def __init__(self, client, local_path_u, db, collective_dirnode, upload_readonly_dircap, clock):
    """Validate the collective dirnode and set up downloader state.

    collective_dirnode -- an IDirectoryNode for the (readonly) collective
    directory; rejected with AssertionError if it is not a known,
    readonly directory node.
    upload_readonly_dircap -- the readonly cap of our own upload DMD,
    stored so the remote scan can recognize (and skip) our own entry
    in the collective.
    """
    QueueMixin.__init__(self, client, local_path_u, db, 'downloader', clock)

    if not IDirectoryNode.providedBy(collective_dirnode):
        raise AssertionError("The URI in '%s' does not refer to a directory."
                             % os.path.join('private', 'collective_dircap'))
    if collective_dirnode.is_unknown() or not collective_dirnode.is_readonly():
        raise AssertionError("The URI in '%s' is not a readonly cap to a directory."
                             % os.path.join('private', 'collective_dircap'))

    self._collective_dirnode = collective_dirnode
    self._upload_readonly_dircap = upload_readonly_dircap
    # Poll the remote collective every REMOTE_SCAN_INTERVAL seconds.
    self._turn_delay = self.REMOTE_SCAN_INTERVAL
    self._download_scan_batch = {} # path -> [(filenode, metadata)]
self._log("all files %s" % files)
d = self._scan_remote_collective()
+ d.addBoth(self._logcb, "after _scan_remote_collective 0")
self._turn_deque()
return d
self._log("_scan_remote_collective")
self._download_scan_batch = {} # XXX
- if self._collective_dirnode is None:
- return
- collective_dirmap_d = self._collective_dirnode.list()
- def do_list(result):
- others = [x for x in result.keys()]
- return result, others
- collective_dirmap_d.addCallback(do_list)
- def scan_collective(result):
- d = defer.succeed(None)
- collective_dirmap, others_list = result
- for dir_name in others_list:
- d.addCallback(lambda x, dir_name=dir_name: self._scan_remote(dir_name, collective_dirmap[dir_name][0]))
- # XXX todo add errback
- return d
- collective_dirmap_d.addCallback(scan_collective)
- collective_dirmap_d.addCallback(self._filter_scan_batch)
- collective_dirmap_d.addCallback(self._add_batch_to_download_queue)
- return collective_dirmap_d
+ d = self._collective_dirnode.list()
+ def scan_collective(dirmap):
+ d2 = defer.succeed(None)
+ for dir_name in dirmap:
+ (dirnode, metadata) = dirmap[dir_name]
+ if dirnode.get_readonly_uri() != self._upload_readonly_dircap:
+ d2.addCallback(lambda ign, dir_name=dir_name: self._scan_remote(dir_name, dirnode))
+ def _err(f):
+ self._log("failed to scan DMD for client %r: %s" % (dir_name, f))
+ # XXX what should we do to make this failure more visible to users?
+ d2.addErrback(_err)
+ return d2
+ d.addCallback(scan_collective)
+ d.addCallback(self._filter_scan_batch)
+ d.addCallback(self._add_batch_to_download_queue)
+ return d
def _add_batch_to_download_queue(self, result):
    """Extend the download work deque with a filtered scan batch and
    bump the 'objects_queued' counter by the batch size.

    result -- the iterable produced by _filter_scan_batch; presumably
    (relpath, filenode, metadata) items — confirm against
    _filter_scan_batch / _process.
    """
    self._log("result = %r" % (result,))
    self._deque.extend(result)
    self._log("deque after = %r" % (self._deque,))
    self._count('objects_queued', len(result))
def _filter_scan_batch(self, result):
self._log("_filter_scan_batch")
def _when_queue_is_empty(self):
    """Called when the work deque drains: wait _turn_delay seconds on the
    reactor clock, rescan the remote collective, then re-enter the turn
    loop to process whatever the scan queued.
    """
    d = task.deferLater(self._clock, self._turn_delay, self._scan_remote_collective)
    d.addBoth(self._logcb, "after _scan_remote_collective 1")
    d.addCallback(lambda ign: self._turn_deque())
    return d
def _process(self, item, now=None):
+ # Downloader
self._log("_process(%r)" % (item,))
if now is None:
now = time.time()
fp = self._get_filepath(relpath_u)
abspath_u = unicode_from_filepath(fp)
conflict_path_u = self._get_conflicted_filename(abspath_u)
+
d = defer.succeed(None)
def do_update_db(written_abspath_u):
d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
d.addCallback(lambda ign: abspath_u)
else:
- d.addCallback(lambda ign: file_node.download_best_version())
if metadata.get('deleted', False):
d.addCallback(lambda ign: self._rename_deleted_file(abspath_u))
else:
+ d.addCallback(lambda ign: file_node.download_best_version())
d.addCallback(lambda contents: self._write_downloaded_file(abspath_u, contents,
is_conflict=is_conflict))
d.addCallbacks(do_update_db, failed)
- def remove_from_pending(res):
- self._pending.remove(relpath_u)
- return res
- d.addBoth(remove_from_pending)
def trap_conflicts(f):
f.trap(ConflictError)
return None