from twisted.python import runtime
from twisted.application import service
+from zope.interface import Interface, Attribute, implementer
+
from allmydata.util import fileutil
from allmydata.interfaces import IDirectoryNode
from allmydata.util import log
-from allmydata.util.fileutil import precondition_abspath, get_pathinfo
-from allmydata.util.assertutil import precondition
+from allmydata.util.fileutil import precondition_abspath, get_pathinfo, ConflictError
+from allmydata.util.assertutil import precondition, _assert
from allmydata.util.deferredutil import HookMixin
+from allmydata.util.progress import PercentProgress
from allmydata.util.encodingutil import listdir_filepath, to_filepath, \
extend_filepath, unicode_from_filepath, unicode_segments_from, \
quote_filepath, quote_local_unicode_path, quote_output, FilenameEncodingError
from allmydata.immutable.upload import FileName, Data
from allmydata import magicfolderdb, magicpath
-
+defer.setDebugging(True)
IN_EXCL_UNLINK = 0x04000000L
def get_inotify_module():
raise
def is_new_file(pathinfo, db_entry):
    """
    Decide whether the on-disk file described by ``pathinfo`` should be
    treated as new (or changed) relative to the magic-folder db record
    ``db_entry``.

    A missing db entry always counts as new.  A file that is absent on
    disk while the db also records no size counts as unchanged (both
    sides agree the file is gone).
    """
    if db_entry is None:
        return True

    if not pathinfo.exists and db_entry.size is None:
        return False

    on_disk = (pathinfo.size, pathinfo.ctime, pathinfo.mtime)
    recorded = (db_entry.size, db_entry.ctime, db_entry.mtime)
    return on_disk != recorded
+
class MagicFolder(service.MultiService):
name = 'magic-folder'
- def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile,
- pending_delay=1.0, clock=reactor):
+ def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile, umask,
+ pending_delay=1.0, clock=None):
precondition_abspath(local_path_u)
service.MultiService.__init__(self)
+ immediate = clock is not None
+ clock = clock or reactor
db = magicfolderdb.get_magicfolderdb(dbfile, create_version=(magicfolderdb.SCHEMA_v1, 1))
if db is None:
return Failure(Exception('ERROR: Unable to load magic folder db.'))
self._client = client
self._db = db
- self.is_ready = False
+ upload_dirnode = self._client.create_node_from_uri(upload_dircap)
+ collective_dirnode = self._client.create_node_from_uri(collective_dircap)
- self.uploader = Uploader(client, local_path_u, db, upload_dircap, pending_delay, clock)
- self.downloader = Downloader(client, local_path_u, db, collective_dircap, clock)
+ self.uploader = Uploader(client, local_path_u, db, upload_dirnode, pending_delay, clock, immediate)
+ self.downloader = Downloader(client, local_path_u, db, collective_dirnode,
+ upload_dirnode.get_readonly_uri(), clock, self.uploader.is_pending, umask)
def startService(self):
# TODO: why is this being called more than once?
"""ready is used to signal us to start
processing the upload and download items...
"""
- self.is_ready = True
- d = self.uploader.start_scanning()
- d2 = self.downloader.start_scanning()
- d.addCallback(lambda ign: d2)
- return d
+ self.uploader.start_uploading() # synchronous
+ return self.downloader.start_downloading()
def finish(self):
print "finish"
% quote_local_unicode_path(self._local_path_u))
self._deque = deque()
+ # do we also want to bound on "maximum age"?
+ self._process_history = deque(maxlen=20)
self._lazy_tail = defer.succeed(None)
- self._pending = set()
self._stopped = False
self._turn_delay = 0
+ def get_status(self):
+ """
+ Returns an iterable of instances that implement IQueuedItem
+ """
+ for item in self._deque:
+ yield item
+ for item in self._process_history:
+ yield item
+
def _get_filepath(self, relpath_u):
+ self._log("_get_filepath(%r)" % (relpath_u,))
return extend_filepath(self._local_filepath, relpath_u.split(u"/"))
def _get_relpath(self, filepath):
print s
#open("events", "ab+").write(msg)
- def _append_to_deque(self, relpath_u):
- self._log("_append_to_deque(%r)" % (relpath_u,))
- if relpath_u in self._pending or magicpath.should_ignore_file(relpath_u):
- return
- self._deque.append(relpath_u)
- self._pending.add(relpath_u)
- self._count('objects_queued')
- if self.is_ready:
- self._clock.callLater(0, self._turn_deque)
-
def _turn_deque(self):
- self._log("_turn_deque")
- if self._stopped:
- self._log("stopped")
- return
try:
- item = self._deque.pop()
- self._log("popped %r" % (item,))
- self._count('objects_queued', -1)
- except IndexError:
- self._log("deque is now empty")
- self._lazy_tail.addCallback(lambda ign: self._when_queue_is_empty())
- else:
- self._lazy_tail.addCallback(lambda ign: self._process(item))
- self._lazy_tail.addBoth(self._call_hook, 'processed')
- self._lazy_tail.addErrback(log.err)
- self._lazy_tail.addCallback(lambda ign: task.deferLater(self._clock, self._turn_delay, self._turn_deque))
+ self._log("_turn_deque")
+ if self._stopped:
+ self._log("stopped")
+ return
+ try:
+ item = IQueuedItem(self._deque.pop())
+ self._process_history.append(item)
+
+ self._log("popped %r, now have %d" % (item, len(self._deque)))
+ self._count('objects_queued', -1)
+ except IndexError:
+ self._log("deque is now empty")
+ self._lazy_tail.addBoth(self._logcb, "whawhat empty")
+ self._lazy_tail.addCallback(lambda ign: self._when_queue_is_empty())
+ self._lazy_tail.addBoth(self._logcb, "got past _when_queue_is_empty")
+ else:
+ self._log("_turn_deque else clause")
+ self._lazy_tail.addBoth(self._logcb, "whawhat else %r" % (item,))
+ self._lazy_tail.addCallback(lambda ign: self._process(item))
+ self._lazy_tail.addBoth(self._logcb, "got past _process")
+ self._lazy_tail.addBoth(self._call_hook, 'processed', async=True)
+ self._lazy_tail.addBoth(self._logcb, "got past _call_hook (turn_delay = %r)" % (self._turn_delay,))
+ self._lazy_tail.addErrback(log.err)
+ self._lazy_tail.addCallback(lambda ign: task.deferLater(self._clock, self._turn_delay, self._turn_deque))
+ self._lazy_tail.addBoth(self._logcb, "got past deferLater")
+ except Exception as e:
+ self._log("---- turn deque exception %s" % (e,))
+ raise
+
+
# this isn't in interfaces.py because it's very specific to QueueMixin
class IQueuedItem(Interface):
    relpath_u = Attribute("The path this item represents")
    progress = Attribute("A PercentProgress instance")

    def set_status(self, status, current_time=None):
        """
        Record that this item reached ``status`` at ``current_time``
        (seconds since the epoch); None means "now".
        """

    def status_time(self, state):
        """
        Get the time of particular state change, or None
        """

    def status_history(self):
        """
        All status changes, sorted latest -> oldest

        NOTE(review): QueuedItem.status_history actually sorts oldest ->
        newest; confirm which ordering this interface intends.
        """
+
@implementer(IQueuedItem)
class QueuedItem(object):
    """
    Base IQueuedItem implementation: a relative path, a progress
    instance, and a history of (status -> timestamp) transitions.
    """
    def __init__(self, relpath_u, progress):
        self.relpath_u = relpath_u
        self.progress = progress
        # maps status name -> timestamp (seconds since the epoch)
        self._status_history = dict()

    def set_status(self, status, current_time=None):
        """
        Record that we reached ``status``; ``current_time`` defaults to
        the current wall-clock time.
        """
        if current_time is None:
            current_time = time.time()
        self._status_history[status] = current_time

    def status_time(self, state):
        """
        Returns None if there's no status-update for 'state', else returns
        the timestamp when that state was reached.
        """
        return self._status_history.get(state, None)

    def status_history(self):
        """
        Returns a list of 2-tuples of (state, timestamp) sorted by
        timestamp, oldest first.
        """
        # A key-based sorted() gives the same (ascending, stable) order as
        # the old comparator-based list.sort(cmp), but also works on
        # Python 3 where cmp-sorting and dict-view .sort() no longer exist.
        return sorted(self._status_history.items(), key=lambda item: item[1])
+
+
class UploadItem(QueuedItem):
    """
    Represents a single item in the _deque of the Uploader
    """
    pass
class Uploader(QueueMixin):
- def __init__(self, client, local_path_u, db, upload_dircap, pending_delay, clock):
+ def __init__(self, client, local_path_u, db, upload_dirnode, pending_delay, clock,
+ immediate=False):
QueueMixin.__init__(self, client, local_path_u, db, 'uploader', clock)
self.is_ready = False
+ self._immediate = immediate
- # TODO: allow a path rather than a cap URI.
- self._upload_dirnode = self._client.create_node_from_uri(upload_dircap)
- if not IDirectoryNode.providedBy(self._upload_dirnode):
- raise AssertionError("The URI in 'private/magic_folder_dircap' does not refer to a directory.")
- if self._upload_dirnode.is_unknown() or self._upload_dirnode.is_readonly():
- raise AssertionError("The URI in 'private/magic_folder_dircap' is not a writecap to a directory.")
+ if not IDirectoryNode.providedBy(upload_dirnode):
+ raise AssertionError("The URI in '%s' does not refer to a directory."
+ % os.path.join('private', 'magic_folder_dircap'))
+ if upload_dirnode.is_unknown() or upload_dirnode.is_readonly():
+ raise AssertionError("The URI in '%s' is not a writecap to a directory."
+ % os.path.join('private', 'magic_folder_dircap'))
+ self._upload_dirnode = upload_dirnode
self._inotify = get_inotify_module()
self._notifier = self._inotify.INotify()
+ self._pending = set() # of unicode relpaths
+
+ self._periodic_full_scan_duration = 10 * 60 # perform a full scan every 10 minutes
if hasattr(self._notifier, 'set_pending_delay'):
self._notifier.set_pending_delay(pending_delay)
- # TODO: what about IN_MOVE_SELF, IN_MOVED_FROM, or IN_UNMOUNT?
+ # TODO: what about IN_MOVE_SELF and IN_UNMOUNT?
#
self.mask = ( self._inotify.IN_CREATE
| self._inotify.IN_CLOSE_WRITE
self._log("stop")
self._notifier.stopReading()
self._count('dirs_monitored', -1)
+ self.periodic_callid.cancel()
if hasattr(self._notifier, 'wait_until_stopped'):
d = self._notifier.wait_until_stopped()
else:
d.addCallback(lambda ign: self._lazy_tail)
return d
- def start_scanning(self):
- self._log("start_scanning")
+ def start_uploading(self):
+ self._log("start_uploading")
self.is_ready = True
- self._pending = self._db.get_all_relpaths()
- self._log("all_files %r" % (self._pending))
- d = self._scan(u"")
- def _add_pending(ign):
- # This adds all of the files that were in the db but not already processed
- # (normally because they have been deleted on disk).
- self._log("adding %r" % (self._pending))
- self._deque.extend(self._pending)
- d.addCallback(_add_pending)
- d.addCallback(lambda ign: self._turn_deque())
- return d
+
+ all_relpaths = self._db.get_all_relpaths()
+ self._log("all relpaths: %r" % (all_relpaths,))
+
+ for relpath_u in all_relpaths:
+ self._add_pending(relpath_u)
+
+ self._full_scan()
+
+ def _extend_queue_and_keep_going(self, relpaths_u):
+ self._log("_extend_queue_and_keep_going %r" % (relpaths_u,))
+ for relpath_u in relpaths_u:
+ progress = PercentProgress()
+ item = UploadItem(relpath_u, progress)
+ item.set_status('queued', self._clock.seconds())
+ self._deque.append(item)
+
+ self._count('objects_queued', len(relpaths_u))
+
+ if self.is_ready:
+ if self._immediate: # for tests
+ self._turn_deque()
+ else:
+ self._clock.callLater(0, self._turn_deque)
+
+ def _full_scan(self):
+ self.periodic_callid = self._clock.callLater(self._periodic_full_scan_duration, self._full_scan)
+ print "FULL SCAN"
+ self._log("_pending %r" % (self._pending))
+ self._scan(u"")
+ self._extend_queue_and_keep_going(self._pending)
+
+ def _add_pending(self, relpath_u):
+ self._log("add pending %r" % (relpath_u,))
+ if not magicpath.should_ignore_file(relpath_u):
+ self._pending.add(relpath_u)
def _scan(self, reldir_u):
+ # Scan a directory by (synchronously) adding the paths of all its children to self._pending.
+ # Note that this doesn't add them to the deque -- that will
+
self._log("scan %r" % (reldir_u,))
fp = self._get_filepath(reldir_u)
try:
raise Exception("WARNING: magic folder: could not list directory %s due to a filename encoding error"
% quote_filepath(fp))
- d = defer.succeed(None)
for child in children:
- assert isinstance(child, unicode), child
- d.addCallback(lambda ign, child=child:
- ("%s/%s" % (reldir_u, child) if reldir_u else child))
- def _add_pending(relpath_u):
- if magicpath.should_ignore_file(relpath_u):
- return None
+ _assert(isinstance(child, unicode), child=child)
+ self._add_pending("%s/%s" % (reldir_u, child) if reldir_u != u"" else child)
- self._pending.add(relpath_u)
- return relpath_u
- d.addCallback(_add_pending)
- # This call to _process doesn't go through the deque, and probably should.
- d.addCallback(self._process)
- d.addBoth(self._call_hook, 'processed')
- d.addErrback(log.err)
-
- return d
    def is_pending(self, relpath_u):
        """
        Return True if ``relpath_u`` is currently in the pending-upload
        set (queued, or not yet fully processed).
        """
        return relpath_u in self._pending
def _notify(self, opaque, path, events_mask):
self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(self._inotify.humanReadableMask(events_mask))))
+ relpath_u = self._get_relpath(path)
# We filter out IN_CREATE events not associated with a directory.
# Acting on IN_CREATE for files could cause us to read and upload
if ((events_mask & self._inotify.IN_CREATE) != 0 and
(events_mask & self._inotify.IN_ISDIR) == 0):
- self._log("ignoring inotify event for creation of file %r\n" % (path,))
+ self._log("ignoring event for %r (creation of non-directory)\n" % (relpath_u,))
+ return
+ if relpath_u in self._pending:
+ self._log("not queueing %r because it is already pending" % (relpath_u,))
+ return
+ if magicpath.should_ignore_file(relpath_u):
+ self._log("ignoring event for %r (ignorable path)" % (relpath_u,))
return
- relpath_u = self._get_relpath(path)
- self._append_to_deque(relpath_u)
+ self._pending.add(relpath_u)
+ self._extend_queue_and_keep_going([relpath_u])
def _when_queue_is_empty(self):
return defer.succeed(None)
- def _process(self, relpath_u):
+ def _process(self, item):
+ # Uploader
+ relpath_u = item.relpath_u
self._log("_process(%r)" % (relpath_u,))
+ item.set_status('started', self._clock.seconds())
+
if relpath_u is None:
+ item.set_status('invalid_path', self._clock.seconds())
return
precondition(isinstance(relpath_u, unicode), relpath_u)
+ precondition(not relpath_u.endswith(u'/'), relpath_u)
d = defer.succeed(None)
- def _maybe_upload(val, now=None):
+ def _maybe_upload(ign, now=None):
+ self._log("_maybe_upload: relpath_u=%r, now=%r" % (relpath_u, now))
if now is None:
now = time.time()
fp = self._get_filepath(relpath_u)
pathinfo = get_pathinfo(unicode_from_filepath(fp))
- self._log("pending = %r, about to remove %r" % (self._pending, relpath_u))
+ self._log("about to remove %r from pending set %r" %
+ (relpath_u, self._pending))
self._pending.remove(relpath_u)
encoded_path_u = magicpath.path2magic(relpath_u)
# FIXME merge this with the 'isfile' case.
self._log("notified object %s disappeared (this is normal)" % quote_filepath(fp))
self._count('objects_disappeared')
- if not self._db.check_file_db_exists(relpath_u):
+
+ db_entry = self._db.get_db_entry(relpath_u)
+ if db_entry is None:
return None
- last_downloaded_timestamp = now
- last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
+ last_downloaded_timestamp = now # is this correct?
- current_version = self._db.get_local_file_version(relpath_u)
- if current_version is None:
- new_version = 0
+ if is_new_file(pathinfo, db_entry):
+ new_version = db_entry.version + 1
else:
- new_version = current_version + 1
+ self._log("Not uploading %r" % (relpath_u,))
+ self._count('objects_not_uploaded')
+ return
metadata = { 'version': new_version,
'deleted': True,
'last_downloaded_timestamp': last_downloaded_timestamp }
- if last_downloaded_uri is not None:
- metadata['last_downloaded_uri'] = last_downloaded_uri
+ if db_entry.last_downloaded_uri is not None:
+ metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri
empty_uploadable = Data("", self._client.convergence)
- d2 = self._upload_dirnode.add_file(encoded_path_u, empty_uploadable,
- metadata=metadata, overwrite=True)
+ d2 = self._upload_dirnode.add_file(
+ encoded_path_u, empty_uploadable,
+ metadata=metadata,
+ overwrite=True,
+ progress=item.progress,
+ )
def _add_db_entry(filenode):
filecap = filenode.get_uri()
+ last_downloaded_uri = metadata.get('last_downloaded_uri', None)
self._db.did_upload_version(relpath_u, new_version, filecap,
- last_downloaded_uri, last_downloaded_timestamp, pathinfo)
+ last_downloaded_uri, last_downloaded_timestamp,
+ pathinfo)
self._count('files_uploaded')
d2.addCallback(_add_db_entry)
return d2
self.warn("WARNING: cannot upload symlink %s" % quote_filepath(fp))
return None
elif pathinfo.isdir:
- self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)
+ print "ISDIR "
+ if not getattr(self._notifier, 'recursive_includes_new_subdirectories', False):
+ self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)
+
uploadable = Data("", self._client.convergence)
encoded_path_u += magicpath.path2magic(u"/")
- upload_d = self._upload_dirnode.add_file(encoded_path_u, uploadable, metadata={"version":0}, overwrite=True)
- def _succeeded(ign):
+ self._log("encoded_path_u = %r" % (encoded_path_u,))
+ upload_d = self._upload_dirnode.add_file(
+ encoded_path_u, uploadable,
+ metadata={"version": 0},
+ overwrite=True,
+ progress=item.progress,
+ )
+ def _dir_succeeded(ign):
self._log("created subdirectory %r" % (relpath_u,))
self._count('directories_created')
- def _failed(f):
+ def _dir_failed(f):
self._log("failed to create subdirectory %r" % (relpath_u,))
return f
- upload_d.addCallbacks(_succeeded, _failed)
+ upload_d.addCallbacks(_dir_succeeded, _dir_failed)
upload_d.addCallback(lambda ign: self._scan(relpath_u))
+ upload_d.addCallback(lambda ign: self._extend_queue_and_keep_going(self._pending))
return upload_d
elif pathinfo.isfile:
- last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
+ db_entry = self._db.get_db_entry(relpath_u)
+
last_downloaded_timestamp = now
- current_version = self._db.get_local_file_version(relpath_u)
- if current_version is None:
+ if db_entry is None:
new_version = 0
- elif self._db.is_new_file(pathinfo, relpath_u):
- new_version = current_version + 1
+ elif is_new_file(pathinfo, db_entry):
+ new_version = db_entry.version + 1
else:
+ self._log("Not uploading %r" % (relpath_u,))
+ self._count('objects_not_uploaded')
return None
metadata = { 'version': new_version,
'last_downloaded_timestamp': last_downloaded_timestamp }
- if last_downloaded_uri is not None:
- metadata['last_downloaded_uri'] = last_downloaded_uri
+ if db_entry is not None and db_entry.last_downloaded_uri is not None:
+ metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri
uploadable = FileName(unicode_from_filepath(fp), self._client.convergence)
- d2 = self._upload_dirnode.add_file(encoded_path_u, uploadable,
- metadata=metadata, overwrite=True)
+ d2 = self._upload_dirnode.add_file(
+ encoded_path_u, uploadable,
+ metadata=metadata,
+ overwrite=True,
+ progress=item.progress,
+ )
def _add_db_entry(filenode):
filecap = filenode.get_uri()
last_downloaded_uri = metadata.get('last_downloaded_uri', None)
self._db.did_upload_version(relpath_u, new_version, filecap,
- last_downloaded_uri, last_downloaded_timestamp, pathinfo)
+ last_downloaded_uri, last_downloaded_timestamp,
+ pathinfo)
self._count('files_uploaded')
d2.addCallback(_add_db_entry)
return d2
def _succeeded(res):
self._count('objects_succeeded')
+ item.set_status('success', self._clock.seconds())
return res
def _failed(f):
self._count('objects_failed')
- self._log("%r while processing %r" % (f, relpath_u))
+ self._log("%s while processing %r" % (f, relpath_u))
+ item.set_status('failure', self._clock.seconds())
return f
d.addCallbacks(_succeeded, _failed)
return d
class WriteFileMixin(object):
FUDGE_SECONDS = 10.0
    def _get_conflicted_filename(self, abspath_u):
        # the conflicted copy of a file lives beside it, with a
        # ".conflict" suffix appended to the full path
        return abspath_u + u".conflict"
+
def _write_downloaded_file(self, abspath_u, file_contents, is_conflict=False, now=None):
self._log("_write_downloaded_file(%r, <%d bytes>, is_conflict=%r, now=%r)"
% (abspath_u, len(file_contents), is_conflict, now))
# ensure parent directory exists
head, tail = os.path.split(abspath_u)
- mode = 0777 # XXX
- fileutil.make_dirs(head, mode)
- fileutil.write(replacement_path_u, file_contents)
+ old_mask = os.umask(self._umask)
+ try:
+ fileutil.make_dirs(head, (~ self._umask) & 0777)
+ fileutil.write(replacement_path_u, file_contents)
+ finally:
+ os.umask(old_mask)
+
os.utime(replacement_path_u, (now, now - self.FUDGE_SECONDS))
if is_conflict:
+ print "0x00 ------------ <><> is conflict; calling _rename_conflicted_file... %r %r" % (abspath_u, replacement_path_u)
return self._rename_conflicted_file(abspath_u, replacement_path_u)
else:
try:
def _rename_conflicted_file(self, abspath_u, replacement_path_u):
self._log("_rename_conflicted_file(%r, %r)" % (abspath_u, replacement_path_u))
- conflict_path_u = abspath_u + u".conflict"
+ conflict_path_u = self._get_conflicted_filename(abspath_u)
+ print "XXX rename %r %r" % (replacement_path_u, conflict_path_u)
+ if os.path.isfile(replacement_path_u):
+ print "%r exists" % (replacement_path_u,)
+ if os.path.isfile(conflict_path_u):
+ print "%r exists" % (conflict_path_u,)
+
fileutil.rename_no_overwrite(replacement_path_u, conflict_path_u)
return conflict_path_u
    def _rename_deleted_file(self, abspath_u):
        """
        Move a locally-present file aside to 'abspath_u.backup' (used
        when the remote side reports it deleted).

        If the local file is already gone, the OSError is swallowed and
        only logged.  Returns abspath_u either way.
        """
        self._log('renaming deleted file to backup: %s' % (abspath_u,))
        try:
            fileutil.rename_no_overwrite(abspath_u, abspath_u + u'.backup')
        except OSError:
            self._log("Already gone: '%s'" % (abspath_u,))
        return abspath_u
+
+
class DownloadItem(QueuedItem):
    """
    Represents a single item in the _deque of the Downloader

    In addition to the relpath and progress tracked by QueuedItem, this
    carries the remote filenode to download from and the DMD metadata
    for that version.
    """
    def __init__(self, relpath_u, progress, filenode, metadata):
        super(DownloadItem, self).__init__(relpath_u, progress)
        self.file_node = filenode  # remote file node; download_best_version() is called on it
        self.metadata = metadata   # remote metadata dict (version, deleted, last_*_uri, ...)
+
class Downloader(QueueMixin, WriteFileMixin):
REMOTE_SCAN_INTERVAL = 3 # facilitates tests
- def __init__(self, client, local_path_u, db, collective_dircap, clock):
+ def __init__(self, client, local_path_u, db, collective_dirnode,
+ upload_readonly_dircap, clock, is_upload_pending, umask):
QueueMixin.__init__(self, client, local_path_u, db, 'downloader', clock)
- # TODO: allow a path rather than a cap URI.
- self._collective_dirnode = self._client.create_node_from_uri(collective_dircap)
+ if not IDirectoryNode.providedBy(collective_dirnode):
+ raise AssertionError("The URI in '%s' does not refer to a directory."
+ % os.path.join('private', 'collective_dircap'))
+ if collective_dirnode.is_unknown() or not collective_dirnode.is_readonly():
+ raise AssertionError("The URI in '%s' is not a readonly cap to a directory."
+ % os.path.join('private', 'collective_dircap'))
- if not IDirectoryNode.providedBy(self._collective_dirnode):
- raise AssertionError("The URI in 'private/collective_dircap' does not refer to a directory.")
- if self._collective_dirnode.is_unknown() or not self._collective_dirnode.is_readonly():
- raise AssertionError("The URI in 'private/collective_dircap' is not a readonly cap to a directory.")
+ self._collective_dirnode = collective_dirnode
+ self._upload_readonly_dircap = upload_readonly_dircap
+ self._is_upload_pending = is_upload_pending
+ self._umask = umask
+ def start_downloading(self):
+ self._log("start_downloading")
self._turn_delay = self.REMOTE_SCAN_INTERVAL
- self._download_scan_batch = {} # path -> [(filenode, metadata)]
-
- def start_scanning(self):
- self._log("start_scanning")
files = self._db.get_all_relpaths()
self._log("all files %s" % files)
- d = self._scan_remote_collective()
+ d = self._scan_remote_collective(scan_self=True)
+ d.addBoth(self._logcb, "after _scan_remote_collective 0")
self._turn_deque()
return d
def stop(self):
+ self._log("stop")
self._stopped = True
d = defer.succeed(None)
d.addCallback(lambda ign: self._lazy_tail)
self._log("nope")
return False
self._log("yep")
- v = self._db.get_local_file_version(relpath_u)
- self._log("v = %r" % (v,))
- return (v is None or v < remote_version)
+ db_entry = self._db.get_db_entry(relpath_u)
+ if db_entry is None:
+ return True
+ self._log("version %r" % (db_entry.version,))
+ return (db_entry.version < remote_version)
def _get_local_latest(self, relpath_u):
"""
"""
if not self._get_filepath(relpath_u).exists():
return None
- return self._db.get_local_file_version(relpath_u)
+ db_entry = self._db.get_db_entry(relpath_u)
+ return None if db_entry is None else db_entry.version
def _get_collective_latest_file(self, filename):
"""
collective_dirmap_d.addCallback(highest_version)
return collective_dirmap_d
- def _append_to_batch(self, name, file_node, metadata):
- if self._download_scan_batch.has_key(name):
- self._download_scan_batch[name] += [(file_node, metadata)]
- else:
- self._download_scan_batch[name] = [(file_node, metadata)]
-
- def _scan_remote(self, nickname, dirnode):
- self._log("_scan_remote nickname %r" % (nickname,))
+ def _scan_remote_dmd(self, nickname, dirnode, scan_batch):
+ self._log("_scan_remote_dmd nickname %r" % (nickname,))
d = dirnode.list()
def scan_listing(listing_map):
for encoded_relpath_u in listing_map.keys():
local_version = self._get_local_latest(relpath_u)
remote_version = metadata.get('version', None)
self._log("%r has local version %r, remote version %r" % (relpath_u, local_version, remote_version))
+
if local_version is None or remote_version is None or local_version < remote_version:
self._log("%r added to download queue" % (relpath_u,))
- self._append_to_batch(relpath_u, file_node, metadata)
+ if scan_batch.has_key(relpath_u):
+ scan_batch[relpath_u] += [(file_node, metadata)]
+ else:
+ scan_batch[relpath_u] = [(file_node, metadata)]
+
d.addCallback(scan_listing)
- d.addBoth(self._logcb, "end of _scan_remote")
+ d.addBoth(self._logcb, "end of _scan_remote_dmd")
return d
- def _scan_remote_collective(self):
+ def _scan_remote_collective(self, scan_self=False):
self._log("_scan_remote_collective")
- self._download_scan_batch = {} # XXX
-
- if self._collective_dirnode is None:
- return
- collective_dirmap_d = self._collective_dirnode.list()
- def do_list(result):
- others = [x for x in result.keys()]
- return result, others
- collective_dirmap_d.addCallback(do_list)
- def scan_collective(result):
- d = defer.succeed(None)
- collective_dirmap, others_list = result
- for dir_name in others_list:
- d.addCallback(lambda x, dir_name=dir_name: self._scan_remote(dir_name, collective_dirmap[dir_name][0]))
- # XXX todo add errback
- return d
- collective_dirmap_d.addCallback(scan_collective)
- collective_dirmap_d.addCallback(self._filter_scan_batch)
- collective_dirmap_d.addCallback(self._add_batch_to_download_queue)
- return collective_dirmap_d
+ scan_batch = {} # path -> [(filenode, metadata)]
+
+ d = self._collective_dirnode.list()
+ def scan_collective(dirmap):
+ d2 = defer.succeed(None)
+ for dir_name in dirmap:
+ (dirnode, metadata) = dirmap[dir_name]
+ if scan_self or dirnode.get_readonly_uri() != self._upload_readonly_dircap:
+ d2.addCallback(lambda ign, dir_name=dir_name, dirnode=dirnode:
+ self._scan_remote_dmd(dir_name, dirnode, scan_batch))
+ def _err(f, dir_name=dir_name):
+ self._log("failed to scan DMD for client %r: %s" % (dir_name, f))
+ # XXX what should we do to make this failure more visible to users?
+ d2.addErrback(_err)
+
+ return d2
+ d.addCallback(scan_collective)
+
+ def _filter_batch_to_deque(ign):
+ self._log("deque = %r, scan_batch = %r" % (self._deque, scan_batch))
+ for relpath_u in scan_batch.keys():
+ file_node, metadata = max(scan_batch[relpath_u], key=lambda x: x[1]['version'])
+
+ if self._should_download(relpath_u, metadata['version']):
+ to_dl = DownloadItem(
+ relpath_u,
+ PercentProgress(file_node.get_size()),
+ file_node,
+ metadata,
+ )
+ to_dl.set_status('queued', self._clock.seconds())
+ self._deque.append(to_dl)
+ else:
+ self._log("Excluding %r" % (relpath_u,))
+ self._call_hook(None, 'processed', async=True)
- def _add_batch_to_download_queue(self, result):
- self._log("result = %r" % (result,))
- self._log("deque = %r" % (self._deque,))
- self._deque.extend(result)
- self._log("deque after = %r" % (self._deque,))
- self._count('objects_queued', len(result))
- self._log("pending = %r" % (self._pending,))
- self._pending.update(map(lambda x: x[0], result))
- self._log("pending after = %r" % (self._pending,))
-
- def _filter_scan_batch(self, result):
- self._log("_filter_scan_batch")
- extension = [] # consider whether this should be a dict
- for relpath_u in self._download_scan_batch.keys():
- if relpath_u in self._pending:
- continue
- file_node, metadata = max(self._download_scan_batch[relpath_u], key=lambda x: x[1]['version'])
- if self._should_download(relpath_u, metadata['version']):
- extension += [(relpath_u, file_node, metadata)]
- else:
- self._count('objects_excluded')
- self._call_hook(None, 'processed')
- return extension
+ self._log("deque after = %r" % (self._deque,))
+ d.addCallback(_filter_batch_to_deque)
+ return d
def _when_queue_is_empty(self):
- d = task.deferLater(self._clock, self._turn_delay, self._scan_remote_collective)
- d.addBoth(self._logcb, "after _scan_remote_collective")
+ d = task.deferLater(self._clock, self.REMOTE_SCAN_INTERVAL, self._scan_remote_collective)
+ d.addBoth(self._logcb, "after _scan_remote_collective 1")
d.addCallback(lambda ign: self._turn_deque())
return d
def _process(self, item, now=None):
+ # Downloader
self._log("_process(%r)" % (item,))
- if now is None:
- now = time.time()
- (relpath_u, file_node, metadata) = item
- fp = self._get_filepath(relpath_u)
+ if now is None: # XXX why can we pass in now?
+ now = time.time() # self._clock.seconds()
+
+ self._log("started! %s" % (now,))
+ item.set_status('started', now)
+ fp = self._get_filepath(item.relpath_u)
abspath_u = unicode_from_filepath(fp)
+ conflict_path_u = self._get_conflicted_filename(abspath_u)
d = defer.succeed(None)
- if relpath_u.endswith(u"/"):
- self._log("mkdir(%r)" % (abspath_u,))
- d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
- d.addCallback(lambda ign: abspath_u)
- else:
- d.addCallback(lambda ign: file_node.download_best_version())
- d.addCallback(lambda contents: self._write_downloaded_file(abspath_u, contents, is_conflict=False))
def do_update_db(written_abspath_u):
- filecap = file_node.get_uri()
- last_uploaded_uri = metadata.get('last_uploaded_uri', None)
+ filecap = item.file_node.get_uri()
+ last_uploaded_uri = item.metadata.get('last_uploaded_uri', None)
last_downloaded_uri = filecap
last_downloaded_timestamp = now
written_pathinfo = get_pathinfo(written_abspath_u)
- if not written_pathinfo.exists:
+
+ if not written_pathinfo.exists and not item.metadata.get('deleted', False):
raise Exception("downloaded object %s disappeared" % quote_local_unicode_path(written_abspath_u))
- self._db.did_upload_version(relpath_u, metadata['version'], last_uploaded_uri,
- last_downloaded_uri, last_downloaded_timestamp, written_pathinfo)
+ self._db.did_upload_version(
+ item.relpath_u, item.metadata['version'], last_uploaded_uri,
+ last_downloaded_uri, last_downloaded_timestamp, written_pathinfo,
+ )
self._count('objects_downloaded')
+ item.set_status('success', self._clock.seconds())
+
def failed(f):
+ item.set_status('failure', self._clock.seconds())
self._log("download failed: %s" % (str(f),))
self._count('objects_failed')
return f
+
+ if os.path.isfile(conflict_path_u):
+ def fail(res):
+ raise ConflictError("download failed: already conflicted: %r" % (item.relpath_u,))
+ d.addCallback(fail)
+ else:
+ is_conflict = False
+ db_entry = self._db.get_db_entry(item.relpath_u)
+ dmd_last_downloaded_uri = item.metadata.get('last_downloaded_uri', None)
+ dmd_last_uploaded_uri = item.metadata.get('last_uploaded_uri', None)
+ if db_entry:
+ if dmd_last_downloaded_uri is not None and db_entry.last_downloaded_uri is not None:
+ if dmd_last_downloaded_uri != db_entry.last_downloaded_uri:
+ is_conflict = True
+ self._count('objects_conflicted')
+ elif dmd_last_uploaded_uri is not None and dmd_last_uploaded_uri != db_entry.last_uploaded_uri:
+ is_conflict = True
+ self._count('objects_conflicted')
+ elif self._is_upload_pending(item.relpath_u):
+ is_conflict = True
+ self._count('objects_conflicted')
+
+ if item.relpath_u.endswith(u"/"):
+ if item.metadata.get('deleted', False):
+ self._log("rmdir(%r) ignored" % (abspath_u,))
+ else:
+ self._log("mkdir(%r)" % (abspath_u,))
+ d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
+ d.addCallback(lambda ign: abspath_u)
+ else:
+ if item.metadata.get('deleted', False):
+ d.addCallback(lambda ign: self._rename_deleted_file(abspath_u))
+ else:
+ d.addCallback(lambda ign: item.file_node.download_best_version(progress=item.progress))
+ d.addCallback(lambda contents: self._write_downloaded_file(abspath_u, contents,
+ is_conflict=is_conflict))
+
d.addCallbacks(do_update_db, failed)
- def remove_from_pending(res):
- self._pending.remove(relpath_u)
- return res
- d.addBoth(remove_from_pending)
+
+ def trap_conflicts(f):
+ f.trap(ConflictError)
+ return None
+ d.addErrback(trap_conflicts)
return d