import sys, os
import os.path
+import shutil
from collections import deque
import time
from allmydata.util import fileutil
from allmydata.interfaces import IDirectoryNode
from allmydata.util import log
-from allmydata.util.fileutil import precondition_abspath, get_pathinfo
+from allmydata.util.fileutil import precondition_abspath, get_pathinfo, ConflictError
from allmydata.util.assertutil import precondition
from allmydata.util.deferredutil import HookMixin
from allmydata.util.encodingutil import listdir_filepath, to_filepath, \
self._log("%s += %r" % (counter_name, delta))
self._client.stats_provider.count(ctr, delta)
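+ # passthrough callback for Deferred chains: log the result with a label,
+ # then return it unchanged so the chain is unaffected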
+ def _logcb(self, res, msg):
+ self._log("%s: %r" % (msg, res))
+ return res
+
def _log(self, msg):
s = "Magic Folder %s %s: %s" % (quote_output(self._client.nickname), self._name, msg)
self._client.log(s)
self._clock.callLater(0, self._turn_deque)
def _turn_deque(self):
+ self._log("_turn_deque")
if self._stopped:
+ self._log("stopped")
return
try:
item = self._deque.pop()
+ self._log("popped %r" % (item,))
self._count('objects_queued', -1)
except IndexError:
self._log("deque is now empty")
if hasattr(self._notifier, 'set_pending_delay'):
self._notifier.set_pending_delay(pending_delay)
- # We don't watch for IN_CREATE, because that would cause us to read and upload a
- # possibly-incomplete file before the application has closed it. There should always
- # be an IN_CLOSE_WRITE after an IN_CREATE (I think).
# TODO: what about IN_MOVE_SELF, IN_MOVED_FROM, or IN_UNMOUNT?
#
- self.mask = ( self._inotify.IN_CLOSE_WRITE
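+ # IN_CREATE is needed to catch directory creation; IN_CREATE events
+ # for plain files are filtered out in _notify below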
+ self.mask = ( self._inotify.IN_CREATE
+ | self._inotify.IN_CLOSE_WRITE
| self._inotify.IN_MOVED_TO
| self._inotify.IN_MOVED_FROM
| self._inotify.IN_DELETE
def _notify(self, opaque, path, events_mask):
self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(self._inotify.humanReadableMask(events_mask))))
+
+ # We filter out IN_CREATE events not associated with a directory.
+ # Acting on IN_CREATE for files could cause us to read and upload
+ # a possibly-incomplete file before the application has closed it.
+ # There should always be an IN_CLOSE_WRITE after an IN_CREATE, I think.
+ # We can't avoid watching for IN_CREATE entirely, though, because it
+ # is the only event delivered when a directory is created.
+
+ if ((events_mask & self._inotify.IN_CREATE) != 0 and
+ (events_mask & self._inotify.IN_ISDIR) == 0):
+ self._log("ignoring inotify event for creation of file %r\n" % (path,))
+ return
+
relpath_u = self._get_relpath(path)
self._append_to_deque(relpath_u)
current_version = self._db.get_local_file_version(relpath_u)
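+ # bump the version only if the db agrees the file actually changed;
+ # otherwise there is nothing to upload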
if current_version is None:
new_version = 0
- else:
+ elif self._db.is_new_file(pathinfo, relpath_u):
new_version = current_version + 1
+ else:
+ self._log("Not uploading %r" % (relpath_u,))
+ self._count('objects_not_uploaded')
+ return
metadata = { 'version': new_version,
'deleted': True,
elif self._db.is_new_file(pathinfo, relpath_u):
new_version = current_version + 1
else:
+ self._log("Not uploading %r" % (relpath_u,))
+ self._count('objects_not_uploaded')
return None
metadata = { 'version': new_version,
class WriteFileMixin(object):
FUDGE_SECONDS = 10.0
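+ # a conflicted file is set aside under '<name>.conflict' so the
+ # local copy is preserved alongside the downloaded one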
+ def _get_conflicted_filename(self, abspath_u):
+ return abspath_u + u".conflict"
+
def _write_downloaded_file(self, abspath_u, file_contents, is_conflict=False, now=None):
self._log("_write_downloaded_file(%r, <%d bytes>, is_conflict=%r, now=%r)"
% (abspath_u, len(file_contents), is_conflict, now))
fileutil.write(replacement_path_u, file_contents)
os.utime(replacement_path_u, (now, now - self.FUDGE_SECONDS))
if is_conflict:
+ print "0x00 ------------ <><> is conflict; calling _rename_conflicted_file... %r %r" % (abspath_u, replacement_path_u)
return self._rename_conflicted_file(abspath_u, replacement_path_u)
else:
try:
def _rename_conflicted_file(self, abspath_u, replacement_path_u):
self._log("_rename_conflicted_file(%r, %r)" % (abspath_u, replacement_path_u))
- conflict_path_u = abspath_u + u".conflict"
+ conflict_path_u = self._get_conflicted_filename(abspath_u)
+ print "XXX rename %r %r" % (replacement_path_u, conflict_path_u)
+ if os.path.isfile(replacement_path_u):
+ print "%r exists" % (replacement_path_u,)
+ if os.path.isfile(conflict_path_u):
+ print "%r exists" % (conflict_path_u,)
+
fileutil.rename_no_overwrite(replacement_path_u, conflict_path_u)
return conflict_path_u
We check the remote metadata version against our magic-folder db version number;
latest version wins.
"""
+ self._log("_should_download(%r, %r)" % (relpath_u, remote_version))
if magicpath.should_ignore_file(relpath_u):
+ self._log("nope")
return False
+ self._log("yep")
v = self._db.get_local_file_version(relpath_u)
+ self._log("v = %r" % (v,))
return (v is None or v < remote_version)
def _get_local_latest(self, relpath_u):
self._log("%r added to download queue" % (relpath_u,))
self._append_to_batch(relpath_u, file_node, metadata)
d.addCallback(scan_listing)
+ d.addBoth(self._logcb, "end of _scan_remote")
return d
def _scan_remote_collective(self):
self._log("pending after = %r" % (self._pending,))
def _filter_scan_batch(self, result):
+ self._log("_filter_scan_batch")
extension = [] # consider whether this should be a dict
for relpath_u in self._download_scan_batch.keys():
if relpath_u in self._pending:
file_node, metadata = max(self._download_scan_batch[relpath_u], key=lambda x: x[1]['version'])
if self._should_download(relpath_u, metadata['version']):
extension += [(relpath_u, file_node, metadata)]
+ else:
+ self._log("Excluding %r" % (relpath_u,))
+ self._count('objects_excluded')
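+ # fire the 'processed' hook even for excluded items, presumably so
+ # anything waiting on this batch (via HookMixin) still makes progress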
+ self._call_hook(None, 'processed')
return extension
def _when_queue_is_empty(self):
d = task.deferLater(self._clock, self._turn_delay, self._scan_remote_collective)
+ d.addBoth(self._logcb, "after _scan_remote_collective")
d.addCallback(lambda ign: self._turn_deque())
return d
if now is None:
now = time.time()
(relpath_u, file_node, metadata) = item
- d = file_node.download_best_version()
- def succeeded(res):
- fp = self._get_filepath(relpath_u)
- abspath_u = unicode_from_filepath(fp)
- d2 = defer.succeed(res)
- d2.addCallback(lambda result: self._write_downloaded_file(abspath_u, result, is_conflict=False))
- def do_update_db(written_abspath_u):
- filecap = file_node.get_uri()
- last_uploaded_uri = metadata.get('last_uploaded_uri', None)
- last_downloaded_uri = filecap
- last_downloaded_timestamp = now
- written_pathinfo = get_pathinfo(written_abspath_u)
- if not written_pathinfo.exists:
- raise Exception("downloaded file %s disappeared" % quote_local_unicode_path(written_abspath_u))
-
- self._db.did_upload_version(relpath_u, metadata['version'], last_uploaded_uri,
- last_downloaded_uri, last_downloaded_timestamp, written_pathinfo)
- d2.addCallback(do_update_db)
- # XXX handle failure here with addErrback...
+ fp = self._get_filepath(relpath_u)
+ abspath_u = unicode_from_filepath(fp)
+ conflict_path_u = self._get_conflicted_filename(abspath_u)
+ d = defer.succeed(None)
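+ # build the download pipeline on this Deferred; which callbacks get
+ # appended below depends on the conflict/deleted/directory state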
+
+ def do_update_db(written_abspath_u):
+ filecap = file_node.get_uri()
+ last_uploaded_uri = metadata.get('last_uploaded_uri', None)
+ last_downloaded_uri = filecap
+ last_downloaded_timestamp = now
+ written_pathinfo = get_pathinfo(written_abspath_u)
+
+ if not written_pathinfo.exists and not metadata.get('deleted', False):
+ raise Exception("downloaded object %s disappeared" % quote_local_unicode_path(written_abspath_u))
+
+ self._db.did_upload_version(relpath_u, metadata['version'], last_uploaded_uri,
+ last_downloaded_uri, last_downloaded_timestamp, written_pathinfo)
self._count('objects_downloaded')
- return d2
def failed(f):
self._log("download failed: %s" % (str(f),))
- self._count('objects_download_failed')
+ self._count('objects_failed')
return f
- d.addCallbacks(succeeded, failed)
+
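+ # a lingering .conflict file means an earlier conflict has not been
+ # resolved; refuse to download over it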
+ if os.path.isfile(conflict_path_u):
+ def fail(res):
+ raise ConflictError("download failed: already conflicted: %r" % (relpath_u,))
+ d.addCallback(fail)
+ else:
+ is_conflict = False
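+ # conflict detection: a remote last_downloaded_uri that differs from
+ # the one in our local db suggests the remote change was not based on
+ # our local copy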
+ if self._db.check_file_db_exists(relpath_u):
+ dmd_last_downloaded_uri = metadata.get('last_downloaded_uri', None)
+ local_last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
+ print "metadata %r" % (metadata,)
+ print "<<<<--- if %r != %r" % (dmd_last_downloaded_uri, local_last_downloaded_uri)
+ if dmd_last_downloaded_uri is not None and dmd_last_downloaded_uri != local_last_downloaded_uri:
+ is_conflict = True
+ #dmd_last_uploaded_uri = metadata.get('last_uploaded_uri', None)
+ #local_last_uploaded_uri = ...
+
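+ # entries whose relpath ends in '/' represent directories: create
+ # them locally rather than downloading contents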
+ if relpath_u.endswith(u"/"):
+ self._log("mkdir(%r)" % (abspath_u,))
+ d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
+ d.addCallback(lambda ign: abspath_u)
+ else:
+ d.addCallback(lambda ign: file_node.download_best_version())
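+ # for entries marked deleted we don't write the downloaded contents;
+ # the local file is moved aside instead (see _unlink_deleted_file)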
+ if metadata.get('deleted', False):
+ d.addCallback(lambda result: self._unlink_deleted_file(abspath_u, result))
+ else:
+ d.addCallback(lambda contents: self._write_downloaded_file(abspath_u, contents,
+ is_conflict=is_conflict))
+
+ d.addCallbacks(do_update_db, failed)
+
def remove_from_pending(res):
self._pending.remove(relpath_u)
return res
d.addBoth(remove_from_pending)
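+ # conflicts are handled by leaving a .conflict file behind, so trap
+ # the error rather than letting it abort the queue turn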
+ def trap_conflicts(f):
+ f.trap(ConflictError)
+ return None
+ d.addErrback(trap_conflicts)
return d
+
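+ # a deletion propagated from a remote peer: move the local file to a
+ # '.backup' name instead of unlinking it outright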
+ def _unlink_deleted_file(self, abspath_u, result):
+ try:
+ self._log("unlinking %r (keeping a .backup copy)" % (abspath_u,))
+ shutil.move(abspath_u, abspath_u + '.backup')
+ except IOError:
+ self._log("Already gone: '%s'" % (abspath_u,))
+ return abspath_u