import sys, os
import os.path
-import shutil
from collections import deque
import time
from allmydata.util import fileutil
from allmydata.interfaces import IDirectoryNode
from allmydata.util import log
-from allmydata.util.fileutil import precondition_abspath, get_pathinfo
-from allmydata.util.assertutil import precondition
+from allmydata.util.fileutil import precondition_abspath, get_pathinfo, ConflictError
+from allmydata.util.assertutil import precondition, _assert
from allmydata.util.deferredutil import HookMixin
from allmydata.util.encodingutil import listdir_filepath, to_filepath, \
extend_filepath, unicode_from_filepath, unicode_segments_from, \
# TODO: allow a path rather than a cap URI.
self._upload_dirnode = self._client.create_node_from_uri(upload_dircap)
if not IDirectoryNode.providedBy(self._upload_dirnode):
- raise AssertionError("The URI in 'private/magic_folder_dircap' does not refer to a directory.")
+ raise AssertionError("The URI in '%s' does not refer to a directory."
+ % os.path.join('private', 'magic_folder_dircap'))
if self._upload_dirnode.is_unknown() or self._upload_dirnode.is_readonly():
- raise AssertionError("The URI in 'private/magic_folder_dircap' is not a writecap to a directory.")
+ raise AssertionError("The URI in '%s' is not a writecap to a directory."
+ % os.path.join('private', 'magic_folder_dircap'))
self._inotify = get_inotify_module()
self._notifier = self._inotify.INotify()
d = defer.succeed(None)
for child in children:
- assert isinstance(child, unicode), child
+ _assert(isinstance(child, unicode), child=child)
d.addCallback(lambda ign, child=child:
("%s/%s" % (reldir_u, child) if reldir_u else child))
def _add_pending(relpath_u):
elif self._db.is_new_file(pathinfo, relpath_u):
new_version = current_version + 1
else:
- self._log("Not uploading '{0}'".format(relpath_u))
+ self._log("Not uploading %r" % (relpath_u,))
self._count('objects_not_uploaded')
return
elif self._db.is_new_file(pathinfo, relpath_u):
new_version = current_version + 1
else:
- self._log("Not uploading '{0}'".format(relpath_u))
+ self._log("Not uploading %r" % (relpath_u,))
self._count('objects_not_uploaded')
return None
return res
def _failed(f):
self._count('objects_failed')
- self._log("%r while processing %r" % (f, relpath_u))
+ self._log("%s while processing %r" % (f, relpath_u))
return f
d.addCallbacks(_succeeded, _failed)
return d
class WriteFileMixin(object):
FUDGE_SECONDS = 10.0
+ def _get_conflicted_filename(self, abspath_u):
+ # Conflicted downloads are written next to the original file, at the
+ # same absolute path with a u".conflict" suffix appended. Both the
+ # downloader (to detect an existing conflict marker) and
+ # _rename_conflicted_file use this single naming rule.
+ return abspath_u + u".conflict"
+
def _write_downloaded_file(self, abspath_u, file_contents, is_conflict=False, now=None):
self._log("_write_downloaded_file(%r, <%d bytes>, is_conflict=%r, now=%r)"
% (abspath_u, len(file_contents), is_conflict, now))
fileutil.write(replacement_path_u, file_contents)
os.utime(replacement_path_u, (now, now - self.FUDGE_SECONDS))
if is_conflict:
+ print "0x00 ------------ <><> is conflict; calling _rename_conflicted_file... %r %r" % (abspath_u, replacement_path_u)
return self._rename_conflicted_file(abspath_u, replacement_path_u)
else:
try:
def _rename_conflicted_file(self, abspath_u, replacement_path_u):
self._log("_rename_conflicted_file(%r, %r)" % (abspath_u, replacement_path_u))
- conflict_path_u = abspath_u + u".conflict"
+ conflict_path_u = self._get_conflicted_filename(abspath_u)
+ print "XXX rename %r %r" % (replacement_path_u, conflict_path_u)
+ if os.path.isfile(replacement_path_u):
+ print "%r exists" % (replacement_path_u,)
+ if os.path.isfile(conflict_path_u):
+ print "%r exists" % (conflict_path_u,)
+
fileutil.rename_no_overwrite(replacement_path_u, conflict_path_u)
return conflict_path_u
+ def _rename_deleted_file(self, abspath_u):
+ # A remotely-deleted file is moved aside to '<path>.backup' rather than
+ # unlinked, so local data is never destroyed outright. If the file is
+ # already gone the rename failure is logged and swallowed. Returns the
+ # original path (not the backup path) in either case, so callers see
+ # the path that was processed.
+ self._log('renaming deleted file to backup: %s' % (abspath_u,))
+ try:
+ fileutil.rename_no_overwrite(abspath_u, abspath_u + u'.backup')
+ except IOError:
+ # XXX is this the correct error? NOTE(review): rename failures can
+ # also surface as OSError on some platforms -- verify.
+ self._log("Already gone: '%s'" % (abspath_u,))
+ return abspath_u
+
class Downloader(QueueMixin, WriteFileMixin):
REMOTE_SCAN_INTERVAL = 3 # facilitates tests
self._collective_dirnode = self._client.create_node_from_uri(collective_dircap)
if not IDirectoryNode.providedBy(self._collective_dirnode):
- raise AssertionError("The URI in 'private/collective_dircap' does not refer to a directory.")
+ raise AssertionError("The URI in '%s' does not refer to a directory."
+ % os.path.join('private', 'collective_dircap'))
if self._collective_dirnode.is_unknown() or not self._collective_dirnode.is_readonly():
- raise AssertionError("The URI in 'private/collective_dircap' is not a readonly cap to a directory.")
+ raise AssertionError("The URI in '%s' is not a readonly cap to a directory."
+ % os.path.join('private', 'collective_dircap'))
self._turn_delay = self.REMOTE_SCAN_INTERVAL
self._download_scan_batch = {} # path -> [(filenode, metadata)]
if self._should_download(relpath_u, metadata['version']):
extension += [(relpath_u, file_node, metadata)]
else:
- self._log("Excluding '{0}'".format(relpath_u))
+ self._log("Excluding %r" % (relpath_u,))
self._count('objects_excluded')
self._call_hook(None, 'processed')
return extension
(relpath_u, file_node, metadata) = item
fp = self._get_filepath(relpath_u)
abspath_u = unicode_from_filepath(fp)
-
+ conflict_path_u = self._get_conflicted_filename(abspath_u)
d = defer.succeed(None)
- if relpath_u.endswith(u"/"):
- self._log("mkdir(%r)" % (abspath_u,))
- d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
- d.addCallback(lambda ign: abspath_u)
- else:
- d.addCallback(lambda ign: file_node.download_best_version())
- if metadata.get('deleted', False):
- d.addCallback(lambda result: self._unlink_deleted_file(abspath_u, result))
- else:
- d.addCallback(lambda contents: self._write_downloaded_file(abspath_u, contents, is_conflict=False))
def do_update_db(written_abspath_u):
filecap = file_node.get_uri()
last_downloaded_uri = filecap
last_downloaded_timestamp = now
written_pathinfo = get_pathinfo(written_abspath_u)
+
if not written_pathinfo.exists and not metadata.get('deleted', False):
raise Exception("downloaded object %s disappeared" % quote_local_unicode_path(written_abspath_u))
self._log("download failed: %s" % (str(f),))
self._count('objects_failed')
return f
+
+ if os.path.isfile(conflict_path_u):
+ def fail(res):
+ raise ConflictError("download failed: already conflicted: %r" % (relpath_u,))
+ d.addCallback(fail)
+ else:
+ is_conflict = False
+ if self._db.check_file_db_exists(relpath_u):
+ dmd_last_downloaded_uri = metadata.get('last_downloaded_uri', None)
+ local_last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
+ print "metadata %r" % (metadata,)
+ print "<<<<--- if %r != %r" % (dmd_last_downloaded_uri, local_last_downloaded_uri)
+ if dmd_last_downloaded_uri is not None and local_last_downloaded_uri is not None:
+ if dmd_last_downloaded_uri != local_last_downloaded_uri:
+ is_conflict = True
+ self._count('objects_conflicted')
+
+ #dmd_last_uploaded_uri = metadata.get('last_uploaded_uri', None)
+ #local_last_uploaded_uri = ...
+
+ if relpath_u.endswith(u"/"):
+ if metadata.get('deleted', False):
+ self._log("rmdir(%r) ignored" % (abspath_u,))
+ else:
+ self._log("mkdir(%r)" % (abspath_u,))
+ d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
+ d.addCallback(lambda ign: abspath_u)
+ else:
+ if metadata.get('deleted', False):
+ d.addCallback(lambda ign: self._rename_deleted_file(abspath_u))
+ else:
+ d.addCallback(lambda ign: file_node.download_best_version())
+ d.addCallback(lambda contents: self._write_downloaded_file(abspath_u, contents,
+ is_conflict=is_conflict))
+
d.addCallbacks(do_update_db, failed)
+
def remove_from_pending(res):
self._pending.remove(relpath_u)
return res
d.addBoth(remove_from_pending)
+ def trap_conflicts(f):
+ f.trap(ConflictError)
+ return None
+ d.addErrback(trap_conflicts)
return d
-
- def _unlink_deleted_file(self, abspath_u, result):
- try:
- self._log('unlinking: %s' % (abspath_u,))
- shutil.move(abspath_u, abspath_u + '.backup')
- except IOError:
- self._log("Already gone: '%s'" % (abspath_u,))
- return abspath_u