raise
+def is_new_file(pathinfo, db_entry):
+    """Return True if the file described by *pathinfo* should be treated as
+    new or changed relative to *db_entry* (the magic-folder db row for the
+    same relpath), i.e. it needs to be uploaded.
+
+    *pathinfo* is assumed to expose .exists, .size, .ctime and .mtime, and
+    *db_entry* .size, .ctime, .mtime — TODO confirm against PathInfo and the
+    db-entry type at their definitions (not visible in this patch).
+    """
+    # Never seen before: no db record means the file is new by definition.
+    if db_entry is None:
+        return True
+
+    # File is absent on disk and the db already records it as deleted
+    # (size is None): nothing has changed, so it is not "new".
+    if not pathinfo.exists and db_entry.size is None:
+        return False
+
+    # Otherwise, any difference in (size, ctime, mtime) versus the db
+    # snapshot means the file was modified (or recreated) since last upload.
+    return ((pathinfo.size, pathinfo.ctime, pathinfo.mtime) !=
+            (db_entry.size, db_entry.ctime, db_entry.mtime))
+
+
class MagicFolder(service.MultiService):
name = 'magic-folder'
- def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile,
+ def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile, umask,
pending_delay=1.0, clock=None):
precondition_abspath(local_path_u)
self.uploader = Uploader(client, local_path_u, db, upload_dirnode, pending_delay, clock, immediate)
self.downloader = Downloader(client, local_path_u, db, collective_dirnode,
- upload_dirnode.get_readonly_uri(), clock, self.uploader.is_pending)
+ upload_dirnode.get_readonly_uri(), clock, self.uploader.is_pending, umask)
def startService(self):
# TODO: why is this being called more than once?
last_downloaded_timestamp = now # is this correct?
- if self._db.is_new_file(pathinfo, relpath_u):
+ if is_new_file(pathinfo, db_entry):
new_version = db_entry.version + 1
else:
self._log("Not uploading %r" % (relpath_u,))
if db_entry is None:
new_version = 0
- elif self._db.is_new_file(pathinfo, relpath_u):
+ elif is_new_file(pathinfo, db_entry):
new_version = db_entry.version + 1
else:
self._log("Not uploading %r" % (relpath_u,))
# ensure parent directory exists
head, tail = os.path.split(abspath_u)
- mode = 0777 # XXX
- fileutil.make_dirs(head, mode)
- fileutil.write(replacement_path_u, file_contents)
+ old_mask = os.umask(self._umask)
+ try:
+ fileutil.make_dirs(head, (~ self._umask) & 0777)
+ fileutil.write(replacement_path_u, file_contents)
+ finally:
+ os.umask(old_mask)
+
os.utime(replacement_path_u, (now, now - self.FUDGE_SECONDS))
if is_conflict:
print "0x00 ------------ <><> is conflict; calling _rename_conflicted_file... %r %r" % (abspath_u, replacement_path_u)
REMOTE_SCAN_INTERVAL = 3 # facilitates tests
def __init__(self, client, local_path_u, db, collective_dirnode,
- upload_readonly_dircap, clock, is_upload_pending):
+ upload_readonly_dircap, clock, is_upload_pending, umask):
QueueMixin.__init__(self, client, local_path_u, db, 'downloader', clock)
if not IDirectoryNode.providedBy(collective_dirnode):
self._collective_dirnode = collective_dirnode
self._upload_readonly_dircap = upload_readonly_dircap
self._is_upload_pending = is_upload_pending
-
+ self._umask = umask
self._turn_delay = self.REMOTE_SCAN_INTERVAL
def start_scanning(self):
files = self._db.get_all_relpaths()
self._log("all files %s" % files)
- d = self._scan_remote_collective()
+ d = self._scan_remote_collective(scan_self=True)
d.addBoth(self._logcb, "after _scan_remote_collective 0")
self._turn_deque()
return d
d.addBoth(self._logcb, "end of _scan_remote_dmd")
return d
- def _scan_remote_collective(self):
+ def _scan_remote_collective(self, scan_self=False):
self._log("_scan_remote_collective")
scan_batch = {} # path -> [(filenode, metadata)]
d2 = defer.succeed(None)
for dir_name in dirmap:
(dirnode, metadata) = dirmap[dir_name]
- if dirnode.get_readonly_uri() != self._upload_readonly_dircap:
+ if scan_self or dirnode.get_readonly_uri() != self._upload_readonly_dircap:
d2.addCallback(lambda ign, dir_name=dir_name, dirnode=dirnode:
self._scan_remote_dmd(dir_name, dirnode, scan_batch))
def _err(f, dir_name=dir_name):
self._deque.append( (relpath_u, file_node, metadata) )
else:
self._log("Excluding %r" % (relpath_u,))
- self._count('objects_excluded')
self._call_hook(None, 'processed')
self._log("deque after = %r" % (self._deque,))
else:
is_conflict = False
db_entry = self._db.get_db_entry(relpath_u)
+ dmd_last_downloaded_uri = metadata.get('last_downloaded_uri', None)
+ dmd_last_uploaded_uri = metadata.get('last_uploaded_uri', None)
if db_entry:
- dmd_last_downloaded_uri = metadata.get('last_downloaded_uri', None)
- print "metadata %r" % (metadata,)
- print "<<<<--- if %r != %r" % (dmd_last_downloaded_uri, db_entry.last_downloaded_uri)
if dmd_last_downloaded_uri is not None and db_entry.last_downloaded_uri is not None:
if dmd_last_downloaded_uri != db_entry.last_downloaded_uri:
is_conflict = True
self._count('objects_conflicted')
- else:
- dmd_last_uploaded_uri = metadata.get('last_uploaded_uri', None)
- print ">>>> if %r != %r" % (dmd_last_uploaded_uri, db_entry.last_uploaded_uri)
- if dmd_last_uploaded_uri is not None and dmd_last_uploaded_uri != db_entry.last_uploaded_uri:
- is_conflict = True
- self._count('objects_conflicted')
- else:
- # XXX todo: mark as conflict if file is in pending upload set
- if self._is_upload_pending(relpath_u):
- is_conflict = True
- self._count('objects_conflicted')
+ elif dmd_last_uploaded_uri is not None and dmd_last_uploaded_uri != db_entry.last_uploaded_uri:
+ is_conflict = True
+ self._count('objects_conflicted')
+ elif self._is_upload_pending(relpath_u):
+ is_conflict = True
+ self._count('objects_conflicted')
if relpath_u.endswith(u"/"):
if metadata.get('deleted', False):