collective_dirnode = self._client.create_node_from_uri(collective_dircap)
self.uploader = Uploader(client, local_path_u, db, upload_dirnode, pending_delay, clock, immediate)
- self.downloader = Downloader(client, local_path_u, db, collective_dirnode, upload_dirnode.get_readonly_uri(), clock)
+ self.downloader = Downloader(client, local_path_u, db, collective_dirnode,
+ upload_dirnode.get_readonly_uri(), clock, self.uploader.is_pending)
def startService(self):
# TODO: why is this being called more than once?
return d
+ def is_pending(self, relpath_u):
+ return relpath_u in self._pending
+
def _notify(self, opaque, path, events_mask):
self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(self._inotify.humanReadableMask(events_mask))))
relpath_u = self._get_relpath(path)
# FIXME merge this with the 'isfile' case.
self._log("notified object %s disappeared (this is normal)" % quote_filepath(fp))
self._count('objects_disappeared')
- if not self._db.check_file_db_exists(relpath_u):
+
+ db_entry = self._db.get_db_entry(relpath_u)
+ if db_entry is None:
return None
- last_downloaded_timestamp = now
- last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
+ last_downloaded_timestamp = now # TODO: confirm 'now' is the right timestamp to record for a deletion (vs. the last actual download time)
- current_version = self._db.get_local_file_version(relpath_u)
- if current_version is None:
- new_version = 0
- elif self._db.is_new_file(pathinfo, relpath_u):
- new_version = current_version + 1
+ if self._db.is_new_file(pathinfo, relpath_u):
+ new_version = db_entry.version + 1
else:
self._log("Not uploading %r" % (relpath_u,))
self._count('objects_not_uploaded')
metadata = { 'version': new_version,
'deleted': True,
'last_downloaded_timestamp': last_downloaded_timestamp }
- if last_downloaded_uri is not None:
- metadata['last_downloaded_uri'] = last_downloaded_uri
+ if db_entry.last_downloaded_uri is not None:
+ metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri
empty_uploadable = Data("", self._client.convergence)
d2 = self._upload_dirnode.add_file(encoded_path_u, empty_uploadable,
def _add_db_entry(filenode):
filecap = filenode.get_uri()
+ last_downloaded_uri = metadata.get('last_downloaded_uri', None)
self._db.did_upload_version(relpath_u, new_version, filecap,
- last_downloaded_uri, last_downloaded_timestamp, pathinfo)
+ last_downloaded_uri, last_downloaded_timestamp,
+ pathinfo)
self._count('files_uploaded')
d2.addCallback(_add_db_entry)
return d2
upload_d.addCallback(lambda ign: self._scan(relpath_u))
return upload_d
elif pathinfo.isfile:
- last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
+ db_entry = self._db.get_db_entry(relpath_u)
+
last_downloaded_timestamp = now
- current_version = self._db.get_local_file_version(relpath_u)
- if current_version is None:
+ if db_entry is None:
new_version = 0
elif self._db.is_new_file(pathinfo, relpath_u):
- new_version = current_version + 1
+ new_version = db_entry.version + 1
else:
self._log("Not uploading %r" % (relpath_u,))
self._count('objects_not_uploaded')
metadata = { 'version': new_version,
'last_downloaded_timestamp': last_downloaded_timestamp }
- if last_downloaded_uri is not None:
- metadata['last_downloaded_uri'] = last_downloaded_uri
+ if db_entry is not None and db_entry.last_downloaded_uri is not None:
+ metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri
uploadable = FileName(unicode_from_filepath(fp), self._client.convergence)
d2 = self._upload_dirnode.add_file(encoded_path_u, uploadable,
filecap = filenode.get_uri()
last_downloaded_uri = metadata.get('last_downloaded_uri', None)
self._db.did_upload_version(relpath_u, new_version, filecap,
- last_downloaded_uri, last_downloaded_timestamp, pathinfo)
+ last_downloaded_uri, last_downloaded_timestamp,
+ pathinfo)
self._count('files_uploaded')
d2.addCallback(_add_db_entry)
return d2
self._log('renaming deleted file to backup: %s' % (abspath_u,))
try:
fileutil.rename_no_overwrite(abspath_u, abspath_u + u'.backup')
- except IOError:
- # XXX is this the correct error?
+ except OSError:
self._log("Already gone: '%s'" % (abspath_u,))
return abspath_u
class Downloader(QueueMixin, WriteFileMixin):
REMOTE_SCAN_INTERVAL = 3 # facilitates tests
- def __init__(self, client, local_path_u, db, collective_dirnode, upload_readonly_dircap, clock):
+ def __init__(self, client, local_path_u, db, collective_dirnode,
+ upload_readonly_dircap, clock, is_upload_pending):
QueueMixin.__init__(self, client, local_path_u, db, 'downloader', clock)
if not IDirectoryNode.providedBy(collective_dirnode):
self._collective_dirnode = collective_dirnode
self._upload_readonly_dircap = upload_readonly_dircap
+ self._is_upload_pending = is_upload_pending
self._turn_delay = self.REMOTE_SCAN_INTERVAL
files = self._db.get_all_relpaths()
self._log("all files %s" % files)
- d = self._scan_remote_collective()
+ d = self._scan_remote_collective(scan_self=True)
d.addBoth(self._logcb, "after _scan_remote_collective 0")
self._turn_deque()
return d
self._log("nope")
return False
self._log("yep")
- v = self._db.get_local_file_version(relpath_u)
- self._log("v = %r" % (v,))
- return (v is None or v < remote_version)
+ db_entry = self._db.get_db_entry(relpath_u)
+ if db_entry is None:
+ return True
+ self._log("version %r" % (db_entry.version,))
+ return (db_entry.version < remote_version)
def _get_local_latest(self, relpath_u):
"""
"""
if not self._get_filepath(relpath_u).exists():
return None
- return self._db.get_local_file_version(relpath_u)
+ db_entry = self._db.get_db_entry(relpath_u)
+ return None if db_entry is None else db_entry.version
def _get_collective_latest_file(self, filename):
"""
d.addBoth(self._logcb, "end of _scan_remote_dmd")
return d
- def _scan_remote_collective(self):
+ def _scan_remote_collective(self, scan_self=False):
self._log("_scan_remote_collective")
scan_batch = {} # path -> [(filenode, metadata)]
d2 = defer.succeed(None)
for dir_name in dirmap:
(dirnode, metadata) = dirmap[dir_name]
- if dirnode.get_readonly_uri() != self._upload_readonly_dircap:
+ if scan_self or dirnode.get_readonly_uri() != self._upload_readonly_dircap:
d2.addCallback(lambda ign, dir_name=dir_name, dirnode=dirnode:
self._scan_remote_dmd(dir_name, dirnode, scan_batch))
def _err(f, dir_name=dir_name):
d.addCallback(fail)
else:
is_conflict = False
- if self._db.check_file_db_exists(relpath_u):
- dmd_last_downloaded_uri = metadata.get('last_downloaded_uri', None)
- local_last_downloaded_uri = self._db.get_last_downloaded_uri(relpath_u)
- print "metadata %r" % (metadata,)
- print "<<<<--- if %r != %r" % (dmd_last_downloaded_uri, local_last_downloaded_uri)
- if dmd_last_downloaded_uri is not None and local_last_downloaded_uri is not None:
- if dmd_last_downloaded_uri != local_last_downloaded_uri:
+ db_entry = self._db.get_db_entry(relpath_u)
+ dmd_last_downloaded_uri = metadata.get('last_downloaded_uri', None)
+ dmd_last_uploaded_uri = metadata.get('last_uploaded_uri', None)
+ if db_entry:
+ if dmd_last_downloaded_uri is not None and db_entry.last_downloaded_uri is not None:
+ if dmd_last_downloaded_uri != db_entry.last_downloaded_uri:
is_conflict = True
self._count('objects_conflicted')
- else:
- dmd_last_uploaded_uri = metadata.get('last_uploaded_uri', None)
- local_last_uploaded_uri = self._db.get_last_uploaded_uri(relpath_u)
- print ">>>> if %r != %r" % (dmd_last_uploaded_uri, local_last_uploaded_uri)
- if dmd_last_uploaded_uri != local_last_uploaded_uri:
- is_conflict = True
- self._count('objects_conflicted')
+ elif dmd_last_uploaded_uri is not None and dmd_last_uploaded_uri != db_entry.last_uploaded_uri:
+ is_conflict = True
+ self._count('objects_conflicted')
+ elif self._is_upload_pending(relpath_u):
+ is_conflict = True
+ self._count('objects_conflicted')
+
if relpath_u.endswith(u"/"):
if metadata.get('deleted', False):
self._log("rmdir(%r) ignored" % (abspath_u,))