sudo umount '$(TMPDIR)'
rmdir '$(TMPDIR)'
+.PHONY: smoketest
+smoketest:
+ -python ./src/allmydata/test/check_magicfolder_smoke.py kill
+ -rm -rf smoke_magicfolder/
+ python ./src/allmydata/test/check_magicfolder_smoke.py
# code coverage: install the "coverage" package from PyPI, do "make test-coverage" to
# do a unit test run with coverage-gathering enabled, then use "make coverage-output" to
}
def __init__(self, basedir="."):
+ #print "Client.__init__(%r)" % (basedir,)
node.Node.__init__(self, basedir)
self.connected_enough_d = defer.Deferred()
self.started_timestamp = time.time()
# ControlServer and Helper are attached after Tub startup
self.init_ftp_server()
self.init_sftp_server()
- self.init_drop_uploader()
+ self.init_magic_folder()
# If the node sees an exit_trigger file, it will poll every second to see
# whether the file still exists, and what its mtime is. If the file does not
sftp_portstr, pubkey_file, privkey_file)
s.setServiceParent(self)
- def init_drop_uploader(self):
+ def init_magic_folder(self):
+ #print "init_magic_folder"
if self.get_config("drop_upload", "enabled", False, boolean=True):
- if self.get_config("drop_upload", "upload.dircap", None):
- raise OldConfigOptionError("The [drop_upload]upload.dircap option is no longer supported; please "
- "put the cap in a 'private/drop_upload_dircap' file, and delete this option.")
-
- upload_dircap = self.get_or_create_private_config("drop_upload_dircap")
- local_dir_utf8 = self.get_config("drop_upload", "local.directory")
-
- try:
- from allmydata.frontends import drop_upload
- s = drop_upload.DropUploader(self, upload_dircap, local_dir_utf8)
- s.setServiceParent(self)
- s.startService()
-
- # start processing the upload queue when we've connected to enough servers
- self.connected_enough_d.addCallback(s.ready)
- except Exception, e:
- self.log("couldn't start drop-uploader: %r", args=(e,))
+ raise OldConfigOptionError("The [drop_upload] section must be renamed to [magic_folder].\n"
+ "See docs/frontends/magic-folder.rst for more information.")
+
+ if self.get_config("magic_folder", "enabled", False, boolean=True):
+ #print "magic folder enabled"
+ upload_dircap = self.get_private_config("magic_folder_dircap")
+ collective_dircap = self.get_private_config("collective_dircap")
+
+ local_dir_config = self.get_config("magic_folder", "local.directory").decode("utf-8")
+ local_dir = abspath_expanduser_unicode(local_dir_config, base=self.basedir)
+
+ dbfile = os.path.join(self.basedir, "private", "magicfolderdb.sqlite")
+ dbfile = abspath_expanduser_unicode(dbfile)
+
+ from allmydata.frontends import magic_folder
+ umask = int(self.get_config("magic_folder", "download.umask", "077"), 8) # config values are strings; umask is octal
+ s = magic_folder.MagicFolder(self, upload_dircap, collective_dircap, local_dir, dbfile, umask)
+ s.setServiceParent(self)
+ s.startService()
+
+ # start processing the upload queue when we've connected to enough servers
+ self.connected_enough_d.addCallback(lambda ign: s.ready())
def _check_exit_trigger(self, exit_trigger_file):
if os.path.exists(exit_trigger_file):
+++ /dev/null
-
-import sys
-
-from twisted.internet import defer
-from twisted.python.filepath import FilePath
-from twisted.application import service
-from foolscap.api import eventually
-
-from allmydata.interfaces import IDirectoryNode
-
-from allmydata.util.encodingutil import quote_output, get_filesystem_encoding
-from allmydata.util.fileutil import abspath_expanduser_unicode
-from allmydata.immutable.upload import FileName
-
-
-class DropUploader(service.MultiService):
- name = 'drop-upload'
-
- def __init__(self, client, upload_dircap, local_dir_utf8, inotify=None):
- service.MultiService.__init__(self)
-
- try:
- local_dir_u = abspath_expanduser_unicode(local_dir_utf8.decode('utf-8'))
- if sys.platform == "win32":
- local_dir = local_dir_u
- else:
- local_dir = local_dir_u.encode(get_filesystem_encoding())
- except (UnicodeEncodeError, UnicodeDecodeError):
- raise AssertionError("The '[drop_upload] local.directory' parameter %s was not valid UTF-8 or "
- "could not be represented in the filesystem encoding."
- % quote_output(local_dir_utf8))
-
- self._client = client
- self._stats_provider = client.stats_provider
- self._convergence = client.convergence
- self._local_path = FilePath(local_dir)
-
- self.is_upload_ready = False
-
- if inotify is None:
- from twisted.internet import inotify
- self._inotify = inotify
-
- if not self._local_path.exists():
- raise AssertionError("The '[drop_upload] local.directory' parameter was %s but there is no directory at that location." % quote_output(local_dir_u))
- if not self._local_path.isdir():
- raise AssertionError("The '[drop_upload] local.directory' parameter was %s but the thing at that location is not a directory." % quote_output(local_dir_u))
-
- # TODO: allow a path rather than a cap URI.
- self._parent = self._client.create_node_from_uri(upload_dircap)
- if not IDirectoryNode.providedBy(self._parent):
- raise AssertionError("The URI in 'private/drop_upload_dircap' does not refer to a directory.")
- if self._parent.is_unknown() or self._parent.is_readonly():
- raise AssertionError("The URI in 'private/drop_upload_dircap' is not a writecap to a directory.")
-
- self._uploaded_callback = lambda ign: None
-
- self._notifier = inotify.INotify()
-
- # We don't watch for IN_CREATE, because that would cause us to read and upload a
- # possibly-incomplete file before the application has closed it. There should always
- # be an IN_CLOSE_WRITE after an IN_CREATE (I think).
- # TODO: what about IN_MOVE_SELF or IN_UNMOUNT?
- mask = inotify.IN_CLOSE_WRITE | inotify.IN_MOVED_TO | inotify.IN_ONLYDIR
- self._notifier.watch(self._local_path, mask=mask, callbacks=[self._notify])
-
- def startService(self):
- service.MultiService.startService(self)
- d = self._notifier.startReading()
- self._stats_provider.count('drop_upload.dirs_monitored', 1)
- return d
-
- def upload_ready(self):
- """upload_ready is used to signal us to start
- processing the upload items...
- """
- self.is_upload_ready = True
-
- def _notify(self, opaque, path, events_mask):
- self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(self._inotify.humanReadableMask(events_mask))))
-
- self._stats_provider.count('drop_upload.files_queued', 1)
- eventually(self._process, opaque, path, events_mask)
-
- def _process(self, opaque, path, events_mask):
- d = defer.succeed(None)
-
- # FIXME: if this already exists as a mutable file, we replace the directory entry,
- # but we should probably modify the file (as the SFTP frontend does).
- def _add_file(ign):
- name = path.basename()
- # on Windows the name is already Unicode
- if not isinstance(name, unicode):
- name = name.decode(get_filesystem_encoding())
-
- u = FileName(path.path, self._convergence)
- return self._parent.add_file(name, u)
- d.addCallback(_add_file)
-
- def _succeeded(ign):
- self._stats_provider.count('drop_upload.files_queued', -1)
- self._stats_provider.count('drop_upload.files_uploaded', 1)
- def _failed(f):
- self._stats_provider.count('drop_upload.files_queued', -1)
- if path.exists():
- self._log("drop-upload: %r failed to upload due to %r" % (path.path, f))
- self._stats_provider.count('drop_upload.files_failed', 1)
- return f
- else:
- self._log("drop-upload: notified file %r disappeared "
- "(this is normal for temporary files): %r" % (path.path, f))
- self._stats_provider.count('drop_upload.files_disappeared', 1)
- return None
- d.addCallbacks(_succeeded, _failed)
- d.addBoth(self._uploaded_callback)
- return d
-
- def set_uploaded_callback(self, callback):
- """This sets a function that will be called after a file has been uploaded."""
- self._uploaded_callback = callback
-
- def finish(self, for_tests=False):
- self._notifier.stopReading()
- self._stats_provider.count('drop_upload.dirs_monitored', -1)
- if for_tests and hasattr(self._notifier, 'wait_until_stopped'):
- return self._notifier.wait_until_stopped()
- else:
- return defer.succeed(None)
-
- def _log(self, msg):
- self._client.log(msg)
- #open("events", "ab+").write(msg)
--- /dev/null
+
+import sys, os
+import os.path
+from collections import deque
+import time
+
+from twisted.internet import defer, reactor, task
+from twisted.python.failure import Failure
+from twisted.python import runtime
+from twisted.application import service
+
+from allmydata.util import fileutil
+from allmydata.interfaces import IDirectoryNode
+from allmydata.util import log
+from allmydata.util.fileutil import precondition_abspath, get_pathinfo, ConflictError
+from allmydata.util.assertutil import precondition, _assert
+from allmydata.util.deferredutil import HookMixin
+from allmydata.util.encodingutil import listdir_filepath, to_filepath, \
+ extend_filepath, unicode_from_filepath, unicode_segments_from, \
+ quote_filepath, quote_local_unicode_path, quote_output, FilenameEncodingError
+from allmydata.immutable.upload import FileName, Data
+from allmydata import magicfolderdb, magicpath
+
+
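+# Value of the Linux inotify IN_EXCL_UNLINK flag (from <sys/inotify.h>),
+# presumably defined here because Twisted's inotify module does not export it.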
+IN_EXCL_UNLINK = 0x04000000L
+
+def get_inotify_module():
+ try:
+ if sys.platform == "win32":
+ from allmydata.windows import inotify
+ elif runtime.platform.supportsINotify():
+ from twisted.internet import inotify
+ else:
+ raise NotImplementedError("filesystem notification needed for Magic Folder is not supported.\n"
+ "This currently requires Linux or Windows.")
+ return inotify
+ except (ImportError, AttributeError) as e:
+ log.msg(e)
+ if sys.platform == "win32":
+ raise NotImplementedError("filesystem notification needed for Magic Folder is not supported.\n"
+ "Windows support requires at least Vista, and has only been tested on Windows 7.")
+ raise
+
+
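+# A file is "new" (i.e. needs uploading) if we have no db entry for it, or if
+# its (size, ctime, mtime) differ from the db entry; a nonexistent file whose
+# db entry already records size=None (a known deletion) is not new.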
+def is_new_file(pathinfo, db_entry):
+ if db_entry is None:
+ return True
+
+ if not pathinfo.exists and db_entry.size is None:
+ return False
+
+ return ((pathinfo.size, pathinfo.ctime, pathinfo.mtime) !=
+ (db_entry.size, db_entry.ctime, db_entry.mtime))
+
+
+class MagicFolder(service.MultiService):
+ name = 'magic-folder'
+
+ def __init__(self, client, upload_dircap, collective_dircap, local_path_u, dbfile, umask,
+ pending_delay=1.0, clock=None):
+ precondition_abspath(local_path_u)
+
+ service.MultiService.__init__(self)
+
+ immediate = clock is not None
+ clock = clock or reactor
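+ # a caller-supplied clock implies a test run: queued items are then
+ # processed immediately rather than via callLater (see Uploader._notify)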
+ db = magicfolderdb.get_magicfolderdb(dbfile, create_version=(magicfolderdb.SCHEMA_v1, 1))
+ if db is None:
+ # raise rather than return a Failure: __init__ return values are ignored
+ raise Exception('Unable to load magic folder db.')
+
+ # for tests
+ self._client = client
+ self._db = db
+
+ upload_dirnode = self._client.create_node_from_uri(upload_dircap)
+ collective_dirnode = self._client.create_node_from_uri(collective_dircap)
+
+ self.uploader = Uploader(client, local_path_u, db, upload_dirnode, pending_delay, clock, immediate)
+ self.downloader = Downloader(client, local_path_u, db, collective_dirnode,
+ upload_dirnode.get_readonly_uri(), clock, self.uploader.is_pending, umask)
+
+ def startService(self):
+ # TODO: why is this being called more than once?
+ if self.running:
+ return defer.succeed(None)
+ print "%r.startService" % (self,)
+ service.MultiService.startService(self)
+ return self.uploader.start_monitoring()
+
+ def ready(self):
+ """ready is used to signal us to start
+ processing the upload and download items...
+ """
+ d = self.uploader.start_scanning()
+ d2 = self.downloader.start_scanning()
+ d.addCallback(lambda ign: d2)
+ return d
+
+ def finish(self):
+ print "finish"
+ d = self.uploader.stop()
+ d2 = self.downloader.stop()
+ d.addCallback(lambda ign: d2)
+ return d
+
+ def remove_service(self):
+ return service.MultiService.disownServiceParent(self)
+
+
+class QueueMixin(HookMixin):
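+ # Shared queue machinery for Uploader and Downloader: subclasses append
+ # work items to self._deque and implement _process() and
+ # _when_queue_is_empty(); _turn_deque() drains one item per turn.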
+ def __init__(self, client, local_path_u, db, name, clock):
+ self._client = client
+ self._local_path_u = local_path_u
+ self._local_filepath = to_filepath(local_path_u)
+ self._db = db
+ self._name = name
+ self._clock = clock
+ self._hooks = {'processed': None, 'started': None}
+ self.started_d = self.set_hook('started')
+
+ if not self._local_filepath.exists():
+ raise AssertionError("The '[magic_folder] local.directory' parameter was %s "
+ "but there is no directory at that location."
+ % quote_local_unicode_path(self._local_path_u))
+ if not self._local_filepath.isdir():
+ raise AssertionError("The '[magic_folder] local.directory' parameter was %s "
+ "but the thing at that location is not a directory."
+ % quote_local_unicode_path(self._local_path_u))
+
+ self._deque = deque()
+ self._lazy_tail = defer.succeed(None)
+ self._stopped = False
+ self._turn_delay = 0
+
+ def _get_filepath(self, relpath_u):
+ self._log("_get_filepath(%r)" % (relpath_u,))
+ return extend_filepath(self._local_filepath, relpath_u.split(u"/"))
+
+ def _get_relpath(self, filepath):
+ self._log("_get_relpath(%r)" % (filepath,))
+ segments = unicode_segments_from(filepath, self._local_filepath)
+ self._log("segments = %r" % (segments,))
+ return u"/".join(segments)
+
+ def _count(self, counter_name, delta=1):
+ ctr = 'magic_folder.%s.%s' % (self._name, counter_name)
+ self._log("%s += %r" % (counter_name, delta))
+ self._client.stats_provider.count(ctr, delta)
+
+ def _logcb(self, res, msg):
+ self._log("%s: %r" % (msg, res))
+ return res
+
+ def _log(self, msg):
+ s = "Magic Folder %s %s: %s" % (quote_output(self._client.nickname), self._name, msg)
+ self._client.log(s)
+ print s
+ #open("events", "ab+").write(msg)
+
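+ # _turn_deque pops one item, chains its processing onto self._lazy_tail,
+ # and schedules the next turn after self._turn_delay, so items are
+ # handled strictly one at a time.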
+ def _turn_deque(self):
+ self._log("_turn_deque")
+ if self._stopped:
+ self._log("stopped")
+ return
+ try:
+ item = self._deque.pop()
+ self._log("popped %r" % (item,))
+ self._count('objects_queued', -1)
+ except IndexError:
+ self._log("deque is now empty")
+ self._lazy_tail.addCallback(lambda ign: self._when_queue_is_empty())
+ else:
+ self._lazy_tail.addCallback(lambda ign: self._process(item))
+ self._lazy_tail.addBoth(self._call_hook, 'processed')
+ self._lazy_tail.addErrback(log.err)
+ self._lazy_tail.addCallback(lambda ign: task.deferLater(self._clock, self._turn_delay, self._turn_deque))
+
+
+class Uploader(QueueMixin):
+ def __init__(self, client, local_path_u, db, upload_dirnode, pending_delay, clock,
+ immediate=False):
+ QueueMixin.__init__(self, client, local_path_u, db, 'uploader', clock)
+
+ self.is_ready = False
+ self._immediate = immediate
+
+ if not IDirectoryNode.providedBy(upload_dirnode):
+ raise AssertionError("The URI in '%s' does not refer to a directory."
+ % os.path.join('private', 'magic_folder_dircap'))
+ if upload_dirnode.is_unknown() or upload_dirnode.is_readonly():
+ raise AssertionError("The URI in '%s' is not a writecap to a directory."
+ % os.path.join('private', 'magic_folder_dircap'))
+
+ self._upload_dirnode = upload_dirnode
+ self._inotify = get_inotify_module()
+ self._notifier = self._inotify.INotify()
+ self._pending = set()
+
+ if hasattr(self._notifier, 'set_pending_delay'):
+ self._notifier.set_pending_delay(pending_delay)
+
+ # TODO: what about IN_MOVE_SELF and IN_UNMOUNT?
+ #
+ self.mask = ( self._inotify.IN_CREATE
+ | self._inotify.IN_CLOSE_WRITE
+ | self._inotify.IN_MOVED_TO
+ | self._inotify.IN_MOVED_FROM
+ | self._inotify.IN_DELETE
+ | self._inotify.IN_ONLYDIR
+ | IN_EXCL_UNLINK
+ )
+ self._notifier.watch(self._local_filepath, mask=self.mask, callbacks=[self._notify],
+ recursive=True)
+
+ def start_monitoring(self):
+ self._log("start_monitoring")
+ d = defer.succeed(None)
+ d.addCallback(lambda ign: self._notifier.startReading())
+ d.addCallback(lambda ign: self._count('dirs_monitored'))
+ d.addBoth(self._call_hook, 'started')
+ return d
+
+ def stop(self):
+ self._log("stop")
+ self._notifier.stopReading()
+ self._count('dirs_monitored', -1)
+ if hasattr(self._notifier, 'wait_until_stopped'):
+ d = self._notifier.wait_until_stopped()
+ else:
+ d = defer.succeed(None)
+ d.addCallback(lambda ign: self._lazy_tail)
+ return d
+
+ def start_scanning(self):
+ self._log("start_scanning")
+ self.is_ready = True
+ self._pending = self._db.get_all_relpaths()
+ self._log("all_files %r" % (self._pending))
+ d = self._scan(u"")
+ def _add_pending(ign):
+ # This adds all of the files that were in the db but not already processed
+ # (normally because they have been deleted on disk).
+ self._log("adding %r" % (self._pending))
+ self._deque.extend(self._pending)
+ d.addCallback(_add_pending)
+ d.addCallback(lambda ign: self._turn_deque())
+ return d
+
+ def _scan(self, reldir_u):
+ self._log("scan %r" % (reldir_u,))
+ fp = self._get_filepath(reldir_u)
+ try:
+ children = listdir_filepath(fp)
+ except EnvironmentError:
+ raise Exception("WARNING: magic folder: permission denied on directory %s"
+ % quote_filepath(fp))
+ except FilenameEncodingError:
+ raise Exception("WARNING: magic folder: could not list directory %s due to a filename encoding error"
+ % quote_filepath(fp))
+
+ d = defer.succeed(None)
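+ # note: child=child below binds the current loop value now, since the
+ # lambda only runs later when the Deferred fires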
+ for child in children:
+ _assert(isinstance(child, unicode), child=child)
+ d.addCallback(lambda ign, child=child:
+ ("%s/%s" % (reldir_u, child) if reldir_u else child))
+ def _add_pending(relpath_u):
+ if magicpath.should_ignore_file(relpath_u):
+ return None
+
+ self._pending.add(relpath_u)
+ return relpath_u
+ d.addCallback(_add_pending)
+ # This call to _process doesn't go through the deque, and probably should.
+ d.addCallback(self._process)
+ d.addBoth(self._call_hook, 'processed')
+ d.addErrback(log.err)
+
+ return d
+
+ def is_pending(self, relpath_u):
+ return relpath_u in self._pending
+
+ def _notify(self, opaque, path, events_mask):
+ self._log("inotify event %r, %r, %r\n" % (opaque, path, ', '.join(self._inotify.humanReadableMask(events_mask))))
+ relpath_u = self._get_relpath(path)
+
+ # We filter out IN_CREATE events not associated with a directory.
+ # Acting on IN_CREATE for files could cause us to read and upload
+ # a possibly-incomplete file before the application has closed it.
+ # There should always be an IN_CLOSE_WRITE after an IN_CREATE, I think.
+ # It isn't possible to avoid watching for IN_CREATE at all, because
+ # it is the only event notified for a directory creation.
+
+ if ((events_mask & self._inotify.IN_CREATE) != 0 and
+ (events_mask & self._inotify.IN_ISDIR) == 0):
+ self._log("ignoring event for %r (creation of non-directory)\n" % (relpath_u,))
+ return
+ if relpath_u in self._pending:
+ self._log("ignoring event for %r (already pending)" % (relpath_u,))
+ return
+ if magicpath.should_ignore_file(relpath_u):
+ self._log("ignoring event for %r (ignorable path)" % (relpath_u,))
+ return
+
+ self._log("appending %r to deque" % (relpath_u,))
+ self._deque.append(relpath_u)
+ self._pending.add(relpath_u)
+ self._count('objects_queued')
+ if self.is_ready:
+ if self._immediate: # for tests
+ self._turn_deque()
+ else:
+ self._clock.callLater(0, self._turn_deque)
+
+ def _when_queue_is_empty(self):
+ return defer.succeed(None)
+
+ def _process(self, relpath_u):
+ # Uploader
+ self._log("_process(%r)" % (relpath_u,))
+ if relpath_u is None:
+ return
+ precondition(isinstance(relpath_u, unicode), relpath_u)
+ precondition(not relpath_u.endswith(u'/'), relpath_u)
+
+ d = defer.succeed(None)
+
+ def _maybe_upload(val, now=None):
+ if now is None:
+ now = time.time()
+ fp = self._get_filepath(relpath_u)
+ pathinfo = get_pathinfo(unicode_from_filepath(fp))
+
+ self._log("about to remove %r from pending set %r" %
+ (relpath_u, self._pending))
+ self._pending.remove(relpath_u)
+ encoded_path_u = magicpath.path2magic(relpath_u)
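+ # the relpath is flattened into a single DMD child name by escaping
+ # "/" and "@" (see magicpath.path2magic)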
+
+ if not pathinfo.exists:
+ # FIXME merge this with the 'isfile' case.
+ self._log("notified object %s disappeared (this is normal)" % quote_filepath(fp))
+ self._count('objects_disappeared')
+
+ db_entry = self._db.get_db_entry(relpath_u)
+ if db_entry is None:
+ return None
+
+ last_downloaded_timestamp = now # is this correct?
+
+ if is_new_file(pathinfo, db_entry):
+ new_version = db_entry.version + 1
+ else:
+ self._log("Not uploading %r" % (relpath_u,))
+ self._count('objects_not_uploaded')
+ return
+
+ metadata = { 'version': new_version,
+ 'deleted': True,
+ 'last_downloaded_timestamp': last_downloaded_timestamp }
+ if db_entry.last_downloaded_uri is not None:
+ metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri
+
+ empty_uploadable = Data("", self._client.convergence)
+ d2 = self._upload_dirnode.add_file(encoded_path_u, empty_uploadable,
+ metadata=metadata, overwrite=True)
+
+ def _add_db_entry(filenode):
+ filecap = filenode.get_uri()
+ last_downloaded_uri = metadata.get('last_downloaded_uri', None)
+ self._db.did_upload_version(relpath_u, new_version, filecap,
+ last_downloaded_uri, last_downloaded_timestamp,
+ pathinfo)
+ self._count('files_uploaded')
+ d2.addCallback(_add_db_entry)
+ return d2
+ elif pathinfo.islink:
+ self.warn("WARNING: cannot upload symlink %s" % quote_filepath(fp))
+ return None
+ elif pathinfo.isdir:
+ if not getattr(self._notifier, 'recursive_includes_new_subdirectories', False):
+ self._notifier.watch(fp, mask=self.mask, callbacks=[self._notify], recursive=True)
+
+ uploadable = Data("", self._client.convergence)
+ encoded_path_u += magicpath.path2magic(u"/")
+ self._log("encoded_path_u = %r" % (encoded_path_u,))
+ upload_d = self._upload_dirnode.add_file(encoded_path_u, uploadable, metadata={"version":0}, overwrite=True)
+ def _succeeded(ign):
+ self._log("created subdirectory %r" % (relpath_u,))
+ self._count('directories_created')
+ def _failed(f):
+ self._log("failed to create subdirectory %r" % (relpath_u,))
+ return f
+ upload_d.addCallbacks(_succeeded, _failed)
+ upload_d.addCallback(lambda ign: self._scan(relpath_u))
+ return upload_d
+ elif pathinfo.isfile:
+ db_entry = self._db.get_db_entry(relpath_u)
+
+ last_downloaded_timestamp = now
+
+ if db_entry is None:
+ new_version = 0
+ elif is_new_file(pathinfo, db_entry):
+ new_version = db_entry.version + 1
+ else:
+ self._log("Not uploading %r" % (relpath_u,))
+ self._count('objects_not_uploaded')
+ return None
+
+ metadata = { 'version': new_version,
+ 'last_downloaded_timestamp': last_downloaded_timestamp }
+ if db_entry is not None and db_entry.last_downloaded_uri is not None:
+ metadata['last_downloaded_uri'] = db_entry.last_downloaded_uri
+
+ uploadable = FileName(unicode_from_filepath(fp), self._client.convergence)
+ d2 = self._upload_dirnode.add_file(encoded_path_u, uploadable,
+ metadata=metadata, overwrite=True)
+
+ def _add_db_entry(filenode):
+ filecap = filenode.get_uri()
+ last_downloaded_uri = metadata.get('last_downloaded_uri', None)
+ self._db.did_upload_version(relpath_u, new_version, filecap,
+ last_downloaded_uri, last_downloaded_timestamp,
+ pathinfo)
+ self._count('files_uploaded')
+ d2.addCallback(_add_db_entry)
+ return d2
+ else:
+ self.warn("WARNING: cannot process special file %s" % quote_filepath(fp))
+ return None
+
+ d.addCallback(_maybe_upload)
+
+ def _succeeded(res):
+ self._count('objects_succeeded')
+ return res
+ def _failed(f):
+ self._count('objects_failed')
+ self._log("%s while processing %r" % (f, relpath_u))
+ return f
+ d.addCallbacks(_succeeded, _failed)
+ return d
+
+ def _get_metadata(self, encoded_path_u):
+ try:
+ d = self._upload_dirnode.get_metadata_for(encoded_path_u)
+ except KeyError:
+ return Failure()
+ return d
+
+ def _get_filenode(self, encoded_path_u):
+ try:
+ d = self._upload_dirnode.get(encoded_path_u)
+ except KeyError:
+ return Failure()
+ return d
+
+
+class WriteFileMixin(object):
+ FUDGE_SECONDS = 10.0
+
+ def _get_conflicted_filename(self, abspath_u):
+ return abspath_u + u".conflict"
+
+ def _write_downloaded_file(self, abspath_u, file_contents, is_conflict=False, now=None):
+ self._log("_write_downloaded_file(%r, <%d bytes>, is_conflict=%r, now=%r)"
+ % (abspath_u, len(file_contents), is_conflict, now))
+
+ # 1. Write a temporary file, say .foo.tmp.
+ # 2. is_conflict determines whether this is an overwrite or a conflict.
+ # 3. Set the mtime of the replacement file to be T seconds before the
+ # current local time.
+ # 4. Perform a file replacement with backup filename foo.backup,
+ # replaced file foo, and replacement file .foo.tmp. If any step of
+ # this operation fails, reclassify as a conflict and stop.
+ #
+ # Returns the path of the destination file.
+
+ precondition_abspath(abspath_u)
+ replacement_path_u = abspath_u + u".tmp" # FIXME more unique
+ backup_path_u = abspath_u + u".backup"
+ if now is None:
+ now = time.time()
+
+ # ensure parent directory exists
+ head, tail = os.path.split(abspath_u)
+
+ old_mask = os.umask(self._umask)
+ try:
+ fileutil.make_dirs(head, (~ self._umask) & 0777)
+ fileutil.write(replacement_path_u, file_contents)
+ finally:
+ os.umask(old_mask)
+
+ os.utime(replacement_path_u, (now, now - self.FUDGE_SECONDS))
+ if is_conflict:
+ print "0x00 ------------ <><> is conflict; calling _rename_conflicted_file... %r %r" % (abspath_u, replacement_path_u)
+ return self._rename_conflicted_file(abspath_u, replacement_path_u)
+ else:
+ try:
+ fileutil.replace_file(abspath_u, replacement_path_u, backup_path_u)
+ return abspath_u
+ except fileutil.ConflictError:
+ return self._rename_conflicted_file(abspath_u, replacement_path_u)
+
+ def _rename_conflicted_file(self, abspath_u, replacement_path_u):
+ self._log("_rename_conflicted_file(%r, %r)" % (abspath_u, replacement_path_u))
+
+ conflict_path_u = self._get_conflicted_filename(abspath_u)
+ print "XXX rename %r %r" % (replacement_path_u, conflict_path_u)
+ if os.path.isfile(replacement_path_u):
+ print "%r exists" % (replacement_path_u,)
+ if os.path.isfile(conflict_path_u):
+ print "%r exists" % (conflict_path_u,)
+
+ fileutil.rename_no_overwrite(replacement_path_u, conflict_path_u)
+ return conflict_path_u
+
+ def _rename_deleted_file(self, abspath_u):
+ self._log('renaming deleted file to backup: %s' % (abspath_u,))
+ try:
+ fileutil.rename_no_overwrite(abspath_u, abspath_u + u'.backup')
+ except OSError:
+ self._log("Already gone: '%s'" % (abspath_u,))
+ return abspath_u
+
+
+class Downloader(QueueMixin, WriteFileMixin):
+ REMOTE_SCAN_INTERVAL = 3 # seconds between polls of the collective; kept small to facilitate tests
+
+ def __init__(self, client, local_path_u, db, collective_dirnode,
+ upload_readonly_dircap, clock, is_upload_pending, umask):
+ QueueMixin.__init__(self, client, local_path_u, db, 'downloader', clock)
+
+ if not IDirectoryNode.providedBy(collective_dirnode):
+ raise AssertionError("The URI in '%s' does not refer to a directory."
+ % os.path.join('private', 'collective_dircap'))
+ if collective_dirnode.is_unknown() or not collective_dirnode.is_readonly():
+ raise AssertionError("The URI in '%s' is not a readonly cap to a directory."
+ % os.path.join('private', 'collective_dircap'))
+
+ self._collective_dirnode = collective_dirnode
+ self._upload_readonly_dircap = upload_readonly_dircap
+ self._is_upload_pending = is_upload_pending
+ self._umask = umask
+
+ def start_scanning(self):
+ self._log("start_scanning")
+ files = self._db.get_all_relpaths()
+ self._log("all files %s" % files)
+
+ d = self._scan_remote_collective(scan_self=True)
+ d.addBoth(self._logcb, "after _scan_remote_collective 0")
+ self._turn_deque()
+ return d
+
+ def stop(self):
+ self._stopped = True
+ d = defer.succeed(None)
+ d.addCallback(lambda ign: self._lazy_tail)
+ return d
+
+ def _should_download(self, relpath_u, remote_version):
+ """
+ _should_download returns a bool indicating whether or not a remote object should be downloaded.
+ We check the remote metadata version against our magic-folder db version number;
+ latest version wins.
+ """
+ self._log("_should_download(%r, %r)" % (relpath_u, remote_version))
+ if magicpath.should_ignore_file(relpath_u):
+ self._log("nope")
+ return False
+ self._log("yep")
+ db_entry = self._db.get_db_entry(relpath_u)
+ if db_entry is None:
+ return True
+ self._log("version %r" % (db_entry.version,))
+ return (db_entry.version < remote_version)
+
+ def _get_local_latest(self, relpath_u):
+ """
+ _get_local_latest takes a unicode relative path and checks whether the
+ corresponding file exists locally; if not it returns None, otherwise it
+ returns the version number from our magic-folder db entry for that path
+ (or None if there is no entry).
+ """
+ if not self._get_filepath(relpath_u).exists():
+ return None
+ db_entry = self._db.get_db_entry(relpath_u)
+ return None if db_entry is None else db_entry.version
+
+ def _get_collective_latest_file(self, filename):
+ """
+ _get_collective_latest_file takes a magic-folder file name (as stored
+ in the DMDs) and returns a deferred that fires with a 2-tuple of
+ (file node, metadata) for the highest-versioned copy of that file found
+ across the magic-folder collective directory.
+ """
+ collective_dirmap_d = self._collective_dirnode.list()
+ def scan_collective(result):
+ list_of_deferreds = []
+ for dir_name in result.keys():
+ # XXX make sure it's a directory
+ d = defer.succeed(None)
+ d.addCallback(lambda x, dir_name=dir_name: result[dir_name][0].get_child_and_metadata(filename))
+ list_of_deferreds.append(d)
+ deferList = defer.DeferredList(list_of_deferreds, consumeErrors=True)
+ return deferList
+ collective_dirmap_d.addCallback(scan_collective)
+ def highest_version(deferredList):
+ max_version = -1 # so that a version-0 entry can still win
+ metadata = None
+ node = None
+ for success, result in deferredList:
+ if success:
+ if result[1]['version'] > max_version:
+ node, metadata = result
+ max_version = result[1]['version']
+ return node, metadata
+ collective_dirmap_d.addCallback(highest_version)
+ return collective_dirmap_d
+
+ def _scan_remote_dmd(self, nickname, dirnode, scan_batch):
+ self._log("_scan_remote_dmd nickname %r" % (nickname,))
+ d = dirnode.list()
+ def scan_listing(listing_map):
+ for encoded_relpath_u in listing_map.keys():
+ relpath_u = magicpath.magic2path(encoded_relpath_u)
+ self._log("found %r" % (relpath_u,))
+
+ file_node, metadata = listing_map[encoded_relpath_u]
+ local_version = self._get_local_latest(relpath_u)
+ remote_version = metadata.get('version', None)
+ self._log("%r has local version %r, remote version %r" % (relpath_u, local_version, remote_version))
+
+ if local_version is None or remote_version is None or local_version < remote_version:
+ self._log("%r added to download queue" % (relpath_u,))
+ if relpath_u in scan_batch:
+ scan_batch[relpath_u] += [(file_node, metadata)]
+ else:
+ scan_batch[relpath_u] = [(file_node, metadata)]
+
+ d.addCallback(scan_listing)
+ d.addBoth(self._logcb, "end of _scan_remote_dmd")
+ return d
+
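+ # scan every participant's DMD in the collective; our own DMD (the one
+ # matching self._upload_readonly_dircap) is skipped unless scan_self is True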
+ def _scan_remote_collective(self, scan_self=False):
+ self._log("_scan_remote_collective")
+ scan_batch = {} # path -> [(filenode, metadata)]
+
+ d = self._collective_dirnode.list()
+ def scan_collective(dirmap):
+ d2 = defer.succeed(None)
+ for dir_name in dirmap:
+ (dirnode, metadata) = dirmap[dir_name]
+ if scan_self or dirnode.get_readonly_uri() != self._upload_readonly_dircap:
+ d2.addCallback(lambda ign, dir_name=dir_name, dirnode=dirnode:
+ self._scan_remote_dmd(dir_name, dirnode, scan_batch))
+ def _err(f, dir_name=dir_name):
+ self._log("failed to scan DMD for client %r: %s" % (dir_name, f))
+ # XXX what should we do to make this failure more visible to users?
+ d2.addErrback(_err)
+
+ return d2
+ d.addCallback(scan_collective)
+
+ def _filter_batch_to_deque(ign):
+ self._log("deque = %r, scan_batch = %r" % (self._deque, scan_batch))
+ for relpath_u in scan_batch.keys():
+ file_node, metadata = max(scan_batch[relpath_u], key=lambda x: x[1]['version'])
+
+ if self._should_download(relpath_u, metadata['version']):
+ self._deque.append( (relpath_u, file_node, metadata) )
+ else:
+ self._log("Excluding %r" % (relpath_u,))
+ self._call_hook(None, 'processed')
+
+ self._log("deque after = %r" % (self._deque,))
+ d.addCallback(_filter_batch_to_deque)
+ return d
+
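+ # an empty download queue means: poll the collective again after
+ # REMOTE_SCAN_INTERVAL seconds, then resume turning the deque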
+ def _when_queue_is_empty(self):
+ d = task.deferLater(self._clock, self.REMOTE_SCAN_INTERVAL, self._scan_remote_collective)
+ d.addBoth(self._logcb, "after _scan_remote_collective 1")
+ d.addCallback(lambda ign: self._turn_deque())
+ return d
+
+ def _process(self, item, now=None):
+ # Downloader
+ self._log("_process(%r)" % (item,))
+ if now is None:
+ now = time.time()
+ (relpath_u, file_node, metadata) = item
+ fp = self._get_filepath(relpath_u)
+ abspath_u = unicode_from_filepath(fp)
+ conflict_path_u = self._get_conflicted_filename(abspath_u)
+
+ d = defer.succeed(None)
+
+ def do_update_db(written_abspath_u):
+ filecap = file_node.get_uri()
+ last_uploaded_uri = metadata.get('last_uploaded_uri', None)
+ last_downloaded_uri = filecap
+ last_downloaded_timestamp = now
+ written_pathinfo = get_pathinfo(written_abspath_u)
+
+ if not written_pathinfo.exists and not metadata.get('deleted', False):
+ raise Exception("downloaded object %s disappeared" % quote_local_unicode_path(written_abspath_u))
+
+ self._db.did_upload_version(relpath_u, metadata['version'], last_uploaded_uri,
+ last_downloaded_uri, last_downloaded_timestamp, written_pathinfo)
+ self._count('objects_downloaded')
+ def failed(f):
+ self._log("download failed: %s" % (str(f),))
+ self._count('objects_failed')
+ return f
+
+ if os.path.isfile(conflict_path_u):
+ def fail(res):
+ raise ConflictError("download failed: already conflicted: %r" % (relpath_u,))
+ d.addCallback(fail)
+ else:
+ is_conflict = False
+ db_entry = self._db.get_db_entry(relpath_u)
+ dmd_last_downloaded_uri = metadata.get('last_downloaded_uri', None)
+ dmd_last_uploaded_uri = metadata.get('last_uploaded_uri', None)
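+ # conflict detection: a remote change conflicts if the DMD's record of
+ # the last downloaded/uploaded URI disagrees with our local db, or if
+ # we still have our own upload of this path pending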
+ if db_entry:
+ if dmd_last_downloaded_uri is not None and db_entry.last_downloaded_uri is not None:
+ if dmd_last_downloaded_uri != db_entry.last_downloaded_uri:
+ is_conflict = True
+ self._count('objects_conflicted')
+ elif dmd_last_uploaded_uri is not None and dmd_last_uploaded_uri != db_entry.last_uploaded_uri:
+ is_conflict = True
+ self._count('objects_conflicted')
+ elif self._is_upload_pending(relpath_u):
+ is_conflict = True
+ self._count('objects_conflicted')
+
+ if relpath_u.endswith(u"/"):
+ if metadata.get('deleted', False):
+ self._log("rmdir(%r) ignored" % (abspath_u,))
+ else:
+ self._log("mkdir(%r)" % (abspath_u,))
+ d.addCallback(lambda ign: fileutil.make_dirs(abspath_u))
+ d.addCallback(lambda ign: abspath_u)
+ else:
+ if metadata.get('deleted', False):
+ d.addCallback(lambda ign: self._rename_deleted_file(abspath_u))
+ else:
+ d.addCallback(lambda ign: file_node.download_best_version())
+ d.addCallback(lambda contents: self._write_downloaded_file(abspath_u, contents,
+ is_conflict=is_conflict))
+
+ d.addCallbacks(do_update_db, failed)
+
+ def trap_conflicts(f):
+ f.trap(ConflictError)
+ return None
+ d.addErrback(trap_conflicts)
+ return d
--- /dev/null
+
+import sys
+from collections import namedtuple
+
+from allmydata.util.dbutil import get_db, DBError
+
+
+# magic-folder db schema version 1
+SCHEMA_v1 = """
+CREATE TABLE version
+(
+ version INTEGER -- contains one row, set to 1
+);
+
+CREATE TABLE local_files
+(
+ path VARCHAR(1024) PRIMARY KEY, -- UTF-8 filename relative to local magic folder dir
+ -- note that size is before mtime and ctime here, but after in function parameters
+ size INTEGER, -- ST_SIZE, or NULL if the file has been deleted
+ mtime NUMBER, -- ST_MTIME
+ ctime NUMBER, -- ST_CTIME
+ version INTEGER,
+ last_uploaded_uri VARCHAR(256) UNIQUE, -- URI:CHK:...
+ last_downloaded_uri VARCHAR(256) UNIQUE, -- URI:CHK:...
+ last_downloaded_timestamp TIMESTAMP
+);
+"""
+
+
+def get_magicfolderdb(dbfile, stderr=sys.stderr,
+ create_version=(SCHEMA_v1, 1), just_create=False):
+ # Open or create the given magic-folder db file. The parent directory
+ # must exist.
+ try:
+ (sqlite3, db) = get_db(dbfile, stderr, create_version,
+ just_create=just_create, dbname="magicfolderdb")
+ if create_version[1] in (1, 2):
+ return MagicFolderDB(sqlite3, db)
+ else:
+ print >>stderr, "invalid magicfolderdb schema version specified"
+ return None
+ except DBError as e:
+ print >>stderr, e
+ return None
+
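+# PathEntry mirrors the non-key columns of one local_files row (see
+# SCHEMA_v1 above).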
+PathEntry = namedtuple('PathEntry', 'size mtime ctime version last_uploaded_uri last_downloaded_uri last_downloaded_timestamp')
+
+class MagicFolderDB(object):
+ VERSION = 1
+
+ def __init__(self, sqlite_module, connection):
+ self.sqlite_module = sqlite_module
+ self.connection = connection
+ self.cursor = connection.cursor()
+
+ def get_db_entry(self, relpath_u):
+ """
+ Retrieve the entry in the database for a given path, or return None
+ if there is no such entry.
+ """
+ c = self.cursor
+ c.execute("SELECT size, mtime, ctime, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp"
+ " FROM local_files"
+ " WHERE path=?",
+ (relpath_u,))
+ row = self.cursor.fetchone()
+ if not row:
+ return None
+ else:
+ (size, mtime, ctime, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp) = row
+ return PathEntry(size=size, mtime=mtime, ctime=ctime, version=version,
+ last_uploaded_uri=last_uploaded_uri,
+ last_downloaded_uri=last_downloaded_uri,
+ last_downloaded_timestamp=last_downloaded_timestamp)
+
+ def get_all_relpaths(self):
+ """
+ Retrieve a set of all relpaths of files that have ever had an entry in
+ the magic folder db (i.e. that have been uploaded or downloaded at
+ least once).
+ """
+ self.cursor.execute("SELECT path FROM local_files")
+ rows = self.cursor.fetchall()
+ return set([r[0] for r in rows])
+
+ def did_upload_version(self, relpath_u, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp, pathinfo):
+ print "%r.did_upload_version(%r, %r, %r, %r, %r, %r)" % (self, relpath_u, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp, pathinfo)
+ try:
+ print "insert"
+ self.cursor.execute("INSERT INTO local_files VALUES (?,?,?,?,?,?,?,?)",
+ (relpath_u, pathinfo.size, pathinfo.mtime, pathinfo.ctime, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp))
+ except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError):
+ print "err... update"
+ self.cursor.execute("UPDATE local_files"
+ " SET size=?, mtime=?, ctime=?, version=?, last_uploaded_uri=?, last_downloaded_uri=?, last_downloaded_timestamp=?"
+ " WHERE path=?",
+ (pathinfo.size, pathinfo.mtime, pathinfo.ctime, version, last_uploaded_uri, last_downloaded_uri, last_downloaded_timestamp, relpath_u))
+ self.connection.commit()
+ print "committed"
--- /dev/null
+
+import re
+import os.path
+
+from allmydata.util.assertutil import precondition, _assert
+
+def path2magic(path):
+ return re.sub(ur'[/@]', lambda m: {u'/': u'@_', u'@': u'@@'}[m.group(0)], path)
+
+def magic2path(path):
+ return re.sub(ur'@[_@]', lambda m: {u'@_': u'/', u'@@': u'@'}[m.group(0)], path)
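+
+# For example:
+#   path2magic(u"foo/bar@baz") == u"foo@_bar@@baz"
+#   magic2path(u"foo@_bar@@baz") == u"foo/bar@baz"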
+
+
+IGNORE_SUFFIXES = [u'.backup', u'.tmp', u'.conflicted']
+IGNORE_PREFIXES = [u'.']
+
+def should_ignore_file(path_u):
+ precondition(isinstance(path_u, unicode), path_u=path_u)
+
+ for suffix in IGNORE_SUFFIXES:
+ if path_u.endswith(suffix):
+ return True
+
+ while path_u != u"":
+ oldpath_u = path_u
+ path_u, tail_u = os.path.split(path_u)
+ if tail_u.startswith(u"."):
+ return True
+ if path_u == oldpath_u:
+ return True # the path was absolute
+ _assert(len(path_u) < len(oldpath_u), path_u=path_u, oldpath_u=oldpath_u)
+
+ return False
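+
+# For example, should_ignore_file(u"foo.tmp") and
+# should_ignore_file(u"sub/.hidden/file") are True, while
+# should_ignore_file(u"sub/file.txt") is False.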
c.write("enabled = false\n")
c.write("\n")
- c.write("[drop_upload]\n")
- c.write("# Shall this node automatically upload files created or modified in a local directory?\n")
- c.write("enabled = false\n")
- c.write("# To specify the target of uploads, a mutable directory writecap URI must be placed\n"
- "# in 'private/drop_upload_dircap'.\n")
- c.write("local.directory = ~/drop_upload\n")
- c.write("\n")
-
c.close()
from allmydata.util import fileutil
--- /dev/null
+
+import os
+from types import NoneType
+from cStringIO import StringIO
+
+from twisted.python import usage
+
+from allmydata.util.assertutil import precondition
+
+from .common import BaseOptions, BasedirOptions, get_aliases
+from .cli import MakeDirectoryOptions, LnOptions, CreateAliasOptions
+from . import tahoe_mv
+from allmydata.util.encodingutil import argv_to_abspath, argv_to_unicode, to_str, \
+ quote_local_unicode_path
+from allmydata.util import fileutil
+from allmydata.util import configutil
+from allmydata import uri
+
+INVITE_SEPARATOR = "+"
+
+class CreateOptions(BasedirOptions):
+ nickname = None
+ local_dir = None
+ synopsis = "MAGIC_ALIAS: [NICKNAME LOCAL_DIR]"
+ def parseArgs(self, alias, nickname=None, local_dir=None):
+ BasedirOptions.parseArgs(self)
+ alias = argv_to_unicode(alias)
+ if not alias.endswith(u':'):
+ raise usage.UsageError("An alias must end with a ':' character.")
+ self.alias = alias[:-1]
+ self.nickname = None if nickname is None else argv_to_unicode(nickname)
+
+ # Expand the path relative to the current directory of the CLI command, not the node.
+ self.local_dir = None if local_dir is None else argv_to_abspath(local_dir, long_path=False)
+
+ if self.nickname and not self.local_dir:
+ raise usage.UsageError("If NICKNAME is specified then LOCAL_DIR must also be specified.")
+ node_url_file = os.path.join(self['node-directory'], u"node.url")
+ self['node-url'] = fileutil.read(node_url_file).strip()
+
+def _delegate_options(source_options, target_options):
+ target_options.aliases = get_aliases(source_options['node-directory'])
+ target_options["node-url"] = source_options["node-url"]
+ target_options["node-directory"] = source_options["node-directory"]
+ target_options.stdin = StringIO("")
+ target_options.stdout = StringIO()
+ target_options.stderr = StringIO()
+ return target_options
+
+def create(options):
+ precondition(isinstance(options.alias, unicode), alias=options.alias)
+ precondition(isinstance(options.nickname, (unicode, NoneType)), nickname=options.nickname)
+ precondition(isinstance(options.local_dir, (unicode, NoneType)), local_dir=options.local_dir)
+
+ from allmydata.scripts import tahoe_add_alias
+ create_alias_options = _delegate_options(options, CreateAliasOptions())
+ create_alias_options.alias = options.alias
+
+ rc = tahoe_add_alias.create_alias(create_alias_options)
+ if rc != 0:
+ print >>options.stderr, create_alias_options.stderr.getvalue()
+ return rc
+ print >>options.stdout, create_alias_options.stdout.getvalue()
+
+ if options.nickname is not None:
+ invite_options = _delegate_options(options, InviteOptions())
+ invite_options.alias = options.alias
+ invite_options.nickname = options.nickname
+ rc = invite(invite_options)
+ if rc != 0:
+ print >>options.stderr, "magic-folder: failed to invite after create\n"
+ print >>options.stderr, invite_options.stderr.getvalue()
+ return rc
+ invite_code = invite_options.stdout.getvalue().strip()
+ join_options = _delegate_options(options, JoinOptions())
+ join_options.local_dir = options.local_dir
+ join_options.invite_code = invite_code
+ rc = join(join_options)
+ if rc != 0:
+ print >>options.stderr, "magic-folder: failed to join after create\n"
+ print >>options.stderr, join_options.stderr.getvalue()
+ return rc
+ return 0
+
+class InviteOptions(BasedirOptions):
+ nickname = None
+ synopsis = "MAGIC_ALIAS: NICKNAME"
+ stdin = StringIO("")
+ def parseArgs(self, alias, nickname=None):
+ BasedirOptions.parseArgs(self)
+ alias = argv_to_unicode(alias)
+ if not alias.endswith(u':'):
+ raise usage.UsageError("An alias must end with a ':' character.")
+ self.alias = alias[:-1]
+ self.nickname = argv_to_unicode(nickname)
+ node_url_file = os.path.join(self['node-directory'], u"node.url")
+ self['node-url'] = fileutil.read(node_url_file).strip()
+ aliases = get_aliases(self['node-directory'])
+ self.aliases = aliases
+
+def invite(options):
+ precondition(isinstance(options.alias, unicode), alias=options.alias)
+ precondition(isinstance(options.nickname, unicode), nickname=options.nickname)
+
+ from allmydata.scripts import tahoe_mkdir
+ mkdir_options = _delegate_options(options, MakeDirectoryOptions())
+ mkdir_options.where = None
+
+ rc = tahoe_mkdir.mkdir(mkdir_options)
+ if rc != 0:
+ print >>options.stderr, "magic-folder: failed to mkdir\n"
+ return rc
+
+ # FIXME this assumes caps are ASCII.
+ dmd_write_cap = mkdir_options.stdout.getvalue().strip()
+ dmd_readonly_cap = uri.from_string(dmd_write_cap).get_readonly().to_string()
+ if dmd_readonly_cap is None:
+ print >>options.stderr, "magic-folder: failed to diminish dmd write cap\n"
+ return 1
+
+ magic_write_cap = get_aliases(options["node-directory"])[options.alias]
+ magic_readonly_cap = uri.from_string(magic_write_cap).get_readonly().to_string()
+
+ # tahoe ln CLIENT_READCAP COLLECTIVE_WRITECAP/NICKNAME
+ ln_options = _delegate_options(options, LnOptions())
+ ln_options.from_file = unicode(dmd_readonly_cap, 'utf-8')
+ ln_options.to_file = u"%s/%s" % (unicode(magic_write_cap, 'utf-8'), options.nickname)
+ rc = tahoe_mv.mv(ln_options, mode="link")
+ if rc != 0:
+ print >>options.stderr, "magic-folder: failed to create link\n"
+ print >>options.stderr, ln_options.stderr.getvalue()
+ return rc
+
+ # FIXME: this assumes caps are ASCII.
+ print >>options.stdout, "%s%s%s" % (magic_readonly_cap, INVITE_SEPARATOR, dmd_write_cap)
+ return 0
+
+class JoinOptions(BasedirOptions):
+ synopsis = "INVITE_CODE LOCAL_DIR"
+ dmd_write_cap = ""
+ magic_readonly_cap = ""
+ def parseArgs(self, invite_code, local_dir):
+ BasedirOptions.parseArgs(self)
+
+ # Expand the path relative to the current directory of the CLI command, not the node.
+ self.local_dir = None if local_dir is None else argv_to_abspath(local_dir, long_path=False)
+ self.invite_code = to_str(argv_to_unicode(invite_code))
+
+def join(options):
+ fields = options.invite_code.split(INVITE_SEPARATOR)
+ if len(fields) != 2:
+ raise usage.UsageError("Invalid invite code.")
+ magic_readonly_cap, dmd_write_cap = fields
+
+ dmd_cap_file = os.path.join(options["node-directory"], u"private", u"magic_folder_dircap")
+ collective_readcap_file = os.path.join(options["node-directory"], u"private", u"collective_dircap")
+ magic_folder_db_file = os.path.join(options["node-directory"], u"private", u"magicfolderdb.sqlite")
+
+ if os.path.exists(dmd_cap_file) or os.path.exists(collective_readcap_file) or os.path.exists(magic_folder_db_file):
+ print >>options.stderr, ("\nThis client has already joined a magic folder."
+ "\nUse the 'tahoe magic-folder leave' command first.\n")
+ return 1
+
+ fileutil.write(dmd_cap_file, dmd_write_cap)
+ fileutil.write(collective_readcap_file, magic_readonly_cap)
+
+ config = configutil.get_config(os.path.join(options["node-directory"], u"tahoe.cfg"))
+ configutil.set_config(config, "magic_folder", "enabled", "True")
+ configutil.set_config(config, "magic_folder", "local.directory", options.local_dir.encode('utf-8'))
+ configutil.write_config(os.path.join(options["node-directory"], u"tahoe.cfg"), config)
+ return 0
+
+class LeaveOptions(BasedirOptions):
+ synopsis = ""
+ def parseArgs(self):
+ BasedirOptions.parseArgs(self)
+
+def leave(options):
+ from ConfigParser import SafeConfigParser
+
+ dmd_cap_file = os.path.join(options["node-directory"], u"private", u"magic_folder_dircap")
+ collective_readcap_file = os.path.join(options["node-directory"], u"private", u"collective_dircap")
+ magic_folder_db_file = os.path.join(options["node-directory"], u"private", u"magicfolderdb.sqlite")
+
+ parser = SafeConfigParser()
+ parser.read(os.path.join(options["node-directory"], u"tahoe.cfg"))
+ parser.remove_section("magic_folder")
+ f = open(os.path.join(options["node-directory"], u"tahoe.cfg"), "w")
+ parser.write(f)
+ f.close()
+
+ for f in [dmd_cap_file, collective_readcap_file, magic_folder_db_file]:
+ try:
+ fileutil.remove(f)
+ except Exception as e:
+ print >>options.stderr, ("Warning: unable to remove %s due to %s: %s"
+ % (quote_local_unicode_path(f), e.__class__.__name__, str(e)))
+
+ return 0
+
+class MagicFolderCommand(BaseOptions):
+ subCommands = [
+ ["create", None, CreateOptions, "Create a Magic Folder."],
+ ["invite", None, InviteOptions, "Invite someone to a Magic Folder."],
+ ["join", None, JoinOptions, "Join a Magic Folder."],
+ ["leave", None, LeaveOptions, "Leave a Magic Folder."],
+ ]
+ def postOptions(self):
+ if not hasattr(self, 'subOptions'):
+ raise usage.UsageError("must specify a subcommand")
+ def getSynopsis(self):
+ return "Usage: tahoe [global-options] magic SUBCOMMAND"
+ def getUsage(self, width=None):
+ t = BaseOptions.getUsage(self, width)
+ t += """\
+Please run e.g. 'tahoe magic-folder create --help' for more details on each
+subcommand.
+"""
+ return t
+
+subDispatch = {
+ "create": create,
+ "invite": invite,
+ "join": join,
+ "leave": leave,
+}
+
+def do_magic_folder(options):
+ so = options.subOptions
+ so.stdout = options.stdout
+ so.stderr = options.stderr
+ f = subDispatch[options.subCommand]
+ return f(so)
+
+subCommands = [
+ ["magic-folder", None, MagicFolderCommand,
+ "Magic Folder subcommands: use 'tahoe magic-folder' for a list."],
+]
+
+dispatch = {
+ "magic-folder": do_magic_folder,
+}
from twisted.python import usage
from allmydata.scripts.common import get_default_nodedir
-from allmydata.scripts import debug, create_node, startstop_node, cli, keygen, stats_gatherer, admin
+from allmydata.scripts import debug, create_node, startstop_node, cli, keygen, stats_gatherer, \
+ admin, magic_folder_cli
from allmydata.util.encodingutil import quote_output, quote_local_unicode_path, get_io_encoding
def GROUP(s):
+ debug.subCommands
+ GROUP("Using the filesystem")
+ cli.subCommands
+ + magic_folder_cli.subCommands
)
optFlags = [
rc = admin.dispatch[command](so)
elif command in cli.dispatch:
rc = cli.dispatch[command](so)
+ elif command in magic_folder_cli.dispatch:
+ rc = magic_folder_cli.dispatch[command](so)
elif command in ac_dispatch:
rc = ac_dispatch[command](so, stdout, stderr)
else:
--- /dev/null
+#!/usr/bin/env python
+
+# this is a smoke-test using "./bin/tahoe" to:
+#
+# 1. create an introducer
+# 2. create 5 storage nodes
+# 3. create 2 client nodes (alice, bob)
+# 4. Alice creates a magic-folder ("magik:")
+# 5. Alice invites Bob
+# 6. Bob joins
+#
+# After that, some basic tests are performed; see the "if True:"
+# blocks to turn some on or off. Could benefit from some cleanups
+# etc. but this seems useful out of the gate for quick testing.
+#
+# TO RUN:
+# from top-level of your checkout (we use "./bin/tahoe"):
+# python src/allmydata/test/check_magicfolder_smoke.py
+#
+# This will create "./smoke_magicfolder" (which is disposable) and
+# contains all the Tahoe basedirs for the introducer, storage nodes,
+# clients, and the clients' magic-folders. NOTE that if these
+# directories already exist they will NOT be re-created. So kill the
+# grid and then "rm -rf smoke_magicfolder" if you want to re-run the
+# tests cleanly.
+#
+# Run the script with a single arg, "kill" to run "tahoe stop" on all
+# the nodes.
+#
+# This will have "tahoe start" -ed all the nodes, so you can continue
+# to play around after the script exits.
+
+from __future__ import print_function
+
+import sys
+import time
+import shutil
+import subprocess
+from os.path import join, abspath, curdir, exists
+from os import mkdir, listdir, unlink
+
+tahoe_base = abspath(curdir)
+data_base = join(tahoe_base, 'smoke_magicfolder')
+tahoe_bin = join(tahoe_base, 'bin', 'tahoe')
+python = sys.executable
+
+if not exists(data_base):
+ print("Creating", data_base)
+ mkdir(data_base)
+
+if not exists(tahoe_bin):
+ raise RuntimeError("Can't find 'tahoe' binary at %r" % (tahoe_bin,))
+
+if 'kill' in sys.argv:
+ print("Killing the grid")
+ for d in listdir(data_base):
+ print("killing", d)
+ subprocess.call(
+ [
+ python, tahoe_bin, 'stop', join(data_base, d),
+ ]
+ )
+ sys.exit(0)
+
+if not exists(join(data_base, 'introducer')):
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'create-introducer', join(data_base, 'introducer'),
+ ]
+ )
+with open(join(data_base, 'introducer', 'tahoe.cfg'), 'w') as f:
+ f.write('''
+[node]
+nickname = introducer0
+web.port = 4560
+''')
+
+subprocess.check_call(
+ [
+ python, tahoe_bin, 'start', join(data_base, 'introducer'),
+ ]
+)
+
+furl_fname = join(data_base, 'introducer', 'private', 'introducer.furl')
+while not exists(furl_fname):
+ time.sleep(1)
+furl = open(furl_fname, 'r').read()
+print("FURL", furl)
+
+for x in range(5):
+ data_dir = join(data_base, 'node%d' % x)
+ if not exists(data_dir):
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'create-node',
+ '--nickname', 'node%d' % (x,),
+ '--introducer', furl,
+ data_dir,
+ ]
+ )
+ with open(join(data_dir, 'tahoe.cfg'), 'w') as f:
+ f.write('''
+[node]
+nickname = node%(node_id)s
+web.port =
+web.static = public_html
+tub.location = localhost:%(tub_port)d
+
+[client]
+# Which services should this client connect to?
+introducer.furl = %(furl)s
+shares.needed = 2
+shares.happy = 3
+shares.total = 4
+''' % {'node_id':x, 'furl':furl, 'tub_port':(9900 + x)})
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'start', data_dir,
+ ]
+ )
+
+
+
+# alice and bob clients
+do_invites = False
+node_id = 0
+for name in ['alice', 'bob']:
+ data_dir = join(data_base, name)
+ magic_dir = join(data_base, '%s-magic' % (name,))
+ if not exists(magic_dir):
+ mkdir(magic_dir)
+ if not exists(data_dir):
+ do_invites = True
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'create-node',
+ '--no-storage',
+ '--nickname', name,
+ '--introducer', furl,
+ data_dir,
+ ]
+ )
+ with open(join(data_dir, 'tahoe.cfg'), 'w') as f:
+ f.write('''
+[node]
+nickname = %(name)s
+web.port = tcp:998%(node_id)d:interface=localhost
+web.static = public_html
+
+[client]
+# Which services should this client connect to?
+introducer.furl = %(furl)s
+shares.needed = 2
+shares.happy = 3
+shares.total = 4
+''' % {'name':name, 'node_id':node_id, 'furl':furl})
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'start', data_dir,
+ ]
+ )
+ node_id += 1
+
+# okay, now we have the alice and bob clients running
+# now we have alice create a magic-folder, and invite bob to it
+
+if do_invites:
+ data_dir = join(data_base, 'alice')
+ # alice creates her folder, invites bob
+ print("Alice creates a magic-folder")
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'magic-folder', 'create', '--basedir', data_dir, 'magik:', 'alice',
+ join(data_base, 'alice-magic'),
+ ]
+ )
+ print("Alice invites Bob")
+ invite = subprocess.check_output(
+ [
+ python, tahoe_bin, 'magic-folder', 'invite', '--basedir', data_dir, 'magik:', 'bob',
+ ]
+ )
+ print(" invite:", invite)
+
+ # now we let "bob"/bob join
+ print("Bob joins Alice's magic folder")
+ data_dir = join(data_base, 'bob')
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'magic-folder', 'join', '--basedir', data_dir, invite,
+ join(data_base, 'bob-magic'),
+ ]
+ )
+ print("Bob has joined.")
+
+ print("Restarting alice + bob clients")
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'restart', '--basedir', join(data_base, 'alice'),
+ ]
+ )
+ subprocess.check_call(
+ [
+ python, tahoe_bin, 'restart', '--basedir', join(data_base, 'bob'),
+ ]
+ )
+
+if True:
+ for name in ['alice', 'bob']:
+ with open(join(data_base, name, 'private', 'magic_folder_dircap'), 'r') as f:
+ print("dircap %s: %s" % (name, f.read().strip()))
+
+# give storage nodes a chance to connect properly? I'm not entirely
+# sure what's up here, but I get "UnrecoverableFileError" on the
+# first_file upload from Alice "very often" otherwise
+print("waiting 3 seconds")
+time.sleep(3)
+
+if True:
+ # alice writes a file; bob should get it
+ alice_foo = join(data_base, 'alice-magic', 'first_file')
+ bob_foo = join(data_base, 'bob-magic', 'first_file')
+ with open(alice_foo, 'w') as f:
+ f.write("line one\n")
+
+ print("Waiting for:", bob_foo)
+ while True:
+ if exists(bob_foo):
+ print(" found", bob_foo)
+ with open(bob_foo, 'r') as f:
+ if f.read() == "line one\n":
+ break
+ print(" file contents still mismatched")
+ time.sleep(1)
+
+if True:
+ # bob writes a file; alice should get it
+ alice_bar = join(data_base, 'alice-magic', 'second_file')
+ bob_bar = join(data_base, 'bob-magic', 'second_file')
+ with open(bob_bar, 'w') as f:
+ f.write("line one\n")
+
+ print("Waiting for:", alice_bar)
+ while True:
+ if exists(alice_bar):
+ print(" found", alice_bar)
+ with open(alice_bar, 'r') as f:
+ if f.read() == "line one\n":
+ break
+ print(" file contents still mismatched")
+ time.sleep(1)
+
+if True:
+ # alice deletes 'first_file'
+ alice_foo = join(data_base, 'alice-magic', 'first_file')
+ bob_foo = join(data_base, 'bob-magic', 'first_file')
+ unlink(alice_foo)
+
+ print("Waiting for '%s' to disappear" % (bob_foo,))
+ while True:
+ if not exists(bob_foo):
+ print(" disappeared", bob_foo)
+ break
+ time.sleep(1)
+
+ bob_tmp = bob_foo + '.backup'
+ print("Waiting for '%s' to appear" % (bob_tmp,))
+ while True:
+ if exists(bob_tmp):
+ print(" appeared", bob_tmp)
+ break
+ time.sleep(1)
+
+if True:
+ # bob writes new content to 'second_file'; alice should get it
+ alice_foo = join(data_base, 'alice-magic', 'second_file')
+ bob_foo = join(data_base, 'bob-magic', 'second_file')
+ gold_content = "line one\nsecond line\n"
+
+ with open(bob_foo, 'w') as f:
+ f.write(gold_content)
+
+ print("Waiting for:", alice_foo)
+ while True:
+ if exists(alice_foo):
+ print(" found", alice_foo)
+ with open(alice_foo, 'r') as f:
+ content = f.read()
+ if content == gold_content:
+ break
+ print(" file contents still mismatched:\n")
+ print(content)
+ time.sleep(1)
+
+if True:
+ # bob creates a sub-directory and adds a file to it
+ alice_dir = join(data_base, 'alice-magic', 'subdir')
+ bob_dir = join(data_base, 'bob-magic', 'subdir')
+ gold_content = 'a file in a subdirectory\n'
+
+ mkdir(bob_dir)
+ with open(join(bob_dir, 'subfile'), 'w') as f:
+ f.write(gold_content)
+
+ print("Waiting for Bob's subdir '%s' to appear" % (bob_dir,))
+ while True:
+ if exists(bob_dir):
+ print(" found subdir")
+ if exists(join(bob_dir, 'subfile')):
+ print(" found file")
+ with open(join(bob_dir, 'subfile'), 'r') as f:
+ if f.read() == gold_content:
+ print(" contents match")
+ break
+ time.sleep(0.1)
+
+if True:
+ # bob deletes the whole subdir
+ alice_dir = join(data_base, 'alice-magic', 'subdir')
+ bob_dir = join(data_base, 'bob-magic', 'subdir')
+ shutil.rmtree(bob_dir)
+
+ print("Waiting for Alice's subdir '%s' to disappear" % (alice_dir,))
+ while True:
+ if not exists(alice_dir):
+ print(" it's gone")
+ break
+ time.sleep(0.1)
+
+# XXX restoring the file doesn't work here (yet the unit-tests pass; what do they do differently?)
+# NOTE: it only fails when it's alice restoring the file!
+if True:
+ # restore 'first_file' but with different contents
+ print("re-writing 'first_file'")
+ assert not exists(join(data_base, 'bob-magic', 'first_file'))
+ assert not exists(join(data_base, 'alice-magic', 'first_file'))
+ alice_foo = join(data_base, 'alice-magic', 'first_file')
+ bob_foo = join(data_base, 'bob-magic', 'first_file')
+ if True:
+ # if we don't swap around, it works fine
+ alice_foo, bob_foo = bob_foo, alice_foo
+ gold_content = "see it again for the first time\n"
+
+ with open(bob_foo, 'w') as f:
+ f.write(gold_content)
+
+ print("Waiting for:", alice_foo)
+ while True:
+ if exists(alice_foo):
+ print(" found", alice_foo)
+ with open(alice_foo, 'r') as f:
+ content = f.read()
+ if content == gold_content:
+ break
+ print(" file contents still mismatched: %d bytes:\n" % (len(content),))
+ print(content)
+ else:
+ print(" %r not there yet" % (alice_foo,))
+ time.sleep(1)
+
+# XXX test .backup (delete a file)
+
+# port david's clock.advance stuff
+# fix clock.advance()
+# subdirectory
+# file deletes
+# conflicts
--- /dev/null
+import os.path
+import re
+
+from twisted.trial import unittest
+from twisted.internet import defer
+from twisted.internet import reactor
+from twisted.python import usage
+
+from allmydata.util.assertutil import precondition
+from allmydata.util import fileutil
+from allmydata.scripts.common import get_aliases
+from allmydata.test.no_network import GridTestMixin
+from .test_cli import CLITestMixin
+from allmydata.scripts import magic_folder_cli
+from allmydata.util.fileutil import abspath_expanduser_unicode
+from allmydata.util.encodingutil import unicode_to_argv
+from allmydata.frontends.magic_folder import MagicFolder
+from allmydata import uri
+
+
+class MagicFolderCLITestMixin(CLITestMixin, GridTestMixin):
+ def do_create_magic_folder(self, client_num):
+ d = self.do_cli("magic-folder", "create", "magic:", client_num=client_num)
+ def _done((rc,stdout,stderr)):
+ self.failUnlessEqual(rc, 0)
+ self.failUnlessIn("Alias 'magic' created", stdout)
+ self.failUnlessEqual(stderr, "")
+ aliases = get_aliases(self.get_clientdir(i=client_num))
+ self.failUnlessIn("magic", aliases)
+ self.failUnless(aliases["magic"].startswith("URI:DIR2:"))
+ d.addCallback(_done)
+ return d
+
+ def do_invite(self, client_num, nickname):
+ nickname_arg = unicode_to_argv(nickname)
+ d = self.do_cli("magic-folder", "invite", "magic:", nickname_arg, client_num=client_num)
+ def _done((rc, stdout, stderr)):
+ self.failUnlessEqual(rc, 0)
+ return (rc, stdout, stderr)
+ d.addCallback(_done)
+ return d
+
+ def do_join(self, client_num, local_dir, invite_code):
+ precondition(isinstance(local_dir, unicode), local_dir=local_dir)
+ precondition(isinstance(invite_code, str), invite_code=invite_code)
+
+ local_dir_arg = unicode_to_argv(local_dir)
+ d = self.do_cli("magic-folder", "join", invite_code, local_dir_arg, client_num=client_num)
+ def _done((rc, stdout, stderr)):
+ self.failUnlessEqual(rc, 0)
+ self.failUnlessEqual(stdout, "")
+ self.failUnlessEqual(stderr, "")
+ return (rc, stdout, stderr)
+ d.addCallback(_done)
+ return d
+
+ def do_leave(self, client_num):
+ d = self.do_cli("magic-folder", "leave", client_num=client_num)
+ def _done((rc, stdout, stderr)):
+ self.failUnlessEqual(rc, 0)
+ return (rc, stdout, stderr)
+ d.addCallback(_done)
+ return d
+
+ def check_joined_config(self, client_num, upload_dircap):
+ """Tests that our collective directory has the readonly cap of
+ our upload directory.
+ """
+ collective_readonly_cap = fileutil.read(os.path.join(self.get_clientdir(i=client_num),
+ u"private", u"collective_dircap"))
+ d = self.do_cli("ls", "--json", collective_readonly_cap, client_num=client_num)
+ def _done((rc, stdout, stderr)):
+ self.failUnlessEqual(rc, 0)
+ return (rc, stdout, stderr)
+ d.addCallback(_done)
+ def test_joined_magic_folder((rc,stdout,stderr)):
+ readonly_cap = unicode(uri.from_string(upload_dircap).get_readonly().to_string(), 'utf-8')
+ s = re.search(readonly_cap, stdout)
+ self.failUnless(s is not None)
+ return None
+ d.addCallback(test_joined_magic_folder)
+ return d
+
+ def get_caps_from_files(self, client_num):
+ collective_dircap = fileutil.read(os.path.join(self.get_clientdir(i=client_num),
+ u"private", u"collective_dircap"))
+ upload_dircap = fileutil.read(os.path.join(self.get_clientdir(i=client_num),
+ u"private", u"magic_folder_dircap"))
+ self.failIf(collective_dircap is None or upload_dircap is None)
+ return collective_dircap, upload_dircap
+
+ def check_config(self, client_num, local_dir):
+ client_config = fileutil.read(os.path.join(self.get_clientdir(i=client_num), "tahoe.cfg"))
+ local_dir_utf8 = local_dir.encode('utf-8')
+ magic_folder_config = "[magic_folder]\nenabled = True\nlocal.directory = %s" % (local_dir_utf8,)
+ self.failUnlessIn(magic_folder_config, client_config)
+
+ def create_invite_join_magic_folder(self, nickname, local_dir):
+ nickname_arg = unicode_to_argv(nickname)
+ local_dir_arg = unicode_to_argv(local_dir)
+ d = self.do_cli("magic-folder", "create", "magic:", nickname_arg, local_dir_arg)
+ def _done((rc, stdout, stderr)):
+ self.failUnlessEqual(rc, 0)
+
+ client = self.get_client()
+ self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
+ self.collective_dirnode = client.create_node_from_uri(self.collective_dircap)
+ self.upload_dirnode = client.create_node_from_uri(self.upload_dircap)
+ d.addCallback(_done)
+ d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap))
+ d.addCallback(lambda ign: self.check_config(0, local_dir))
+ return d
+
+ def cleanup(self, res):
+ d = defer.succeed(None)
+ if self.magicfolder is not None:
+ d.addCallback(lambda ign: self.magicfolder.finish())
+ d.addCallback(lambda ign: res)
+ return d
+
+ def init_magicfolder(self, client_num, upload_dircap, collective_dircap, local_magic_dir, clock):
+ dbfile = abspath_expanduser_unicode(u"magicfolderdb.sqlite", base=self.get_clientdir(i=client_num))
+ magicfolder = MagicFolder(self.get_client(client_num), upload_dircap, collective_dircap, local_magic_dir,
+ dbfile, 0077, pending_delay=0.2, clock=clock)
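+ # no delay between downloader turns; the tests drive time explicitly
+ # through the supplied clock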
+ magicfolder.downloader._turn_delay = 0
+
+ magicfolder.setServiceParent(self.get_client(client_num))
+ magicfolder.ready()
+ return magicfolder
+
+ def setup_alice_and_bob(self, alice_clock=reactor, bob_clock=reactor):
+ self.set_up_grid(num_clients=2)
+
+ self.alice_magicfolder = None
+ self.bob_magicfolder = None
+
+ alice_magic_dir = abspath_expanduser_unicode(u"Alice-magic", base=self.basedir)
+ self.mkdir_nonascii(alice_magic_dir)
+ bob_magic_dir = abspath_expanduser_unicode(u"Bob-magic", base=self.basedir)
+ self.mkdir_nonascii(bob_magic_dir)
+
+ # Alice creates a Magic Folder,
+ # invites herself, and then joins.
+ d = self.do_create_magic_folder(0)
+ d.addCallback(lambda ign: self.do_invite(0, u"Alice\u00F8"))
+ def get_invite_code(result):
+ self.invite_code = result[1].strip()
+ d.addCallback(get_invite_code)
+ d.addCallback(lambda ign: self.do_join(0, alice_magic_dir, self.invite_code))
+ def get_alice_caps(ign):
+ self.alice_collective_dircap, self.alice_upload_dircap = self.get_caps_from_files(0)
+ d.addCallback(get_alice_caps)
+ d.addCallback(lambda ign: self.check_joined_config(0, self.alice_upload_dircap))
+ d.addCallback(lambda ign: self.check_config(0, alice_magic_dir))
+ def get_Alice_magicfolder(result):
+ self.alice_magicfolder = self.init_magicfolder(0, self.alice_upload_dircap,
+ self.alice_collective_dircap,
+ alice_magic_dir, alice_clock)
+ return result
+ d.addCallback(get_Alice_magicfolder)
+
+ # Alice invites Bob. Bob joins.
+ d.addCallback(lambda ign: self.do_invite(0, u"Bob\u00F8"))
+ def get_invite_code(result):
+ self.invite_code = result[1].strip()
+ d.addCallback(get_invite_code)
+ d.addCallback(lambda ign: self.do_join(1, bob_magic_dir, self.invite_code))
+ def get_bob_caps(ign):
+ self.bob_collective_dircap, self.bob_upload_dircap = self.get_caps_from_files(1)
+ d.addCallback(get_bob_caps)
+ d.addCallback(lambda ign: self.check_joined_config(1, self.bob_upload_dircap))
+ d.addCallback(lambda ign: self.check_config(1, bob_magic_dir))
+ def get_Bob_magicfolder(result):
+ self.bob_magicfolder = self.init_magicfolder(1, self.bob_upload_dircap,
+ self.bob_collective_dircap,
+ bob_magic_dir, bob_clock)
+ return result
+ d.addCallback(get_Bob_magicfolder)
+ return d
+
+
+class CreateMagicFolder(MagicFolderCLITestMixin, unittest.TestCase):
+ def test_create_and_then_invite_join(self):
+ self.basedir = "cli/MagicFolder/create-and-then-invite-join"
+ self.set_up_grid()
+ local_dir = os.path.join(self.basedir, "magic")
+ abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
+
+ d = self.do_create_magic_folder(0)
+ d.addCallback(lambda ign: self.do_invite(0, u"Alice"))
+ def get_invite_code_and_join((rc, stdout, stderr)):
+ invite_code = stdout.strip()
+ return self.do_join(0, unicode(local_dir), invite_code)
+ d.addCallback(get_invite_code_and_join)
+ def get_caps(ign):
+ self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
+ d.addCallback(get_caps)
+ d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap))
+ d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u))
+ return d
+
+ def test_create_error(self):
+ self.basedir = "cli/MagicFolder/create-error"
+ self.set_up_grid()
+
+ d = self.do_cli("magic-folder", "create", "m a g i c:", client_num=0)
+ def _done((rc, stdout, stderr)):
+ self.failIfEqual(rc, 0)
+ self.failUnlessIn("Alias names cannot contain spaces.", stderr)
+ d.addCallback(_done)
+ return d
+
+ def test_create_invite_join(self):
+ self.basedir = "cli/MagicFolder/create-invite-join"
+ self.set_up_grid()
+ local_dir = os.path.join(self.basedir, "magic")
+ abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
+
+ d = self.do_cli("magic-folder", "create", "magic:", "Alice", local_dir)
+ def _done((rc, stdout, stderr)):
+ self.failUnlessEqual(rc, 0)
+ self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
+ d.addCallback(_done)
+ d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap))
+ d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u))
+ return d
+
+ def test_create_invite_join_failure(self):
+ self.basedir = "cli/MagicFolder/create-invite-join-failure"
+ os.makedirs(self.basedir)
+
+ o = magic_folder_cli.CreateOptions()
+ o.parent = magic_folder_cli.MagicFolderCommand()
+ o.parent['node-directory'] = self.basedir
+ try:
+ o.parseArgs("magic:", "Alice", "-foo")
+ except usage.UsageError as e:
+ self.failUnlessIn("cannot start with '-'", str(e))
+ else:
+ self.fail("expected UsageError")
+
+ def test_join_failure(self):
+ self.basedir = "cli/MagicFolder/create-join-failure"
+ os.makedirs(self.basedir)
+
+ o = magic_folder_cli.JoinOptions()
+ o.parent = magic_folder_cli.MagicFolderCommand()
+ o.parent['node-directory'] = self.basedir
+ try:
+ o.parseArgs("URI:invite+URI:code", "-foo")
+ except usage.UsageError as e:
+ self.failUnlessIn("cannot start with '-'", str(e))
+ else:
+ self.fail("expected UsageError")
+
+ def test_join_twice_failure(self):
+ self.basedir = "cli/MagicFolder/create-join-twice-failure"
+ os.makedirs(self.basedir)
+ self.set_up_grid()
+ local_dir = os.path.join(self.basedir, "magic")
+ abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
+
+ d = self.do_create_magic_folder(0)
+ d.addCallback(lambda ign: self.do_invite(0, u"Alice"))
+ def get_invite_code_and_join((rc, stdout, stderr)):
+ self.invite_code = stdout.strip()
+ return self.do_join(0, unicode(local_dir), self.invite_code)
+ d.addCallback(get_invite_code_and_join)
+ def get_caps(ign):
+ self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
+ d.addCallback(get_caps)
+ d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap))
+ d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u))
+ def join_again(ignore):
+ return self.do_cli("magic-folder", "join", self.invite_code, local_dir, client_num=0)
+ d.addCallback(join_again)
+ def get_results(result):
+ (rc, out, err) = result
+ self.failUnlessEqual(out, "")
+ self.failUnlessIn("This client has already joined a magic folder.", err)
+ self.failUnlessIn("Use the 'tahoe magic-folder leave' command first.", err)
+ self.failIfEqual(rc, 0)
+ d.addCallback(get_results)
+ return d
+
+ def test_join_leave_join(self):
+ self.basedir = "cli/MagicFolder/create-join-leave-join"
+ os.makedirs(self.basedir)
+ self.set_up_grid()
+ local_dir = os.path.join(self.basedir, "magic")
+ abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
+
+ self.invite_code = None
+ d = self.do_create_magic_folder(0)
+ d.addCallback(lambda ign: self.do_invite(0, u"Alice"))
+ def get_invite_code_and_join((rc, stdout, stderr)):
+ self.failUnlessEqual(rc, 0)
+ self.invite_code = stdout.strip()
+ return self.do_join(0, unicode(local_dir), self.invite_code)
+ d.addCallback(get_invite_code_and_join)
+ def get_caps(ign):
+ self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
+ d.addCallback(get_caps)
+ d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap))
+ d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u))
+ d.addCallback(lambda ign: self.do_leave(0))
+
+ d.addCallback(lambda ign: self.do_join(0, unicode(local_dir), self.invite_code))
+ def get_caps(ign):
+ self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
+ d.addCallback(get_caps)
+ d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap))
+ d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u))
+
+ return d
+
+ def test_join_failures(self):
+ self.basedir = "cli/MagicFolder/create-join-failures"
+ os.makedirs(self.basedir)
+ self.set_up_grid()
+ local_dir = os.path.join(self.basedir, "magic")
+ abs_local_dir_u = abspath_expanduser_unicode(unicode(local_dir), long_path=False)
+
+ self.invite_code = None
+ d = self.do_create_magic_folder(0)
+ d.addCallback(lambda ign: self.do_invite(0, u"Alice"))
+ def get_invite_code_and_join((rc, stdout, stderr)):
+ self.failUnlessEqual(rc, 0)
+ self.invite_code = stdout.strip()
+ return self.do_join(0, unicode(local_dir), self.invite_code)
+ d.addCallback(get_invite_code_and_join)
+ def get_caps(ign):
+ self.collective_dircap, self.upload_dircap = self.get_caps_from_files(0)
+ d.addCallback(get_caps)
+ d.addCallback(lambda ign: self.check_joined_config(0, self.upload_dircap))
+ d.addCallback(lambda ign: self.check_config(0, abs_local_dir_u))
+
+ def check_success(result):
+ (rc, out, err) = result
+ self.failUnlessEqual(rc, 0)
+ def check_failure(result):
+ (rc, out, err) = result
+ self.failIfEqual(rc, 0)
+
+ def leave(ign):
+ return self.do_cli("magic-folder", "leave", client_num=0)
+ d.addCallback(leave)
+ d.addCallback(check_success)
+
+ collective_dircap_file = os.path.join(self.get_clientdir(i=0), u"private", u"collective_dircap")
+ upload_dircap_file = os.path.join(self.get_clientdir(i=0), u"private", u"magic_folder_dircap")
+ magic_folder_db_file = os.path.join(self.get_clientdir(i=0), u"private", u"magicfolderdb.sqlite")
+
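+ # 'join' should refuse to clobber any of these pre-existing private
+ # files, so each write below is expected to make the join fail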
+ def check_join_if_file(my_file):
+ fileutil.write(my_file, "my file data")
+ d2 = self.do_cli("magic-folder", "join", self.invite_code, local_dir, client_num=0)
+ d2.addCallback(check_failure)
+ return d2
+
+ for my_file in [collective_dircap_file, upload_dircap_file, magic_folder_db_file]:
+ d.addCallback(lambda ign, my_file: check_join_if_file(my_file), my_file)
+ d.addCallback(leave)
+ d.addCallback(check_success)
+
+ return d
from twisted.application import service
import allmydata
-import allmydata.frontends.drop_upload
+import allmydata.frontends.magic_folder
import allmydata.util.log
from allmydata.node import Node, OldConfigError, OldConfigOptionError, MissingConfigEntry, UnescapedHashError
"introducer.furl = %s\n"
)
-class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
+class Basic(testutil.ReallyEqualMixin, testutil.NonASCIIPathMixin, unittest.TestCase):
def test_loadable(self):
basedir = "test_client.Basic.test_loadable"
os.mkdir(basedir)
_check("helper.furl = None", None)
_check("helper.furl = pb://blah\n", "pb://blah")
- def test_create_drop_uploader(self):
- class MockDropUploader(service.MultiService):
- name = 'drop-upload'
+ def test_create_magic_folder_service(self):
+ class MockMagicFolder(service.MultiService):
+ name = 'magic-folder'
- def __init__(self, client, upload_dircap, local_dir_utf8, inotify=None):
+ def __init__(self, client, upload_dircap, collective_dircap, local_dir, dbfile, umask, inotify=None,
+ pending_delay=1.0):
service.MultiService.__init__(self)
self.client = client
+ self._umask = umask
self.upload_dircap = upload_dircap
- self.local_dir_utf8 = local_dir_utf8
+ self.collective_dircap = collective_dircap
+ self.local_dir = local_dir
+ self.dbfile = dbfile
self.inotify = inotify
- self.patch(allmydata.frontends.drop_upload, 'DropUploader', MockDropUploader)
+ def ready(self):
+ pass
+
+ self.patch(allmydata.frontends.magic_folder, 'MagicFolder', MockMagicFolder)
upload_dircap = "URI:DIR2:blah"
- local_dir_utf8 = u"loc\u0101l_dir".encode('utf-8')
+ local_dir_u = self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir")
+ local_dir_utf8 = local_dir_u.encode('utf-8')
config = (BASECONFIG +
"[storage]\n" +
"enabled = false\n" +
- "[drop_upload]\n" +
+ "[magic_folder]\n" +
"enabled = true\n")
- basedir1 = "test_client.Basic.test_create_drop_uploader1"
+ basedir1 = "test_client.Basic.test_create_magic_folder_service1"
os.mkdir(basedir1)
+
fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
config + "local.directory = " + local_dir_utf8 + "\n")
self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)
fileutil.write(os.path.join(basedir1, "tahoe.cfg"), config)
- fileutil.write(os.path.join(basedir1, "private", "drop_upload_dircap"), "URI:DIR2:blah")
+ fileutil.write(os.path.join(basedir1, "private", "magic_folder_dircap"), "URI:DIR2:blah")
+ fileutil.write(os.path.join(basedir1, "private", "collective_dircap"), "URI:DIR2:meow")
self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)
fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
- config + "upload.dircap = " + upload_dircap + "\n")
+ config.replace("[magic_folder]\n", "[drop_upload]\n"))
self.failUnlessRaises(OldConfigOptionError, client.Client, basedir1)
fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
config + "local.directory = " + local_dir_utf8 + "\n")
c1 = client.Client(basedir1)
- uploader = c1.getServiceNamed('drop-upload')
- self.failUnless(isinstance(uploader, MockDropUploader), uploader)
- self.failUnlessReallyEqual(uploader.client, c1)
- self.failUnlessReallyEqual(uploader.upload_dircap, upload_dircap)
- self.failUnlessReallyEqual(uploader.local_dir_utf8, local_dir_utf8)
- self.failUnless(uploader.inotify is None, uploader.inotify)
- self.failUnless(uploader.running)
+ magicfolder = c1.getServiceNamed('magic-folder')
+ self.failUnless(isinstance(magicfolder, MockMagicFolder), magicfolder)
+ self.failUnlessReallyEqual(magicfolder.client, c1)
+ self.failUnlessReallyEqual(magicfolder.upload_dircap, upload_dircap)
+ self.failUnlessReallyEqual(os.path.basename(magicfolder.local_dir), local_dir_u)
+ self.failUnless(magicfolder.inotify is None, magicfolder.inotify)
+ self.failUnless(magicfolder.running)
class Boom(Exception):
pass
- def BoomDropUploader(client, upload_dircap, local_dir_utf8, inotify=None):
+ def BoomMagicFolder(client, upload_dircap, collective_dircap, local_dir, dbfile, umask,
+ inotify=None, pending_delay=1.0):
raise Boom()
+ self.patch(allmydata.frontends.magic_folder, 'MagicFolder', BoomMagicFolder)
- logged_messages = []
- def mock_log(*args, **kwargs):
- logged_messages.append("%r %r" % (args, kwargs))
- self.patch(allmydata.util.log, 'msg', mock_log)
- self.patch(allmydata.frontends.drop_upload, 'DropUploader', BoomDropUploader)
-
- basedir2 = "test_client.Basic.test_create_drop_uploader2"
+ basedir2 = "test_client.Basic.test_create_magic_folder_service2"
os.mkdir(basedir2)
os.mkdir(os.path.join(basedir2, "private"))
fileutil.write(os.path.join(basedir2, "tahoe.cfg"),
BASECONFIG +
- "[drop_upload]\n" +
+ "[magic_folder]\n" +
"enabled = true\n" +
"local.directory = " + local_dir_utf8 + "\n")
- fileutil.write(os.path.join(basedir2, "private", "drop_upload_dircap"), "URI:DIR2:blah")
- c2 = client.Client(basedir2)
- self.failUnlessRaises(KeyError, c2.getServiceNamed, 'drop-upload')
- self.failUnless([True for arg in logged_messages if "Boom" in arg],
- logged_messages)
+ fileutil.write(os.path.join(basedir2, "private", "magic_folder_dircap"), "URI:DIR2:blah")
+ fileutil.write(os.path.join(basedir2, "private", "collective_dircap"), "URI:DIR2:meow")
+ self.failUnlessRaises(Boom, client.Client, basedir2)
def flush_but_dont_ignore(res):
+++ /dev/null
-
-import os, sys
-
-from twisted.trial import unittest
-from twisted.python import filepath, runtime
-from twisted.internet import defer
-
-from allmydata.interfaces import IDirectoryNode, NoSuchChildError
-
-from allmydata.util import fake_inotify
-from allmydata.util.encodingutil import get_filesystem_encoding
-from allmydata.util.consumer import download_to_data
-from allmydata.test.no_network import GridTestMixin
-from allmydata.test.common_util import ReallyEqualMixin, NonASCIIPathMixin
-from allmydata.test.common import ShouldFailMixin
-
-from allmydata.frontends.drop_upload import DropUploader
-
-
-class DropUploadTestMixin(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, NonASCIIPathMixin):
- """
- These tests will be run both with a mock notifier, and (on platforms that support it)
- with the real INotify.
- """
-
- def _get_count(self, name):
- return self.stats_provider.get_stats()["counters"].get(name, 0)
-
- def _test(self):
- self.uploader = None
- self.set_up_grid()
- self.local_dir = os.path.join(self.basedir, self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir"))
- self.mkdir_nonascii(self.local_dir)
-
- self.client = self.g.clients[0]
- self.stats_provider = self.client.stats_provider
-
- d = self.client.create_dirnode()
- def _made_upload_dir(n):
- self.failUnless(IDirectoryNode.providedBy(n))
- self.upload_dirnode = n
- self.upload_dircap = n.get_uri()
- self.uploader = DropUploader(self.client, self.upload_dircap, self.local_dir.encode('utf-8'),
- inotify=self.inotify)
- return self.uploader.startService()
- d.addCallback(_made_upload_dir)
-
- # Write something short enough for a LIT file.
- d.addCallback(lambda ign: self._test_file(u"short", "test"))
-
- # Write to the same file again with different data.
- d.addCallback(lambda ign: self._test_file(u"short", "different"))
-
- # Test that temporary files are not uploaded.
- d.addCallback(lambda ign: self._test_file(u"tempfile", "test", temporary=True))
-
- # Test that we tolerate creation of a subdirectory.
- d.addCallback(lambda ign: os.mkdir(os.path.join(self.local_dir, u"directory")))
-
- # Write something longer, and also try to test a Unicode name if the fs can represent it.
- name_u = self.unicode_or_fallback(u"l\u00F8ng", u"long")
- d.addCallback(lambda ign: self._test_file(name_u, "test"*100))
-
- # TODO: test that causes an upload failure.
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('drop_upload.files_failed'), 0))
-
- # Prevent unclean reactor errors.
- def _cleanup(res):
- d = defer.succeed(None)
- if self.uploader is not None:
- d.addCallback(lambda ign: self.uploader.finish(for_tests=True))
- d.addCallback(lambda ign: res)
- return d
- d.addBoth(_cleanup)
- return d
-
- def _test_file(self, name_u, data, temporary=False):
- previously_uploaded = self._get_count('drop_upload.files_uploaded')
- previously_disappeared = self._get_count('drop_upload.files_disappeared')
-
- d = defer.Deferred()
-
- # Note: this relies on the fact that we only get one IN_CLOSE_WRITE notification per file
- # (otherwise we would get a defer.AlreadyCalledError). Should we be relying on that?
- self.uploader.set_uploaded_callback(d.callback)
-
- path_u = os.path.join(self.local_dir, name_u)
- if sys.platform == "win32":
- path = filepath.FilePath(path_u)
- else:
- path = filepath.FilePath(path_u.encode(get_filesystem_encoding()))
-
- # We don't use FilePath.setContent() here because it creates a temporary file that
- # is renamed into place, which causes events that the test is not expecting.
- f = open(path.path, "wb")
- try:
- if temporary and sys.platform != "win32":
- os.unlink(path.path)
- f.write(data)
- finally:
- f.close()
- if temporary and sys.platform == "win32":
- os.unlink(path.path)
- self.notify_close_write(path)
-
- if temporary:
- d.addCallback(lambda ign: self.shouldFail(NoSuchChildError, 'temp file not uploaded', None,
- self.upload_dirnode.get, name_u))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('drop_upload.files_disappeared'),
- previously_disappeared + 1))
- else:
- d.addCallback(lambda ign: self.upload_dirnode.get(name_u))
- d.addCallback(download_to_data)
- d.addCallback(lambda actual_data: self.failUnlessReallyEqual(actual_data, data))
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('drop_upload.files_uploaded'),
- previously_uploaded + 1))
-
- d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('drop_upload.files_queued'), 0))
- return d
-
-
-class MockTest(DropUploadTestMixin, unittest.TestCase):
- """This can run on any platform, and even if twisted.internet.inotify can't be imported."""
-
- def test_errors(self):
- self.basedir = "drop_upload.MockTest.test_errors"
- self.set_up_grid()
- errors_dir = os.path.join(self.basedir, "errors_dir")
- os.mkdir(errors_dir)
-
- client = self.g.clients[0]
- d = client.create_dirnode()
- def _made_upload_dir(n):
- self.failUnless(IDirectoryNode.providedBy(n))
- upload_dircap = n.get_uri()
- readonly_dircap = n.get_readonly_uri()
-
- self.shouldFail(AssertionError, 'invalid local.directory', 'could not be represented',
- DropUploader, client, upload_dircap, '\xFF', inotify=fake_inotify)
- self.shouldFail(AssertionError, 'nonexistent local.directory', 'there is no directory',
- DropUploader, client, upload_dircap, os.path.join(self.basedir, "Laputa"), inotify=fake_inotify)
-
- fp = filepath.FilePath(self.basedir).child('NOT_A_DIR')
- fp.touch()
- self.shouldFail(AssertionError, 'non-directory local.directory', 'is not a directory',
- DropUploader, client, upload_dircap, fp.path, inotify=fake_inotify)
-
- self.shouldFail(AssertionError, 'bad upload.dircap', 'does not refer to a directory',
- DropUploader, client, 'bad', errors_dir, inotify=fake_inotify)
- self.shouldFail(AssertionError, 'non-directory upload.dircap', 'does not refer to a directory',
- DropUploader, client, 'URI:LIT:foo', errors_dir, inotify=fake_inotify)
- self.shouldFail(AssertionError, 'readonly upload.dircap', 'is not a writecap to a directory',
- DropUploader, client, readonly_dircap, errors_dir, inotify=fake_inotify)
- d.addCallback(_made_upload_dir)
- return d
-
- def test_drop_upload(self):
- self.inotify = fake_inotify
- self.basedir = "drop_upload.MockTest.test_drop_upload"
- return self._test()
-
- def notify_close_write(self, path):
- self.uploader._notifier.event(path, self.inotify.IN_CLOSE_WRITE)
-
-
-class RealTest(DropUploadTestMixin, unittest.TestCase):
- """This is skipped unless both Twisted and the platform support inotify."""
-
- def test_drop_upload(self):
- # We should always have runtime.platform.supportsINotify, because we're using
- # Twisted >= 10.1.
- if not runtime.platform.supportsINotify():
- raise unittest.SkipTest("Drop-upload support can only be tested for-real on an OS that supports inotify or equivalent.")
-
- self.inotify = None # use the appropriate inotify for the platform
- self.basedir = "drop_upload.RealTest.test_drop_upload"
- return self._test()
-
- def notify_close_write(self, path):
- # Writing to the file causes the notification.
- pass
--- /dev/null
+
+import os, sys
+
+from twisted.trial import unittest
+from twisted.internet import defer, task
+
+from allmydata.interfaces import IDirectoryNode
+from allmydata.util.assertutil import precondition
+
+from allmydata.util import fake_inotify, fileutil
+from allmydata.util.encodingutil import get_filesystem_encoding, to_filepath
+from allmydata.util.consumer import download_to_data
+from allmydata.test.no_network import GridTestMixin
+from allmydata.test.common_util import ReallyEqualMixin, NonASCIIPathMixin
+from allmydata.test.common import ShouldFailMixin
+from .test_cli_magic_folder import MagicFolderCLITestMixin
+
+from allmydata.frontends import magic_folder
+from allmydata.frontends.magic_folder import MagicFolder, Downloader, WriteFileMixin
+from allmydata import magicfolderdb, magicpath
+from allmydata.util.fileutil import abspath_expanduser_unicode
+from allmydata.immutable.upload import Data
+
+
+class MagicFolderTestMixin(MagicFolderCLITestMixin, ShouldFailMixin, ReallyEqualMixin, NonASCIIPathMixin):
+ """
+ These tests will be run both with a mock notifier, and (on platforms that support it)
+ with the real INotify.
+ """
+
+ def setUp(self):
+ GridTestMixin.setUp(self)
+ temp = self.mktemp()
+ self.basedir = abspath_expanduser_unicode(temp.decode(get_filesystem_encoding()))
+ self.magicfolder = None
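+ # poll the collective immediately rather than waiting for the default
+ # remote-scan interval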
+ self.patch(Downloader, 'REMOTE_SCAN_INTERVAL', 0)
+
+ def _get_count(self, name, client=None):
+ counters = (client or self.get_client()).stats_provider.get_stats()["counters"]
+ return counters.get('magic_folder.%s' % (name,), 0)
+
+ def _createdb(self):
+ dbfile = abspath_expanduser_unicode(u"magicfolderdb.sqlite", base=self.basedir)
+ mdb = magicfolderdb.get_magicfolderdb(dbfile, create_version=(magicfolderdb.SCHEMA_v1, 1))
+ self.failUnless(mdb, "unable to create magicfolderdb from %r" % (dbfile,))
+ self.failUnlessEqual(mdb.VERSION, 1)
+ return mdb
+
+ def _restart_client(self, ign):
+ #print "_restart_client"
+ d = self.restart_client()
+ d.addCallback(self._wait_until_started)
+ return d
+
+ def _wait_until_started(self, ign):
+ #print "_wait_until_started"
+ self.magicfolder = self.get_client().getServiceNamed('magic-folder')
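+ # ready() signals the service to start processing its upload and
+ # download queues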
+ return self.magicfolder.ready()
+
+ def test_db_basic(self):
+ fileutil.make_dirs(self.basedir)
+ self._createdb()
+
+ def test_db_persistence(self):
+ """Test that a file upload creates an entry in the database."""
+
+ fileutil.make_dirs(self.basedir)
+ db = self._createdb()
+
+ relpath1 = u"myFile1"
+ pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False,
+ exists=True, size=1, mtime=123, ctime=456)
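+ # argument names here are an assumption read off this call site:
+ # relpath, version, last-uploaded URI, last-downloaded URI,
+ # last-downloaded timestamp, pathinfo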
+ db.did_upload_version(relpath1, 0, 'URI:LIT:1', 'URI:LIT:0', 0, pathinfo)
+
+ c = db.cursor
+ c.execute("SELECT size, mtime, ctime"
+ " FROM local_files"
+ " WHERE path=?",
+ (relpath1,))
+ row = c.fetchone()
+ self.failUnlessEqual(row, (pathinfo.size, pathinfo.mtime, pathinfo.ctime))
+
+ # The second test uses magic_folder.is_new_file (rather than a direct
+ # SQL query) to confirm the previous upload entry in the db.
+ relpath2 = u"myFile2"
+ path2 = os.path.join(self.basedir, relpath2)
+ fileutil.write(path2, "meow\n")
+ pathinfo = fileutil.get_pathinfo(path2)
+ db.did_upload_version(relpath2, 0, 'URI:LIT:2', 'URI:LIT:1', 0, pathinfo)
+ db_entry = db.get_db_entry(relpath2)
+ self.failUnlessFalse(magic_folder.is_new_file(pathinfo, db_entry))
+
+ different_pathinfo = fileutil.PathInfo(isdir=False, isfile=True, islink=False,
+ exists=True, size=0, mtime=pathinfo.mtime, ctime=pathinfo.ctime)
+ self.failUnlessTrue(magic_folder.is_new_file(different_pathinfo, db_entry))
+
+ def test_magicfolder_start_service(self):
+ self.set_up_grid()
+
+ self.local_dir = abspath_expanduser_unicode(self.unicode_or_fallback(u"l\u00F8cal_dir", u"local_dir"),
+ base=self.basedir)
+ self.mkdir_nonascii(self.local_dir)
+
+ d = defer.succeed(None)
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 0))
+
+ d.addCallback(lambda ign: self.create_invite_join_magic_folder(u"Alice", self.local_dir))
+ d.addCallback(self._restart_client)
+
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 1))
+ d.addBoth(self.cleanup)
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.dirs_monitored'), 0))
+ return d
+
+ def test_scan_once_on_startup(self):
+ self.set_up_grid()
+ self.local_dir = abspath_expanduser_unicode(u"test_scan_once_on_startup", base=self.basedir)
+ self.mkdir_nonascii(self.local_dir)
+ self.collective_dircap = ""
+
+ alice_clock = task.Clock()
+ bob_clock = task.Clock()
+ d = self.setup_alice_and_bob(alice_clock, bob_clock)
+
+ def upload_stuff(ignore):
+ uploadable = Data("", self.alice_magicfolder._client.convergence)
+ return self.alice_magicfolder._client.upload(uploadable)
+ d.addCallback(upload_stuff)
+ def advance_clock(ignore):
+ # push the deterministic clock far enough that any scan scheduled
+ # at startup would have fired by now
+ alice_clock.advance(99)
+ d.addCallback(advance_clock)
+
+ # nothing in the magic folder itself changed, so no objects should
+ # have been queued, uploaded, or downloaded
+ d.addCallback(lambda ign: self._check_uploader_count('files_uploaded', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_queued', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 0, magic=self.alice_magicfolder))
+
+ def _cleanup(ign, magicfolder, clock):
+ if magicfolder is not None:
+ d2 = magicfolder.finish()
+ clock.advance(0)
+ return d2
+ def cleanup_Alice_and_Bob(result):
+ print "cleanup alice bob test\n"
+ d = defer.succeed(None)
+ d.addCallback(_cleanup, self.alice_magicfolder, alice_clock)
+ d.addCallback(_cleanup, self.bob_magicfolder, bob_clock)
+ d.addCallback(lambda ign: result)
+ return d
+
+ d.addBoth(cleanup_Alice_and_Bob)
+ return d
+
+ def test_move_tree(self):
+ self.set_up_grid()
+
+ self.local_dir = abspath_expanduser_unicode(self.unicode_or_fallback(u"l\u00F8cal_dir", u"local_dir"),
+ base=self.basedir)
+ self.mkdir_nonascii(self.local_dir)
+
+ empty_tree_name = self.unicode_or_fallback(u"empty_tr\u00EAe", u"empty_tree")
+ empty_tree_dir = abspath_expanduser_unicode(empty_tree_name, base=self.basedir)
+ new_empty_tree_dir = abspath_expanduser_unicode(empty_tree_name, base=self.local_dir)
+
+ small_tree_name = self.unicode_or_fallback(u"small_tr\u00EAe", u"small_tree")
+ small_tree_dir = abspath_expanduser_unicode(small_tree_name, base=self.basedir)
+ new_small_tree_dir = abspath_expanduser_unicode(small_tree_name, base=self.local_dir)
+
+ d = self.create_invite_join_magic_folder(u"Alice", self.local_dir)
+ d.addCallback(self._restart_client)
+
+ def _check_move_empty_tree(res):
+ print "_check_move_empty_tree"
+ uploaded_d = self.magicfolder.uploader.set_hook('processed')
+ self.mkdir_nonascii(empty_tree_dir)
+ os.rename(empty_tree_dir, new_empty_tree_dir)
+ self.notify(to_filepath(new_empty_tree_dir), self.inotify.IN_MOVED_TO)
+
+ return uploaded_d
+ d.addCallback(_check_move_empty_tree)
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 1))
+
+ def _check_move_small_tree(res):
+ print "_check_move_small_tree"
+ uploaded_d = self.magicfolder.uploader.set_hook('processed', ignore_count=1)
+ self.mkdir_nonascii(small_tree_dir)
+ what_path = abspath_expanduser_unicode(u"what", base=small_tree_dir)
+ fileutil.write(what_path, "say when")
+ os.rename(small_tree_dir, new_small_tree_dir)
+ self.notify(to_filepath(new_small_tree_dir), self.inotify.IN_MOVED_TO)
+
+ return uploaded_d
+ d.addCallback(_check_move_small_tree)
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 3))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 1))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
+
+ def _check_moved_tree_is_watched(res):
+ print "_check_moved_tree_is_watched"
+ uploaded_d = self.magicfolder.uploader.set_hook('processed')
+ another_path = abspath_expanduser_unicode(u"another", base=new_small_tree_dir)
+ fileutil.write(another_path, "file")
+ self.notify(to_filepath(another_path), self.inotify.IN_CLOSE_WRITE)
+
+ return uploaded_d
+ d.addCallback(_check_moved_tree_is_watched)
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 4))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 2))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
+
+ # Files that are moved out of the upload directory should no longer be watched.
+ #def _move_dir_away(ign):
+ # os.rename(new_empty_tree_dir, empty_tree_dir)
+ # # Wuh? Why don't we get this event for the real test?
+ # #self.notify(to_filepath(new_empty_tree_dir), self.inotify.IN_MOVED_FROM)
+ #d.addCallback(_move_dir_away)
+ #def create_file(val):
+ # test_file = abspath_expanduser_unicode(u"what", base=empty_tree_dir)
+ # fileutil.write(test_file, "meow")
+ # #self.notify(...)
+ # return
+ #d.addCallback(create_file)
+ #d.addCallback(lambda ign: time.sleep(1)) # XXX ICK
+ #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+ #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 4))
+ #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.files_uploaded'), 2))
+ #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+ #d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.directories_created'), 2))
+
+ d.addBoth(self.cleanup)
+ return d
+
+ def test_persistence(self):
+ """
+ Perform an upload of a given file and then stop the client.
+ Start a new client and magic-folder service... and verify that the file is NOT uploaded
+ a second time. This exercises the database persistence together with
+ the startup and shutdown code paths of the magic-folder service.
+ """
+ self.set_up_grid()
+ self.local_dir = abspath_expanduser_unicode(u"test_persistence", base=self.basedir)
+ self.mkdir_nonascii(self.local_dir)
+ self.collective_dircap = ""
+
+ d = defer.succeed(None)
+ d.addCallback(lambda ign: self.create_invite_join_magic_folder(u"Alice", self.local_dir))
+ d.addCallback(self._restart_client)
+
+ def create_test_file(filename):
+ d2 = self.magicfolder.uploader.set_hook('processed')
+ test_file = abspath_expanduser_unicode(filename, base=self.local_dir)
+ fileutil.write(test_file, "meow %s" % filename)
+ self.notify(to_filepath(test_file), self.inotify.IN_CLOSE_WRITE)
+ return d2
+ d.addCallback(lambda ign: create_test_file(u"what1"))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+ d.addCallback(self.cleanup)
+
+ d.addCallback(self._restart_client)
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 1))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+ d.addCallback(lambda ign: create_test_file(u"what2"))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'), 2))
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+ d.addBoth(self.cleanup)
+ return d
+
+ @defer.inlineCallbacks
+ def test_delete(self):
+ self.set_up_grid()
+ self.local_dir = os.path.join(self.basedir, u"local_dir")
+ self.mkdir_nonascii(self.local_dir)
+
+ yield self.create_invite_join_magic_folder(u"Alice\u0101", self.local_dir)
+ yield self._restart_client(None)
+
+ try:
+ # create a file
+ up_proc = self.magicfolder.uploader.set_hook('processed')
+ # down_proc = self.magicfolder.downloader.set_hook('processed')
+ path = os.path.join(self.local_dir, u'foo')
+ fileutil.write(path, 'foo\n')
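+ # hand the uploader the event a real inotify would deliver (the mock
+ # notifier injects it; the real one observes the write natively)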
+ self.notify(to_filepath(path), self.inotify.IN_CLOSE_WRITE)
+ yield up_proc
+ self.assertTrue(os.path.exists(path))
+
+ # the real test part: delete the file
+ up_proc = self.magicfolder.uploader.set_hook('processed')
+ os.unlink(path)
+ self.notify(to_filepath(path), self.inotify.IN_DELETE)
+ yield up_proc
+ self.assertFalse(os.path.exists(path))
+
+ # ensure we still have a DB entry, and that the version is 1
+ node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo')
+ self.assertTrue(node is not None, "Failed to find %r in DMD" % (path,))
+ self.failUnlessEqual(metadata['version'], 1)
+
+ finally:
+ yield self.cleanup(None)
+
+ @defer.inlineCallbacks
+ def test_delete_and_restore(self):
+ self.set_up_grid()
+ self.local_dir = os.path.join(self.basedir, u"local_dir")
+ self.mkdir_nonascii(self.local_dir)
+
+ yield self.create_invite_join_magic_folder(u"Alice\u0101", self.local_dir)
+ yield self._restart_client(None)
+
+ try:
+ # create a file
+ up_proc = self.magicfolder.uploader.set_hook('processed')
+ # down_proc = self.magicfolder.downloader.set_hook('processed')
+ path = os.path.join(self.local_dir, u'foo')
+ fileutil.write(path, 'foo\n')
+ self.notify(to_filepath(path), self.inotify.IN_CLOSE_WRITE)
+ yield up_proc
+ self.assertTrue(os.path.exists(path))
+
+ # delete the file
+ up_proc = self.magicfolder.uploader.set_hook('processed')
+ os.unlink(path)
+ self.notify(to_filepath(path), self.inotify.IN_DELETE)
+ yield up_proc
+ self.assertFalse(os.path.exists(path))
+
+ # ensure we still have a DB entry, and that the version is 1
+ node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo')
+ self.assertTrue(node is not None, "Failed to find %r in DMD" % (path,))
+ self.failUnlessEqual(metadata['version'], 1)
+
+ # restore the file, with different contents
+ up_proc = self.magicfolder.uploader.set_hook('processed')
+ path = os.path.join(self.local_dir, u'foo')
+ fileutil.write(path, 'bar\n')
+ self.notify(to_filepath(path), self.inotify.IN_CLOSE_WRITE)
+ yield up_proc
+
+ # ensure we still have a DB entry, and that the version is 2
+ node, metadata = yield self.magicfolder.downloader._get_collective_latest_file(u'foo')
+ self.assertTrue(node is not None, "Failed to find %r in DMD" % (path,))
+ self.failUnlessEqual(metadata['version'], 2)
+
+ finally:
+ yield self.cleanup(None)
+
+ @defer.inlineCallbacks
+ def test_alice_delete_bob_restore(self):
+ alice_clock = task.Clock()
+ bob_clock = task.Clock()
+ yield self.setup_alice_and_bob(alice_clock, bob_clock)
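+ # both clients run on deterministic task.Clock instances, so each
+ # advance(0) below runs whatever turn the uploader/downloader queued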
+ alice_dir = self.alice_magicfolder.uploader._local_path_u
+ bob_dir = self.bob_magicfolder.uploader._local_path_u
+ alice_fname = os.path.join(alice_dir, 'blam')
+ bob_fname = os.path.join(bob_dir, 'blam')
+
+ try:
+ # alice creates a file, bob downloads it
+ alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
+ bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
+
+ fileutil.write(alice_fname, 'contents0\n')
+ self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+
+ alice_clock.advance(0)
+ yield alice_proc # alice uploads
+
+ bob_clock.advance(0)
+ yield bob_proc # bob downloads
+
+ # check the state
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0)
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0)
+ yield self.failUnlessReallyEqual(
+ self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client),
+ 0
+ )
+ yield self.failUnlessReallyEqual(
+ self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client),
+ 1
+ )
+
+ print("BOB DELETE")
+ # now bob deletes it (bob should upload, alice download)
+ bob_proc = self.bob_magicfolder.uploader.set_hook('processed')
+ alice_proc = self.alice_magicfolder.downloader.set_hook('processed')
+ os.unlink(bob_fname)
+ self.notify(to_filepath(bob_fname), self.inotify.IN_DELETE, magic=self.bob_magicfolder)
+
+ bob_clock.advance(0)
+ yield bob_proc
+ alice_clock.advance(0)
+ yield alice_proc
+
+ # check versions
+ node, metadata = yield self.alice_magicfolder.downloader._get_collective_latest_file(u'blam')
+ self.assertTrue(metadata['deleted'])
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1)
+
+ print("ALICE RESTORE")
+ # now alice restores it (alice should upload, bob download)
+ alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
+ bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
+ fileutil.write(alice_fname, 'new contents\n')
+ self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+
+ alice_clock.advance(0)
+ yield alice_proc
+ bob_clock.advance(0)
+ yield bob_proc
+
+ # check versions
+ node, metadata = yield self.alice_magicfolder.downloader._get_collective_latest_file(u'blam')
+ self.assertTrue('deleted' not in metadata or not metadata['deleted'])
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 2)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 2)
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 2)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 2)
+
+ finally:
+ # cleanup
+ d0 = self.alice_magicfolder.finish()
+ alice_clock.advance(0)
+ yield d0
+
+ d1 = self.bob_magicfolder.finish()
+ bob_clock.advance(0)
+ yield d1
+
+ @defer.inlineCallbacks
+ def test_alice_sees_bobs_delete_with_error(self):
+ # alice creates a file, bob deletes it -- and we also arrange
+ # for Alice's file to have "gone missing" as well.
+ alice_clock = task.Clock()
+ bob_clock = task.Clock()
+ yield self.setup_alice_and_bob(alice_clock, bob_clock)
+ alice_dir = self.alice_magicfolder.uploader._local_path_u
+ bob_dir = self.bob_magicfolder.uploader._local_path_u
+ alice_fname = os.path.join(alice_dir, 'blam')
+ bob_fname = os.path.join(bob_dir, 'blam')
+
+ try:
+ # alice creates a file, bob downloads it
+ alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
+ bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
+
+ fileutil.write(alice_fname, 'contents0\n')
+ self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+
+ alice_clock.advance(0)
+ yield alice_proc # alice uploads
+
+ bob_clock.advance(0)
+ yield bob_proc # bob downloads
+
+ # check the state
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0)
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0)
+ yield self.failUnlessReallyEqual(
+ self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client),
+ 0
+ )
+ yield self.failUnlessReallyEqual(
+ self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client),
+ 1
+ )
+
+ # now bob deletes it (bob should upload, alice download)
+ bob_proc = self.bob_magicfolder.uploader.set_hook('processed')
+ alice_proc = self.alice_magicfolder.downloader.set_hook('processed')
+ os.unlink(bob_fname)
+ self.notify(to_filepath(bob_fname), self.inotify.IN_DELETE, magic=self.bob_magicfolder)
+ # just after notifying bob, we also delete alice's,
+ # covering the 'except' flow in _rename_deleted_file()
+ os.unlink(alice_fname)
+
+ bob_clock.advance(0)
+ yield bob_proc
+ alice_clock.advance(0)
+ yield alice_proc
+
+ # check versions
+ node, metadata = yield self.alice_magicfolder.downloader._get_collective_latest_file(u'blam')
+ self.assertTrue(metadata['deleted'])
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1)
+
+ finally:
+ # cleanup
+ d0 = self.alice_magicfolder.finish()
+ alice_clock.advance(0)
+ yield d0
+
+ d1 = self.bob_magicfolder.finish()
+ bob_clock.advance(0)
+ yield d1
+
+ @defer.inlineCallbacks
+ def test_alice_create_bob_update(self):
+ alice_clock = task.Clock()
+ bob_clock = task.Clock()
+ yield self.setup_alice_and_bob(alice_clock, bob_clock)
+ alice_dir = self.alice_magicfolder.uploader._local_path_u
+ bob_dir = self.bob_magicfolder.uploader._local_path_u
+ alice_fname = os.path.join(alice_dir, 'blam')
+ bob_fname = os.path.join(bob_dir, 'blam')
+
+ try:
+ # alice creates a file, bob downloads it
+ alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
+ bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
+
+ fileutil.write(alice_fname, 'contents0\n')
+ self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+
+ alice_clock.advance(0)
+ yield alice_proc # alice uploads
+
+ bob_clock.advance(0)
+ yield bob_proc # bob downloads
+
+ # check the state
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0)
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0)
+ yield self.failUnlessReallyEqual(
+ self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client),
+ 0
+ )
+ yield self.failUnlessReallyEqual(
+ self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client),
+ 1
+ )
+
+ # now bob updates it (bob should upload, alice download)
+ bob_proc = self.bob_magicfolder.uploader.set_hook('processed')
+ alice_proc = self.alice_magicfolder.downloader.set_hook('processed')
+ fileutil.write(bob_fname, 'bob wuz here\n')
+ self.notify(to_filepath(bob_fname), self.inotify.IN_CLOSE_WRITE, magic=self.bob_magicfolder)
+
+ bob_clock.advance(0)
+ yield bob_proc
+ alice_clock.advance(0)
+ yield alice_proc
+
+ # check the state
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1)
+
+ finally:
+ # cleanup
+ d0 = self.alice_magicfolder.finish()
+ alice_clock.advance(0)
+ yield d0
+
+ d1 = self.bob_magicfolder.finish()
+ bob_clock.advance(0)
+ yield d1
+
+ @defer.inlineCallbacks
+ def test_alice_delete_and_restore(self):
+ alice_clock = task.Clock()
+ bob_clock = task.Clock()
+ yield self.setup_alice_and_bob(alice_clock, bob_clock)
+ alice_dir = self.alice_magicfolder.uploader._local_path_u
+ bob_dir = self.bob_magicfolder.uploader._local_path_u
+ alice_fname = os.path.join(alice_dir, 'blam')
+ bob_fname = os.path.join(bob_dir, 'blam')
+
+ try:
+ # alice creates a file, bob downloads it
+ alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
+ bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
+
+ fileutil.write(alice_fname, 'contents0\n')
+ self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+
+ alice_clock.advance(0)
+ yield alice_proc # alice uploads
+
+ bob_clock.advance(0)
+ yield bob_proc # bob downloads
+
+ # check the state
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 0)
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 0)
+ yield self.failUnlessReallyEqual(
+ self._get_count('downloader.objects_failed', client=self.bob_magicfolder._client),
+ 0
+ )
+ yield self.failUnlessReallyEqual(
+ self._get_count('downloader.objects_downloaded', client=self.bob_magicfolder._client),
+ 1
+ )
+ self.failUnless(os.path.exists(bob_fname))
+
+ # now alice deletes it (alice should upload, bob download)
+ alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
+ bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
+ os.unlink(alice_fname)
+ self.notify(to_filepath(alice_fname), self.inotify.IN_DELETE, magic=self.alice_magicfolder)
+
+ alice_clock.advance(0)
+ yield alice_proc
+ bob_clock.advance(0)
+ yield bob_proc
+
+ # check the state
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 1)
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 1)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 1)
+ self.failIf(os.path.exists(bob_fname))
+
+ # now alice restores the file (with new contents)
+ alice_proc = self.alice_magicfolder.uploader.set_hook('processed')
+ bob_proc = self.bob_magicfolder.downloader.set_hook('processed')
+ fileutil.write(alice_fname, 'alice wuz here\n')
+ self.notify(to_filepath(alice_fname), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+
+ alice_clock.advance(0)
+ yield alice_proc
+ bob_clock.advance(0)
+ yield bob_proc
+
+ # check the state
+ yield self._check_version_in_dmd(self.bob_magicfolder, u"blam", 2)
+ yield self._check_version_in_local_db(self.bob_magicfolder, u"blam", 2)
+ yield self._check_version_in_dmd(self.alice_magicfolder, u"blam", 2)
+ yield self._check_version_in_local_db(self.alice_magicfolder, u"blam", 2)
+ self.failUnless(os.path.exists(bob_fname))
+
+ finally:
+ # cleanup
+ d0 = self.alice_magicfolder.finish()
+ alice_clock.advance(0)
+ yield d0
+
+ d1 = self.bob_magicfolder.finish()
+ bob_clock.advance(0)
+ yield d1
+
+ def test_magic_folder(self):
+ self.set_up_grid()
+ self.local_dir = os.path.join(self.basedir, self.unicode_or_fallback(u"loc\u0101l_dir", u"local_dir"))
+ self.mkdir_nonascii(self.local_dir)
+
+ d = self.create_invite_join_magic_folder(u"Alice\u0101", self.local_dir)
+ d.addCallback(self._restart_client)
+
+ # Write something short enough for a LIT file.
+ d.addCallback(lambda ign: self._check_file(u"short", "test"))
+
+ # Write to the same file again with different data.
+ d.addCallback(lambda ign: self._check_file(u"short", "different"))
+
+ # Test that temporary files are not uploaded.
+ d.addCallback(lambda ign: self._check_file(u"tempfile", "test", temporary=True))
+
+ # Test creation of a subdirectory.
+ d.addCallback(lambda ign: self._check_mkdir(u"directory"))
+
+ # Write something longer, and also try to test a Unicode name if the fs can represent it.
+ name_u = self.unicode_or_fallback(u"l\u00F8ng", u"long")
+ d.addCallback(lambda ign: self._check_file(name_u, "test"*100))
+
+ # TODO: test that causes an upload failure.
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+
+ d.addBoth(self.cleanup)
+ return d
+
+ def _check_mkdir(self, name_u):
+ return self._check_file(name_u + u"/", "", directory=True)
+
+ def _check_file(self, name_u, data, temporary=False, directory=False):
+ precondition(not (temporary and directory), temporary=temporary, directory=directory)
+
+ print "%r._check_file(%r, %r, temporary=%r, directory=%r)" % (self, name_u, data, temporary, directory)
+ previously_uploaded = self._get_count('uploader.objects_succeeded')
+ previously_disappeared = self._get_count('uploader.objects_disappeared')
+
+ d = self.magicfolder.uploader.set_hook('processed')
+
+ path_u = abspath_expanduser_unicode(name_u, base=self.local_dir)
+ path = to_filepath(path_u)
+
+ if directory:
+ os.mkdir(path_u)
+ event_mask = self.inotify.IN_CREATE | self.inotify.IN_ISDIR
+ else:
+ # We don't use FilePath.setContent() here because it creates a temporary file that
+ # is renamed into place, which causes events that the test is not expecting.
+ f = open(path_u, "wb")
+ try:
+ if temporary and sys.platform != "win32":
+ os.unlink(path_u)
+ f.write(data)
+ finally:
+ f.close()
+ if temporary and sys.platform == "win32":
+ os.unlink(path_u)
+ self.notify(path, self.inotify.IN_DELETE, flush=False)
+ event_mask = self.inotify.IN_CLOSE_WRITE
+
+ self.notify(path, event_mask)
+ encoded_name_u = magicpath.path2magic(name_u)
+
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_failed'), 0))
+ if temporary:
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_disappeared'),
+ previously_disappeared + 1))
+ else:
+ def _here(res, n):
+ print "here %r %r" % (n, res)
+ return res
+ d.addBoth(_here, 1)
+ d.addCallback(lambda ign: self.upload_dirnode.list())
+ d.addBoth(_here, 1.5)
+ d.addCallback(lambda ign: self.upload_dirnode.get(encoded_name_u))
+ d.addBoth(_here, 2)
+ d.addCallback(download_to_data)
+ d.addBoth(_here, 3)
+ d.addCallback(lambda actual_data: self.failUnlessReallyEqual(actual_data, data))
+ d.addBoth(_here, 4)
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_succeeded'),
+ previously_uploaded + 1))
+
+ d.addCallback(lambda ign: self.failUnlessReallyEqual(self._get_count('uploader.objects_queued'), 0))
+ return d
+
+ def _check_version_in_dmd(self, magicfolder, relpath_u, expected_version):
+ encoded_name_u = magicpath.path2magic(relpath_u)
+ d = magicfolder.downloader._get_collective_latest_file(encoded_name_u)
+ def check_latest(result):
+ node, metadata = result
+ if node is not None:
+ self.failUnlessEqual(metadata['version'], expected_version)
+ d.addCallback(check_latest)
+ return d
+
+ def _check_version_in_local_db(self, magicfolder, relpath_u, expected_version):
+ db_entry = magicfolder._db.get_db_entry(relpath_u)
+ if db_entry is not None:
+ #print "_check_version_in_local_db: %r has version %s" % (relpath_u, version)
+ self.failUnlessEqual(db_entry.version, expected_version)
+
+ def _check_file_gone(self, magicfolder, relpath_u):
+ path = os.path.join(magicfolder.uploader._local_path_u, relpath_u)
+ self.assertTrue(not os.path.exists(path))
+
+ def _check_uploader_count(self, name, expected, magic=None):
+ self.failUnlessReallyEqual(self._get_count('uploader.'+name, client=(magic or self.alice_magicfolder)._client),
+ expected)
+
+ def _check_downloader_count(self, name, expected, magic=None):
+ self.failUnlessReallyEqual(self._get_count('downloader.'+name, client=(magic or self.bob_magicfolder)._client),
+ expected)
+
+ def test_alice_bob(self):
+ alice_clock = task.Clock()
+ bob_clock = task.Clock()
+ d = self.setup_alice_and_bob(alice_clock, bob_clock)
+
+ def _wait_for_Alice(ign, downloaded_d):
+ print "Now waiting for Alice to download\n"
+ alice_clock.advance(0)
+ return downloaded_d
+
+ def _wait_for_Bob(ign, downloaded_d):
+ print "Now waiting for Bob to download\n"
+ bob_clock.advance(0)
+ return downloaded_d
+
+ def _wait_for(ign, something_to_do, alice=True):
+ if alice:
+ downloaded_d = self.bob_magicfolder.downloader.set_hook('processed')
+ uploaded_d = self.alice_magicfolder.uploader.set_hook('processed')
+ else:
+ downloaded_d = self.alice_magicfolder.downloader.set_hook('processed')
+ uploaded_d = self.bob_magicfolder.uploader.set_hook('processed')
+ something_to_do()
+ if alice:
+ print "Waiting for Alice to upload\n"
+ alice_clock.advance(0)
+ uploaded_d.addCallback(_wait_for_Bob, downloaded_d)
+ else:
+ print "Waiting for Bob to upload\n"
+ bob_clock.advance(0)
+ uploaded_d.addCallback(_wait_for_Alice, downloaded_d)
+ return uploaded_d
+
+ def Alice_to_write_a_file():
+ print "Alice writes a file\n"
+ self.file_path = abspath_expanduser_unicode(u"file1", base=self.alice_magicfolder.uploader._local_path_u)
+ fileutil.write(self.file_path, "meow, meow meow. meow? meow meow! meow.")
+ self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+ d.addCallback(_wait_for, Alice_to_write_a_file)
+
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 0))
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 0))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_failed', 0))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 1))
+ d.addCallback(lambda ign: self._check_uploader_count('files_uploaded', 1))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_queued', 0))
+ d.addCallback(lambda ign: self._check_uploader_count('directories_created', 0))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_conflicted', 0))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_conflicted', 0, magic=self.bob_magicfolder))
+
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 0))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 1))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 0, magic=self.bob_magicfolder))
+
+ def Alice_to_delete_file():
+ print "Alice deletes the file!\n"
+ os.unlink(self.file_path)
+ self.notify(to_filepath(self.file_path), self.inotify.IN_DELETE, magic=self.alice_magicfolder)
+ d.addCallback(_wait_for, Alice_to_delete_file)
+
+ def notify_bob_moved(ign):
+ d0 = self.bob_magicfolder.uploader.set_hook('processed')
+ p = abspath_expanduser_unicode(u"file1", base=self.bob_magicfolder.uploader._local_path_u)
+ self.notify(to_filepath(p), self.inotify.IN_MOVED_FROM, magic=self.bob_magicfolder, flush=False)
+ self.notify(to_filepath(p + u'.backup'), self.inotify.IN_MOVED_TO, magic=self.bob_magicfolder)
+ bob_clock.advance(0)
+ return d0
+ d.addCallback(notify_bob_moved)
+
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 1))
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 1))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_failed', 0))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 2))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_not_uploaded', 1, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 1, magic=self.bob_magicfolder))
+
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 1))
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file1", 1))
+ d.addCallback(lambda ign: self._check_file_gone(self.bob_magicfolder, u"file1"))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 2))
+
+ def Alice_to_rewrite_file():
+ print "Alice rewrites file\n"
+ self.file_path = abspath_expanduser_unicode(u"file1", base=self.alice_magicfolder.uploader._local_path_u)
+ fileutil.write(self.file_path, "Alice suddenly sees the white rabbit running into the forest.")
+ self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+ d.addCallback(_wait_for, Alice_to_rewrite_file)
+
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 2))
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 2))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_failed', 0))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 3))
+ d.addCallback(lambda ign: self._check_uploader_count('files_uploaded', 3))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_queued', 0))
+ d.addCallback(lambda ign: self._check_uploader_count('directories_created', 0))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0))
+
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file1", 2))
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 2))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 3))
+
+ path_u = u"/tmp/magic_folder_test"
+ encoded_path_u = magicpath.path2magic(u"/tmp/magic_folder_test")
+
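+ # A collective entry whose name decodes to an absolute path must never cause
+ # Bob to write outside his magic folder (the security bug from ticket #2506).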
+ def Alice_tries_to_p0wn_Bob(ign):
+ print "Alice tries to p0wn Bob\n"
+ processed_d = self.bob_magicfolder.downloader.set_hook('processed')
+
+ # upload a file that would provoke the security bug from #2506
+ uploadable = Data("", self.alice_magicfolder._client.convergence)
+ alice_dmd = self.alice_magicfolder.uploader._upload_dirnode
+
+ d2 = alice_dmd.add_file(encoded_path_u, uploadable, metadata={"version": 0}, overwrite=True)
+ d2.addCallback(lambda ign: self.failUnless(alice_dmd.has_child(encoded_path_u)))
+ d2.addCallback(_wait_for_Bob, processed_d)
+ return d2
+ d.addCallback(Alice_tries_to_p0wn_Bob)
+
+ d.addCallback(lambda ign: self.failIf(os.path.exists(path_u)))
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, encoded_path_u, None))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 3))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0))
+
+ def Bob_to_rewrite_file():
+ print "Bob rewrites file\n"
+ self.file_path = abspath_expanduser_unicode(u"file1", base=self.bob_magicfolder.uploader._local_path_u)
+ print "---- bob's file is %r" % (self.file_path,)
+ fileutil.write(self.file_path, "No white rabbit to be found.")
+ self.magicfolder = self.bob_magicfolder
+ self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE)
+ d.addCallback(lambda ign: _wait_for(None, Bob_to_rewrite_file, alice=False))
+
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file1", 3))
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.bob_magicfolder, u"file1", 3))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 2, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('files_uploaded', 1, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_queued', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0))
+
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file1", 3))
+ d.addCallback(lambda ign: self._check_version_in_local_db(self.alice_magicfolder, u"file1", 3))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 1, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+
+ def Alice_conflicts_with_Bobs_last_downloaded_uri():
+ print "Alice conflicts with Bob\n"
+ downloaded_d = self.bob_magicfolder.downloader.set_hook('processed')
+ uploadable = Data("do not follow the white rabbit", self.alice_magicfolder._client.convergence)
+ alice_dmd = self.alice_magicfolder.uploader._upload_dirnode
+ d2 = alice_dmd.add_file(u"file1", uploadable,
+ metadata={"version": 5,
+ "last_downloaded_uri" : "URI:LIT:" },
+ overwrite=True)
+ print "Waiting for Alice to upload\n"
+ d2.addCallback(lambda ign: bob_clock.advance(6))
+ d2.addCallback(lambda ign: downloaded_d)
+ d2.addCallback(lambda ign: self.failUnless(alice_dmd.has_child(encoded_path_u)))
+ return d2
+
+ d.addCallback(lambda ign: Alice_conflicts_with_Bobs_last_downloaded_uri())
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 4))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 1))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 1, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+
+ # prepare to perform another conflict test
+ def Alice_to_write_file2():
+ print "Alice writes a file\n"
+ self.file_path = abspath_expanduser_unicode(u"file2", base=self.alice_magicfolder.uploader._local_path_u)
+ fileutil.write(self.file_path, "something")
+ self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+ d.addCallback(_wait_for, Alice_to_write_file2)
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file2", 0))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+
+ def Bob_to_rewrite_file2():
+ print "Bob rewrites file\n"
+ self.file_path = abspath_expanduser_unicode(u"file2", base=self.bob_magicfolder.uploader._local_path_u)
+ print "---- bob's file is %r" % (self.file_path,)
+ fileutil.write(self.file_path, "roger roger. what vector?")
+ self.magicfolder = self.bob_magicfolder
+ self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE)
+ d.addCallback(lambda ign: _wait_for(None, Bob_to_rewrite_file2, alice=False))
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file2", 1))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 5))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 1))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 3, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('files_uploaded', 2, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_queued', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder))
+
+ # XXX here we advance the clocks and then test again, to make sure the
+ # counters do not keep increasing with each queue turn ;-p
+ d.addCallback(lambda ign: alice_clock.advance(6))
+ d.addCallback(lambda ign: bob_clock.advance(6))
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file2", 1))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 5))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 1))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 3, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('files_uploaded', 2, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_queued', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder))
+
+ def Alice_conflicts_with_Bobs_last_uploaded_uri():
+ print "Alice conflicts with Bob\n"
+ encoded_path_u = magicpath.path2magic(u"file2")
+ downloaded_d = self.bob_magicfolder.downloader.set_hook('processed')
+ uploadable = Data("rabbits with sharp fangs", self.alice_magicfolder._client.convergence)
+ alice_dmd = self.alice_magicfolder.uploader._upload_dirnode
+ d2 = alice_dmd.add_file(u"file2", uploadable,
+ metadata={"version": 5,
+ "last_uploaded_uri" : "URI:LIT:" },
+ overwrite=True)
+ print "Waiting for Alice to upload\n"
+ d2.addCallback(lambda ign: bob_clock.advance(6))
+ d2.addCallback(lambda ign: downloaded_d)
+ d2.addCallback(lambda ign: self.failUnless(alice_dmd.has_child(encoded_path_u)))
+ return d2
+ d.addCallback(lambda ign: Alice_conflicts_with_Bobs_last_uploaded_uri())
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file2", 5))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 6))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 2))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 3, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('files_uploaded', 2, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_queued', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder))
+
+ d.addCallback(lambda ign: alice_clock.advance(6))
+ d.addCallback(lambda ign: bob_clock.advance(6))
+ d.addCallback(lambda ign: alice_clock.advance(6))
+ d.addCallback(lambda ign: bob_clock.advance(6))
+
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 2))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 6))
+
+ # prepare to perform another conflict test
+ def Alice_to_write_file3():
+ print "Alice writes a file\n"
+ self.file_path = abspath_expanduser_unicode(u"file3", base=self.alice_magicfolder.uploader._local_path_u)
+ fileutil.write(self.file_path, "something")
+ self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE, magic=self.alice_magicfolder)
+ d.addCallback(_wait_for, Alice_to_write_file3)
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.alice_magicfolder, u"file3", 0))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 7))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 2, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 2))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+
+ def Bob_to_rewrite_file3():
+ print "Bob rewrites file\n"
+ self.file_path = abspath_expanduser_unicode(u"file3", base=self.bob_magicfolder.uploader._local_path_u)
+ print "---- bob's file is %r" % (self.file_path,)
+ fileutil.write(self.file_path, "roger roger")
+ self.magicfolder = self.bob_magicfolder
+ self.notify(to_filepath(self.file_path), self.inotify.IN_CLOSE_WRITE)
+ d.addCallback(lambda ign: _wait_for(None, Bob_to_rewrite_file3, alice=False))
+ d.addCallback(lambda ign: self._check_version_in_dmd(self.bob_magicfolder, u"file3", 1))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 7))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 2))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_failed', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_succeeded', 4, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('files_uploaded', 3, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('objects_queued', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_uploader_count('directories_created', 0, magic=self.bob_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_conflicted', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_failed', 0, magic=self.alice_magicfolder))
+ d.addCallback(lambda ign: self._check_downloader_count('objects_downloaded', 3, magic=self.alice_magicfolder))
+
+
+ def _cleanup(ign, magicfolder, clock):
+ if magicfolder is not None:
+ d2 = magicfolder.finish()
+ clock.advance(0)
+ return d2
+
+ def cleanup_Alice_and_Bob(result):
+ print "cleanup alice bob test\n"
+ d2 = defer.succeed(None)
+ d2.addCallback(_cleanup, self.alice_magicfolder, alice_clock)
+ d2.addCallback(_cleanup, self.bob_magicfolder, bob_clock)
+ d2.addCallback(lambda ign: result)
+ return d2
+ d.addBoth(cleanup_Alice_and_Bob)
+ return d
+
+
+class MockTest(MagicFolderTestMixin, unittest.TestCase):
+ """This can run on any platform, and even if twisted.internet.inotify can't be imported."""
+
+ def setUp(self):
+ MagicFolderTestMixin.setUp(self)
+ self.inotify = fake_inotify
+ self.patch(magic_folder, 'get_inotify_module', lambda: self.inotify)
+
+ def notify(self, path, mask, magic=None, flush=True):
+ if magic is None:
+ magic = self.magicfolder
+ magic.uploader._notifier.event(path, mask)
+ # no flush for the mock test.
+
+ def test_errors(self):
+ self.set_up_grid()
+
+ errors_dir = abspath_expanduser_unicode(u"errors_dir", base=self.basedir)
+ os.mkdir(errors_dir)
+ not_a_dir = abspath_expanduser_unicode(u"NOT_A_DIR", base=self.basedir)
+ fileutil.write(not_a_dir, "")
+ magicfolderdb = abspath_expanduser_unicode(u"magicfolderdb", base=self.basedir)
+ doesnotexist = abspath_expanduser_unicode(u"doesnotexist", base=self.basedir)
+
+ client = self.g.clients[0]
+ d = client.create_dirnode()
+ def _check_errors(n):
+ self.failUnless(IDirectoryNode.providedBy(n))
+ upload_dircap = n.get_uri()
+ readonly_dircap = n.get_readonly_uri()
+
+ self.shouldFail(AssertionError, 'nonexistent local.directory', 'there is no directory',
+ MagicFolder, client, upload_dircap, '', doesnotexist, magicfolderdb, 0077)
+ self.shouldFail(AssertionError, 'non-directory local.directory', 'is not a directory',
+ MagicFolder, client, upload_dircap, '', not_a_dir, magicfolderdb, 0077)
+ self.shouldFail(AssertionError, 'bad upload.dircap', 'does not refer to a directory',
+ MagicFolder, client, 'bad', '', errors_dir, magicfolderdb, 0077)
+ self.shouldFail(AssertionError, 'non-directory upload.dircap', 'does not refer to a directory',
+ MagicFolder, client, 'URI:LIT:foo', '', errors_dir, magicfolderdb, 0077)
+ self.shouldFail(AssertionError, 'readonly upload.dircap', 'is not a writecap to a directory',
+ MagicFolder, client, readonly_dircap, '', errors_dir, magicfolderdb, 0077)
+ self.shouldFail(AssertionError, 'collective dircap', 'is not a readonly cap to a directory',
+ MagicFolder, client, upload_dircap, upload_dircap, errors_dir, magicfolderdb, 0077)
+
+ def _not_implemented():
+ raise NotImplementedError("blah")
+ self.patch(magic_folder, 'get_inotify_module', _not_implemented)
+ self.shouldFail(NotImplementedError, 'unsupported', 'blah',
+ MagicFolder, client, upload_dircap, '', errors_dir, magicfolderdb, 0077)
+ d.addCallback(_check_errors)
+ return d
+
+ def test_write_downloaded_file(self):
+ workdir = u"cli/MagicFolder/write-downloaded-file"
+ local_file = fileutil.abspath_expanduser_unicode(os.path.join(workdir, "foobar"))
+
+ class TestWriteFileMixin(WriteFileMixin):
+ def _log(self, msg):
+ pass
+
+ writefile = TestWriteFileMixin()
+ writefile._umask = 0077
+
+ # create a file with name "foobar" with content "foo"
+ # write downloaded file content "bar" into "foobar" with is_conflict = False
+ fileutil.make_dirs(workdir)
+ fileutil.write(local_file, "foo")
+
+ # if is_conflict is False, then the .conflict file shouldn't exist.
+ writefile._write_downloaded_file(local_file, "bar", False, None)
+ conflicted_path = local_file + u".conflict"
+ self.failIf(os.path.exists(conflicted_path))
+
+ # At this point, the backup file should exist with content "foo"
+ backup_path = local_file + u".backup"
+ self.failUnless(os.path.exists(backup_path))
+ self.failUnlessEqual(fileutil.read(backup_path), "foo")
+
+ # .tmp file shouldn't exist
+ self.failIf(os.path.exists(local_file + u".tmp"))
+
+ # .. and the original file should have the new content
+ self.failUnlessEqual(fileutil.read(local_file), "bar")
+
+ # now a test for conflicted case
+ writefile._write_downloaded_file(local_file, "bar", True, None)
+ self.failUnless(os.path.exists(conflicted_path))
+
+ # .tmp file shouldn't exist
+ self.failIf(os.path.exists(local_file + u".tmp"))
+
+
+class RealTest(MagicFolderTestMixin, unittest.TestCase):
+ """This is skipped unless both Twisted and the platform support inotify."""
+
+ def setUp(self):
+ MagicFolderTestMixin.setUp(self)
+ self.inotify = magic_folder.get_inotify_module()
+
+ def notify(self, path, mask, magic=None, flush=True):
+ # Writing to the filesystem causes the notification.
+ # However, flushing filesystem buffers may be necessary on Windows.
+ if flush:
+ fileutil.flush_volume(path.path)
+
+try:
+ magic_folder.get_inotify_module()
+except NotImplementedError:
+ RealTest.skip = "Magic Folder support can only be tested for-real on an OS that supports inotify or equivalent."
--- /dev/null
+
+from twisted.trial import unittest
+
+from allmydata import magicpath
+
+
+class MagicPath(unittest.TestCase):
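+ # Maps a relative path to its "magic" (flattened) encoding: "/" becomes
+ # "@_" and a literal "@" is escaped as "@@".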
+ tests = {
+ u"Documents/work/critical-project/qed.txt": u"Documents@_work@_critical-project@_qed.txt",
+ u"Documents/emails/bunnyfufu@hoppingforest.net": u"Documents@_emails@_bunnyfufu@@hoppingforest.net",
+ u"foo/@/bar": u"foo@_@@@_bar",
+ }
+
+ def test_path2magic(self):
+ for test, expected in self.tests.items():
+ self.failUnlessEqual(magicpath.path2magic(test), expected)
+
+ def test_magic2path(self):
+ for expected, test in self.tests.items():
+ self.failUnlessEqual(magicpath.magic2path(test), expected)
+
+ def test_should_ignore(self):
+ self.failUnlessEqual(magicpath.should_ignore_file(u".bashrc"), True)
+ self.failUnlessEqual(magicpath.should_ignore_file(u"bashrc."), False)
+ self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/branch/.bashrc"), True)
+ self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/.branch/bashrc"), True)
+ self.failUnlessEqual(magicpath.should_ignore_file(u"forest/.tree/branch/bashrc"), True)
+ self.failUnlessEqual(magicpath.should_ignore_file(u"forest/tree/branch/bashrc"), False)
from twisted.internet import threads
from allmydata.util import fileutil, pollmixin
-from allmydata.util.encodingutil import unicode_to_argv, unicode_to_output, get_filesystem_encoding
+from allmydata.util.encodingutil import unicode_to_argv, unicode_to_output, \
+ get_filesystem_encoding
from allmydata.scripts import runner
from allmydata.client import Client
from allmydata.test import common_util
self.failUnless(re.search(r"\n\[storage\]\n#.*\nenabled = true\n", content), content)
self.failUnless("\nreserved_space = 1G\n" in content)
- self.failUnless(re.search(r"\n\[drop_upload\]\n#.*\nenabled = false\n", content), content)
-
# creating the node a second time should be rejected
rc, out, err = self.run_tahoe(argv)
self.failIfEqual(rc, 0, str((out, err, rc)))
--- /dev/null
+
+# Windows near-equivalent to twisted.internet.inotify
+# This should only be imported on Windows.
+
+import os, sys
+
+from twisted.internet import reactor
+from twisted.internet.threads import deferToThread
+
+from allmydata.util.fake_inotify import humanReadableMask, \
+ IN_WATCH_MASK, IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_NOWRITE, IN_CLOSE_WRITE, \
+ IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_CREATE, IN_DELETE, IN_DELETE_SELF, \
+ IN_MOVE_SELF, IN_UNMOUNT, IN_Q_OVERFLOW, IN_IGNORED, IN_ONLYDIR, IN_DONT_FOLLOW, \
+ IN_MASK_ADD, IN_ISDIR, IN_ONESHOT, IN_CLOSE, IN_MOVED, IN_CHANGED
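+# Referencing the imported names in this otherwise-useless list keeps pyflakes
+# from warning about unused imports; the names are re-exported for callers.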
+[humanReadableMask, \
+ IN_WATCH_MASK, IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_NOWRITE, IN_CLOSE_WRITE, \
+ IN_OPEN, IN_MOVED_FROM, IN_MOVED_TO, IN_CREATE, IN_DELETE, IN_DELETE_SELF, \
+ IN_MOVE_SELF, IN_UNMOUNT, IN_Q_OVERFLOW, IN_IGNORED, IN_ONLYDIR, IN_DONT_FOLLOW, \
+ IN_MASK_ADD, IN_ISDIR, IN_ONESHOT, IN_CLOSE, IN_MOVED, IN_CHANGED]
+
+from allmydata.util.assertutil import _assert, precondition
+from allmydata.util.encodingutil import quote_output
+from allmydata.util import log, fileutil
+from allmydata.util.pollmixin import PollMixin
+
+from ctypes import WINFUNCTYPE, WinError, windll, POINTER, byref, create_string_buffer, \
+ addressof, get_last_error
+from ctypes.wintypes import BOOL, HANDLE, DWORD, LPCWSTR, LPVOID
+
+# <http://msdn.microsoft.com/en-us/library/gg258116%28v=vs.85%29.aspx>
+FILE_LIST_DIRECTORY = 1
+
+# <http://msdn.microsoft.com/en-us/library/aa363858%28v=vs.85%29.aspx>
+CreateFileW = WINFUNCTYPE(
+ HANDLE, LPCWSTR, DWORD, DWORD, LPVOID, DWORD, DWORD, HANDLE,
+ use_last_error=True
+)(("CreateFileW", windll.kernel32))
+
+FILE_SHARE_READ = 0x00000001
+FILE_SHARE_WRITE = 0x00000002
+FILE_SHARE_DELETE = 0x00000004
+
+OPEN_EXISTING = 3
+
+FILE_FLAG_BACKUP_SEMANTICS = 0x02000000
+
+# <http://msdn.microsoft.com/en-us/library/ms724211%28v=vs.85%29.aspx>
+CloseHandle = WINFUNCTYPE(
+ BOOL, HANDLE,
+ use_last_error=True
+)(("CloseHandle", windll.kernel32))
+
+# <http://msdn.microsoft.com/en-us/library/aa365465%28v=vs.85%29.aspx>
+ReadDirectoryChangesW = WINFUNCTYPE(
+ BOOL, HANDLE, LPVOID, DWORD, BOOL, DWORD, POINTER(DWORD), LPVOID, LPVOID,
+ use_last_error=True
+)(("ReadDirectoryChangesW", windll.kernel32))
+
+FILE_NOTIFY_CHANGE_FILE_NAME = 0x00000001
+FILE_NOTIFY_CHANGE_DIR_NAME = 0x00000002
+FILE_NOTIFY_CHANGE_ATTRIBUTES = 0x00000004
+#FILE_NOTIFY_CHANGE_SIZE = 0x00000008
+FILE_NOTIFY_CHANGE_LAST_WRITE = 0x00000010
+FILE_NOTIFY_CHANGE_LAST_ACCESS = 0x00000020
+#FILE_NOTIFY_CHANGE_CREATION = 0x00000040
+FILE_NOTIFY_CHANGE_SECURITY = 0x00000100
+
+# <http://msdn.microsoft.com/en-us/library/aa364391%28v=vs.85%29.aspx>
+FILE_ACTION_ADDED = 0x00000001
+FILE_ACTION_REMOVED = 0x00000002
+FILE_ACTION_MODIFIED = 0x00000003
+FILE_ACTION_RENAMED_OLD_NAME = 0x00000004
+FILE_ACTION_RENAMED_NEW_NAME = 0x00000005
+
+_action_to_string = {
+ FILE_ACTION_ADDED : "FILE_ACTION_ADDED",
+ FILE_ACTION_REMOVED : "FILE_ACTION_REMOVED",
+ FILE_ACTION_MODIFIED : "FILE_ACTION_MODIFIED",
+ FILE_ACTION_RENAMED_OLD_NAME : "FILE_ACTION_RENAMED_OLD_NAME",
+ FILE_ACTION_RENAMED_NEW_NAME : "FILE_ACTION_RENAMED_NEW_NAME",
+}
+
+_action_to_inotify_mask = {
+ FILE_ACTION_ADDED : IN_CREATE,
+ FILE_ACTION_REMOVED : IN_DELETE,
+ FILE_ACTION_MODIFIED : IN_CHANGED,
+ FILE_ACTION_RENAMED_OLD_NAME : IN_MOVED_FROM,
+ FILE_ACTION_RENAMED_NEW_NAME : IN_MOVED_TO,
+}
+
+INVALID_HANDLE_VALUE = 0xFFFFFFFF
+
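+# Win32 BOOL values, passed for the bWatchSubtree argument of ReadDirectoryChangesW.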
+TRUE = 1
+FALSE = 0
+
+class Event(object):
+ """
+ * action: a FILE_ACTION_* constant (not a bit mask)
+ * filename: a Unicode string, giving the name relative to the watched directory
+ """
+ def __init__(self, action, filename):
+ self.action = action
+ self.filename = filename
+
+ def __repr__(self):
+ return "Event(%r, %r)" % (_action_to_string.get(self.action, self.action), self.filename)
+
+
+class FileNotifyInformation(object):
+ """
+ I represent a buffer containing FILE_NOTIFY_INFORMATION structures, and can
+ iterate over those structures, decoding them into Event objects.
+ """
+
+ def __init__(self, size=1024):
+ self.size = size
+ self.buffer = create_string_buffer(size)
+ address = addressof(self.buffer)
+ _assert(address & 3 == 0, "address 0x%X returned by create_string_buffer is not DWORD-aligned" % (address,))
+ self.data = None
+
+ def read_changes(self, hDirectory, recursive, filter):
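+ # This calls ReadDirectoryChangesW synchronously (no overlapped I/O and no
+ # completion routine), so it blocks until the OS reports changes, then
+ # snapshots the raw event buffer into self.data.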
+ bytes_returned = DWORD(0)
+ r = ReadDirectoryChangesW(hDirectory,
+ self.buffer,
+ self.size,
+ recursive,
+ filter,
+ byref(bytes_returned),
+ None, # NULL -> no overlapped I/O
+ None # NULL -> no completion routine
+ )
+ if r == 0:
+ raise WinError(get_last_error())
+ self.data = self.buffer.raw[:bytes_returned.value]
+
+ def __iter__(self):
+ # Iterator implemented as generator: <http://docs.python.org/library/stdtypes.html#generator-types>
+ pos = 0
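+ # Each FILE_NOTIFY_INFORMATION record is: DWORD NextEntryOffset (offset 0),
+ # DWORD Action (offset 4), DWORD FileNameLength in bytes (offset 8), followed
+ # by the UTF-16-LE FileName starting at offset 12.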
+ while True:
+ length = self._read_dword(pos+8)
+ s = Event(self._read_dword(pos+4),
+ self.data[pos+12 : pos+12+length].decode('utf-16-le'))
+
+ next_entry_offset = self._read_dword(pos)
+ yield s
+ if next_entry_offset == 0:
+ break
+ pos = pos + next_entry_offset
+
+ def _read_dword(self, i):
+ # little-endian
+ return ( ord(self.data[i]) |
+ (ord(self.data[i+1]) << 8) |
+ (ord(self.data[i+2]) << 16) |
+ (ord(self.data[i+3]) << 24))
+
+
+def _open_directory(path_u):
+ hDirectory = CreateFileW(path_u,
+ FILE_LIST_DIRECTORY, # access rights
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ # don't prevent other processes from accessing
+ None, # no security descriptor
+ OPEN_EXISTING, # directory must already exist
+ FILE_FLAG_BACKUP_SEMANTICS, # necessary to open a directory
+ None # no template file
+ )
+ if hDirectory == INVALID_HANDLE_VALUE:
+ e = WinError(get_last_error())
+ raise OSError("Opening directory %s gave WinError: %s" % (quote_output(path_u), e))
+ return hDirectory
+
+
+def simple_test():
+ path_u = u"test"
+ filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE
+ recursive = FALSE
+
+ hDirectory = _open_directory(path_u)
+ fni = FileNotifyInformation()
+ print "Waiting..."
+ while True:
+ fni.read_changes(hDirectory, recursive, filter)
+ print repr(fni.data)
+ for info in fni:
+ print info
+
+
+NOT_STARTED = "NOT_STARTED"
+STARTED = "STARTED"
+STOPPING = "STOPPING"
+STOPPED = "STOPPED"
+
+class INotify(PollMixin):
+ def __init__(self):
+ self._state = NOT_STARTED
+ self._filter = None
+ self._callbacks = None
+ self._hDirectory = None
+ self._path = None
+ self._pending = set()
+ self._pending_delay = 1.0
+ self.recursive_includes_new_subdirectories = True
+
+ def set_pending_delay(self, delay):
+ self._pending_delay = delay
+
+ def startReading(self):
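+ # Run the blocking watch loop in a reactor thread; the returned Deferred
+ # fires once the thread has left the NOT_STARTED state.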
+ deferToThread(self._thread)
+ return self.poll(lambda: self._state != NOT_STARTED)
+
+ def stopReading(self):
+ # FIXME race conditions
+ if self._state != STOPPED:
+ self._state = STOPPING
+
+ def wait_until_stopped(self):
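+ # Touching a dummy file wakes the blocking ReadDirectoryChangesW call so
+ # that the watcher thread can observe the STOPPING state and shut down.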
+ fileutil.write(os.path.join(self._path.path, u".ignore-me"), "")
+ return self.poll(lambda: self._state == STOPPED)
+
+ def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
+ precondition(self._state == NOT_STARTED, "watch() can only be called before startReading()", state=self._state)
+ precondition(self._filter is None, "only one watch is supported")
+ precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
+ precondition(isinstance(recursive, bool), recursive=recursive)
+ #precondition(autoAdd == recursive, "need autoAdd and recursive to be the same", autoAdd=autoAdd, recursive=recursive)
+
+ self._path = path
+ path_u = path.path
+ if not isinstance(path_u, unicode):
+ path_u = path_u.decode(sys.getfilesystemencoding())
+ _assert(isinstance(path_u, unicode), path_u=path_u)
+
+ self._filter = FILE_NOTIFY_CHANGE_FILE_NAME | FILE_NOTIFY_CHANGE_DIR_NAME | FILE_NOTIFY_CHANGE_LAST_WRITE
+
+ if mask & (IN_ACCESS | IN_CLOSE_NOWRITE | IN_OPEN):
+ self._filter = self._filter | FILE_NOTIFY_CHANGE_LAST_ACCESS
+ if mask & IN_ATTRIB:
+ self._filter = self._filter | FILE_NOTIFY_CHANGE_ATTRIBUTES | FILE_NOTIFY_CHANGE_SECURITY
+
+ self._recursive = TRUE if recursive else FALSE
+ self._callbacks = callbacks or []
+ self._hDirectory = _open_directory(path_u)
+
+ def _thread(self):
+ try:
+ _assert(self._filter is not None, "no watch set")
+
+ # To call Twisted or Tahoe APIs, use reactor.callFromThread as described in
+ # <http://twistedmatrix.com/documents/current/core/howto/threading.html>.
+
+ fni = FileNotifyInformation()
+
+ while True:
+ self._state = STARTED
+ fni.read_changes(self._hDirectory, self._recursive, self._filter)
+ for info in fni:
+ if self._state == STOPPING:
+ hDirectory = self._hDirectory
+ self._callbacks = None
+ self._hDirectory = None
+ CloseHandle(hDirectory)
+ self._state = STOPPED
+ return
+
+ path = self._path.preauthChild(info.filename) # FilePath with Unicode path
+ #mask = _action_to_inotify_mask.get(info.action, IN_CHANGED)
+
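+ # Coalesce bursts of events: the first event for a path schedules the
+ # callbacks after _pending_delay; further events for the same path are
+ # dropped until those callbacks have run.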
+ def _maybe_notify(path):
+ if path not in self._pending:
+ self._pending.add(path)
+ def _do_callbacks():
+ self._pending.remove(path)
+ for cb in self._callbacks:
+ try:
+ cb(None, path, IN_CHANGED)
+ except Exception, e:
+ log.err(e)
+ reactor.callLater(self._pending_delay, _do_callbacks)
+ reactor.callFromThread(_maybe_notify, path)
+ except Exception, e:
+ log.err(e)
+ self._state = STOPPED
+ raise