--- /dev/null
+from UserDict import DictMixin
+
+
DELETED = object()   # unique sentinel marking removed slots in OrderedDict._keys
+
+
class OrderedDict(DictMixin):
    """Dictionary remembering insertion order (pre-2.7 stand-in).

    Stored as {key: (index, value)} plus an ordered key list in which
    removed entries are replaced by the DELETED marker; the list is
    compacted lazily once holes outnumber live keys.
    """

    def __init__(self, *args, **kwds):
        self.clear()
        self.update(*args, **kwds)   # update() comes from DictMixin

    def clear(self):
        self._keys = []      # insertion-ordered keys, holes = DELETED
        self._content = {}   # {key: (index into self._keys, value)}
        self._deleted = 0    # number of DELETED holes in self._keys

    def copy(self):
        return OrderedDict(self)

    def __iter__(self):
        # Skip the holes left behind by __delitem__.
        for key in self._keys:
            if key is not DELETED:
                yield key

    def keys(self):
        return [key for key in self._keys if key is not DELETED]

    def popitem(self):
        # Pop from the end, silently discarding trailing DELETED holes.
        while 1:
            try:
                k = self._keys.pop()
            except IndexError:
                raise KeyError, 'OrderedDict is empty'
            if k is not DELETED:
                return k, self._content.pop(k)[1]

    def __getitem__(self, key):
        index, value = self._content[key]
        return value

    def __setitem__(self, key, value):
        try:
            index, oldvalue = self._content[key]   # overwrite keeps position
        except KeyError:
            index = len(self._keys)
            self._keys.append(key)
        self._content[key] = index, value

    def __delitem__(self, key):
        index, oldvalue = self._content.pop(key)
        self._keys[index] = DELETED
        if self._deleted <= len(self._content):
            self._deleted += 1
        else:
            # compress: more holes than live keys - rebuild the key list
            # and renumber the stored indices.
            newkeys = []
            for k in self._keys:
                if k is not DELETED:
                    i, value = self._content[k]
                    self._content[k] = len(newkeys), value
                    newkeys.append(k)
            self._keys = newkeys
            self._deleted = 0

    def __len__(self):
        return len(self._content)

    def __repr__(self):
        res = ['%r: %r' % (key, self._content[key][1]) for key in self]
        return 'OrderedDict(%s)' % (', '.join(res),)

    def __cmp__(self, other):
        # Order-sensitive comparison: key sequences first, then values pairwise.
        if not isinstance(other, OrderedDict):
            return NotImplemented
        keys = self.keys()
        r = cmp(keys, other.keys())
        if r:
            return r
        for k in keys:
            r = cmp(self[k], other[k])
            if r:
                return r
        return 0
--- /dev/null
+import os, stat, py, select
+import inspect
+from objectfs import ObjectFs
+
+
+BLOCKSIZE = 8192
+
+
def remote_runner(BLOCKSIZE):
    """Server loop executed on the remote host (its source is shipped over ssh).

    Speaks a line-based protocol on stdin/stdout:
      ('L', path)      -> directory listing [(name, st_mode, st_size), ...]
      ('R', path, pos) -> one block of the file, echoed as repr line + raw bytes
      ('S', path, pos) -> start streaming blocks from pos while stdin is idle
    """
    import sys, select, os, struct   # self-contained: runs via 'exec input()'
    stream = None
    while True:
        # While a file is being streamed, keep pushing blocks until the file
        # is exhausted or a new request shows up on stdin.
        while stream is not None:
            iwtd, owtd, ewtd = select.select([0], [1], [])
            if iwtd:
                break          # incoming request interrupts the streaming
            pos = stream.tell()
            data = stream.read(BLOCKSIZE)
            # 'path' survives from the 'S' message that opened the stream.
            res = ('R', path, pos, len(data))
            sys.stdout.write('%r\n%s' % (res, data))
            if len(data) < BLOCKSIZE:
                stream = None  # end of file reached

        stream = None          # any new message cancels the current stream
        # NOTE(review): eval() fully trusts the ssh peer - acceptable only
        # because the peer is our own shipped script.
        msg = eval(sys.stdin.readline())
        if msg[0] == 'L':
            path = msg[1]
            names = os.listdir(path)
            res = []
            for name in names:
                try:
                    st = os.stat(os.path.join(path, name))
                except OSError:
                    continue   # entry vanished or is unreadable: skip it
                res.append((name, st.st_mode, st.st_size))
            res = msg + (res,)
            sys.stdout.write('%s\n' % (res,))
        elif msg[0] == 'R':
            path, pos = msg[1:]
            f = open(path, 'rb')
            f.seek(pos)
            data = f.read(BLOCKSIZE)
            res = msg + (len(data),)
            sys.stdout.write('%r\n%s' % (res, data))
        elif msg[0] == 'S':
            path, pos = msg[1:]
            stream = open(path, 'rb')
            stream.seek(pos)
        #elif msg[0] == 'C':
        #    stream = None
+
+
class CacheFs(ObjectFs):
    """Read-only FUSE filesystem mirroring a remote directory over ssh,
    caching file contents locally under 'localdir'."""
    MOUNT_OPTIONS = {'max_read': BLOCKSIZE}

    def __init__(self, localdir, remotehost, remotedir):
        # Ship remote_runner's own source to the remote python via stdin.
        src = inspect.getsource(remote_runner)
        src += '\n\nremote_runner(%d)\n' % BLOCKSIZE

        remotecmd = 'python -u -c "exec input()"'
        cmdline = [remotehost, remotecmd]
        # XXX Unix style quoting
        for i in range(len(cmdline)):
            cmdline[i] = "'" + cmdline[i].replace("'", "'\\''") + "'"
        cmd = 'ssh -C'
        cmdline.insert(0, cmd)

        # Python 2 only: os.popen2 returns the child's (stdin, stdout);
        # bufsize=0 keeps the protocol unbuffered.
        child_in, child_out = os.popen2(' '.join(cmdline), bufsize=0)
        child_in.write('%r\n' % (src,))

        control = Controller(child_in, child_out)
        ObjectFs.__init__(self, CacheDir(localdir, remotedir, control))
+
+
class Controller:
    """Client side of the remote_runner protocol over the ssh pipes.

    Keeps an in-memory cache {(remotefn, pos): data} of received blocks and
    remembers which remote file (if any) is currently being streamed."""

    def __init__(self, child_in, child_out):
        self.child_in = child_in
        self.child_out = child_out
        self.cache = {}          # {(remotefn, pos): block data}
        self.streaming = None    # remote filename currently streamed, or None

    def next_answer(self):
        """Read one answer line; 'R' answers carry a payload that is cached."""
        # NOTE(review): eval() trusts the remote peer (our own script).
        answer = eval(self.child_out.readline())
        #print 'A', answer
        if answer[0] == 'R':
            remotefn, pos, length = answer[1:]
            data = self.child_out.read(length)
            self.cache[remotefn, pos] = data
        return answer

    def wait_answer(self, query):
        """Send a query and block until its echoed answer arrives."""
        self.streaming = None    # an explicit query interrupts streaming
        #print 'Q', query
        self.child_in.write('%r\n' % (query,))
        while True:
            answer = self.next_answer()
            # Answers echo the query as their prefix.
            if answer[:len(query)] == query:
                return answer[len(query):]

    def listdir(self, remotedir):
        query = ('L', remotedir)
        res, = self.wait_answer(query)
        return res

    def wait_for_block(self, remotefn, pos):
        """Block until the given block shows up in the cache."""
        key = remotefn, pos
        while key not in self.cache:
            self.next_answer()
        return self.cache[key]

    def peek_for_block(self, remotefn, pos):
        """Non-blocking variant: return None if the block is not pending."""
        key = remotefn, pos
        while key not in self.cache:
            iwtd, owtd, ewtd = select.select([self.child_out], [], [], 0)
            if not iwtd:
                return None
            self.next_answer()
        return self.cache[key]

    def cached_block(self, remotefn, pos):
        key = remotefn, pos
        return self.cache.get(key)

    def start_streaming(self, remotefn, pos):
        if remotefn != self.streaming:
            # Skip past blocks we already hold before asking the remote side.
            while (remotefn, pos) in self.cache:
                pos += BLOCKSIZE
            query = ('S', remotefn, pos)
            #print 'Q', query
            self.child_in.write('%r\n' % (query,))
            self.streaming = remotefn

    def read_blocks(self, remotefn, poslist):
        """Request, in one write, every block of poslist not cached yet."""
        lst = ['%r\n' % (('R', remotefn, pos),)
               for pos in poslist if (remotefn, pos) not in self.cache]
        if lst:
            self.streaming = None
            #print 'Q', '+ '.join(lst)
            self.child_in.write(''.join(lst))

    def clear_cache(self, remotefn):
        # keys() is a list in Python 2, so deleting while iterating is safe;
        # Python 3 would need list(self.cache.keys()).
        for key in self.cache.keys():
            if key[0] == remotefn:
                del self.cache[key]
+
+
class CacheDir:
    """Lazy mirror of a remote directory; children are CacheDir/CacheFile."""

    def __init__(self, localdir, remotedir, control, size=0):
        self.localdir = localdir
        self.remotedir = remotedir
        self.control = control
        self.entries = None      # lazily-filled [(name, node)] list

    def listdir(self):
        if self.entries is None:
            self.entries = []
            for name, st_mode, st_size in self.control.listdir(self.remotedir):
                if stat.S_ISDIR(st_mode):
                    cls = CacheDir
                else:
                    cls = CacheFile
                obj = cls(os.path.join(self.localdir, name),
                          os.path.join(self.remotedir, name),
                          self.control,
                          st_size)
                self.entries.append((name, obj))
        return self.entries
+
class CacheFile:
    """A remote regular file; data is cached in 'localfn' (a '.partial~'
    sibling while the download is incomplete)."""

    def __init__(self, localfn, remotefn, control, size):
        self.localfn = localfn
        self.remotefn = remotefn
        self.control = control
        self.st_size = size      # remote size, reported by listdir()

    def size(self):
        return self.st_size

    def read(self):
        try:
            st = os.stat(self.localfn)
        except OSError:
            pass
        else:
            if st.st_size == self.st_size: # fully cached
                return open(self.localfn, 'rb')
            os.unlink(self.localfn)        # stale copy: size mismatch
        lpath = py.path.local(self.partial())
        lpath.ensure(file=1)               # create parent dirs + empty file
        f = open(self.partial(), 'r+b')
        return DumpFile(self, f)

    def partial(self):
        return self.localfn + '.partial~'

    def complete(self):
        # Promote the finished partial file; best-effort (another reader
        # may have completed it already).
        try:
            os.rename(self.partial(), self.localfn)
        except OSError:
            pass
+
+
class DumpFile:
    """File-like object serving reads from the local '.partial~' copy,
    fetching missing BLOCKSIZE-aligned blocks from the Controller and
    appending them to the local copy as they arrive."""

    def __init__(self, cf, f):
        self.cf = cf      # owning CacheFile
        self.f = f        # local partial file, opened 'r+b'
        self.pos = 0      # virtual read position

    def seek(self, npos):
        self.pos = npos

    def read(self, count):
        control = self.cf.control
        # First serve whatever the local partial file already has.
        self.f.seek(self.pos)
        buffer = self.f.read(count)
        self.pos += len(buffer)
        count -= len(buffer)
        # NOTE(review): self.pos is not advanced past this point; FUSE_READ
        # always seek()s before read(), so this appears harmless there.

        self.f.seek(0, 2)
        curend = self.f.tell()     # how much of the file is cached locally

        if count > 0:

            # Drain already-received stream blocks into the local file until
            # we catch up with the requested position (or run dry).
            while self.pos > curend:
                curend &= -BLOCKSIZE
                data = control.peek_for_block(self.cf.remotefn, curend)
                if data is None:
                    break
                self.f.seek(curend)
                self.f.write(data)
                curend += len(data)
                if len(data) < BLOCKSIZE:
                    break

            # Block-aligned range still missing for this read.
            start = max(self.pos, curend) & (-BLOCKSIZE)
            end = (self.pos + count + BLOCKSIZE-1) & (-BLOCKSIZE)
            poslist = range(start, end, BLOCKSIZE)

            if self.pos <= curend:
                # Sequential read: stream from the remote side, appending
                # blocks in order to the local file.
                control.start_streaming(self.cf.remotefn, start)
                self.f.seek(start)
                for p in poslist:
                    data = control.wait_for_block(self.cf.remotefn, p)
                    assert self.f.tell() == p
                    self.f.write(data)
                    if len(data) < BLOCKSIZE:
                        break

                # Opportunistically flush further cached blocks; the while's
                # else-clause fires when we reach st_size, i.e. the local
                # copy is complete.
                curend = self.f.tell()
                while curend < self.cf.st_size:
                    curend &= -BLOCKSIZE
                    data = control.cached_block(self.cf.remotefn, curend)
                    if data is None:
                        break
                    assert self.f.tell() == curend
                    self.f.write(data)
                    curend += len(data)
                else:
                    self.cf.complete()
                    control.clear_cache(self.cf.remotefn)

                self.f.seek(self.pos)
                buffer += self.f.read(count)

            else:
                # Random access beyond the cached prefix: request just the
                # needed blocks and serve them from the in-memory cache
                # without growing the local file.
                control.read_blocks(self.cf.remotefn, poslist)
                result = []
                for p in poslist:
                    data = control.wait_for_block(self.cf.remotefn, p)
                    result.append(data)
                    if len(data) < BLOCKSIZE:
                        break
                data = ''.join(result)
                buffer += data[self.pos-start:self.pos-start+count]

        else:
            # Read fully satisfied locally; prefetch ahead if the reader is
            # getting close (within 60000 bytes) to the cached end.
            if self.pos + 60000 > curend:
                curend &= -BLOCKSIZE
                control.start_streaming(self.cf.remotefn, curend)

        return buffer
--- /dev/null
+import sys, os, Queue, atexit
+
# Make the sibling 'pypeers' directory importable (greensock lives there).
dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
dir = os.path.join(dir, 'pypeers')
if dir not in sys.path:
    sys.path.append(dir)
del dir   # avoid leaving the builtin 'dir' shadowed at module level
+
+from greensock import *
+import threadchannel
+
+
def _read_from_kernel(handler):
    """Greenlet body: pump FUSE requests and spawn one greenlet per message."""
    while True:
        msg = read(handler.fd, handler.MAX_READ)
        if not msg:
            print >> sys.stderr, "out-kernel connexion closed"
            break
        autogreenlet(handler.handle_message, msg)
+
def add_handler(handler):
    """Serve 'handler' from a background greenlet; unmount at process exit."""
    autogreenlet(_read_from_kernel, handler)
    atexit.register(handler.close)
+
+# ____________________________________________________________
+
+THREAD_QUEUE = None
+
def thread_runner(n):
    """Worker thread: run queued operations, send (ok, result-or-excinfo)."""
    while True:
        #print 'thread runner %d waiting' % n
        operation, answer = THREAD_QUEUE.get()
        #print 'thread_runner %d: %r' % (n, operation)
        try:
            res = True, operation()
        except Exception:
            # Ship the full exc_info so the caller can re-raise it.
            res = False, sys.exc_info()
        #print 'thread_runner %d: got %d bytes' % (n, len(res or ''))
        answer.send(res)
+
+
def start_bkgnd_thread():
    """Lazily spin up the 4-worker thread pool used by wget()."""
    global THREAD_QUEUE, THREAD_LOCK
    import thread    # Python 2 low-level threading module
    threadchannel.startup()
    THREAD_LOCK = thread.allocate_lock()
    THREAD_QUEUE = Queue.Queue()
    for i in range(4):
        thread.start_new_thread(thread_runner, (i,))
+
def wget(*args, **kwds):
    """Run wget.wget() on a background thread; re-raise its exception here."""
    from wget import wget

    def operation():
        # The worker holds THREAD_LOCK; wget() releases it via 'unlock'.
        kwds['unlock'] = THREAD_LOCK
        THREAD_LOCK.acquire()
        try:
            return wget(*args, **kwds)
        finally:
            THREAD_LOCK.release()

    if THREAD_QUEUE is None:
        start_bkgnd_thread()
    answer = threadchannel.ThreadChannel()
    THREAD_QUEUE.put((operation, answer))
    ok, res = answer.receive()
    if not ok:
        # Propagate the worker's exception with its original traceback.
        typ, value, tb = res
        raise typ, value, tb
    #print 'wget returns %d bytes' % (len(res or ''),)
    return res
--- /dev/null
+from kernel import *
+import os, errno, sys, stat
+
def fuse_mount(mountpoint, opts=None):
    """Mount via libfuse using the (Python 2 only) 'dl' FFI; returns a raw fd."""
    if not isinstance(mountpoint, str):
        raise TypeError
    if opts is not None and not isinstance(opts, str):
        raise TypeError
    import dl
    fuse = dl.open('libfuse.so')
    if fuse.sym('fuse_mount_compat22'):
        fnname = 'fuse_mount_compat22'
    else:
        fnname = 'fuse_mount'   # older versions of libfuse.so
    return fuse.call(fnname, mountpoint, opts)
+
class Handler(object):
    """Reads raw FUSE requests from the kernel device and dispatches them to
    the per-opcode FUSE_* methods, which call into a 'filesystem' object.

    The filesystem object advertises optional capabilities simply by having
    the corresponding methods ('setattr', 'mknod', 'getxattrs', ...); the
    presence of 'modified' marks it as writable.
    """
    __system = os.system     # bound early so __del__ works during interp shutdown
    mountpoint = fd = None
    __in_header_size = fuse_in_header.calcsize()
    __out_header_size = fuse_out_header.calcsize()
    MAX_READ = FUSE_MAX_IN

    def __init__(self, mountpoint, filesystem, logfile='STDERR', **opts1):
        # Mount options come from the filesystem's MOUNT_OPTIONS dict,
        # overridden by keyword arguments.
        opts = getattr(filesystem, 'MOUNT_OPTIONS', {}).copy()
        opts.update(opts1)
        if opts:
            opts = opts.items()
            opts.sort()
            opts = ' '.join(['%s=%s' % item for item in opts])
        else:
            opts = None
        fd = fuse_mount(mountpoint, opts)
        if fd < 0:
            raise IOError("mount failed")
        self.fd = fd
        if logfile == 'STDERR':
            logfile = sys.stderr
        self.logfile = logfile
        if self.logfile:
            print >> self.logfile, '* mounted at', mountpoint
        self.mountpoint = mountpoint
        self.filesystem = filesystem
        self.handles = {}    # {fh: (isdir, dirent-data-or-file-obj, node)}
        self.nexth = 1       # next file handle number to hand out

    def __del__(self):
        # Close the kernel channel and unmount; also exposed as close().
        if self.fd is not None:
            os.close(self.fd)
            self.fd = None
        if self.mountpoint:
            cmd = "fusermount -u '%s'" % self.mountpoint.replace("'", r"'\''")
            self.mountpoint = None
            if self.logfile:
                print >> self.logfile, '*', cmd
            self.__system(cmd)

    close = __del__

    def loop_forever(self):
        """Blocking request loop; raises EOFError when the kernel closes."""
        while True:
            msg = os.read(self.fd, FUSE_MAX_IN)
            if not msg:
                raise EOFError("out-kernel connection closed")
            self.handle_message(msg)

    def handle_message(self, msg):
        """Decode one request, dispatch by opcode name, send the reply.

        Error mapping: NotImplementedError -> ENOSYS, EnvironmentError ->
        its errno (ESTALE if none), NoReply -> no answer at all.
        """
        headersize = self.__in_header_size
        req = fuse_in_header(msg[:headersize])
        assert req.len == len(msg)
        name = req.opcode
        try:
            try:
                name = fuse_opcode2name[req.opcode]
                meth = getattr(self, name)
            except (IndexError, AttributeError):
                raise NotImplementedError
            #if self.logfile:
            #    print >> self.logfile, '%s(%d)' % (name, req.nodeid)
            reply = meth(req, msg[headersize:])
            #if self.logfile:
            #    print >> self.logfile, '   >>', repr(reply)
        except NotImplementedError:
            if self.logfile:
                print >> self.logfile, '%s: not implemented' % (name,)
            self.send_reply(req, err=errno.ENOSYS)
        except EnvironmentError, e:
            if self.logfile:
                print >> self.logfile, '%s: %s' % (name, e)
            self.send_reply(req, err = e.errno or errno.ESTALE)
        except NoReply:
            pass
        else:
            self.send_reply(req, reply)

    def send_reply(self, req, reply=None, err=0):
        """Pack 'reply' behind a fuse_out_header and write it to the kernel."""
        assert 0 <= err < 1000
        if reply is None:
            reply = ''
        elif not isinstance(reply, str):
            reply = reply.pack()
        # The kernel expects a negative errno in the header.
        f = fuse_out_header(unique = req.unique,
                            error = -err,
                            len = self.__out_header_size + len(reply))
        data = f.pack() + reply
        while data:
            count = os.write(self.fd, data)
            if not count:
                raise EOFError("in-kernel connection closed")
            data = data[count:]

    def notsupp_or_ro(self):
        # ENOSYS if the fs is writable (operation merely missing),
        # EROFS if it is read-only.
        if hasattr(self.filesystem, "modified"):
            raise IOError(errno.ENOSYS, "not supported")
        else:
            raise IOError(errno.EROFS, "read-only file system")

    # ____________________________________________________________

    def FUSE_INIT(self, req, msg):
        msg = fuse_init_in_out(msg[:8])
        if self.logfile:
            print >> self.logfile, 'INIT: %d.%d' % (msg.major, msg.minor)
        return fuse_init_in_out(major = FUSE_KERNEL_VERSION,
                                minor = FUSE_KERNEL_MINOR_VERSION)

    def FUSE_GETATTR(self, req, msg):
        node = self.filesystem.getnode(req.nodeid)
        attr, valid = self.filesystem.getattr(node)
        return fuse_attr_out(attr_valid = valid,
                             attr = attr)

    def FUSE_SETATTR(self, req, msg):
        if not hasattr(self.filesystem, 'setattr'):
            self.notsupp_or_ro()
        msg = fuse_setattr_in(msg)
        # Each FATTR_* bit selects one attribute to change; None = unchanged.
        if msg.valid & FATTR_MODE: mode = msg.attr.mode & 0777
        else: mode = None
        if msg.valid & FATTR_UID: uid = msg.attr.uid
        else: uid = None
        if msg.valid & FATTR_GID: gid = msg.attr.gid
        else: gid = None
        if msg.valid & FATTR_SIZE: size = msg.attr.size
        else: size = None
        if msg.valid & FATTR_ATIME: atime = msg.attr.atime
        else: atime = None
        if msg.valid & FATTR_MTIME: mtime = msg.attr.mtime
        else: mtime = None
        node = self.filesystem.getnode(req.nodeid)
        self.filesystem.setattr(node, mode, uid, gid,
                                size, atime, mtime)
        attr, valid = self.filesystem.getattr(node)
        return fuse_attr_out(attr_valid = valid,
                             attr = attr)

    def FUSE_RELEASE(self, req, msg):
        msg = fuse_release_in(msg, truncate=True)
        try:
            del self.handles[msg.fh]
        except KeyError:
            raise IOError(errno.EBADF, msg.fh)
    FUSE_RELEASEDIR = FUSE_RELEASE

    def FUSE_OPENDIR(self, req, msg):
        #msg = fuse_open_in(msg)
        node = self.filesystem.getnode(req.nodeid)
        attr, valid = self.filesystem.getattr(node)
        if mode2type(attr.mode) != TYPE_DIR:
            raise IOError(errno.ENOTDIR, node)
        fh = self.nexth
        self.nexth += 1
        # Directory handles cache the rendered dirent data (see FUSE_READDIR).
        self.handles[fh] = True, '', node
        return fuse_open_out(fh = fh)

    def FUSE_READDIR(self, req, msg):
        msg = fuse_read_in(msg)
        try:
            isdir, data, node = self.handles[msg.fh]
            if not isdir:
                raise KeyError # not a dir handle
        except KeyError:
            raise IOError(errno.EBADF, msg.fh)
        if msg.offset == 0:
            # start or rewind: render the whole listing once, then serve
            # byte slices of it on subsequent calls.
            d_entries = []
            off = 0
            for name, type in self.filesystem.listdir(node):
                off += fuse_dirent.calcsize(len(name))
                d_entry = fuse_dirent(ino = INVALID_INO,
                                      off = off,
                                      type = type,
                                      name = name)
                d_entries.append(d_entry)
            data = ''.join([d.pack() for d in d_entries])
            self.handles[msg.fh] = True, data, node
        return data[msg.offset:msg.offset+msg.size]

    def replyentry(self, (subnodeid, valid1)):
        # Common reply builder for LOOKUP/MKNOD/MKDIR/SYMLINK.
        subnode = self.filesystem.getnode(subnodeid)
        attr, valid2 = self.filesystem.getattr(subnode)
        return fuse_entry_out(nodeid = subnodeid,
                              entry_valid = valid1,
                              attr_valid = valid2,
                              attr = attr)

    def FUSE_LOOKUP(self, req, msg):
        filename = c2pystr(msg)
        dirnode = self.filesystem.getnode(req.nodeid)
        return self.replyentry(self.filesystem.lookup(dirnode, filename))

    def FUSE_OPEN(self, req, msg, mask=os.O_RDONLY|os.O_WRONLY|os.O_RDWR):
        msg = fuse_open_in(msg)
        node = self.filesystem.getnode(req.nodeid)
        attr, valid = self.filesystem.getattr(node)
        if mode2type(attr.mode) != TYPE_REG:
            raise IOError(errno.EPERM, node)
        # filesystem.open() may return a file or a (file, FOPEN_* flags) pair.
        f = self.filesystem.open(node, msg.flags & mask)
        if isinstance(f, tuple):
            f, open_flags = f
        else:
            open_flags = 0
        fh = self.nexth
        self.nexth += 1
        self.handles[fh] = False, f, node
        return fuse_open_out(fh = fh, open_flags = open_flags)

    def FUSE_READ(self, req, msg):
        msg = fuse_read_in(msg)
        try:
            isdir, f, node = self.handles[msg.fh]
            if isdir:
                raise KeyError
        except KeyError:
            raise IOError(errno.EBADF, msg.fh)
        f.seek(msg.offset)
        return f.read(msg.size)

    def FUSE_WRITE(self, req, msg):
        if not hasattr(self.filesystem, 'modified'):
            raise IOError(errno.EROFS, "read-only file system")
        msg, data = fuse_write_in.from_head(msg)
        try:
            isdir, f, node = self.handles[msg.fh]
            if isdir:
                raise KeyError
        except KeyError:
            raise IOError(errno.EBADF, msg.fh)
        f.seek(msg.offset)
        f.write(data)
        self.filesystem.modified(node)
        return fuse_write_out(size = len(data))

    def FUSE_MKNOD(self, req, msg):
        if not hasattr(self.filesystem, 'mknod'):
            self.notsupp_or_ro()
        msg, filename = fuse_mknod_in.from_param(msg)
        node = self.filesystem.getnode(req.nodeid)
        return self.replyentry(self.filesystem.mknod(node, filename, msg.mode))

    def FUSE_MKDIR(self, req, msg):
        if not hasattr(self.filesystem, 'mkdir'):
            self.notsupp_or_ro()
        msg, filename = fuse_mkdir_in.from_param(msg)
        node = self.filesystem.getnode(req.nodeid)
        return self.replyentry(self.filesystem.mkdir(node, filename, msg.mode))

    def FUSE_SYMLINK(self, req, msg):
        if not hasattr(self.filesystem, 'symlink'):
            self.notsupp_or_ro()
        linkname, target = c2pystr2(msg)
        node = self.filesystem.getnode(req.nodeid)
        return self.replyentry(self.filesystem.symlink(node, linkname, target))

    #def FUSE_LINK(self, req, msg):
    #    ...

    def FUSE_UNLINK(self, req, msg):
        if not hasattr(self.filesystem, 'unlink'):
            self.notsupp_or_ro()
        filename = c2pystr(msg)
        node = self.filesystem.getnode(req.nodeid)
        self.filesystem.unlink(node, filename)

    def FUSE_RMDIR(self, req, msg):
        if not hasattr(self.filesystem, 'rmdir'):
            self.notsupp_or_ro()
        dirname = c2pystr(msg)
        node = self.filesystem.getnode(req.nodeid)
        self.filesystem.rmdir(node, dirname)

    def FUSE_FORGET(self, req, msg):
        if hasattr(self.filesystem, 'forget'):
            self.filesystem.forget(req.nodeid)
        raise NoReply   # FORGET must never be answered

    def FUSE_READLINK(self, req, msg):
        if not hasattr(self.filesystem, 'readlink'):
            raise IOError(errno.ENOSYS, "readlink not supported")
        node = self.filesystem.getnode(req.nodeid)
        target = self.filesystem.readlink(node)
        return target

    def FUSE_RENAME(self, req, msg):
        if not hasattr(self.filesystem, 'rename'):
            self.notsupp_or_ro()
        msg, oldname, newname = fuse_rename_in.from_param2(msg)
        oldnode = self.filesystem.getnode(req.nodeid)
        newnode = self.filesystem.getnode(msg.newdir)
        self.filesystem.rename(oldnode, oldname, newnode, newname)

    def getxattrs(self, nodeid):
        # Shared entry point for the four xattr opcodes below.
        if not hasattr(self.filesystem, 'getxattrs'):
            raise IOError(errno.ENOSYS, "xattrs not supported")
        node = self.filesystem.getnode(nodeid)
        return self.filesystem.getxattrs(node)

    def FUSE_LISTXATTR(self, req, msg):
        names = self.getxattrs(req.nodeid).keys()
        names = ['user.' + name for name in names]
        totalsize = 0
        for name in names:
            totalsize += len(name)+1   # +1 for each NUL terminator
        msg = fuse_getxattr_in(msg)
        if msg.size > 0:
            if msg.size < totalsize:
                raise IOError(errno.ERANGE, "buffer too small")
            names.append('')
            return '\x00'.join(names)
        else:
            # size == 0 is a probe: report the needed buffer size.
            return fuse_getxattr_out(size=totalsize)

    def FUSE_GETXATTR(self, req, msg):
        xattrs = self.getxattrs(req.nodeid)
        msg, name = fuse_getxattr_in.from_param(msg)
        if not name.startswith('user.'): # ENODATA == ENOATTR
            raise IOError(errno.ENODATA, "only supports 'user.' xattrs, "
                          "got %r" % (name,))
        name = name[5:]
        try:
            value = xattrs[name]
        except KeyError:
            raise IOError(errno.ENODATA, "no such xattr") # == ENOATTR
        value = str(value)
        if msg.size > 0:
            if msg.size < len(value):
                raise IOError(errno.ERANGE, "buffer too small")
            return value
        else:
            return fuse_getxattr_out(size=len(value))

    def FUSE_SETXATTR(self, req, msg):
        xattrs = self.getxattrs(req.nodeid)
        msg, name, value = fuse_setxattr_in.from_param_head(msg)
        assert len(value) == msg.size
        # XXX msg.flags ignored
        if not name.startswith('user.'): # ENODATA == ENOATTR
            raise IOError(errno.ENODATA, "only supports 'user.' xattrs")
        name = name[5:]
        try:
            xattrs[name] = value
        except KeyError:
            raise IOError(errno.ENODATA, "cannot set xattr") # == ENOATTR

    def FUSE_REMOVEXATTR(self, req, msg):
        xattrs = self.getxattrs(req.nodeid)
        name = c2pystr(msg)
        if not name.startswith('user.'): # ENODATA == ENOATTR
            raise IOError(errno.ENODATA, "only supports 'user.' xattrs")
        name = name[5:]
        try:
            del xattrs[name]
        except KeyError:
            raise IOError(errno.ENODATA, "cannot delete xattr") # == ENOATTR
+
+
class NoReply(Exception):
    """Raised by FUSE_* methods (e.g. FUSE_FORGET) to suppress the reply."""
    pass
--- /dev/null
+import os, re, urlparse
+from handler import Handler
+from objectfs import ObjectFs
+
+
class Root:
    """Top-level dir: 'gg' (Google search) plus hostnames typed by the user."""
    def __init__(self):
        self.entries = {'gg': GoogleRoot()}
    def listdir(self):
        return self.entries.keys()
    def join(self, hostname):
        # Looking up an unknown dotted name creates a new web-site node.
        if hostname in self.entries:
            return self.entries[hostname]
        if '.' not in hostname:
            raise KeyError
        result = HtmlNode('http://%s/' % (hostname,))
        self.entries[hostname] = result
        return result
+
+
class UrlNode:
    """Base node: lazily downloads and memoizes the content of self.url."""
    data = None   # cached page body, fetched on first getdata()

    def __init__(self, url):
        self.url = url

    def getdata(self):
        if self.data is None:
            print self.url
            # NOTE(review): %r quoting is not shell-safe; a hostile URL
            # could inject shell commands into this os.popen call.
            g = os.popen("lynx -source %r" % (self.url,), 'r')
            self.data = g.read()
            g.close()
        return self.data
+
+
class HtmlNode(UrlNode):
    """A fetched HTML page exposed as a directory of its links and images."""
    r_links = re.compile(r'<a\s[^>]*href="([^"]+)"[^>]*>(.*?)</a>',
                         re.IGNORECASE | re.DOTALL)
    r_images = re.compile(r'<img\s[^>]*src="([^"]+[.]jpg)"', re.IGNORECASE)

    def format(self, text, index,
               # table mapping '/' and non-printable chars to '_'
               # (filename-safe); built once as a default argument
               TRANSTBL = ''.join([(32<=c<127 and c!=ord('/'))
                                   and chr(c) or '_'
                                   for c in range(256)])):
        return text.translate(TRANSTBL)

    def listdir(self):
        data = self.getdata()

        seen = {}
        def uniquename(name):
            # Sanitize, hide dotfiles, and de-duplicate with '_2', '_3', ...
            name = self.format(name, len(seen))
            if name == '' or name.startswith('.'):
                name = '_' + name
            basename = name
            i = 1
            while name in seen:
                i += 1
                name = '%s_%d' % (basename, i)
            seen[name] = True
            return name

        for link, text in self.r_links.findall(data):
            url = urlparse.urljoin(self.url, link)
            yield uniquename(text), HtmlNode(url)

        for link in self.r_images.findall(data):
            text = os.path.basename(link)
            url = urlparse.urljoin(self.url, link)
            yield uniquename(text), RawNode(url)

        yield '.source', RawNode(self.url)
+
+
class RawNode(UrlNode):
    """Leaf node: the raw bytes of the URL, exposed as a file."""

    def read(self):
        return self.getdata()

    def size(self):
        # Size is only known once the data has actually been fetched.
        if self.data:
            return len(self.data)
        else:
            return None
+
+
class GoogleRoot:
    """Directory whose entries are live Google queries."""
    def join(self, query):
        return GoogleSearch(query)
+
class GoogleSearch(HtmlNode):
    """Search-result page; only the result links (class=l anchors) appear."""
    r_links = re.compile(r'<a\sclass=l\s[^>]*href="([^"]+)"[^>]*>(.*?)</a>',
                         re.IGNORECASE | re.DOTALL)

    def __init__(self, query):
        self.url = 'http://www.google.com/search?q=' + query

    def format(self, text, index):
        # Strip Google's <b> highlighting and prefix the result rank.
        text = text.replace('<b>', '').replace('</b>', '')
        text = HtmlNode.format(self, text, index)
        return '%d. %s' % (index, text)
+
+
if __name__ == '__main__':
    # XXX mountpoint is hard-coded; edit before use.
    root = Root()
    handler = Handler('/home/arigo/mnt', ObjectFs(root))
    handler.loop_forever()
--- /dev/null
+from struct import pack, unpack, calcsize
+import stat
+
class Struct(object):
    """Base class for fixed-layout binary records.

    Subclasses (generated by _mkstruct) define __slots__ (field names) and
    __types__ (a struct format string); fields pack/unpack positionally.
    """
    __slots__ = []

    def __init__(self, data=None, truncate=False, **fields):
        # Build from raw bytes and/or explicit field values; with
        # truncate=True extra trailing bytes are ignored.
        if data is not None:
            if truncate:
                data = data[:self.calcsize()]
            self.unpack(data)
        for key, value in fields.items():
            setattr(self, key, value)

    def unpack(self, data):
        data = unpack(self.__types__, data)
        for key, value in zip(self.__slots__, data):
            setattr(self, key, value)

    def pack(self):
        # Unset fields default to 0.
        return pack(self.__types__, *[getattr(self, k, 0)
                                      for k in self.__slots__])

    def calcsize(cls):
        return calcsize(cls.__types__)
    calcsize = classmethod(calcsize)

    def __repr__(self):
        result = ['%s=%r' % (name, getattr(self, name, None))
                  for name in self.__slots__]
        return '<%s %s>' % (self.__class__.__name__, ', '.join(result))

    def from_param(cls, msg):
        # Parse '<struct><name>\0...' -> (struct, name).
        limit = cls.calcsize()
        zero = msg.find('\x00', limit)
        assert zero >= 0
        return cls(msg[:limit]), msg[limit:zero]
    from_param = classmethod(from_param)

    def from_param2(cls, msg):
        # Parse '<struct><name1>\0<name2>\0' -> (struct, name1, name2).
        limit = cls.calcsize()
        zero1 = msg.find('\x00', limit)
        assert zero1 >= 0
        zero2 = msg.find('\x00', zero1+1)
        assert zero2 >= 0
        return cls(msg[:limit]), msg[limit:zero1], msg[zero1+1:zero2]
    from_param2 = classmethod(from_param2)

    def from_head(cls, msg):
        # Split into (struct, remaining payload bytes).
        limit = cls.calcsize()
        return cls(msg[:limit]), msg[limit:]
    from_head = classmethod(from_head)

    def from_param_head(cls, msg):
        # Parse '<struct><name>\0<payload>' -> (struct, name, payload).
        limit = cls.calcsize()
        zero = msg.find('\x00', limit)
        assert zero >= 0
        return cls(msg[:limit]), msg[limit:zero], msg[zero+1:]
    from_param_head = classmethod(from_param_head)
+
class StructWithAttr(Struct):
    """A Struct whose packed form ends with a trailing fuse_attr record,
    exposed as self.attr."""

    def unpack(self, data):
        limit = -fuse_attr.calcsize()
        super(StructWithAttr, self).unpack(data[:limit])
        self.attr = fuse_attr(data[limit:])

    def pack(self):
        return super(StructWithAttr, self).pack() + self.attr.pack()

    def calcsize(cls):
        return super(StructWithAttr, cls).calcsize() + fuse_attr.calcsize()
    calcsize = classmethod(calcsize)
+
+
def _mkstruct(name, c, base=Struct):
    """Create a Struct subclass from a C-like field listing and inject it
    into this module's globals under 'name'."""
    typ2code = {
        '__u32': 'I',
        '__s32': 'i',
        '__u64': 'Q',
        '__s64': 'q'}
    slots = []
    types = ['=']   # '=': standard sizes, native byte order, no padding
    for line in c.split('\n'):
        line = line.strip()
        if line:
            line, tail = line.split(';', 1)   # drop trailing comment text
            typ, nam = line.split()
            slots.append(nam)
            types.append(typ2code[typ])
    cls = type(name, (base,), {'__slots__': slots,
                               '__types__': ''.join(types)})
    globals()[name] = cls
+
class timeval(object):
    """Descriptor presenting a (seconds, nanoseconds) attribute pair as a
    single float number of seconds.

    Reading returns sec + nsec * 1e-9; writing splits a float back into the
    two underlying integer attributes; deleting removes both.
    """

    def __init__(self, attr1, attr2):
        # Names of the backing integer attributes on the owner instance.
        self.sec = attr1
        self.nsec = attr2

    def __get__(self, obj, typ=None):
        if obj is None:
            # Class-level access returns the descriptor itself.
            return self
        seconds = getattr(obj, self.sec)
        nanos = getattr(obj, self.nsec)
        return seconds + nanos * 0.000000001

    def __set__(self, obj, val):
        total_ns = int(val * 1000000000)
        whole, frac = divmod(total_ns, 1000000000)
        setattr(obj, self.sec, whole)
        setattr(obj, self.nsec, frac)

    def __delete__(self, obj):
        for attrname in (self.sec, self.nsec):
            delattr(obj, attrname)
+
def _mktimeval(cls, attr1, attr2):
    """Attach a float-valued 'xxx' property backed by the '_xxx'/'_xxxnsec'
    integer slots (names must start with '_'; the property drops it)."""
    assert attr1.startswith('_')
    assert attr2.startswith('_')
    tv = timeval(attr1, attr2)
    setattr(cls, attr1[1:], tv)
+
+INVALID_INO = 0xFFFFFFFFFFFFFFFF
+
def mode2type(mode):
    """Map a stat st_mode to the 4-bit file-type code used by FUSE dirents."""
    # S_IFMT isolates the file-type bits (mode & 0170000).
    return stat.S_IFMT(mode) >> 12
+
+TYPE_REG = mode2type(stat.S_IFREG)
+TYPE_DIR = mode2type(stat.S_IFDIR)
+TYPE_LNK = mode2type(stat.S_IFLNK)
+
def c2pystr(s):
    """Return the NUL-terminated C string at the start of s."""
    head, terminator, _ = s.partition('\x00')
    assert terminator   # a NUL terminator must be present
    return head
+
def c2pystr2(s):
    """Return the first two NUL-terminated C strings packed at the start of s."""
    first, sep1, tail = s.partition('\x00')
    assert sep1
    second, sep2, _ = tail.partition('\x00')
    assert sep2
    return first, second
+
+# ____________________________________________________________
+
+# Version number of this interface
+FUSE_KERNEL_VERSION = 7
+
+# Minor version number of this interface
+FUSE_KERNEL_MINOR_VERSION = 2
+
+# The node ID of the root inode
+FUSE_ROOT_ID = 1
+
+# The major number of the fuse character device
+FUSE_MAJOR = 10
+
+# The minor number of the fuse character device
+FUSE_MINOR = 229
+
+# Make sure all structures are padded to 64bit boundary, so 32bit
+# userspace works under 64bit kernels
+
+_mkstruct('fuse_attr', '''
+ __u64 ino;
+ __u64 size;
+ __u64 blocks;
+ __u64 _atime;
+ __u64 _mtime;
+ __u64 _ctime;
+ __u32 _atimensec;
+ __u32 _mtimensec;
+ __u32 _ctimensec;
+ __u32 mode;
+ __u32 nlink;
+ __u32 uid;
+ __u32 gid;
+ __u32 rdev;
+''')
+_mktimeval(fuse_attr, '_atime', '_atimensec')
+_mktimeval(fuse_attr, '_mtime', '_mtimensec')
+_mktimeval(fuse_attr, '_ctime', '_ctimensec')
+
+_mkstruct('fuse_kstatfs', '''
+ __u64 blocks;
+ __u64 bfree;
+ __u64 bavail;
+ __u64 files;
+ __u64 ffree;
+ __u32 bsize;
+ __u32 namelen;
+''')
+
+FATTR_MODE = 1 << 0
+FATTR_UID = 1 << 1
+FATTR_GID = 1 << 2
+FATTR_SIZE = 1 << 3
+FATTR_ATIME = 1 << 4
+FATTR_MTIME = 1 << 5
+
+#
+# Flags returned by the OPEN request
+#
+# FOPEN_DIRECT_IO: bypass page cache for this open file
+# FOPEN_KEEP_CACHE: don't invalidate the data cache on open
+#
+FOPEN_DIRECT_IO = 1 << 0
+FOPEN_KEEP_CACHE = 1 << 1
+
+fuse_opcode = {
+ 'FUSE_LOOKUP' : 1,
+ 'FUSE_FORGET' : 2, # no reply
+ 'FUSE_GETATTR' : 3,
+ 'FUSE_SETATTR' : 4,
+ 'FUSE_READLINK' : 5,
+ 'FUSE_SYMLINK' : 6,
+ 'FUSE_MKNOD' : 8,
+ 'FUSE_MKDIR' : 9,
+ 'FUSE_UNLINK' : 10,
+ 'FUSE_RMDIR' : 11,
+ 'FUSE_RENAME' : 12,
+ 'FUSE_LINK' : 13,
+ 'FUSE_OPEN' : 14,
+ 'FUSE_READ' : 15,
+ 'FUSE_WRITE' : 16,
+ 'FUSE_STATFS' : 17,
+ 'FUSE_RELEASE' : 18,
+ 'FUSE_FSYNC' : 20,
+ 'FUSE_SETXATTR' : 21,
+ 'FUSE_GETXATTR' : 22,
+ 'FUSE_LISTXATTR' : 23,
+ 'FUSE_REMOVEXATTR' : 24,
+ 'FUSE_FLUSH' : 25,
+ 'FUSE_INIT' : 26,
+ 'FUSE_OPENDIR' : 27,
+ 'FUSE_READDIR' : 28,
+ 'FUSE_RELEASEDIR' : 29,
+ 'FUSE_FSYNCDIR' : 30,
+}
+
# Reverse table: fuse_opcode2name[opcode number] -> opcode name (or None).
fuse_opcode2name = []
def _invert_opcode_table():
    """Invert the fuse_opcode dict into a list indexed by opcode number."""
    for opname, opcode in fuse_opcode.items():
        missing = opcode + 1 - len(fuse_opcode2name)
        if missing > 0:
            fuse_opcode2name.extend([None] * missing)
        fuse_opcode2name[opcode] = opname
_invert_opcode_table()
del _invert_opcode_table
+
+# Conservative buffer size for the client
+FUSE_MAX_IN = 8192
+
+FUSE_NAME_MAX = 1024
+FUSE_SYMLINK_MAX = 4096
+FUSE_XATTR_SIZE_MAX = 4096
+
+_mkstruct('fuse_entry_out', """
+ __u64 nodeid; /* Inode ID */
+ __u64 generation; /* Inode generation: nodeid:gen must \
+ be unique for the fs's lifetime */
+ __u64 _entry_valid; /* Cache timeout for the name */
+ __u64 _attr_valid; /* Cache timeout for the attributes */
+ __u32 _entry_valid_nsec;
+ __u32 _attr_valid_nsec;
+""", base=StructWithAttr)
# _mktimeval (defined earlier in this file) presumably merges each
# sec/nsec field pair into a single float-seconds attribute -- TODO confirm.
_mktimeval(fuse_entry_out, '_entry_valid', '_entry_valid_nsec')
_mktimeval(fuse_entry_out, '_attr_valid', '_attr_valid_nsec')

# ____________________________________________________________
# Request/reply structures of the kernel FUSE protocol, declared in
# wire order with C-like field specs parsed by _mkstruct().

_mkstruct('fuse_forget_in', '''
    __u64 nlookup;
''')

_mkstruct('fuse_attr_out', '''
    __u64 _attr_valid; /* Cache timeout for the attributes */
    __u32 _attr_valid_nsec;
    __u32 dummy;
''', base=StructWithAttr)
_mktimeval(fuse_attr_out, '_attr_valid', '_attr_valid_nsec')

_mkstruct('fuse_mknod_in', '''
    __u32 mode;
    __u32 rdev;
''')

_mkstruct('fuse_mkdir_in', '''
    __u32 mode;
    __u32 padding;
''')

_mkstruct('fuse_rename_in', '''
    __u64 newdir;
''')

_mkstruct('fuse_link_in', '''
    __u64 oldnodeid;
''')

_mkstruct('fuse_setattr_in', '''
    __u32 valid;
    __u32 padding;
''', base=StructWithAttr)

_mkstruct('fuse_open_in', '''
    __u32 flags;
    __u32 padding;
''')

_mkstruct('fuse_open_out', '''
    __u64 fh;
    __u32 open_flags;
    __u32 padding;
''')

_mkstruct('fuse_release_in', '''
    __u64 fh;
    __u32 flags;
    __u32 padding;
''')

_mkstruct('fuse_flush_in', '''
    __u64 fh;
    __u32 flush_flags;
    __u32 padding;
''')

_mkstruct('fuse_read_in', '''
    __u64 fh;
    __u64 offset;
    __u32 size;
    __u32 padding;
''')

_mkstruct('fuse_write_in', '''
    __u64 fh;
    __u64 offset;
    __u32 size;
    __u32 write_flags;
''')

_mkstruct('fuse_write_out', '''
    __u32 size;
    __u32 padding;
''')

# STATFS replies reuse the kstatfs layout defined earlier.
fuse_statfs_out = fuse_kstatfs

_mkstruct('fuse_fsync_in', '''
    __u64 fh;
    __u32 fsync_flags;
    __u32 padding;
''')

_mkstruct('fuse_setxattr_in', '''
    __u32 size;
    __u32 flags;
''')

_mkstruct('fuse_getxattr_in', '''
    __u32 size;
    __u32 padding;
''')

_mkstruct('fuse_getxattr_out', '''
    __u32 size;
    __u32 padding;
''')

# INIT uses the same layout in both directions (major/minor version).
_mkstruct('fuse_init_in_out', '''
    __u32 major;
    __u32 minor;
''')

# Header prepended by the kernel to every request.
_mkstruct('fuse_in_header', '''
    __u32 len;
    __u32 opcode;
    __u64 unique;
    __u64 nodeid;
    __u32 uid;
    __u32 gid;
    __u32 pid;
    __u32 padding;
''')

# Header we prepend to every reply; 'unique' echoes the request.
_mkstruct('fuse_out_header', '''
    __u32 len;
    __s32 error;
    __u64 unique;
''')
+
class fuse_dirent(Struct):
    # One entry of a READDIR reply buffer.  Wire layout: ino (u64),
    # off (u64), namelen (u32), type (u32), then the name padded with
    # NUL bytes up to a multiple of 8.
    __slots__ = ['ino', 'off', 'type', 'name']

    def unpack(self, data):
        # 'data' must hold the 24-byte fixed part plus the whole name.
        self.ino, self.off, namelen, self.type = struct.unpack('QQII',
                                                               data[:24])
        self.name = data[24:24+namelen]
        assert len(self.name) == namelen

    def pack(self):
        # '%ds' with the padded length makes struct NUL-pad the name
        # to the required 8-byte alignment.
        namelen = len(self.name)
        return pack('QQII%ds' % ((namelen+7)&~7,),
                    self.ino, getattr(self, 'off', 0), namelen,
                    self.type, self.name)

    def calcsize(cls, namelen):
        # Total packed size of an entry whose name is 'namelen' bytes.
        return 24 + ((namelen+7)&~7)
    calcsize = classmethod(calcsize)
--- /dev/null
+from kernel import *
+from handler import Handler
+import stat, time, os, weakref, errno
+from cStringIO import StringIO
+
+
class MemoryFS(object):
    """A read-write file system kept entirely in memory.

    Nodes are Dir/File/SymLink instances; each gets a kernel-visible
    node id assigned by newnodeid() and is kept in self.nodes.
    """
    INFINITE = 86400.0    # cache timeout handed to the kernel, in seconds


    class Dir(object):
        # directory node
        type = TYPE_DIR
        def __init__(self, attr):
            self.attr = attr
            self.contents = {}    # { 'filename': Dir()/File()/SymLink() }

    class File(object):
        # regular file node; content lives in a StringIO buffer
        type = TYPE_REG
        def __init__(self, attr):
            self.attr = attr
            self.data = StringIO()

    class SymLink(object):
        # symbolic link node
        type = TYPE_LNK
        def __init__(self, attr, target):
            self.attr = attr
            self.target = target


    def __init__(self, root=None):
        self.uid = os.getuid()
        self.gid = os.getgid()
        # read the process umask without permanently changing it
        self.umask = os.umask(0); os.umask(self.umask)
        self.root = root or self.Dir(self.newattr(stat.S_IFDIR))
        self.root.id = FUSE_ROOT_ID
        # weak values: a node disappears from the map once nothing
        # else references it
        self.nodes = weakref.WeakValueDictionary()
        self.nodes[FUSE_ROOT_ID] = self.root
        self.nextid = FUSE_ROOT_ID + 1

    def newattr(self, s, ino=None, mode=0666):
        """Build a fuse_attr for kind bits 's'; inode defaults to id(attr)."""
        now = time.time()
        attr = fuse_attr(size = 0,
                         mode = s | (mode & ~self.umask),
                         nlink = 1, # even on dirs! this confuses 'find' in
                                    # a good way :-)
                         atime = now,
                         mtime = now,
                         ctime = now,
                         uid = self.uid,
                         gid = self.gid)
        if ino is None:
            ino = id(attr)
        if ino < 0:
            ino = ~ino    # keep the inode number non-negative
        attr.ino = ino
        return attr

    def getnode(self, id):
        return self.nodes[id]

    def modified(self, node):
        # refresh timestamps and, for files, the cached size
        node.attr.mtime = node.attr.atime = time.time()
        if isinstance(node, self.File):
            node.data.seek(0, 2)
            node.attr.size = node.data.tell()

    def getattr(self, node):
        return node.attr, self.INFINITE

    def setattr(self, node, mode, uid, gid, size, atime, mtime):
        # every argument is None when that attribute is unchanged
        if mode is not None:
            node.attr.mode = (node.attr.mode & ~0777) | (mode & 0777)
        if uid is not None:
            node.attr.uid = uid
        if gid is not None:
            node.attr.gid = gid
        if size is not None:
            assert isinstance(node, self.File)
            node.data.seek(0, 2)
            oldsize = node.data.tell()
            if size < oldsize:
                # shrink: truncate the buffer
                node.data.seek(size)
                node.data.truncate()
                self.modified(node)
            elif size > oldsize:
                # grow: zero-pad
                node.data.write('\x00' * (size - oldsize))
                self.modified(node)
        if atime is not None:
            node.attr.atime = atime
        if mtime is not None:
            node.attr.mtime = mtime

    def listdir(self, node):
        assert isinstance(node, self.Dir)
        for name, subobj in node.contents.items():
            yield name, subobj.type

    def lookup(self, dirnode, filename):
        try:
            return dirnode.contents[filename].id, self.INFINITE
        except KeyError:
            raise IOError(errno.ENOENT, filename)

    def open(self, filenode, flags):
        return filenode.data

    def newnodeid(self, newnode):
        # assign a fresh node id and register the node
        id = self.nextid
        self.nextid += 1
        newnode.id = id
        self.nodes[id] = newnode
        return id

    def mknod(self, dirnode, filename, mode):
        node = self.File(self.newattr(stat.S_IFREG, mode=mode))
        dirnode.contents[filename] = node
        return self.newnodeid(node), self.INFINITE

    def mkdir(self, dirnode, subdirname, mode):
        node = self.Dir(self.newattr(stat.S_IFDIR, mode=mode))
        dirnode.contents[subdirname] = node
        return self.newnodeid(node), self.INFINITE

    def symlink(self, dirnode, linkname, target):
        node = self.SymLink(self.newattr(stat.S_IFLNK), target)
        dirnode.contents[linkname] = node
        return self.newnodeid(node), self.INFINITE

    def unlink(self, dirnode, filename):
        del dirnode.contents[filename]

    rmdir = unlink    # NOTE: does not check that the directory is empty

    def readlink(self, symlinknode):
        return symlinknode.target

    def rename(self, olddirnode, oldname, newdirnode, newname):
        node = olddirnode.contents[oldname]
        newdirnode.contents[newname] = node
        del olddirnode.contents[oldname]

    def getxattrs(self, node):
        # lazily attach an xattr dict to the node
        try:
            return node.xattrs
        except AttributeError:
            node.xattrs = {}
            return node.xattrs
+
+
if __name__ == '__main__':
    # usage: python memoryfs.py <mountpoint>
    import sys
    where = sys.argv[1]
    Handler(where, MemoryFS()).loop_forever()
--- /dev/null
+"""
+For reading and caching from slow file system (e.g. DVDs or network).
+
+ python mirrorfs.py <sourcedir> <cachedir> <mountpoint>
+
+Makes <mountpoint> show a read-only copy of the files in <sourcedir>,
+caching all data ever read in the <cachedir> to avoid reading it
+twice. This script also features optimistic read-ahead: once a
+file is accessed, and as long as no other file is accessed, the
+whole file is read and cached as fast as the <sourcedir> will
+provide it.
+
+You have to clean up <cachedir> manually before mounting a modified
+or different <sourcedir>.
+"""
+import sys, os, posixpath, stat
+
try:
    __file__
except NameError:
    # __file__ may be missing when the script is exec'd directly;
    # fall back to the command-line name
    __file__ = sys.argv[0]
this_dir = os.path.dirname(os.path.abspath(__file__))

# ____________________________________________________________

# make the parent directory importable (blockfs, handler, ...)
sys.path.append(os.path.dirname(this_dir))
+from blockfs import valuetree
+from handler import Handler
+import greenhandler, greensock
+from objectfs import ObjectFs
+
+BLOCKSIZE = 65536
+
+class MirrorFS(ObjectFs):
+ rawfd = None
+
+ def __init__(self, srcdir, cachedir):
+ self.srcdir = srcdir
+ self.cachedir = cachedir
+ self.table = valuetree.ValueTree(os.path.join(cachedir, 'table'), 'q')
+ if '' not in self.table:
+ self.initial_read_dir('')
+ self.table[''] = -1,
+ try:
+ self.rawfile = open(os.path.join(cachedir, 'raw'), 'r+b')
+ except IOError:
+ self.rawfile = open(os.path.join(cachedir, 'raw'), 'w+b')
+ ObjectFs.__init__(self, DirNode(self, ''))
+ self.readahead_at = None
+ greenhandler.autogreenlet(self.readahead)
+
+ def close(self):
+ self.table.close()
+
+ def readahead(self):
+ while True:
+ greensock.sleep(0.001)
+ while not self.readahead_at:
+ greensock.sleep(1)
+ path, blocknum = self.readahead_at
+ self.readahead_at = None
+ try:
+ self.readblock(path, blocknum, really=False)
+ except EOFError:
+ pass
+
+ def initial_read_dir(self, path):
+ print 'Reading initial directory structure...', path
+ dirname = os.path.join(self.srcdir, path)
+ for name in os.listdir(dirname):
+ filename = os.path.join(dirname, name)
+ st = os.stat(filename)
+ if stat.S_ISDIR(st.st_mode):
+ self.initial_read_dir(posixpath.join(path, name))
+ q = -1
+ else:
+ q = st.st_size
+ self.table[posixpath.join(path, name)] = q,
+
+ def __getitem__(self, key):
+ self.tablelock.acquire()
+ try:
+ return self.table[key]
+ finally:
+ self.tablelock.release()
+
+ def readblock(self, path, blocknum, really=True):
+ s = '%s/%d' % (path, blocknum)
+ try:
+ q, = self.table[s]
+ except KeyError:
+ print s
+ self.readahead_at = None
+ f = open(os.path.join(self.srcdir, path), 'rb')
+ f.seek(blocknum * BLOCKSIZE)
+ data = f.read(BLOCKSIZE)
+ f.close()
+ if not data:
+ q = -2
+ else:
+ data += '\x00' * (BLOCKSIZE - len(data))
+ self.rawfile.seek(0, 2)
+ q = self.rawfile.tell()
+ self.rawfile.write(data)
+ self.table[s] = q,
+ if q == -2:
+ raise EOFError
+ else:
+ if q == -2:
+ raise EOFError
+ if really:
+ self.rawfile.seek(q, 0)
+ data = self.rawfile.read(BLOCKSIZE)
+ else:
+ data = None
+ if self.readahead_at is None:
+ self.readahead_at = path, blocknum + 1
+ return data
+
+
class Node(object):
    """Base for mirror-fs nodes: binds the owning MirrorFS to a path."""

    def __init__(self, mfs, path):
        self.mfs, self.path = mfs, path
+
class DirNode(Node):
    """A directory of the mirrored tree, backed by the cache table."""

    def join(self, name):
        """Return the child 'name' as a DirNode or FileNode."""
        path = posixpath.join(self.path, name)
        q, = self.mfs.table[path]
        if q == -1:       # -1 marks directories in the table
            return DirNode(self.mfs, path)
        else:
            return FileNode(self.mfs, path)

    def listdir(self):
        """List the immediate children of this directory."""
        result = []
        prefix = self.path
        for key, value in self.mfs.table.iteritemsfrom(prefix):
            if not key.startswith(prefix):
                break
            tail = key[len(prefix):]
            # Bug fix: 'a/bx' also startswith 'a/b' but is a sibling of
            # 'a/b', not a child -- a real child continues with '/'
            # (unless we are the root, whose prefix is '').
            if prefix and tail and not tail.startswith('/'):
                continue
            tail = tail.lstrip('/')
            if tail and '/' not in tail:
                result.append(tail)
        return result
+
class FileNode(Node):
    """A regular file of the mirrored tree."""

    def size(self):
        # the table stores a one-element tuple: (file size,)
        (length,) = self.mfs.table[self.path]
        return length

    def read(self):
        return FileStream(self.mfs, self.path)
+
class FileStream(object):
    """Seekable read-only stream over one cached, mirrored file."""

    def __init__(self, mfs, path):
        self.mfs = mfs
        self.path = path
        self.pos = 0
        self.size, = self.mfs.table[path]

    def seek(self, p):
        self.pos = p

    def read(self, count):
        chunks = []
        limit = min(self.pos + count, self.size)
        while self.pos < limit:
            blocknum, offset = divmod(self.pos, BLOCKSIZE)
            block = self.mfs.readblock(self.path, blocknum)
            piece = block[offset:offset + (limit - self.pos)]
            assert len(piece) > 0
            chunks.append(piece)
            self.pos += len(piece)
        return ''.join(chunks)
+
+# ____________________________________________________________
+
if __name__ == '__main__':
    # usage: python mirrorfs.py <sourcedir> <cachedir> <mountpoint>
    import sys
    srcdir, cachedir, mountpoint = sys.argv[1:]
    fs = MirrorFS(srcdir, cachedir)
    try:
        greenhandler.add_handler(Handler(mountpoint, fs))
        greenhandler.mainloop()
    finally:
        fs.close()
--- /dev/null
+from kernel import *
+import stat, errno, os, time
+from cStringIO import StringIO
+from OrderedDict import OrderedDict
+
+
class ObjectFs:
    """A simple read-only file system based on Python objects.

    Interface of Directory objects:
      * join(name)   returns a file or subdirectory object
      * listdir()    returns a list of names, or a list of (name, object)

    join() is optional if listdir() returns a list of (name, object).
    Alternatively, Directory objects can be plain dictionaries {name: object}.

    Interface of File objects:
      * size()       returns the size
      * read()       returns the data

    Alternatively, File objects can be plain strings.

    Interface of SymLink objects:
      * readlink()   returns the symlink's target, as a string
    """

    INFINITE = 86400.0     # cache timeout handed to the kernel, in seconds
    USE_DIR_CACHE = True   # cache getentries() results per directory node

    def __init__(self, rootnode):
        self.nodes = {FUSE_ROOT_ID: rootnode}
        if self.USE_DIR_CACHE:
            self.dircache = {}
        self.starttime = time.time()
        self.uid = os.getuid()
        self.gid = os.getgid()
        # read the process umask without permanently changing it
        self.umask = os.umask(0); os.umask(self.umask)

    def newattr(self, s, ino, mode=0666):
        """Build a fuse_attr for kind bits 's' and inode 'ino'."""
        if ino < 0:
            ino = ~ino     # keep inode numbers non-negative
        return fuse_attr(ino = ino,
                         size = 0,
                         mode = s | (mode & ~self.umask),
                         nlink = 1, # even on dirs! this confuses 'find' in
                                    # a good way :-)
                         atime = self.starttime,
                         mtime = self.starttime,
                         ctime = self.starttime,
                         uid = self.uid,
                         gid = self.gid)

    def getnode(self, nodeid):
        try:
            return self.nodes[nodeid]
        except KeyError:
            raise IOError(errno.ESTALE, nodeid)

    def getattr(self, node):
        # the node's Python type/interface determines the file type shown
        timeout = self.INFINITE
        if isinstance(node, str):
            attr = self.newattr(stat.S_IFREG, id(node))
            attr.size = len(node)
        elif hasattr(node, 'readlink'):
            target = node.readlink()
            attr = self.newattr(stat.S_IFLNK, id(node))
            attr.size = len(target)
            attr.mode |= 0777
        elif hasattr(node, 'size'):
            sz = node.size()
            attr = self.newattr(stat.S_IFREG, id(node))
            if sz is None:
                timeout = 0    # size not known yet: forbid kernel caching
            else:
                attr.size = sz
        else:
            attr = self.newattr(stat.S_IFDIR, id(node), mode=0777)
        #print 'getattr(%s) -> %s, %s' % (node, attr, timeout)
        return attr, timeout

    def getentries(self, node):
        """Return an OrderedDict {name: subnode-or-None} for a directory."""
        if isinstance(node, dict):
            return node
        try:
            if not self.USE_DIR_CACHE:
                raise KeyError
            return self.dircache[node]
        except KeyError:
            entries = OrderedDict()
            if hasattr(node, 'listdir'):
                for name in node.listdir():
                    if isinstance(name, tuple):
                        name, subnode = name
                    else:
                        subnode = None    # resolved lazily via join()
                    entries[name] = subnode
            if self.USE_DIR_CACHE:
                self.dircache[node] = entries
            return entries

    def listdir(self, node):
        # yields (name, TYPE_*) pairs, resolving children on demand
        entries = self.getentries(node)
        for name, subnode in entries.items():
            if subnode is None:
                subnode = node.join(name)
                self.nodes[uid(subnode)] = subnode
                entries[name] = subnode
            if isinstance(subnode, str):
                yield name, TYPE_REG
            elif hasattr(subnode, 'readlink'):
                yield name, TYPE_LNK
            elif hasattr(subnode, 'size'):
                yield name, TYPE_REG
            else:
                yield name, TYPE_DIR

    def lookup(self, node, name):
        entries = self.getentries(node)
        try:
            subnode = entries.get(name)
            if subnode is None:
                if hasattr(node, 'join'):
                    subnode = node.join(name)
                    entries[name] = subnode
                else:
                    raise KeyError
        except KeyError:
            # note: also reached if node.join(name) raises KeyError
            raise IOError(errno.ENOENT, name)
        else:
            return self.reply(subnode)

    def reply(self, node):
        # register the node and return (nodeid, timeout)
        res = uid(node)
        self.nodes[res] = node
        return res, self.INFINITE

    def open(self, node, mode):
        # accepts plain strings, File objects, or file-like read() results
        if not isinstance(node, str):
            node = node.read()
        if not hasattr(node, 'read'):
            node = StringIO(node)
        return node

    def readlink(self, node):
        return node.readlink()

    def getxattrs(self, node):
        return getattr(node, '__dict__', {})
+
+# ____________________________________________________________
+
import struct

# 2**(8*sizeof(void*)): wrap-around constant for negative id() values
try:
    HUGEVAL = 256 ** struct.calcsize('P')
except struct.error:
    HUGEVAL = 0

def fixid(result):
    """Map a (possibly negative) id into the unsigned range."""
    if result >= 0:
        return result
    return result + HUGEVAL

def uid(obj):
    """
    Return the id of an object as an unsigned number so that its hex
    representation makes sense
    """
    return fixid(id(obj))
+
class SymLink(object):
    """Trivial symlink node wrapping a fixed target string."""

    def __init__(self, target):
        self.target = target

    def readlink(self):
        return self.target
--- /dev/null
+"""
+Two magic tricks for classes:
+
+ class X:
+ __metaclass__ = extendabletype
+ ...
+
+ # in some other file...
+ class __extend__(X):
+ ... # and here you can add new methods and class attributes to X
+
+Mostly useful together with the second trick, which lets you build
+methods whose 'self' is a pair of objects instead of just one:
+
+ class __extend__(pairtype(X, Y)):
+ attribute = 42
+ def method((x, y), other, arguments):
+ ...
+
+ pair(x, y).attribute
+ pair(x, y).method(other, arguments)
+
+This finds methods and class attributes based on the actual
+class of both objects that go into the pair(), with the usual
+rules of method/attribute overriding in (pairs of) subclasses.
+
+For more information, see test_pairtype.
+"""
+
class extendabletype(type):
    """A type with a syntax trick: 'class __extend__(t)' actually extends
    the definition of 't' instead of creating a new subclass."""

    def __new__(mcs, name, bases, namespace):
        if name != '__extend__':
            # normal class creation
            return super(extendabletype, mcs).__new__(mcs, name, bases,
                                                      namespace)
        # patch every listed base in place instead of subclassing
        for target in bases:
            for key, value in namespace.items():
                if key == '__module__':
                    continue
                # XXX do we need to provide something more for pickling?
                setattr(target, key, value)
        return None
+
+
def pair(a, b):
    """Return a pair object."""
    # pairtype() returns a subclass of tuple
    return pairtype(a.__class__, b.__class__)((a, b))
+
pairtypecache = {}

def pairtype(cls1, cls2):
    """type(pair(a,b)) is pairtype(a.__class__, b.__class__)."""
    key = (cls1, cls2)
    if key not in pairtypecache:
        name = 'pairtype(%s, %s)' % (cls1.__name__, cls2.__name__)
        # recursively build the pairtypes of all base-class combinations
        parents = ([pairtype(base, cls2) for base in cls1.__bases__] +
                   [pairtype(cls1, base) for base in cls2.__bases__])
        bases = tuple(parents) or (tuple,)   # 'tuple': ultimate base
        pairtypecache[key] = extendabletype(name, bases, {})
    return pairtypecache[key]
--- /dev/null
+from kernel import *
+import errno, posixpath, os
+
+
class PathFs(object):
    """Base class for a read-write FUSE file system interface
    whose underlying content is best accessed with '/'-separated
    string paths.

    Subclasses implement check_path(), mknod_path(), mkdir_path(),
    unlink_path(), rmdir_path() and rename_path().
    """
    # uid/gid/umask are sampled once, at class-definition time
    uid = os.getuid()
    gid = os.getgid()
    umask = os.umask(0); os.umask(umask)
    timeout = 86400.0    # cache timeout handed to the kernel, in seconds

    def __init__(self, root=''):
        # two-way mapping between kernel node ids and path strings
        self._paths = {FUSE_ROOT_ID: root}
        self._path2id = {root: FUSE_ROOT_ID}
        self._nextid = FUSE_ROOT_ID + 1

    def getnode(self, nodeid):
        try:
            return self._paths[nodeid]
        except KeyError:
            raise IOError(errno.ESTALE, nodeid)

    def forget(self, nodeid):
        # kernel dropped this node: release both mapping entries
        try:
            p = self._paths.pop(nodeid)
            del self._path2id[p]
        except KeyError:
            pass

    def cachepath(self, path):
        """Return the node id for 'path', assigning a fresh one if needed."""
        if path in self._path2id:
            return self._path2id[path]
        id = self._nextid
        self._nextid += 1
        self._paths[id] = path
        self._path2id[path] = id
        return id

    def mkattr(self, path, size, st_kind, mode, time):
        """Build the (fuse_attr, timeout) reply for 'path'."""
        attr = fuse_attr(ino = self._path2id[path],
                         size = size,
                         mode = st_kind | (mode & ~self.umask),
                         nlink = 1, # even on dirs! this confuses 'find' in
                                    # a good way :-)
                         atime = time,
                         mtime = time,
                         ctime = time,
                         uid = self.uid,
                         gid = self.gid)
        return attr, self.timeout

    def lookup(self, path, name):
        npath = posixpath.join(path, name)
        if not self.check_path(npath):
            raise IOError(errno.ENOENT, name)
        return self.cachepath(npath), self.timeout

    def mknod(self, path, name, mode):
        npath = posixpath.join(path, name)
        self.mknod_path(npath, mode)
        return self.cachepath(npath), self.timeout

    def mkdir(self, path, name, mode):
        npath = posixpath.join(path, name)
        self.mkdir_path(npath, mode)
        return self.cachepath(npath), self.timeout

    def unlink(self, path, name):
        npath = posixpath.join(path, name)
        self.unlink_path(npath)

    def rmdir(self, path, name):
        npath = posixpath.join(path, name)
        self.rmdir_path(npath)

    def rename(self, oldpath, oldname, newpath, newname):
        noldpath = posixpath.join(oldpath, oldname)
        nnewpath = posixpath.join(newpath, newname)
        if not self.rename_path(noldpath, nnewpath):
            raise IOError(errno.ENOENT, oldname)
        # fix all paths in the cache
        # (items() yields a snapshot list here, so mutating self._paths
        # inside the loop is safe)
        N = len(noldpath)
        for id, path in self._paths.items():
            if path.startswith(noldpath):
                # only exact match or a true subpath, not e.g. 'foobar'
                if len(path) == N or path[N] == '/':
                    del self._path2id[path]
                    path = nnewpath + path[N:]
                    self._paths[id] = path
                    self._path2id[path] = id
--- /dev/null
+from kernel import *
+import errno, posixpath, weakref
+from time import time as now
+from stat import S_IFDIR, S_IFREG, S_IFMT
+from cStringIO import StringIO
+from handler import Handler
+from pathfs import PathFs
+from pysvn.ra_filesystem import SvnRepositoryFilesystem
+import pysvn.date
+
+
+class SvnFS(PathFs):
+
+ def __init__(self, svnurl, root=''):
+ super(SvnFS, self).__init__(root)
+ self.svnurl = svnurl
+ self.openfiles = weakref.WeakValueDictionary()
+ self.creationtimes = {}
+ self.do_open()
+
+ def do_open(self, rev='HEAD'):
+ self.fs = SvnRepositoryFilesystem(svnurl, rev)
+
+ def do_commit(self, msg):
+ rev = self.fs.commit(msg)
+ if rev is None:
+ print '* no changes.'
+ else:
+ print '* checked in revision %d.' % (rev,)
+ self.do_open()
+
+ def do_status(self, path=''):
+ print '* status'
+ result = []
+ if path and not path.endswith('/'):
+ path += '/'
+ for delta in self.fs._compute_deltas():
+ if delta.path.startswith(path):
+ if delta.oldrev is None:
+ c = 'A'
+ elif delta.newrev is None:
+ c = 'D'
+ else:
+ c = 'M'
+ result.append(' %s %s\n' % (c, delta.path[len(path):]))
+ return ''.join(result)
+
+ def getattr(self, path):
+ stat = self.fs.stat(path)
+ if stat['svn:entry:kind'] == 'dir':
+ s = S_IFDIR
+ mode = 0777
+ else:
+ s = S_IFREG
+ mode = 0666
+ try:
+ time = pysvn.date.decode(stat['svn:entry:committed-date'])
+ except KeyError:
+ try:
+ time = self.creationtimes[path]
+ except KeyError:
+ time = self.creationtimes[path] = now()
+ return self.mkattr(path,
+ size = stat.get('svn:entry:size', 0),
+ st_kind = s,
+ mode = mode,
+ time = time)
+
+ def setattr(self, path, mode, uid, gid, size, atime, mtime):
+ if size is not None:
+ data = self.fs.read(path)
+ if size < len(data):
+ self.fs.write(path, data[:size])
+ elif size > len(data):
+ self.fs.write(path, data + '\x00' * (size - len(data)))
+
+ def listdir(self, path):
+ for name in self.fs.listdir(path):
+ kind = self.fs.check_path(posixpath.join(path, name))
+ if kind == 'dir':
+ yield name, TYPE_DIR
+ else:
+ yield name, TYPE_REG
+
+ def check_path(self, path):
+ kind = self.fs.check_path(path)
+ return kind is not None
+
+ def open(self, path, mode):
+ try:
+ of = self.openfiles[path]
+ except KeyError:
+ of = self.openfiles[path] = OpenFile(self.fs.read(path))
+ return of, FOPEN_KEEP_CACHE
+
+ def modified(self, path):
+ try:
+ of = self.openfiles[path]
+ except KeyError:
+ pass
+ else:
+ self.fs.write(path, of.f.getvalue())
+
+ def mknod_path(self, path, mode):
+ self.fs.add(path)
+
+ def mkdir_path(self, path, mode):
+ self.fs.mkdir(path)
+
+ def unlink_path(self, path):
+ self.fs.unlink(path)
+
+ def rmdir_path(self, path):
+ self.fs.rmdir(path)
+
+ def rename_path(self, oldpath, newpath):
+ kind = self.fs.check_path(oldpath)
+ if kind is None:
+ return False
+ self.fs.move(oldpath, newpath, kind)
+ return True
+
+ def getxattrs(self, path):
+ return XAttrs(self, path)
+
+
class OpenFile:
    """In-memory working copy of one file's content.

    Uses the write-then-seek idiom deliberately: cStringIO.StringIO(data)
    would return a read-only buffer, while an empty StringIO() is writable.
    The underlying buffer 'f' is read back by SvnFS.modified().
    """
    def __init__(self, data=''):
        self.f = StringIO()
        self.f.write(data)
        self.f.seek(0)

    def seek(self, pos):
        self.f.seek(pos)

    def read(self, sz):
        return self.f.read(sz)

    def write(self, buf):
        self.f.write(buf)
+
+
class XAttrs:
    """Expose svn operations as extended attributes.

    Reading 'status' returns a status report; writing 'commit' or
    'update' on the root triggers the corresponding svn operation.
    """

    def __init__(self, svnfs, path):
        self.svnfs = svnfs
        self.path = path

    def keys(self):
        # no attributes are advertised; they are reachable by name only
        return []

    def __getitem__(self, key):
        if key != 'status':
            raise KeyError(key)
        return self.svnfs.do_status(self.path)

    def __setitem__(self, key, value):
        # 'commit' and 'update' are only valid on the root
        if self.path != '':
            raise KeyError(key)
        if key == 'commit':
            self.svnfs.do_commit(value)
            return
        if key != 'update':
            raise KeyError(key)
        if self.svnfs.fs.modified():
            raise IOError(errno.EPERM, "there are local changes")
        if value == '':
            rev = 'HEAD'
        else:
            try:
                rev = int(value)
            except ValueError:
                raise IOError(errno.EPERM, "invalid revision number")
        self.svnfs.do_open(rev)

    def __delitem__(self, key):
        raise KeyError(key)
+
+
if __name__ == '__main__':
    # usage: python svnfs.py <svnurl> <mountpoint>
    import sys
    svnurl, mountpoint = sys.argv[1:]
    Handler(mountpoint, SvnFS(svnurl)).loop_forever()
--- /dev/null
+"""
+A read-only svn fs showing all the revisions in subdirectories.
+"""
+from objectfs import ObjectFs, SymLink
+from handler import Handler
+from pysvn.ra import connect
+from pysvn.date import decode
+import errno, posixpath, time
+
+
+#USE_SYMLINKS = 0 # they are wrong if the original file had another path
+
+# use getfattr -d filename to see the node's attributes, which include
+# information like the revision at which the file was last modified
+
+
class Root:
    """Top-level directory: one subdirectory per repository revision,
    plus a 'HEAD' symlink to the latest one."""

    def __init__(self, svnurl):
        self.svnurl = svnurl
        self.ra = connect(svnurl)
        self.head = self.ra.get_latest_rev()

    def listdir(self):
        names = [str(rev) for rev in range(1, self.head + 1)]
        names.append('HEAD')
        return names

    def join(self, name):
        if name == 'HEAD':
            return SymLink(str(self.head))
        try:
            rev = int(name)
        except ValueError:
            raise KeyError(name)
        return TopLevelDir(self.ra, rev, rev, '')
+
+
class Node:
    """Base for revision-tree nodes: remembers the repository access
    object, the revision being browsed, the last-changed revision and
    the path inside that revision."""

    def __init__(self, ra, rev, last_changed_rev, path):
        self.ra, self.rev = ra, rev
        self.last_changed_rev, self.path = last_changed_rev, path

    def __repr__(self):
        return '<%s %d/%s>' % (self.__class__.__name__, self.rev, self.path)
+
class Dir(Node):
    """A directory at a fixed revision."""

    def listdir(self):
        _, _, entries = self.ra.get_dir(self.path, self.rev,
                                        want_props=False)
        for name, stats in entries.items():
            child = getnode(self.ra, self.rev,
                            posixpath.join(self.path, name), stats)
            yield name, child
+
class File(Node):
    """A file at a fixed revision; the size is known up-front, the
    content is fetched on demand."""

    def __init__(self, ra, rev, last_changed_rev, path, size):
        Node.__init__(self, ra, rev, last_changed_rev, path)
        self.filesize = size

    def size(self):
        return self.filesize

    def read(self):
        reply = self.ra.get_file(self.path, self.rev, want_props=False)
        checksum, rev, props, data = reply
        return data
+
+
class TopLevelDir(Dir):
    """Root directory of one revision; adds a virtual 'svn:log' file."""

    def listdir(self):
        for entry in Dir.listdir(self):
            yield entry
        yield 'svn:log', Log(self.ra, self.rev)
+
class Log:
    """Virtual file showing the commit log message of one revision."""

    def __init__(self, ra, rev):
        self.ra = ra
        self.rev = rev

    def getlogentry(self):
        # fetch and cache the (single) log entry for this revision;
        # None when the revision did not touch this subtree
        try:
            return self.logentry
        except AttributeError:
            pass
        found = self.ra.log('', startrev=self.rev, endrev=self.rev)
        try:
            entry, = found
        except ValueError:
            entry = None
        self.logentry = entry
        return entry

    def size(self):
        return len(self.read())

    def read(self):
        entry = self.getlogentry()
        if entry is None:
            return 'r%d | (no change here)\n' % (self.rev,)
        datetuple = time.gmtime(decode(entry.date))
        stamp = time.strftime("%c", datetuple)
        return 'r%d | %s | %s\n\n%s' % (self.rev, entry.author, stamp,
                                        entry.message)
+
+
# An alternative USE_SYMLINKS implementation of getnode() -- exposing
# entries unchanged since an older revision as symlinks into that
# revision -- used to live here inside a dead 'if 0:' block; it was
# removed together with the scaffolding (see version control history).
def getnode(ra, rev, path, stats):
    """Build a File or Dir node from the svn entry properties 'stats'."""
    last_changed_rev = stats['svn:entry:committed-rev']
    kind = stats['svn:entry:kind']
    if kind == 'file':
        return File(ra, rev, last_changed_rev, path,
                    stats['svn:entry:size'])
    elif kind == 'dir':
        return Dir(ra, rev, last_changed_rev, path)
    else:
        raise IOError(errno.EINVAL, "kind %r" % (kind,))
+
+
if __name__ == '__main__':
    # usage: python svnfs_all_revisions.py <svnurl> <mountpoint>
    import sys
    svnurl, mountpoint = sys.argv[1:]
    Handler(mountpoint, ObjectFs(Root(svnurl))).loop_forever()
--- /dev/null
+from kernel import *
+import stat, errno, os, time
+from cStringIO import StringIO
+from OrderedDict import OrderedDict
+
+INFINITE = 86400.0
+
+
class Wrapper(object):
    """Delegating wrapper: hashes and compares like the wrapped object,
    while getuid() identifies the wrapped object rather than the wrapper."""

    def __init__(self, obj):
        self.obj = obj

    def getuid(self):
        return uid(self.obj)

    def __hash__(self):
        return hash(self.obj)

    def __eq__(self, other):
        return self.obj == other

    def __ne__(self, other):
        return self.obj != other
+
+
+class BaseDir(object):
+
+ def join(self, name):
+ "Return a file or subdirectory object"
+ for item in self.listdir():
+ if isinstance(item, tuple):
+ subname, subnode = item
+ if subname == name:
+ return subnode
+ raise KeyError(name)
+
+ def listdir(self):
+ "Return a list of names, or a list of (name, object)"
+ raise NotImplementedError
+
+ def create(self, name):
+ "Create a file"
+ raise NotImplementedError
+
+ def mkdir(self, name):
+ "Create a subdirectory"
+ raise NotImplementedError
+
+ def symlink(self, name, target):
+ "Create a symbolic link"
+ raise NotImplementedError
+
+ def unlink(self, name):
+ "Remove a file or subdirectory."
+ raise NotImplementedError
+
+ def rename(self, newname, olddirnode, oldname):
+ "Move another node into this directory."
+ raise NotImplementedError
+
+ def getuid(self):
+ return uid(self)
+
+ def getattr(self, fs):
+ return fs.newattr(stat.S_IFDIR, self.getuid(), mode=0777), INFINITE
+
+ def setattr(self, **kwds):
+ pass
+
+ def getentries(self):
+ entries = OrderedDict()
+ for name in self.listdir():
+ if isinstance(name, tuple):
+ name, subnode = name
+ else:
+ subnode = None
+ entries[name] = subnode
+ return entries
+
+
class BaseFile(object):
    """Abstract regular-file node for RWObjectFs; subclasses provide
    open()."""

    def size(self):
        "Return the size of the file, or None if not known yet"
        content = self.open()
        if isinstance(content, str):
            return len(content)
        content.seek(0, 2)
        return content.tell()

    def open(self):
        "Return the content as a string or a file-like object"
        raise NotImplementedError

    def getuid(self):
        return uid(self)

    def getattr(self, fs):
        sz = self.size()
        attr = fs.newattr(stat.S_IFREG, self.getuid())
        if sz is None:
            # size unknown: forbid the kernel from caching the attrs
            timeout = 0
        else:
            attr.size = sz
            timeout = INFINITE
        return attr, timeout

    def setattr(self, size, **kwds):
        # only truncation/extension is supported, and only on writable
        # (file-like) content
        f = self.open()
        if self.size() == size:
            return
        if isinstance(f, str):
            raise IOError(errno.EPERM)
        f.seek(size)
        f.truncate()
+
+
+class BaseSymLink(object):
+
+ def readlink(self):
+ "Return the symlink's target, as a string"
+ raise NotImplementedError
+
+ def getuid(self):
+ return uid(self)
+
+ def getattr(self, fs):
+ target = self.readlink()
+ attr = fs.newattr(stat.S_IFLNK, self.getuid())
+ attr.size = len(target)
+ attr.mode |= 0777
+ return attr, INFINITE
+
+ def setattr(self, **kwds):
+ pass
+
+# ____________________________________________________________
+
class Dir(BaseDir):
    """Concrete in-memory directory mapping names to child nodes."""

    def __init__(self, **contents):
        self.contents = contents

    def listdir(self):
        return self.contents.items()

    def join(self, name):
        return self.contents[name]

    def create(self, fs, name):
        return self._insert(name, fs.File())

    def mkdir(self, fs, name):
        return self._insert(name, fs.Dir())

    def symlink(self, fs, name, target):
        return self._insert(name, fs.SymLink(target))

    def _insert(self, name, node):
        # register and return the freshly built child node
        self.contents[name] = node
        return node

    def unlink(self, name):
        del self.contents[name]

    def rename(self, newname, olddirnode, oldname):
        moving = olddirnode.join(oldname)
        olddirnode.unlink(oldname)
        self.contents[newname] = moving
+
class File(BaseFile):
    """Concrete in-memory file backed by a StringIO buffer."""

    def __init__(self):
        self.data = StringIO()

    def size(self):
        self.data.seek(0, 2)
        return self.data.tell()

    def open(self):
        return self.data
+
class SymLink(BaseSymLink):
    """Concrete in-memory symlink.

    Bug fix: this inherited from BaseFile, whose getattr() calls
    size()/open() and therefore raised NotImplementedError for every
    stat of a symlink; BaseSymLink implements getattr() via readlink().
    """

    def __init__(self, target):
        self.target = target

    def readlink(self):
        return self.target
+
+# ____________________________________________________________
+
+
+class RWObjectFs(object):
+ """A simple read-write file system based on Python objects."""
+
+ UID = os.getuid()
+ GID = os.getgid()
+ UMASK = os.umask(0); os.umask(UMASK)
+
+ Dir = Dir
+ File = File
+ SymLink = SymLink
+
+ def __init__(self, rootnode):
+ self.nodes = {FUSE_ROOT_ID: rootnode}
+ self.starttime = time.time()
+
+ def newattr(self, s, ino, mode=0666):
+ return fuse_attr(ino = ino,
+ size = 0,
+ mode = s | (mode & ~self.UMASK),
+ nlink = 1, # even on dirs! this confuses 'find' in
+ # a good way :-)
+ atime = self.starttime,
+ mtime = self.starttime,
+ ctime = self.starttime,
+ uid = self.UID,
+ gid = self.GID)
+
+ def getnode(self, nodeid):
+ try:
+ return self.nodes[nodeid]
+ except KeyError:
+ raise IOError(errno.ESTALE, nodeid)
+
+ def getattr(self, node):
+ return node.getattr(self)
+
+ def setattr(self, node, mode, uid, gid, size, atime, mtime):
+ node.setattr(mode=mode, uid=uid, gid=gid, size=size,
+ atime=atime, mtime=mtime)
+
+ def listdir(self, node):
+ entries = node.getentries()
+ for name, subnode in entries.items():
+ if subnode is None:
+ subnode = node.join(name)
+ self.nodes[uid(subnode)] = subnode
+ entries[name] = subnode
+ if isinstance(subnode, str):
+ yield name, TYPE_REG
+ elif hasattr(subnode, 'readlink'):
+ yield name, TYPE_LNK
+ elif hasattr(subnode, 'size'):
+ yield name, TYPE_REG
+ else:
+ yield name, TYPE_DIR
+
+ def lookup(self, node, name):
+ try:
+ subnode = node.join(name)
+ except KeyError:
+ raise IOError(errno.ENOENT, name)
+ else:
+ res = uid(subnode)
+ self.nodes[res] = subnode
+ return res, INFINITE
+
+ def mknod(self, dirnode, filename, mode):
+ node = dirnode.create(filename)
+ return self.newnodeid(node), INFINITE
+
+ def mkdir(self, dirnode, subdirname, mode):
+ node = dirnode.mkdir(subdirname)
+ return self.newnodeid(node), INFINITE
+
+ def symlink(self, dirnode, linkname, target):
+ node = dirnode.symlink(linkname, target)
+ return self.newnodeid(node), INFINITE
+
+ def unlink(self, dirnode, filename):
+ try:
+ dirnode.unlink(filename)
+ except KeyError:
+ raise IOError(errno.ENOENT, filename)
+
+ rmdir = unlink
+
+ def open(self, node, mode):
+ f = node.open()
+ if isinstance(f, str):
+ f = StringIO(f)
+ return f
+
+ def readlink(self, node):
+ return node.readlink()
+
+ def rename(self, olddirnode, oldname, newdirnode, newname):
+ try:
+ newdirnode.rename(newname, olddirnode, oldname)
+ except KeyError:
+ raise IOError(errno.ENOENT, oldname)
+
+ def getxattrs(self, node):
+ return getattr(node, '__dict__', {})
+
+# ____________________________________________________________
+
import struct

# 2**(8*sizeof(void*)): wrap-around constant for negative id() values
try:
    HUGEVAL = 256 ** struct.calcsize('P')
except struct.error:
    HUGEVAL = 0

def fixid(result):
    """Map a (possibly negative) id into the unsigned range."""
    if result >= 0:
        return result
    return result + HUGEVAL

def uid(obj):
    """
    Return the id of an object as an unsigned number so that its hex
    representation makes sense
    """
    return fixid(id(obj))
--- /dev/null
+import py
+from handler import Handler
+from objectfs import ObjectFs
+
+
class SvnDir:
    """Directory node backed by a py.path svnurl-like object."""

    def __init__(self, path):
        self.path = path

    def listdir(self):
        # Yield (basename, node) pairs, wrapping each child lazily.
        for child in self.path.listdir():
            node_class = SvnDir if child.check(dir=1) else SvnFile
            yield child.basename, node_class(child)
+
+
class SvnFile:
    # Cached file contents; None until the first read().
    data = None

    def __init__(self, path):
        self.path = path

    def size(self):
        # Size is unknown until the file has actually been fetched.
        return None if self.data is None else len(self.data)

    def read(self):
        # Fetch once, then serve from the cache.
        if self.data is None:
            self.data = self.path.read()
        return self.data
+
+
if __name__ == '__main__':
    import sys
    # usage: <script> <svn-url> <mountpoint>
    svnurl, mountpoint = sys.argv[1:]
    root = SvnDir(py.path.svnurl(svnurl))
    Handler(mountpoint, ObjectFs(root)).loop_forever()
--- /dev/null
+"""
+PyFuse client for the Tahoe distributed file system.
+See http://allmydata.org/
+"""
+
+# Read-only for now.
+
+# Portions copied from the file contrib/fuse/tahoe_fuse.py distributed
+# with Tahoe 1.0.0.
+
+import os, sys
+from objectfs import ObjectFs
+from handler import Handler
+import simplejson
+import urllib
+
+
### Config:
# Tahoe node configuration directory; must contain 'webport' and
# 'private/root_dir.cap'.  Expanded with os.path.expanduser before use.
TahoeConfigDir = '~/.tahoe'
+
+
+### Utilities for debug:
def log(msg, *args):
    """printf-style debug trace: `msg` is a %-format string for `args`."""
    print msg % args
+
+
class TahoeConnection:
    """Access to a local Tahoe node through its HTTP web gateway.

    The node's configuration directory supplies both the gateway port
    and the root directory capability.
    """

    def __init__(self, confdir):
        self.confdir = confdir
        self._init_url()

    def _init_url(self):
        # The node records its listening endpoint as "tcp:PORT[:...]".
        f = open(os.path.join(self.confdir, 'webport'), 'r')
        contents = f.read()
        f.close()
        proto, port = contents.split(':')[:2]
        assert proto == 'tcp'
        self.url = 'http://localhost:%d' % (int(port),)

    def get_root(self):
        # For now we just use the same default as the CLI:
        rootdirfn = os.path.join(self.confdir, 'private', 'root_dir.cap')
        f = open(rootdirfn, 'r')
        cap = f.read().strip()
        f.close()
        return TahoeDir(self, canonicalize_cap(cap))
+
+
class TahoeNode:
    """Base class for Tahoe objects identified by a capability URI."""

    def __init__(self, conn, uri):
        self.conn = conn
        self.uri = uri

    def get_metadata(self):
        """Fetch and decode this node's JSON metadata from the webapi."""
        f = self._open('?t=json')
        raw = f.read()
        f.close()
        return simplejson.loads(raw)

    def _open(self, postfix=''):
        # GET <gateway>/uri/<cap><postfix>
        target = '%s/uri/%s%s' % (self.conn.url, self.uri, postfix)
        log('*** Fetching: %r', target)
        return urllib.urlopen(target)
+
+
class TahoeDir(TahoeNode):
    """A Tahoe directory node ('dirnode' in the webapi JSON)."""

    def listdir(self):
        # Return (name, node) pairs, always using the read-only URIs.
        flag, meta = self.get_metadata()
        assert flag == 'dirnode'
        children = []
        for name, (kind, childmeta) in meta['children'].items():
            factory = TahoeDir if kind == 'dirnode' else TahoeFile
            children.append((str(name), factory(self.conn, childmeta['ro_uri'])))
        return children
+
class TahoeFile(TahoeNode):
    """A Tahoe file node; data is fetched through the webapi."""

    def size(self):
        # The byte size comes straight from the JSON metadata.
        return self.get_metadata()[1]['size']

    def read(self):
        # Whole-file read via the gateway.
        f = self._open()
        return f.read()
+
+
def canonicalize_cap(cap):
    """Strip URL-quoting and any leading junk, keeping the 'URI:...' tail."""
    cap = urllib.unquote(cap)
    start = cap.find('URI:')
    assert start >= 0, 'A cap must contain "URI:...", but this does not: ' + cap
    return cap[start:]
+
def main(mountpoint, basedir):
    """Mount the Tahoe root directory (read-only) at `mountpoint`."""
    conn = TahoeConnection(basedir)
    handler = Handler(mountpoint, ObjectFs(conn.get_root()))
    handler.loop_forever()
+
if __name__ == '__main__':
    # Single command-line argument: the mountpoint.
    [mountpoint] = sys.argv[1:]
    main(mountpoint, os.path.expanduser(TahoeConfigDir))
--- /dev/null
+from handler import Handler
+import stat, errno, os, time
+from cStringIO import StringIO
+from kernel import *
+
+
# Owner credentials of the mounting process; all new nodes inherit them.
UID = os.getuid()
GID = os.getgid()
# The umask can only be read by setting it: set to 0, then restore.
UMASK = os.umask(0); os.umask(UMASK)
# Attribute/entry validity (seconds) reported to the kernel: one day.
INFINITE = 86400.0
+
+
class Node(object):
    """One in-memory filesystem node: attributes plus type-dependent data.

    `data` is a StringIO for regular files, a {name: nodeid} dict for
    directories, and the target string for symlinks.
    """
    __slots__ = ['attr', 'data']

    def __init__(self, attr, data=None):
        self.attr = attr
        self.data = data

    def type(self):
        return mode2type(self.attr.mode)

    def modified(self):
        """Refresh timestamps and the metadata derived from `data`."""
        self.attr.mtime = self.attr.atime = time.time()
        kind = self.type()
        if kind == TYPE_REG:
            # Size = current end-of-file; preserve the file position.
            f = self.data
            saved = f.tell()
            f.seek(0, 2)
            self.attr.size = f.tell()
            f.seek(saved)
        elif kind == TYPE_DIR:
            # Directory node ids are odd (see Filesystem.cachenode), so
            # the low bit of each child id counts subdirectories.
            subdirs = sum(nodeid & 1 for nodeid in self.data.values())
            self.attr.nlink = 2 + subdirs
+
+
+def newattr(s, mode=0666):
+ now = time.time()
+ return fuse_attr(ino = INVALID_INO,
+ size = 0,
+ mode = s | (mode & ~UMASK),
+ nlink = 1 + (s == stat.S_IFDIR),
+ atime = now,
+ mtime = now,
+ ctime = now,
+ uid = UID,
+ gid = GID)
+
+# ____________________________________________________________
+
class Filesystem:
    """A purely in-memory filesystem.

    Nodes live in self.nodes keyed by integer id; the root node sits at
    FUSE_ROOT_ID.  Fresh ids are allocated so that directory ids are odd
    and file/symlink ids are even -- Node.modified() relies on this
    parity to count subdirectories when computing nlink.
    """

    def __init__(self, rootnode):
        self.nodes = {FUSE_ROOT_ID: rootnode}
        self.nextid = 2
        assert self.nextid > FUSE_ROOT_ID

    def getnode(self, nodeid):
        # Translate a kernel-supplied id into a Node; ESTALE if unknown.
        try:
            return self.nodes[nodeid]
        except KeyError:
            raise IOError(errno.ESTALE, nodeid)

    def forget(self, nodeid):
        # Nodes are kept forever; the kernel's forget is a no-op here.
        pass

    def cachenode(self, node):
        # Allocate a fresh id: even for files/symlinks, odd (id += 1)
        # for directories -- see the class docstring.
        id = self.nextid
        self.nextid += 2
        if node.type() == TYPE_DIR:
            id += 1
        self.nodes[id] = node
        return id

    def getattr(self, node):
        return node.attr, INFINITE

    def setattr(self, node, mode=None, uid=None, gid=None,
                size=None, atime=None, mtime=None):
        # Apply only the attributes that were actually supplied.
        if mode is not None: node.attr.mode = (node.attr.mode&~0777) | mode
        if uid is not None: node.attr.uid = uid
        if gid is not None: node.attr.gid = gid
        if atime is not None: node.attr.atime = atime
        if mtime is not None: node.attr.mtime = mtime
        if size is not None and node.type() == TYPE_REG:
            # Truncate/extend a regular file via its StringIO buffer.
            node.data.seek(size)
            node.data.truncate()

    def listdir(self, node):
        # Yield (name, TYPE_*) pairs for a directory node.
        for name, subnodeid in node.data.items():
            subnode = self.nodes[subnodeid]
            yield name, subnode.type()

    def lookup(self, node, name):
        # Fast path: the entry already exists in the directory dict.
        try:
            return node.data[name], INFINITE
        except KeyError:
            pass
        # Optional hook: a node may produce children lazily on demand.
        if hasattr(node, 'findnode'):
            try:
                subnode = node.findnode(name)
            except KeyError:
                pass
            else:
                id = self.cachenode(subnode)
                node.data[name] = id
                return id, INFINITE
        raise IOError(errno.ENOENT, name)

    def open(self, node, mode):
        # Regular files hand out their StringIO buffer directly.
        return node.data

    def mknod(self, node, name, mode):
        # `mode` mixes the file-type (S_IFMT) bits with permission bits.
        subnode = Node(newattr(mode & 0170000, mode & 0777))
        if subnode.type() == TYPE_REG:
            subnode.data = StringIO()
        else:
            # Only regular files can be mknod'ed in this filesystem.
            raise NotImplementedError
        id = self.cachenode(subnode)
        node.data[name] = id
        node.modified()
        return id, INFINITE

    def mkdir(self, node, name, mode):
        subnode = Node(newattr(stat.S_IFDIR, mode & 0777), {})
        id = self.cachenode(subnode)
        node.data[name] = id
        node.modified()
        return id, INFINITE

    def symlink(self, node, linkname, target):
        # The link target string is stored directly as the node's data.
        subnode = Node(newattr(stat.S_IFLNK, 0777), target)
        id = self.cachenode(subnode)
        node.data[linkname] = id
        node.modified()
        return id, INFINITE

    def readlink(self, node):
        assert node.type() == TYPE_LNK
        return node.data

    def unlink(self, node, name):
        try:
            del node.data[name]
        except KeyError:
            raise IOError(errno.ENOENT, name)
        node.modified()

    # NOTE(review): directories are removed exactly like files --
    # emptiness is not checked, so rmdir of a non-empty directory
    # silently strands its children in self.nodes.  Confirm intended.
    rmdir = unlink

    def rename(self, oldnode, oldname, newnode, newname):
        if newnode.type() != TYPE_DIR:
            raise IOError(errno.ENOTDIR, newnode)
        try:
            nodeid = oldnode.data.pop(oldname)
        except KeyError:
            raise IOError(errno.ENOENT, oldname)
        oldnode.modified()
        newnode.data[newname] = nodeid
        newnode.modified()

    def modified(self, node):
        # Handler hook: called after writes to refresh size/timestamps.
        node.modified()
+
+# ____________________________________________________________
+
if __name__ == '__main__':
    import sys
    # Take the mountpoint from the command line like the sibling scripts
    # do; keep the historical hard-coded path as the default so the old
    # zero-argument invocation still works.
    if len(sys.argv) > 1:
        mountpoint = sys.argv[1]
    else:
        mountpoint = '/home/arigo/mnt'
    root = Node(newattr(stat.S_IFDIR), {})
    handler = Handler(mountpoint, Filesystem(root))
    handler.loop_forever()