-import os, tempfile, heapq, binascii, traceback, sys
+import os, tempfile, heapq, binascii, traceback, array, stat
from stat import S_IFREG, S_IFDIR
+from time import time, strftime, localtime
from zope.interface import implements
from twisted.python import components
from twisted.cred import portal
from twisted.internet import defer
-from twisted.python.failure import Failure
from twisted.internet.interfaces import IFinishableConsumer
from foolscap.api import eventually\r
from allmydata.util import deferredutil
message="BaseException.message has been deprecated as of Python 2.6",
module=".*filetransfer", append=True)
-debug = False
+noisy = True
+use_foolscap_logging = True
-if debug:
+if use_foolscap_logging:
+ from allmydata.util.log import msg as logmsg, err as logerr, \
+ NOISY, OPERATIONAL, SCARY, PrefixingLogMixin
+else:
+ def logmsg(s, level=None):
+ print s
+ def logerr(s, level=None):
+ print s
+ NOISY = None
+ OPERATIONAL = None
+ SCARY = None
+ class PrefixingLogMixin:
+ def __init__(self, facility=None):
+ pass
+ def log(self, s, level=None):
+ print s
+
+if noisy:
def eventually_callback(d):
s = traceback.format_stack()
def _cb(res):
try:
- print "CALLBACK %r %r" % (d, res)
+ if noisy: logmsg("CALLBACK %r %r" % (d, res), level=NOISY)
d.callback(res)
except: # pragma: no cover
- print "Failed to callback %r" % (d,)
- print "with %r" % (res,)
- print "Original stack:"
- print '!' + '!'.join(s)
+ logerr("Failed to callback %r with %r\n"
+ "Original stack:\n!%s" %
+ (d, res, '!'.join(s)), level=SCARY)
traceback.print_exc()
raise
return lambda res: eventually(_cb, res)
s = traceback.format_stack()
def _eb(err):
try:
- print "ERRBACK", d, err
+ if noisy: logmsg("ERRBACK %r %r" % (d, err), level=NOISY)
d.errback(err)
except: # pragma: no cover
- print "Failed to errback %r" % (d,)
- print "with %r" % (err,)
- print "Original stack:"
- print '!' + '!'.join(s)
+ logerr("Failed to errback %r with %r\n"
+ "Original stack:\n!%s" %
+ (d, err, '!'.join(s)), level=SCARY)
traceback.print_exc()
raise
return lambda err: eventually(_eb, err)
def _raise_error(err):
if err is None:
return None
- if debug:
- print "TRACEBACK %r" % (err,)
- #traceback.print_exc(err)
+ if noisy: logmsg("RAISE %r" % (err,), level=NOISY)
+ #traceback.print_exc(err)
# The message argument to SFTPError must not reveal information that
# might compromise anonymity.
if err.check(NoSuchChildError):
childname = err.value.args[0].encode('utf-8')
raise SFTPError(FX_NO_SUCH_FILE, childname)
- if err.check(ExistingChildError):
- msg = err.value.args[0].encode('utf-8')
+ if err.check(ExistingChildError) or err.check(NotWriteableError):
# later versions of SFTP define FX_FILE_ALREADY_EXISTS, but version 3 doesn't
- raise SFTPError(FX_PERMISSION_DENIED, msg)
- if err.check(NotWriteableError):
msg = err.value.args[0].encode('utf-8')
raise SFTPError(FX_PERMISSION_DENIED, msg)
if err.check(NotImplementedError):
]
if f])
+def _lsLine(name, attrs):
+ st_uid = "tahoe"
+ st_gid = "tahoe"
+ st_mtime = attrs.get("mtime", 0)
+ st_mode = attrs["permissions"]
+ # TODO: check that clients are okay with this being a "?".
+ # (They should be because the longname is intended for human
+ # consumption.)
+ st_size = attrs.get("size", "?")
+ # We don't know how many links there really are to this object.
+ st_nlink = 1
+
+ # From <http://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
+ # We can't call the version in Twisted because we might have a version earlier than
+ # <http://twistedmatrix.com/trac/changeset/25412> (released in Twisted 8.2).
+
+ mode = st_mode
+ perms = array.array('c', '-'*10)
+ ft = stat.S_IFMT(mode)
+ if stat.S_ISDIR(ft): perms[0] = 'd'
+ elif stat.S_ISCHR(ft): perms[0] = 'c'
+ elif stat.S_ISBLK(ft): perms[0] = 'b'
+ elif stat.S_ISREG(ft): perms[0] = '-'
+ elif stat.S_ISFIFO(ft): perms[0] = 'f'
+ elif stat.S_ISLNK(ft): perms[0] = 'l'
+ elif stat.S_ISSOCK(ft): perms[0] = 's'
+ else: perms[0] = '?'
+ # user
+ if mode&stat.S_IRUSR:perms[1] = 'r'
+ if mode&stat.S_IWUSR:perms[2] = 'w'
+ if mode&stat.S_IXUSR:perms[3] = 'x'
+ # group
+ if mode&stat.S_IRGRP:perms[4] = 'r'
+ if mode&stat.S_IWGRP:perms[5] = 'w'
+ if mode&stat.S_IXGRP:perms[6] = 'x'
+ # other
+ if mode&stat.S_IROTH:perms[7] = 'r'
+ if mode&stat.S_IWOTH:perms[8] = 'w'
+ if mode&stat.S_IXOTH:perms[9] = 'x'
+ # suid/sgid never set
+
+ l = perms.tostring()
+ l += str(st_nlink).rjust(5) + ' '
+ un = str(st_uid)
+ l += un.ljust(9)
+ gr = str(st_gid)
+ l += gr.ljust(9)
+ sz = str(st_size)
+ l += sz.rjust(8)
+ l += ' '
+ sixmo = 60 * 60 * 24 * 7 * 26
+ if st_mtime + sixmo < time(): # last edited more than 6mo ago
+ l += strftime("%b %d %Y ", localtime(st_mtime))
+ else:
+ l += strftime("%b %d %H:%M ", localtime(st_mtime))
+ l += name
+ return l
+
def _populate_attrs(childnode, metadata, writeable, size=None):
attrs = {}
return attrs
-
-class EncryptedTemporaryFile:
+class EncryptedTemporaryFile(PrefixingLogMixin):
# not implemented: next, readline, readlines, xreadlines, writelines
+ def __init__(self):
+ PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
+ self.file = tempfile.TemporaryFile()
+ self.key = os.urandom(16) # AES-128
+
def _crypt(self, offset, data):
# FIXME: use random-access AES (pycryptopp ticket #18)
offset_big = offset // 16\r
cipher.process("\x00"*offset_small)\r
return cipher.process(data)\r
- def __init__(self):
- self.file = tempfile.TemporaryFile()
- self.key = os.urandom(16) # AES-128
-
def close(self):
self.file.close()
self.file.flush()
def seek(self, offset, whence=os.SEEK_SET):
- if debug: print ".seek(%r, %r)" % (offset, whence)
+ if noisy: self.log(".seek(%r, %r)" % (offset, whence), level=NOISY)
self.file.seek(offset, whence)
def tell(self):
offset = self.file.tell()
- if debug: print ".offset = %r" % (offset,)
+ if noisy: self.log(".tell() = %r" % (offset,), level=NOISY)
return offset
def read(self, size=-1):
- if debug: print ".read(%r)" % (size,)
+ if noisy: self.log(".read(%r)" % (size,), level=NOISY)
index = self.file.tell()
ciphertext = self.file.read(size)
plaintext = self._crypt(index, ciphertext)
return plaintext
def write(self, plaintext):
- if debug: print ".write(%r)" % (plaintext,)
+ if noisy: self.log(".write(%r)" % (plaintext,), level=NOISY)
index = self.file.tell()
ciphertext = self._crypt(index, plaintext)
self.file.write(ciphertext)
def truncate(self, newsize):
- if debug: print ".truncate(%r)" % (newsize,)
+ if noisy: self.log(".truncate(%r)" % (newsize,), level=NOISY)
self.file.truncate(newsize)
-class OverwriteableFileConsumer:
+class OverwriteableFileConsumer(PrefixingLogMixin):
implements(IFinishableConsumer)
"""I act both as a consumer for the download of the original file contents, and as a
wrapper for a temporary file that records the downloaded data and any overwrites.
useful for other frontends."""
def __init__(self, check_abort, download_size, tempfile_maker):
+ PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
self.check_abort = check_abort
self.download_size = download_size
self.current_size = download_size
return self.current_size
def set_current_size(self, size):
- if debug: print "set_current_size(%r), current_size = %r, downloaded = %r" % (size, self.current_size, self.downloaded)
+ if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" %
+ (size, self.current_size, self.downloaded), level=NOISY)
if size < self.current_size or size < self.downloaded:
self.f.truncate(size)
self.current_size = size
p.resumeProducing()
def write(self, data):
- if debug: print "write(%r)" % (data,)
+ if noisy: self.log(".write(%r)" % (data,), level=NOISY)
if self.check_abort():
self.close()
return
(next, d) = self.milestones[0]
if next > milestone:
return
- if debug: print "MILESTONE %r %r" % (next, d)
+ if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
heapq.heappop(self.milestones)
eventually_callback(d)(None)
self.finish()
def overwrite(self, offset, data):
- if debug: print "overwrite(%r, %r)" % (offset, data)
+ if noisy: self.log(".overwrite(%r, %r)" % (offset, data), level=NOISY)
if offset > self.download_size and offset > self.current_size:
# Normally writing at an offset beyond the current end-of-file
# would leave a hole that appears filled with zeroes. However, an
Otherwise errback the Deferred that we return.
The caller must perform no more overwrites until the Deferred has fired."""
- if debug: print "read(%r, %r), current_size = %r" % (offset, length, self.current_size)
+ if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
if offset >= self.current_size:
def _eof(): raise EOFError("read past end of file")
return defer.execute(_eof)
# then extended.
assert self.current_size >= offset + length, (self.current_size, offset, length)
- if debug: print "!!! %r" % (self.f,)
+ if noisy: self.log("self.f = %r" % (self.f,), level=NOISY)
self.f.seek(offset)
return self.f.read(length)
d.addCallback(_reached)
return d
def when_reached(self, index):
- if debug: print "when_reached(%r)" % (index,)
+ if noisy: self.log(".when_reached(%r)" % (index,), level=NOISY)
if index <= self.downloaded: # already reached
- if debug: print "already reached %r" % (index,)
+ if noisy: self.log("already reached %r" % (index,), level=NOISY)
return defer.succeed(None)
d = defer.Deferred()
def _reached(ign):
- if debug: print "reached %r" % (index,)
+ if noisy: self.log("reached %r" % (index,), level=NOISY)
return ign
d.addCallback(_reached)
heapq.heappush(self.milestones, (index, d))
def finish(self):
while len(self.milestones) > 0:
(next, d) = self.milestones[0]
- if debug: print "MILESTONE FINISH %r %r" % (next, d)
+ if noisy: self.log("MILESTONE FINISH %r %r" % (next, d), level=NOISY)
heapq.heappop(self.milestones)
# The callback means that the milestone has been reached if
# it is ever going to be. Note that the file may have been
SIZE_THRESHOLD = 1000
def _make_sftp_file(check_abort, flags, convergence, parent=None, childname=None, filenode=None, metadata=None):
+ if noisy: logmsg("_make_sftp_file(%r, %r, %r, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
+ (check_abort, flags, convergence, parent, childname, filenode, metadata), level=NOISY)
+
if not (flags & (FXF_WRITE | FXF_CREAT)) and (flags & FXF_READ) and filenode and \
not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD:
return ShortReadOnlySFTPFile(filenode, metadata)
parent=parent, childname=childname, filenode=filenode, metadata=metadata)
-class ShortReadOnlySFTPFile:
+class ShortReadOnlySFTPFile(PrefixingLogMixin):
implements(ISFTPFile)
"""I represent a file handle to a particular file on an SFTP connection.
I am used only for short immutable files opened in read-only mode.
The file contents are downloaded to memory when I am created."""
def __init__(self, filenode, metadata):
+ PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
+ if noisy: self.log(".__init__(%r, %r)" % (filenode, metadata), level=NOISY)
+
assert IFileNode.providedBy(filenode), filenode
self.filenode = filenode
self.metadata = metadata
self.closed = False
def readChunk(self, offset, length):
+ self.log(".readChunk(%r, %r)" % (offset, length), level=OPERATIONAL)
+
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
return defer.execute(_closed)
d = defer.Deferred()
def _read(data):
- if debug: print "_read(%r) in readChunk(%r, %r)" % (data, offset, length)
+ if noisy: self.log("_read(%r) in readChunk(%r, %r)" % (data, offset, length), level=NOISY)
# "In response to this request, the server will read as many bytes as it\r
# can from the file (up to 'len'), and return them in a SSH_FXP_DATA\r
return d
def writeChunk(self, offset, data):
+ self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
+
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
return defer.execute(_denied)
def close(self):
+ self.log(".close()", level=OPERATIONAL)
+
self.closed = True
return defer.succeed(None)
def getAttrs(self):
- if debug: print "GETATTRS(file)"
+ self.log(".getAttrs()", level=OPERATIONAL)
+
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
return defer.execute(_closed)
return defer.succeed(_populate_attrs(self.filenode, self.metadata, False))
def setAttrs(self, attrs):
- if debug: print "SETATTRS(file) %r" % (attrs,)
+ self.log(".setAttrs(%r)" % (attrs,), level=OPERATIONAL)
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
return defer.execute(_denied)
-class GeneralSFTPFile:
+class GeneralSFTPFile(PrefixingLogMixin):
implements(ISFTPFile)
"""I represent a file handle to a particular file on an SFTP connection.
I wrap an instance of OverwriteableFileConsumer, which is responsible for
implemented by the callback chain of self.async."""
def __init__(self, check_abort, flags, convergence, parent=None, childname=None, filenode=None, metadata=None):
+ PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
+ if noisy: self.log(".__init__(%r, %r, %r, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
+ (check_abort, flags, convergence, parent, childname, filenode, metadata), level=NOISY)
+
self.check_abort = check_abort
self.flags = flags
self.convergence = convergence
# self.consumer should only be relied on in callbacks for self.async, since it might
# not be set before then.
self.consumer = None
+ tempfile_maker = EncryptedTemporaryFile
if (flags & FXF_TRUNC) or not filenode:
# We're either truncating or creating the file, so we don't need the old contents.
assert flags & FXF_CREAT, flags
- self.consumer = OverwriteableFileConsumer(self.check_abort, 0,
- tempfile_maker=EncryptedTemporaryFile)
+ self.consumer = OverwriteableFileConsumer(self.check_abort, 0, tempfile_maker)
self.consumer.finish()
else:
assert IFileNode.providedBy(filenode), filenode
if filenode.is_mutable():
self.async.addCallback(lambda ign: filenode.download_best_version())
def _downloaded(data):
- self.consumer = OverwriteableFileConsumer(self.check_abort, len(data),
- tempfile_maker=tempfile.TemporaryFile)
+ self.consumer = OverwriteableFileConsumer(self.check_abort, len(data), tempfile_maker)
self.consumer.write(data)
self.consumer.finish()
return None
else:
download_size = filenode.get_size()
assert download_size is not None
- self.consumer = OverwriteableFileConsumer(self.check_abort, download_size,
- tempfile_maker=tempfile.TemporaryFile)
+ self.consumer = OverwriteableFileConsumer(self.check_abort, download_size, tempfile_maker)
self.async.addCallback(lambda ign: filenode.read(self.consumer, 0, None))
-
def readChunk(self, offset, length):
+ self.log(".readChunk(%r, %r)" % (offset, length), level=OPERATIONAL)
+
if not (self.flags & FXF_READ):
- return defer.fail(SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading"))
+ def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
+ return defer.execute(_denied)
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
d = defer.Deferred()
def _read(ign):
- if debug: print "_read in readChunk(%r, %r)" % (offset, length)
+ if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY)
d2 = self.consumer.read(offset, length)
d2.addErrback(_raise_error)
d2.addCallbacks(eventually_callback(d), eventually_errback(d))
return d
def writeChunk(self, offset, data):
- if debug: print "writeChunk(%r, %r)" % (offset, data)
+ self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
+
if not (self.flags & FXF_WRITE):
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
return defer.execute(_denied)
return defer.succeed(None)
def close(self):
+ self.log(".close()", level=OPERATIONAL)
+
if self.closed:
return defer.succeed(None)
d2.addCallback(lambda ign: self.consumer.get_current_size())
d2.addCallback(lambda size: self.consumer.read(0, size))
d2.addCallback(lambda new_contents: self.filenode.overwrite(new_contents))
+ #elif (self.flags & FXF_EXCL) and self.consumer.get_current_size() == 0:
+ # # The file will already have been written by the open call, so we can
+ # # optimize out the extra directory write (useful for zero-length lockfiles).
+ # pass
else:
def _add_file(ign):
+ self.log("_add_file childname=%r" % (self.childname,), level=OPERATIONAL)
u = FileHandle(self.consumer.get_file(), self.convergence)
return self.parent.add_file(self.childname, u)
d2.addCallback(_add_file)
return d
def getAttrs(self):
- if debug: print "GETATTRS(file)"
+ self.log(".getAttrs()", level=OPERATIONAL)
if self.closed:
def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
return d
def setAttrs(self, attrs):
- if debug: print "SETATTRS(file) %r" % (attrs,)
+ self.log(".setAttrs(%r)" % (attrs,), level=OPERATIONAL)
+
if not (self.flags & FXF_WRITE):
def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
return defer.execute(_denied)
self.async.addCallbacks(_resize, eventually_errback(d))
return d
-class SFTPUser(ConchUser):
- def __init__(self, check_abort, client, rootnode, username, convergence):
- ConchUser.__init__(self)
- self.channelLookup["session"] = session.SSHSession
- self.subsystemLookup["sftp"] = FileTransferServer
-
- self.check_abort = check_abort
- self.client = client
- self.root = rootnode
- self.username = username
- self.convergence = convergence
-
class StoppableList:
def __init__(self, items):
self.items = items
def close(self):
pass
-import array
-import stat
-
-from time import time, strftime, localtime
-
-def lsLine(name, attrs):
- st_uid = "tahoe"
- st_gid = "tahoe"
- st_mtime = attrs.get("mtime", 0)
- st_mode = attrs["permissions"]
- # TODO: check that clients are okay with this being a "?".
- # (They should be because the longname is intended for human
- # consumption.)
- st_size = attrs.get("size", "?")
- # We don't know how many links there really are to this object.
- st_nlink = 1
-
- # From <http://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
- # We can't call the version in Twisted because we might have a version earlier than
- # <http://twistedmatrix.com/trac/changeset/25412> (released in Twisted 8.2).
-
- mode = st_mode
- perms = array.array('c', '-'*10)
- ft = stat.S_IFMT(mode)
- if stat.S_ISDIR(ft): perms[0] = 'd'
- elif stat.S_ISCHR(ft): perms[0] = 'c'
- elif stat.S_ISBLK(ft): perms[0] = 'b'
- elif stat.S_ISREG(ft): perms[0] = '-'
- elif stat.S_ISFIFO(ft): perms[0] = 'f'
- elif stat.S_ISLNK(ft): perms[0] = 'l'
- elif stat.S_ISSOCK(ft): perms[0] = 's'
- else: perms[0] = '?'
- # user
- if mode&stat.S_IRUSR:perms[1] = 'r'
- if mode&stat.S_IWUSR:perms[2] = 'w'
- if mode&stat.S_IXUSR:perms[3] = 'x'
- # group
- if mode&stat.S_IRGRP:perms[4] = 'r'
- if mode&stat.S_IWGRP:perms[5] = 'w'
- if mode&stat.S_IXGRP:perms[6] = 'x'
- # other
- if mode&stat.S_IROTH:perms[7] = 'r'
- if mode&stat.S_IWOTH:perms[8] = 'w'
- if mode&stat.S_IXOTH:perms[9] = 'x'
- # suid/sgid never set
-
- l = perms.tostring()
- l += str(st_nlink).rjust(5) + ' '
- un = str(st_uid)
- l += un.ljust(9)
- gr = str(st_gid)
- l += gr.ljust(9)
- sz = str(st_size)
- l += sz.rjust(8)
- l += ' '
- sixmo = 60 * 60 * 24 * 7 * 26
- if st_mtime + sixmo < time(): # last edited more than 6mo ago
- l += strftime("%b %d %Y ", localtime(st_mtime))
- else:
- l += strftime("%b %d %H:%M ", localtime(st_mtime))
- l += name
- return l
-
-class SFTPHandler:
+class SFTPHandler(PrefixingLogMixin):
implements(ISFTPServer)
def __init__(self, user):
- if debug: print "Creating SFTPHandler from", user
+ PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
+ if noisy: self.log(".__init__(%r)" % (user,), level=NOISY)
+
self.check_abort = user.check_abort
self.client = user.client
self.root = user.root
self.convergence = user.convergence
def gotVersion(self, otherVersion, extData):
- if debug: print "GOTVERSION %r %r" % (otherVersion, extData)
+ self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL)
return {}
def openFile(self, pathstring, flags, attrs):
- if debug: print "OPENFILE %r %r %r %r" % (pathstring, flags, _repr_flags(flags), attrs)
- # this is used for both reading and writing.
+ self.log(".openFile(%r, %r = %r, %r)" % (pathstring, flags, _repr_flags(flags), attrs), level=OPERATIONAL)
+ # This is used for both reading and writing.
# First exclude invalid combinations of flags.
# /usr/bin/sftp 'get' gives us FXF_READ, while 'put' on a new file
"Upgrading the gateway to a later Tahoe-LAFS version may help")
if not path:
# case 1
+ if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
if not IFileNode.providedBy(root):
raise SFTPError(FX_PERMISSION_DENIED,
"cannot open a directory cap")
else:
# case 2
childname = path[-1]
- if debug: print "case 2: childname = %r, path[:-1] = %r" % (childname, path[:-1])
+ if noisy: self.log("case 2: root = %r, childname = %r, path[:-1] = %r" %
+ (root, childname, path[:-1]), level=NOISY)
d2 = root.get_child_at_path(path[:-1])
def _got_parent(parent):
- if debug: print "_got_parent(%r)" % (parent,)
+ if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
stash['parent'] = parent
if flags & FXF_EXCL:
# 'overwrite=False' ensures failure if the link already exists.
# FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
- d3 = parent.set_uri(childname, None, "URI:LIT:", overwrite=False)
+ zero_length_lit = "URI:LIT:"
+ d3 = parent.set_uri(childname, None, zero_length_lit, overwrite=False)
def _seturi_done(child):
stash['child'] = child
return parent.get_metadata_for(childname)
d3.addCallback(lambda metadata: (stash['child'], metadata))
return d3
else:
- if debug: print "get_child_and_metadata"
+ if noisy: self.log("get_child_and_metadata(%r)" % (childname,), level=NOISY)
return parent.get_child_and_metadata(childname)
d2.addCallback(_got_parent)
def _got_child( (filenode, metadata) ):
- if debug: print "_got_child((%r, %r))" % (filenode, metadata)
+ if noisy: self.log("_got_child( (%r, %r) )" % (filenode, metadata), level=NOISY)
parent = stash['parent']
if filenode.is_unknown():
raise SFTPError(FX_PERMISSION_DENIED,
return _make_sftp_file(self.check_abort, flags, self.convergence, parent=parent,
childname=childname, filenode=filenode, metadata=metadata)
def _no_child(f):
- if debug: print "_no_child(%r)" % (f,)
+ if noisy: self.log("_no_child(%r)" % (f,), level=NOISY)
f.trap(NoSuchChildError)
parent = stash['parent']
if parent is None:
return d
def removeFile(self, pathstring):
- if debug: print "REMOVEFILE %r" % (pathstring,)
+ self.log(".removeFile(%r)" % (pathstring,), level=OPERATIONAL)
+
path = self._path_from_string(pathstring)
return self._remove_object(path, must_be_file=True)
def renameFile(self, oldpathstring, newpathstring):
- if debug: print "RENAMEFILE %r %r" % (oldpathstring, newpathstring)
+ self.log(".renameFile(%r, %r)" % (oldpathstring, newpathstring), level=OPERATIONAL)
+
fromPath = self._path_from_string(oldpathstring)
toPath = self._path_from_string(newpathstring)
d = deferredutil.gatherResults([self._get_parent(fromPath),
self._get_parent(toPath)])
def _got( (fromPair, toPair) ):
+ if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r)" %
+ (fromPair, toPair, oldpathstring, newpathstring), level=NOISY)
(fromParent, fromChildname) = fromPair
(toParent, toChildname) = toPair
return d
def makeDirectory(self, pathstring, attrs):
- if debug: print "MAKEDIRECTORY %r %r" % (pathstring, attrs)
+ self.log(".makeDirectory(%r, %r)" % (pathstring, attrs), level=OPERATIONAL)
+
path = self._path_from_string(pathstring)
metadata = self._attrs_to_metadata(attrs)
d = self._get_root(path)
- d.addCallback(lambda (root,path):
+ d.addCallback(lambda (root, path):
self._get_or_create_directories(root, path, metadata))
d.addErrback(_raise_error)
return d
return d
def removeDirectory(self, pathstring):
- if debug: print "REMOVEDIRECTORY %r" % (pathstring,)
+ self.log(".removeDirectory(%r)" % (pathstring,), level=OPERATIONAL)
+
path = self._path_from_string(pathstring)
return self._remove_object(path, must_be_directory=True)
return d
def openDirectory(self, pathstring):
- if debug: print "OPENDIRECTORY %r" % (pathstring,)
+ self.log(".openDirectory(%r)" % (pathstring,), level=OPERATIONAL)
+
path = self._path_from_string(pathstring)
- if debug: print " PATH %r" % (path,)
d = self._get_node_and_metadata_for_path(path)
def _list( (dirnode, metadata) ):
if dirnode.is_unknown():
not (node.is_mutable() and node.is_readonly()))
attrs = _populate_attrs(node, metadata, writeable)
filename_utf8 = filename.encode('utf-8')
- longname = lsLine(filename_utf8, attrs)
+ longname = _lsLine(filename_utf8, attrs)
results.append( (filename_utf8, longname, attrs) )
return StoppableList(results)
d2.addCallback(_render)
return d
def getAttrs(self, pathstring, followLinks):
- if debug: print "GETATTRS %r %r" % (pathstring, followLinks)
+ self.log(".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks), level=OPERATIONAL)
+
d = self._get_node_and_metadata_for_path(self._path_from_string(pathstring))
def _render( (node, metadata) ):
# When asked about a specific file, report its current size.
return d2
d.addCallback(_render)
d.addErrback(_raise_error)
- def _done(res):
- if debug: print " DONE %r" % (res,)
- return res
- d.addBoth(_done)
return d
def setAttrs(self, pathstring, attrs):
- if debug: print "SETATTRS %r %r" % (pathstring, attrs)
+ self.log(".setAttrs(%r, %r)" % (pathstring, attrs), level=OPERATIONAL)
+
if "size" in attrs:
# this would require us to download and re-upload the truncated/extended
# file contents
return None
def readLink(self, pathstring):
- if debug: print "READLINK %r" % (pathstring,)
+ self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)
+
raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
def makeLink(self, linkPathstring, targetPathstring):
- if debug: print "MAKELINK %r %r" % (linkPathstring, targetPathstring)
+ self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL)
+
raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
def extendedRequest(self, extendedName, extendedData):
- if debug: print "EXTENDEDREQUEST %r %r" % (extendedName, extendedData)
- # Client 'df' command requires 'statvfs@openssh.com' extension
- # (but there's little point to implementing that since we would only
- # have faked values to report).
+ self.log(".extendedRequest(%r, %r)" % (extendedName, extendedData), level=OPERATIONAL)
+
+ # A client 'df' command requires the 'statvfs@openssh.com' extension,
+ # but there's little point to implementing that since we would only
+ # have faked values to report.
raise SFTPError(FX_OP_UNSUPPORTED, "extendedRequest %r" % extendedName)
def realPath(self, pathstring):
- if debug: print "REALPATH %r" % (pathstring,)
- return "/" + "/".join(self._path_from_string(pathstring))
+ self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL)
+
+ path_utf8 = [p.encode('utf-8') for p in self._path_from_string(pathstring)]
+ return "/" + "/".join(path_utf8)
def _path_from_string(self, pathstring):
- if debug: print "CONVERT %r" % (pathstring,)
+ if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)
# The home directory is the root directory.
pathstring = pathstring.strip("/")
raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
path.append(p)
- if debug: print " PATH %r" % (path,)
+ if noisy: self.log(" PATH %r" % (path,), level=NOISY)
return path
def _get_node_and_metadata_for_path(self, path):
d = self._get_root(path)
def _got_root( (root, path) ):
- if debug: print " ROOT %r" % (root,)
- if debug: print " PATH %r" % (path,)
+ if noisy: self.log("_got_root( (%r, %r) )" % (root, path), level=NOISY)
if path:
return root.get_child_and_metadata_at_path(path)
else:
- return (root,{})
+ return (root, {})
d.addCallback(_got_root)
return d
def _get_root(self, path):
# return (root, remaining_path)
if path and path[0] == u"uri":
- d = defer.maybeDeferred(self.client.create_node_from_uri,
- str(path[1]))
+ d = defer.maybeDeferred(self.client.create_node_from_uri, path[1].encode('utf-8'))
d.addCallback(lambda root: (root, path[2:]))
else:
- d = defer.succeed((self.root,path))
+ d = defer.succeed((self.root, path))
return d
def _get_parent(self, path):
return metadata
+class SFTPUser(ConchUser):
+ def __init__(self, check_abort, client, rootnode, username, convergence):
+ ConchUser.__init__(self)
+ self.channelLookup["session"] = session.SSHSession
+ self.subsystemLookup["sftp"] = FileTransferServer
+
+ self.check_abort = check_abort
+ self.client = client
+ self.root = rootnode
+ self.username = username
+ self.convergence = convergence
+
# if you have an SFTPUser, and you want something that provides ISFTPServer,
# then you get SFTPHandler(user)
components.registerAdapter(SFTPHandler, SFTPUser, ISFTPServer)
s = strports.service(sftp_portstr, f)
s.setServiceParent(self)
-
#from twisted.internet.base import DelayedCall
#DelayedCall.debug = True
-import sys, traceback\r
-\r
-"""\r
-def trace_exceptions(frame, event, arg):\r
- if event != 'exception':\r
- return\r
- co = frame.f_code\r
- func_name = co.co_name\r
- line_no = frame.f_lineno\r
- filename = co.co_filename\r
- exc_type, exc_value, exc_traceback = arg\r
- print 'Tracing exception: %r %r on line %r of %r in %r' % \\r
- (exc_type.__name__, exc_value, line_no, func_name, filename)\r
-\r
-def trace_calls(frame, event, arg):\r
- if event != 'call':\r
- return\r
- return trace_exceptions\r
+import traceback
+
+"""
+import sys
+def trace_exceptions(frame, event, arg):
+ if event != 'exception':
+ return
+ co = frame.f_code
+ func_name = co.co_name
+ line_no = frame.f_lineno
+ filename = co.co_filename
+ exc_type, exc_value, exc_traceback = arg
+ print 'Tracing exception: %r %r on line %r of %r in %r' % \
+ (exc_type.__name__, exc_value, line_no, func_name, filename)
+
+def trace_calls(frame, event, arg):
+ if event != 'call':
+ return
+ return trace_exceptions
sys.settrace(trace_calls)
"""
def _done(res):
if isinstance(res, Failure):
res.trap(sftp.SFTPError)
- self.failUnlessEqual(res.value.code, expected_code,
- "%s was supposed to raise SFTPError(%d), not SFTPError(%d): %s" %
- (which, expected_code, res.value.code, res))
+ self.failUnlessReallyEqual(res.value.code, expected_code,
+ "%s was supposed to raise SFTPError(%d), not SFTPError(%d): %s" %
+ (which, expected_code, res.value.code, res))
else:
print '@' + '@'.join(s)
self.fail("%s was supposed to raise SFTPError(%d), not get '%s'" %
d.addBoth(_done)
return d
+ def failUnlessReallyEqual(self, a, b, msg=None):
+ self.failUnlessEqual(a, b, msg=msg)
+ self.failUnlessEqual(type(a), type(b), msg=msg)
+
def _set_up(self, basedir, num_clients=1, num_servers=10):
self.basedir = "sftp/" + basedir
self.set_up_grid(num_clients=num_clients, num_servers=num_servers)
version = self.handler.gotVersion(3, {})
self.failUnless(isinstance(version, dict))
- self.failUnlessEqual(self.handler._path_from_string(""), [])
- self.failUnlessEqual(self.handler._path_from_string("/"), [])
- self.failUnlessEqual(self.handler._path_from_string("."), [])
- self.failUnlessEqual(self.handler._path_from_string("//"), [])
- self.failUnlessEqual(self.handler._path_from_string("/."), [])
- self.failUnlessEqual(self.handler._path_from_string("/./"), [])
- self.failUnlessEqual(self.handler._path_from_string("foo"), [u"foo"])
- self.failUnlessEqual(self.handler._path_from_string("/foo"), [u"foo"])
- self.failUnlessEqual(self.handler._path_from_string("foo/"), [u"foo"])
- self.failUnlessEqual(self.handler._path_from_string("/foo/"), [u"foo"])
- self.failUnlessEqual(self.handler._path_from_string("foo/bar"), [u"foo", u"bar"])
- self.failUnlessEqual(self.handler._path_from_string("/foo/bar"), [u"foo", u"bar"])
- self.failUnlessEqual(self.handler._path_from_string("foo/bar//"), [u"foo", u"bar"])
- self.failUnlessEqual(self.handler._path_from_string("/foo/bar//"), [u"foo", u"bar"])
- self.failUnlessEqual(self.handler._path_from_string("foo/../bar"), [u"bar"])
- self.failUnlessEqual(self.handler._path_from_string("/foo/../bar"), [u"bar"])
- self.failUnlessEqual(self.handler._path_from_string("../bar"), [u"bar"])
- self.failUnlessEqual(self.handler._path_from_string("/../bar"), [u"bar"])
-
- self.failUnlessEqual(self.handler.realPath(""), "/")
- self.failUnlessEqual(self.handler.realPath("/"), "/")
- self.failUnlessEqual(self.handler.realPath("."), "/")
- self.failUnlessEqual(self.handler.realPath("//"), "/")
- self.failUnlessEqual(self.handler.realPath("/."), "/")
- self.failUnlessEqual(self.handler.realPath("/./"), "/")
- self.failUnlessEqual(self.handler.realPath("foo"), "/foo")
- self.failUnlessEqual(self.handler.realPath("/foo"), "/foo")
- self.failUnlessEqual(self.handler.realPath("foo/"), "/foo")
- self.failUnlessEqual(self.handler.realPath("/foo/"), "/foo")
- self.failUnlessEqual(self.handler.realPath("foo/bar"), "/foo/bar")
- self.failUnlessEqual(self.handler.realPath("/foo/bar"), "/foo/bar")
- self.failUnlessEqual(self.handler.realPath("foo/bar//"), "/foo/bar")
- self.failUnlessEqual(self.handler.realPath("/foo/bar//"), "/foo/bar")
- self.failUnlessEqual(self.handler.realPath("foo/../bar"), "/bar")
- self.failUnlessEqual(self.handler.realPath("/foo/../bar"), "/bar")
- self.failUnlessEqual(self.handler.realPath("../bar"), "/bar")
- self.failUnlessEqual(self.handler.realPath("/../bar"), "/bar")
+ self.failUnlessReallyEqual(self.handler._path_from_string(""), [])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/"), [])
+ self.failUnlessReallyEqual(self.handler._path_from_string("."), [])
+ self.failUnlessReallyEqual(self.handler._path_from_string("//"), [])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/."), [])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/./"), [])
+ self.failUnlessReallyEqual(self.handler._path_from_string("foo"), [u"foo"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/foo"), [u"foo"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("foo/"), [u"foo"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/foo/"), [u"foo"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("foo/bar"), [u"foo", u"bar"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/foo/bar"), [u"foo", u"bar"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("foo/bar//"), [u"foo", u"bar"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/foo/bar//"), [u"foo", u"bar"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("foo/../bar"), [u"bar"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/foo/../bar"), [u"bar"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("../bar"), [u"bar"])
+ self.failUnlessReallyEqual(self.handler._path_from_string("/../bar"), [u"bar"])
+
+ self.failUnlessReallyEqual(self.handler.realPath(""), "/")
+ self.failUnlessReallyEqual(self.handler.realPath("/"), "/")
+ self.failUnlessReallyEqual(self.handler.realPath("."), "/")
+ self.failUnlessReallyEqual(self.handler.realPath("//"), "/")
+ self.failUnlessReallyEqual(self.handler.realPath("/."), "/")
+ self.failUnlessReallyEqual(self.handler.realPath("/./"), "/")
+ self.failUnlessReallyEqual(self.handler.realPath("foo"), "/foo")
+ self.failUnlessReallyEqual(self.handler.realPath("/foo"), "/foo")
+ self.failUnlessReallyEqual(self.handler.realPath("foo/"), "/foo")
+ self.failUnlessReallyEqual(self.handler.realPath("/foo/"), "/foo")
+ self.failUnlessReallyEqual(self.handler.realPath("foo/bar"), "/foo/bar")
+ self.failUnlessReallyEqual(self.handler.realPath("/foo/bar"), "/foo/bar")
+ self.failUnlessReallyEqual(self.handler.realPath("foo/bar//"), "/foo/bar")
+ self.failUnlessReallyEqual(self.handler.realPath("/foo/bar//"), "/foo/bar")
+ self.failUnlessReallyEqual(self.handler.realPath("foo/../bar"), "/bar")
+ self.failUnlessReallyEqual(self.handler.realPath("/foo/../bar"), "/bar")
+ self.failUnlessReallyEqual(self.handler.realPath("../bar"), "/bar")
+ self.failUnlessReallyEqual(self.handler.realPath("/../bar"), "/bar")
d.addCallback(_check)
d.addCallback(lambda ign:
return d
def test_raise_error(self):
- self.failUnlessEqual(sftpd._raise_error(None), None)
+ self.failUnlessReallyEqual(sftpd._raise_error(None), None)
d = defer.succeed(None)
d.addCallback(lambda ign:
def _compareDirLists(self, actual, expected):
actual_list = sorted(actual)
expected_list = sorted(expected)
- self.failUnlessEqual(len(actual_list), len(expected_list),
+ self.failUnlessReallyEqual(len(actual_list), len(expected_list),
"%r is wrong length, expecting %r" % (actual_list, expected_list))
for (a, b) in zip(actual_list, expected_list):
(name, text, attrs) = a
(expected_name, expected_text_re, expected_attrs) = b
- self.failUnlessEqual(name, expected_name)
+ self.failUnlessReallyEqual(name, expected_name)
self.failUnless(re.match(expected_text_re, text), "%r does not match %r" % (text, expected_text_re))
# it is ok for there to be extra actual attributes
# TODO: check times
for e in expected_attrs:
- self.failUnlessEqual(attrs[e], expected_attrs[e])
+ self.failUnlessReallyEqual(attrs[e], expected_attrs[e])
def test_openDirectory_and_attrs(self):
- d = self._set_up("openDirectory")
+ d = self._set_up("openDirectory_and_attrs")
d.addCallback(lambda ign: self._set_up_tree())
d.addCallback(lambda ign:
self.handler.openDirectory, "nodir/nodir"))
gross = u"gro\u00DF".encode("utf-8")
- expected_root = [\r
- ('empty_lit_dir', r'drwxrwx--- .* \? .* empty_lit_dir$', {'permissions': S_IFDIR | 0770}),\r
- (gross, r'-rw-rw---- .* 1010 .* '+gross+'$', {'permissions': S_IFREG | 0660, 'size': 1010}),\r
- ('loop', r'drwxrwx--- .* \? .* loop$', {'permissions': S_IFDIR | 0770}),\r
- ('mutable', r'-rw-rw---- .* \? .* mutable$', {'permissions': S_IFREG | 0660}),\r
- ('readonly', r'-r--r----- .* \? .* readonly$', {'permissions': S_IFREG | 0440}),\r
- ('small', r'-rw-rw---- .* 10 .* small$', {'permissions': S_IFREG | 0660, 'size': 10}),\r
- ('small2', r'-rw-rw---- .* 26 .* small2$', {'permissions': S_IFREG | 0660, 'size': 26}),\r
+ expected_root = [
+ ('empty_lit_dir', r'drwxrwx--- .* \? .* empty_lit_dir$', {'permissions': S_IFDIR | 0770}),
+ (gross, r'-rw-rw---- .* 1010 .* '+gross+'$', {'permissions': S_IFREG | 0660, 'size': 1010}),
+ ('loop', r'drwxrwx--- .* \? .* loop$', {'permissions': S_IFDIR | 0770}),
+ ('mutable', r'-rw-rw---- .* \? .* mutable$', {'permissions': S_IFREG | 0660}),
+ ('readonly', r'-r--r----- .* \? .* readonly$', {'permissions': S_IFREG | 0440}),
+ ('small', r'-rw-rw---- .* 10 .* small$', {'permissions': S_IFREG | 0660, 'size': 10}),
+ ('small2', r'-rw-rw---- .* 26 .* small2$', {'permissions': S_IFREG | 0660, 'size': 26}),
('tiny_lit_dir', r'drwxrwx--- .* \? .* tiny_lit_dir$', {'permissions': S_IFDIR | 0770}),
('unknown', r'\?--------- .* \? .* unknown$', {'permissions': 0}),
]
d.addCallback(lambda ign: self.handler.getAttrs("small", True))
def _check_attrs(attrs):
- self.failUnlessEqual(attrs['permissions'], S_IFREG | 0440) #FIXME
- self.failUnlessEqual(attrs['size'], 10)
+ self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0440) #FIXME
+ self.failUnlessReallyEqual(attrs['size'], 10)
d.addCallback(_check_attrs)
d.addCallback(lambda ign:
- self.failUnlessEqual(self.handler.setAttrs("small", {}), None))
+ self.failUnlessReallyEqual(self.handler.setAttrs("small", {}), None))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_OP_UNSUPPORTED, "setAttrs size",
return d
def test_openFile_read(self):
- d = self._set_up("openFile")
+ d = self._set_up("openFile_read")
d.addCallback(lambda ign: self._set_up_tree())
d.addCallback(lambda ign:
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile tiny_lit_dir uri READ denied",
self.handler.openFile, "uri/"+self.tiny_lit_dir_uri, sftp.FXF_READ, {}))
+ # FIXME: should be FX_NO_SUCH_FILE?
+ d.addCallback(lambda ign:
+ self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "openFile noexist uri READ denied",
+ self.handler.openFile, "uri/URI:noexist", sftp.FXF_READ, {}))
+ d.addCallback(lambda ign:
+ self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile invalid UTF-8 uri READ denied",
+ self.handler.openFile, "uri/URI:\xFF", sftp.FXF_READ, {}))
# reading an existing file should succeed
d.addCallback(lambda ign: self.handler.openFile("small", sftp.FXF_READ, {}))
def _read_small(rf):
d2 = rf.readChunk(0, 10)
- d2.addCallback(lambda data: self.failUnlessEqual(data, "0123456789"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.readChunk(2, 6))
- d2.addCallback(lambda data: self.failUnlessEqual(data, "234567"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "234567"))
d2.addCallback(lambda ign: rf.readChunk(8, 4)) # read that starts before EOF is OK
- d2.addCallback(lambda data: self.failUnlessEqual(data, "89"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "89"))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)",
d2.addCallback(lambda ign: rf.getAttrs())
def _check_attrs(attrs):
- self.failUnlessEqual(attrs['permissions'], S_IFREG | 0440) #FIXME
- self.failUnlessEqual(attrs['size'], 10)
+ self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0440) #FIXME
+ self.failUnlessReallyEqual(attrs['size'], 10)
d2.addCallback(_check_attrs)
d2.addCallback(lambda ign:
d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ, {}))
def _read_gross(rf):
d2 = rf.readChunk(0, 10)
- d2.addCallback(lambda data: self.failUnlessEqual(data, "0123456789"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.readChunk(2, 6))
- d2.addCallback(lambda data: self.failUnlessEqual(data, "234567"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "234567"))
d2.addCallback(lambda ign: rf.readChunk(1008, 4)) # read that starts before EOF is OK
- d2.addCallback(lambda data: self.failUnlessEqual(data, "89"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "89"))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_EOF, "readChunk starting at EOF (0-byte)",
d2.addCallback(lambda ign: rf.getAttrs())
def _check_attrs(attrs):
- self.failUnlessEqual(attrs['permissions'], S_IFREG | 0440) #FIXME
- self.failUnlessEqual(attrs['size'], 1010)
+ self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0440) #FIXME
+ self.failUnlessReallyEqual(attrs['size'], 1010)
d2.addCallback(_check_attrs)
d2.addCallback(lambda ign:
d.addCallback(lambda ign: self.handler.openFile("uri/"+self.small_uri, sftp.FXF_READ, {}))
def _read_small_uri(rf):
d2 = rf.readChunk(0, 10)
- d2.addCallback(lambda data: self.failUnlessEqual(data, "0123456789"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_read_small_uri)
d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {}))
def _read_gross_uri(rf):
d2 = rf.readChunk(0, 10)
- d2.addCallback(lambda data: self.failUnlessEqual(data, "0123456789"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_read_gross_uri)
d.addCallback(lambda ign: self.handler.openFile("uri/"+self.mutable_uri, sftp.FXF_READ, {}))
def _read_mutable_uri(rf):
d2 = rf.readChunk(0, 100)
- d2.addCallback(lambda data: self.failUnlessEqual(data, "mutable file contents"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable file contents"))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_read_mutable_uri)
+ # repeat for a file within a directory referenced by URI
+ d.addCallback(lambda ign: self.handler.openFile("uri/"+self.tiny_lit_dir_uri+"/short", sftp.FXF_READ, {}))
+ def _read_short(rf):
+ d2 = rf.readChunk(0, 100)
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "The end."))
+ d2.addCallback(lambda ign: rf.close())
+ return d2
+ d.addCallback(_read_short)
+
return d
def test_openFile_write(self):
- d = self._set_up("openFile")
+ d = self._set_up("openFile_write")
d.addCallback(lambda ign: self._set_up_tree())
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_NO_SUCH_FILE, "openFile '' WRITE|CREAT|TRUNC",
- self.handler.openFile, "", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {}))
+ self.handler.openFile, "", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {}))
d.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_BAD_MESSAGE, "openFile newfile WRITE|TRUNC",
self.handler.openFile, "newfile", sftp.FXF_WRITE | sftp.FXF_TRUNC, {}))
self.handler.openFile("newfile", sftp.FXF_WRITE | sftp.FXF_CREAT | sftp.FXF_TRUNC, {}))
def _write(wf):
d2 = wf.writeChunk(0, "0123456789")
- d2.addCallback(lambda res: self.failUnlessEqual(res, None))
+ d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None))
d2.addCallback(lambda ign: wf.writeChunk(8, "0123"))
d2.addCallback(lambda ign: wf.writeChunk(13, "abc"))
d2.addCallback(lambda ign: wf.getAttrs())
def _check_attrs(attrs):
- self.failUnlessEqual(attrs['permissions'], S_IFREG | 0440) #FIXME
- self.failUnlessEqual(attrs['size'], 16)
+ self.failUnlessReallyEqual(attrs['permissions'], S_IFREG | 0440) #FIXME
+ self.failUnlessReallyEqual(attrs['size'], 16)
d2.addCallback(_check_attrs)
d2.addCallback(lambda ign: wf.setAttrs({}))
d2.addCallback(lambda ign: wf.setAttrs({'size': 14}))
d2.addCallback(lambda ign: wf.getAttrs())
- d2.addCallback(lambda attrs: self.failUnlessEqual(attrs['size'], 14))
+ d2.addCallback(lambda attrs: self.failUnlessReallyEqual(attrs['size'], 14))
d2.addCallback(lambda ign:
self.shouldFailWithSFTPError(sftp.FX_PERMISSION_DENIED, "readChunk on write-only handle denied",
d.addCallback(_write)
d.addCallback(lambda ign: self.root.get(u"newfile"))
d.addCallback(lambda node: download_to_data(node))
- d.addCallback(lambda data: self.failUnlessEqual(data, "012345670123\x00a"))
+ d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123\x00a"))
# test APPEND flag, and also replacing an existing file ("newfile")
d.addCallback(lambda ign:
d.addCallback(_write_append)
d.addCallback(lambda ign: self.root.get(u"newfile"))
d.addCallback(lambda node: download_to_data(node))
- d.addCallback(lambda data: self.failUnlessEqual(data, "01234567890123"))
+ d.addCallback(lambda data: self.failUnlessReallyEqual(data, "01234567890123"))
# test EXCL flag
d.addCallback(lambda ign:
def _write_excl(wf):
d2 = self.root.get(u"excl")
d2.addCallback(lambda node: download_to_data(node))
- d2.addCallback(lambda data: self.failUnlessEqual(data, ""))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
d2.addCallback(lambda ign: wf.writeChunk(0, "0123456789"))
d2.addCallback(lambda ign: wf.close())
d.addCallback(_write_excl)
d.addCallback(lambda ign: self.root.get(u"excl"))
d.addCallback(lambda node: download_to_data(node))
- d.addCallback(lambda data: self.failUnlessEqual(data, "0123456789"))
+ d.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
+
+ """
+ # test that writing a zero-length file with EXCL only updates the directory once
+ d.addCallback(lambda ign:
+ self.handler.openFile("zerolength", sftp.FXF_WRITE | sftp.FXF_CREAT |
+ sftp.FXF_EXCL, {}))
+ def _write_excl_zerolength(wf):
+ d2 = self.root.get(u"zerolength")
+ d2.addCallback(lambda node: download_to_data(node))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
+
+ # FIXME: no API to get the best version number exists (fix as part of #993)
+ #stash = {}
+ #d2.addCallback(lambda ign: self.root.get_best_version_number())
+    #d2.addCallback(lambda version: stash.update(version=version))
+ d2.addCallback(lambda ign: wf.close())
+ #d2.addCallback(lambda ign: self.root.get_best_version_number())
+    #d2.addCallback(lambda new_version: self.failUnlessReallyEqual(new_version, stash['version']))
+ return d2
+ d.addCallback(_write_excl_zerolength)
+ d.addCallback(lambda ign: self.root.get(u"zerolength"))
+ d.addCallback(lambda node: download_to_data(node))
+ d.addCallback(lambda data: self.failUnlessReallyEqual(data, ""))
+ """
# test WRITE | CREAT without TRUNC
d.addCallback(lambda ign:
d.addCallback(_write_notrunc)
d.addCallback(lambda ign: self.root.get(u"newfile2"))
d.addCallback(lambda node: download_to_data(node))
- d.addCallback(lambda data: self.failUnlessEqual(data, "0123456789"))
+ d.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
# test writing to a mutable file
d.addCallback(lambda ign:
d.addCallback(lambda ign: self.root.get(u"mutable"))
def _check_same_file(node):
self.failUnless(node.is_mutable())
- self.failUnlessEqual(node.get_uri(), self.mutable_uri)
+ self.failUnlessReallyEqual(node.get_uri(), self.mutable_uri)
return node.download_best_version()
d.addCallback(_check_same_file)
- d.addCallback(lambda data: self.failUnlessEqual(data, "mutable new! contents"))
+ d.addCallback(lambda data: self.failUnlessReallyEqual(data, "mutable new! contents"))
"""
# test READ | WRITE without CREAT or TRUNC
def _read_write(rwf):
d2 = rwf.writeChunk(8, "0123")
d2.addCallback(lambda ign: rwf.readChunk(0, 100))
- d2.addCallback(lambda data: self.failUnlessEqual(data, "012345670123"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123"))
d2.addCallback(lambda ign: rwf.close())
return d2
d.addCallback(_read_write)
d.addCallback(lambda ign: self.root.get(u"small"))
d.addCallback(lambda node: download_to_data(node))
- d.addCallback(lambda data: self.failUnlessEqual(data, "012345670123"))
+ d.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123"))
"""
return d
self.shouldFail(NoSuchChildError, "removeFile small", "small",
self.root.get, u"small"))
d2.addCallback(lambda ign: rf.readChunk(0, 10))
- d2.addCallback(lambda data: self.failUnlessEqual(data, "0123456789"))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "0123456789"))
d2.addCallback(lambda ign: rf.close())
return d2
d.addCallback(_remove_and_read_small)
# renaming a file to a correct path should succeed
d.addCallback(lambda ign: self.handler.renameFile("small", "new_small"))
d.addCallback(lambda ign: self.root.get(u"new_small"))
- d.addCallback(lambda node: self.failUnlessEqual(node.get_uri(), self.small_uri))
+ d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.small_uri))
# renaming a file into a subdirectory should succeed (also tests Unicode names)
d.addCallback(lambda ign: self.handler.renameFile(u"gro\u00DF".encode('utf-8'),
u"loop/neue_gro\u00DF".encode('utf-8')))
d.addCallback(lambda ign: self.root.get(u"neue_gro\u00DF"))
- d.addCallback(lambda node: self.failUnlessEqual(node.get_uri(), self.gross_uri))
+ d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.gross_uri))
# renaming a directory to a correct path should succeed
d.addCallback(lambda ign: self.handler.renameFile("tiny_lit_dir", "new_tiny_lit_dir"))
d.addCallback(lambda ign: self.root.get(u"new_tiny_lit_dir"))
- d.addCallback(lambda node: self.failUnlessEqual(node.get_uri(), self.tiny_lit_dir_uri))
+ d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.tiny_lit_dir_uri))
# renaming an unknown to a correct path should succeed
d.addCallback(lambda ign: self.handler.renameFile("unknown", "new_unknown"))
d.addCallback(lambda ign: self.root.get(u"new_unknown"))
- d.addCallback(lambda node: self.failUnlessEqual(node.get_uri(), self.unknown_uri))
+ d.addCallback(lambda node: self.failUnlessReallyEqual(node.get_uri(), self.unknown_uri))
return d
self.failUnless(child.is_mutable())
# FIXME
#self.failUnless('ctime' in metadata, metadata)
- #self.failUnlessEqual(metadata['ctime'], 42)
+ #self.failUnlessReallyEqual(metadata['ctime'], 42)
#self.failUnless('ext_foo' in metadata, metadata)
- #self.failUnlessEqual(metadata['ext_foo'], 'bar')
+ #self.failUnlessReallyEqual(metadata['ext_foo'], 'bar')
# TODO: child should be empty
d.addCallback(_got)