import os, tempfile, heapq, binascii, traceback, array, stat, struct
import warnings
from stat import S_IFREG, S_IFDIR
from time import time, strftime, localtime

from zope.interface import implements

from twisted.python import components
from twisted.application import service, strports
from twisted.conch.ssh import factory, keys, session
from twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \
     FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED, FX_PERMISSION_DENIED, FX_EOF, \
     FX_BAD_MESSAGE, FX_FAILURE, FX_OK
from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, \
     FXF_CREAT, FXF_TRUNC, FXF_EXCL
from twisted.conch.interfaces import ISFTPServer, ISFTPFile, IConchUser, ISession
from twisted.conch.avatar import ConchUser
from twisted.conch.openssh_compat import primes
from twisted.cred import portal
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.python.failure import Failure
from twisted.internet.interfaces import ITransport
from twisted.internet import defer
from twisted.internet.interfaces import IFinishableConsumer

from foolscap.api import eventually
from pycryptopp.cipher.aes import AES

from allmydata.util import deferredutil
from allmydata.util.consumer import download_to_data
from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \
     NoSuchChildError, ChildOfWrongTypeError
from allmydata.mutable.common import NotWriteableError
from allmydata.immutable.upload import FileHandle
36 # twisted.conch.ssh.filetransfer generates this warning, but not when it is imported,
39 warnings.filterwarnings("ignore", category=DeprecationWarning,
40 message="BaseException.message has been deprecated as of Python 2.6",
41 module=".*filetransfer", append=True)
44 use_foolscap_logging = True
46 from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \
47 msg as _msg, err as _err, PrefixingLogMixin as _PrefixingLogMixin
49 if use_foolscap_logging:
50 (logmsg, logerr, PrefixingLogMixin) = (_msg, _err, _PrefixingLogMixin)
51 else: # pragma: no cover
52 def logmsg(s, level=None):
54 def logerr(s, level=None):
56 class PrefixingLogMixin:
57 def __init__(self, facility=None, prefix=''):
59 def log(self, s, level=None):
60 print "%r %s" % (self.prefix, s)
63 def eventually_callback(d):
64 return lambda res: eventually(d.callback, res)
66 def eventually_errback(d):
67 return lambda err: eventually(d.errback, err)
71 if isinstance(x, unicode):
72 return x.encode('utf-8')
73 if isinstance(x, str):
79 """SFTP times are unsigned 32-bit integers representing UTC seconds
80 (ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC.
81 A Tahoe time is the corresponding float."""
82 return long(t) & 0xFFFFFFFFL
85 def _convert_error(res, request):
86 if not isinstance(res, Failure):
88 if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
89 logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)
93 logmsg("RAISE %r %r" % (request, err.value), level=OPERATIONAL)
95 if noisy: logmsg(traceback.format_exc(err.value), level=NOISY)
96 except: # pragma: no cover
99 # The message argument to SFTPError must not reveal information that
100 # might compromise anonymity.
102 if err.check(SFTPError):
103 # original raiser of SFTPError has responsibility to ensure anonymity
105 if err.check(NoSuchChildError):
106 childname = _utf8(err.value.args[0])
107 raise SFTPError(FX_NO_SUCH_FILE, childname)
108 if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError):
109 msg = _utf8(err.value.args[0])
110 raise SFTPError(FX_PERMISSION_DENIED, msg)
111 if err.check(ExistingChildError):
112 # Versions of SFTP after v3 (which is what twisted.conch implements)
113 # define a specific error code for this case: FX_FILE_ALREADY_EXISTS.
114 # However v3 doesn't; instead, other servers such as sshd return
115 # FX_FAILURE. The gvfs SFTP backend, for example, depends on this
116 # to translate the error to the equivalent of POSIX EEXIST, which is
117 # necessary for some picky programs (such as gedit).
118 msg = _utf8(err.value.args[0])
119 raise SFTPError(FX_FAILURE, msg)
120 if err.check(NotImplementedError):
121 raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
122 if err.check(EOFError):
123 raise SFTPError(FX_EOF, "end of file reached")
124 if err.check(defer.FirstError):
125 _convert_error(err.value.subFailure, request)
127 # We assume that the error message is not anonymity-sensitive.
128 raise SFTPError(FX_FAILURE, _utf8(err.value))
131 def _repr_flags(flags):
132 return "|".join([f for f in
133 [(flags & FXF_READ) and "FXF_READ" or None,
134 (flags & FXF_WRITE) and "FXF_WRITE" or None,
135 (flags & FXF_APPEND) and "FXF_APPEND" or None,
136 (flags & FXF_CREAT) and "FXF_CREAT" or None,
137 (flags & FXF_TRUNC) and "FXF_TRUNC" or None,
138 (flags & FXF_EXCL) and "FXF_EXCL" or None,
143 def _lsLine(name, attrs):
146 st_mtime = attrs.get("mtime", 0)
147 st_mode = attrs["permissions"]
148 # TODO: check that clients are okay with this being a "?".
149 # (They should be because the longname is intended for human
151 st_size = attrs.get("size", "?")
152 # We don't know how many links there really are to this object.
155 # Based on <http://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
156 # We can't call the version in Twisted because we might have a version earlier than
157 # <http://twistedmatrix.com/trac/changeset/25412> (released in Twisted 8.2).
160 perms = array.array('c', '-'*10)
161 ft = stat.S_IFMT(mode)
162 if stat.S_ISDIR(ft): perms[0] = 'd'
163 elif stat.S_ISREG(ft): perms[0] = '-'
166 if mode&stat.S_IRUSR: perms[1] = 'r'
167 if mode&stat.S_IWUSR: perms[2] = 'w'
168 if mode&stat.S_IXUSR: perms[3] = 'x'
170 if mode&stat.S_IRGRP: perms[4] = 'r'
171 if mode&stat.S_IWGRP: perms[5] = 'w'
172 if mode&stat.S_IXGRP: perms[6] = 'x'
174 if mode&stat.S_IROTH: perms[7] = 'r'
175 if mode&stat.S_IWOTH: perms[8] = 'w'
176 if mode&stat.S_IXOTH: perms[9] = 'x'
177 # suid/sgid never set
180 l += str(st_nlink).rjust(5) + ' '
191 if st_mtime + sixmo < now or st_mtime > now + day:
192 # mtime is more than 6 months ago, or more than one day in the future
193 l += strftime("%b %d %Y ", localtime(st_mtime))
195 l += strftime("%b %d %H:%M ", localtime(st_mtime))
200 def _is_readonly(parent_readonly, child):
201 """Whether child should be listed as having read-only permissions in parent."""
203 if child.is_unknown():
205 elif child.is_mutable():
206 return child.is_readonly()
208 return parent_readonly
211 def _populate_attrs(childnode, metadata, size=None):
214 # The permissions must have the S_IFDIR (040000) or S_IFREG (0100000)
215 # bits, otherwise the client may refuse to open a directory.
216 # Also, sshfs run as a non-root user requires files and directories
217 # to be world-readable/writeable.
219 # Directories and unknown nodes have no size, and SFTP doesn't
220 # require us to make one up.
222 # childnode might be None, meaning that the file doesn't exist yet,
223 # but we're going to write it later.
225 if childnode and childnode.is_unknown():
227 elif childnode and IDirectoryNode.providedBy(childnode):
228 perms = S_IFDIR | 0777
230 # For files, omit the size if we don't immediately know it.
231 if childnode and size is None:
232 size = childnode.get_size()
234 assert isinstance(size, (int, long)) and not isinstance(size, bool), repr(size)
236 perms = S_IFREG | 0666
239 assert 'readonly' in metadata, metadata
240 if metadata['readonly']:
241 perms &= S_IFDIR | S_IFREG | 0555 # clear 'w' bits
243 # see webapi.txt for what these times mean
244 if 'linkmotime' in metadata.get('tahoe', {}):
245 attrs['mtime'] = _to_sftp_time(metadata['tahoe']['linkmotime'])
246 elif 'mtime' in metadata:
247 # We would prefer to omit atime, but SFTP version 3 can only
248 # accept mtime if atime is also set.
249 attrs['mtime'] = _to_sftp_time(metadata['mtime'])
250 attrs['atime'] = attrs['mtime']
252 if 'linkcrtime' in metadata.get('tahoe', {}):
253 attrs['createtime'] = _to_sftp_time(metadata['tahoe']['linkcrtime'])
255 if 'ctime' in metadata:
256 attrs['ctime'] = _to_sftp_time(metadata['ctime'])
258 attrs['permissions'] = perms
260 # twisted.conch.ssh.filetransfer only implements SFTP version 3,
261 # which doesn't include SSH_FILEXFER_ATTR_FLAGS.
266 class EncryptedTemporaryFile(PrefixingLogMixin):
267 # not implemented: next, readline, readlines, xreadlines, writelines
270 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
271 self.file = tempfile.TemporaryFile()
272 self.key = os.urandom(16) # AES-128
274 def _crypt(self, offset, data):
275 # TODO: use random-access AES (pycryptopp ticket #18)
276 offset_big = offset // 16
277 offset_small = offset % 16
278 iv = binascii.unhexlify("%032x" % offset_big)
279 cipher = AES(self.key, iv=iv)
280 cipher.process("\x00"*offset_small)
281 return cipher.process(data)
289 def seek(self, offset, whence=os.SEEK_SET):
290 if noisy: self.log(".seek(%r, %r)" % (offset, whence), level=NOISY)
291 self.file.seek(offset, whence)
294 offset = self.file.tell()
295 if noisy: self.log(".tell() = %r" % (offset,), level=NOISY)
298 def read(self, size=-1):
299 if noisy: self.log(".read(%r)" % (size,), level=NOISY)
300 index = self.file.tell()
301 ciphertext = self.file.read(size)
302 plaintext = self._crypt(index, ciphertext)
305 def write(self, plaintext):
306 if noisy: self.log(".write(<data of length %r>)" % (len(plaintext),), level=NOISY)
307 index = self.file.tell()
308 ciphertext = self._crypt(index, plaintext)
309 self.file.write(ciphertext)
311 def truncate(self, newsize):
312 if noisy: self.log(".truncate(%r)" % (newsize,), level=NOISY)
313 self.file.truncate(newsize)
316 class OverwriteableFileConsumer(PrefixingLogMixin):
317 implements(IFinishableConsumer)
318 """I act both as a consumer for the download of the original file contents, and as a
319 wrapper for a temporary file that records the downloaded data and any overwrites.
320 I use a priority queue to keep track of which regions of the file have been overwritten
321 but not yet downloaded, so that the download does not clobber overwritten data.
322 I use another priority queue to record milestones at which to make callbacks
323 indicating that a given number of bytes have been downloaded.
325 The temporary file reflects the contents of the file that I represent, except that:
326 - regions that have neither been downloaded nor overwritten, if present,
328 - the temporary file may be shorter than the represented file (it is never longer).
329 The latter's current size is stored in self.current_size.
331 This abstraction is mostly independent of SFTP. Consider moving it, if it is found
332 useful for other frontends."""
334 def __init__(self, download_size, tempfile_maker):
335 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
336 if noisy: self.log(".__init__(%r, %r)" % (download_size, tempfile_maker), level=NOISY)
337 self.download_size = download_size
338 self.current_size = download_size
339 self.f = tempfile_maker()
341 self.milestones = [] # empty heap of (offset, d)
342 self.overwrites = [] # empty heap of (start, end)
343 self.is_closed = False
344 self.done = self.when_reached(download_size) # adds a milestone
346 def _signal_done(ign):
347 if noisy: self.log("DONE", level=NOISY)
349 self.done.addCallback(_signal_done)
355 def get_current_size(self):
356 return self.current_size
358 def set_current_size(self, size):
359 if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" %
360 (size, self.current_size, self.downloaded), level=NOISY)
361 if size < self.current_size or size < self.downloaded:
362 self.f.truncate(size)
363 if size > self.current_size:
364 self.overwrite(self.current_size, "\x00" * (size - self.current_size))
365 self.current_size = size
367 # invariant: self.download_size <= self.current_size
368 if size < self.download_size:
369 self.download_size = size
370 if self.downloaded >= self.download_size:
373 def registerProducer(self, p, streaming):
374 if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)
377 # call resumeProducing once to start things off
386 def write(self, data):
387 if noisy: self.log(".write(<data of length %r>)" % (len(data),), level=NOISY)
391 if self.downloaded >= self.download_size:
394 next_downloaded = self.downloaded + len(data)
395 if next_downloaded > self.download_size:
396 data = data[:(self.download_size - self.downloaded)]
398 while len(self.overwrites) > 0:
399 (start, end) = self.overwrites[0]
400 if start >= next_downloaded:
401 # This and all remaining overwrites are after the data we just downloaded.
403 if start > self.downloaded:
404 # The data we just downloaded has been partially overwritten.
405 # Write the prefix of it that precedes the overwritten region.
406 self.f.seek(self.downloaded)
407 self.f.write(data[:(start - self.downloaded)])
409 # This merges consecutive overwrites if possible, which allows us to detect the
410 # case where the download can be stopped early because the remaining region
411 # to download has already been fully overwritten.
412 heapq.heappop(self.overwrites)
413 while len(self.overwrites) > 0:
414 (start1, end1) = self.overwrites[0]
418 heapq.heappop(self.overwrites)
420 if end >= next_downloaded:
421 # This overwrite extends past the downloaded data, so there is no
422 # more data to consider on this call.
423 heapq.heappush(self.overwrites, (next_downloaded, end))
424 self._update_downloaded(next_downloaded)
426 elif end >= self.downloaded:
427 data = data[(end - self.downloaded):]
428 self._update_downloaded(end)
430 self.f.seek(self.downloaded)
432 self._update_downloaded(next_downloaded)
434 def _update_downloaded(self, new_downloaded):
435 self.downloaded = new_downloaded
436 milestone = new_downloaded
437 if len(self.overwrites) > 0:
438 (start, end) = self.overwrites[0]
439 if start <= new_downloaded and end > milestone:
442 while len(self.milestones) > 0:
443 (next, d) = self.milestones[0]
446 if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
447 heapq.heappop(self.milestones)
448 eventually_callback(d)(None)
450 if milestone >= self.download_size:
453 def overwrite(self, offset, data):
454 if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
455 if offset > self.current_size:
456 # Normally writing at an offset beyond the current end-of-file
457 # would leave a hole that appears filled with zeroes. However, an
458 # EncryptedTemporaryFile doesn't behave like that (if there is a
459 # hole in the file on disk, the zeroes that are read back will be
460 # XORed with the keystream). So we must explicitly write zeroes in
461 # the gap between the current EOF and the offset.
463 self.f.seek(self.current_size)
464 self.f.write("\x00" * (offset - self.current_size))
465 start = self.current_size
471 end = offset + len(data)
472 self.current_size = max(self.current_size, end)
473 if end > self.downloaded:
474 heapq.heappush(self.overwrites, (start, end))
476 def read(self, offset, length):
477 """When the data has been read, callback the Deferred that we return with this data.
478 Otherwise errback the Deferred that we return.
479 The caller must perform no more overwrites until the Deferred has fired."""
481 if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
482 if offset >= self.current_size:
483 def _eof(): raise EOFError("read past end of file")
484 return defer.execute(_eof)
486 if offset + length > self.current_size:
487 length = self.current_size - offset
488 if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY)
490 needed = min(offset + length, self.download_size)
491 d = self.when_reached(needed)
493 # It is not necessarily the case that self.downloaded >= needed, because
494 # the file might have been truncated (thus truncating the download) and
497 assert self.current_size >= offset + length, (self.current_size, offset, length)
498 if noisy: self.log("self.f = %r" % (self.f,), level=NOISY)
500 return self.f.read(length)
501 d.addCallback(_reached)
504 def when_reached(self, index):
505 if noisy: self.log(".when_reached(%r)" % (index,), level=NOISY)
506 if index <= self.downloaded: # already reached
507 if noisy: self.log("already reached %r" % (index,), level=NOISY)
508 return defer.succeed(None)
511 if noisy: self.log("reached %r" % (index,), level=NOISY)
513 d.addCallback(_reached)
514 heapq.heappush(self.milestones, (index, d))
521 while len(self.milestones) > 0:
522 (next, d) = self.milestones[0]
523 if noisy: self.log("MILESTONE FINISH %r %r" % (next, d), level=NOISY)
524 heapq.heappop(self.milestones)
525 # The callback means that the milestone has been reached if
526 # it is ever going to be. Note that the file may have been
527 # truncated to before the milestone.
528 eventually_callback(d)(None)
530 # FIXME: causes spurious failures
531 #self.unregisterProducer()
534 self.is_closed = True
538 except EnvironmentError as e:
539 self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD)
541 def unregisterProducer(self):
543 self.producer.stopProducing()
547 SIZE_THRESHOLD = 1000
550 class ShortReadOnlySFTPFile(PrefixingLogMixin):
551 implements(ISFTPFile)
552 """I represent a file handle to a particular file on an SFTP connection.
553 I am used only for short immutable files opened in read-only mode.
554 The file contents are downloaded to memory when I am created."""
556 def __init__(self, userpath, filenode, metadata):
557 PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
558 if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY)
560 assert IFileNode.providedBy(filenode), filenode
561 self.filenode = filenode
562 self.metadata = metadata
563 self.async = download_to_data(filenode)
566 def readChunk(self, offset, length):
567 request = ".readChunk(%r, %r)" % (offset, length)
568 self.log(request, level=OPERATIONAL)
571 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
572 return defer.execute(_closed)
576 if noisy: self.log("_read(<data of length %r>) in readChunk(%r, %r)" % (len(data), offset, length), level=NOISY)
578 # "In response to this request, the server will read as many bytes as it
579 # can from the file (up to 'len'), and return them in a SSH_FXP_DATA
580 # message. If an error occurs or EOF is encountered before reading any
581 # data, the server will respond with SSH_FXP_STATUS. For normal disk
582 # files, it is guaranteed that this will read the specified number of
583 # bytes, or up to end of file."
585 # i.e. we respond with an EOF error iff offset is already at EOF.
587 if offset >= len(data):
588 eventually_errback(d)(SFTPError(FX_EOF, "read at or past end of file"))
590 eventually_callback(d)(data[offset:min(offset+length, len(data))])
592 self.async.addCallbacks(_read, eventually_errback(d))
593 d.addBoth(_convert_error, request)
596 def writeChunk(self, offset, data):
597 self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
599 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
600 return defer.execute(_denied)
603 self.log(".close()", level=OPERATIONAL)
606 return defer.succeed(None)
609 request = ".getAttrs()"
610 self.log(request, level=OPERATIONAL)
613 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
614 return defer.execute(_closed)
616 d = defer.execute(_populate_attrs, self.filenode, self.metadata)
617 d.addBoth(_convert_error, request)
620 def setAttrs(self, attrs):
621 self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
622 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
623 return defer.execute(_denied)
626 class GeneralSFTPFile(PrefixingLogMixin):
627 implements(ISFTPFile)
628 """I represent a file handle to a particular file on an SFTP connection.
629 I wrap an instance of OverwriteableFileConsumer, which is responsible for
630 storing the file contents. In order to allow write requests to be satisfied
631 immediately, there is effectively a FIFO queue between requests made to this
632 file handle, and requests to my OverwriteableFileConsumer. This queue is
633 implemented by the callback chain of self.async.
635 When first constructed, I am in an 'unopened' state that causes most
636 operations to be delayed until 'open' is called."""
638 def __init__(self, userpath, flags, close_notify, convergence):
639 PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
640 if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" %
641 (userpath, flags, _repr_flags(flags), close_notify), level=NOISY)
643 self.userpath = userpath
645 self.close_notify = close_notify
646 self.convergence = convergence
647 self.async = defer.Deferred()
648 # Creating or truncating the file is a change, but if FXF_EXCL is set, a zero-length file has already been created.
649 self.has_changed = (flags & (FXF_CREAT | FXF_TRUNC)) and not (flags & FXF_EXCL)
651 self.abandoned = False
653 self.childname = None
657 # self.consumer should only be relied on in callbacks for self.async, since it might
658 # not be set before then.
661 def open(self, parent=None, childname=None, filenode=None, metadata=None):
662 self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" %
663 (parent, childname, filenode, metadata), level=OPERATIONAL)
665 # If the file has been renamed, the new (parent, childname) takes precedence.
666 if self.parent is None:
668 if self.childname is None:
669 self.childname = childname
670 self.filenode = filenode
671 self.metadata = metadata
674 tempfile_maker = EncryptedTemporaryFile
676 if (self.flags & FXF_TRUNC) or not filenode:
677 # We're either truncating or creating the file, so we don't need the old contents.
678 self.consumer = OverwriteableFileConsumer(0, tempfile_maker)
679 self.consumer.finish()
681 assert IFileNode.providedBy(filenode), filenode
683 # TODO: use download interface described in #993 when implemented.
684 if filenode.is_mutable():
685 self.async.addCallback(lambda ign: filenode.download_best_version())
686 def _downloaded(data):
687 self.consumer = OverwriteableFileConsumer(len(data), tempfile_maker)
688 self.consumer.write(data)
689 self.consumer.finish()
691 self.async.addCallback(_downloaded)
693 download_size = filenode.get_size()
694 assert download_size is not None, "download_size is None"
695 self.consumer = OverwriteableFileConsumer(download_size, tempfile_maker)
697 if noisy: self.log("_read immutable", level=NOISY)
698 filenode.read(self.consumer, 0, None)
699 self.async.addCallback(_read)
701 eventually_callback(self.async)(None)
703 if noisy: self.log("open done", level=NOISY)
706 def rename(self, new_userpath, new_parent, new_childname):
707 self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL)
709 self.userpath = new_userpath
710 self.parent = new_parent
711 self.childname = new_childname
714 self.log(".abandon()", level=OPERATIONAL)
716 self.abandoned = True
719 self.log(".sync()", level=OPERATIONAL)
722 self.async.addBoth(eventually_callback(d))
725 def readChunk(self, offset, length):
726 request = ".readChunk(%r, %r)" % (offset, length)
727 self.log(request, level=OPERATIONAL)
729 if not (self.flags & FXF_READ):
730 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
731 return defer.execute(_denied)
734 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
735 return defer.execute(_closed)
739 if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY)
740 d2 = self.consumer.read(offset, length)
741 d2.addCallbacks(eventually_callback(d), eventually_errback(d))
742 # It is correct to drop d2 here.
744 self.async.addCallbacks(_read, eventually_errback(d))
745 d.addBoth(_convert_error, request)
748 def writeChunk(self, offset, data):
749 self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
751 if not (self.flags & FXF_WRITE):
752 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
753 return defer.execute(_denied)
756 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
757 return defer.execute(_closed)
759 self.has_changed = True
761 # Note that we return without waiting for the write to occur. Reads and
762 # close wait for prior writes, and will fail if any prior operation failed.
763 # This is ok because SFTP makes no guarantee that the write completes
764 # before the request does. In fact it explicitly allows write errors to be
765 # delayed until close:
766 # "One should note that on some server platforms even a close can fail.
767 # This can happen e.g. if the server operating system caches writes,
768 # and an error occurs while flushing cached writes during the close."
771 if noisy: self.log("_write in .writeChunk(%r, <data of length %r>), current_size = %r" %
772 (offset, len(data), self.consumer.get_current_size()), level=NOISY)
773 # FXF_APPEND means that we should always write at the current end of file.
774 write_offset = offset
775 if self.flags & FXF_APPEND:
776 write_offset = self.consumer.get_current_size()
778 self.consumer.overwrite(write_offset, data)
779 if noisy: self.log("overwrite done", level=NOISY)
781 self.async.addCallback(_write)
782 # don't addErrback to self.async, just allow subsequent async ops to fail.
783 return defer.succeed(None)
787 self.log(request, level=OPERATIONAL)
790 return defer.succeed(None)
792 # This means that close has been called, not that the close has succeeded.
795 if not (self.flags & (FXF_WRITE | FXF_CREAT)):
796 def _readonly_close():
798 self.consumer.close()
799 return defer.execute(_readonly_close)
801 # We must capture the abandoned, parent, and childname variables synchronously
802 # at the close call. This is needed by the correctness arguments in the comments
803 # for _abandon_any_heisenfiles and _rename_heisenfiles.
804 abandoned = self.abandoned
806 childname = self.childname
809 d2 = defer.succeed(None)
810 if self.has_changed and not abandoned:
811 d2.addCallback(lambda ign: self.consumer.when_done())
812 if self.filenode and self.filenode.is_mutable():
813 self.log("update mutable file %r childname=%r" % (self.filenode, self.childname,), level=OPERATIONAL)
814 d2.addCallback(lambda ign: self.consumer.get_current_size())
815 d2.addCallback(lambda size: self.consumer.read(0, size))
816 d2.addCallback(lambda new_contents: self.filenode.overwrite(new_contents))
819 self.log("_add_file childname=%r" % (childname,), level=OPERATIONAL)
820 u = FileHandle(self.consumer.get_file(), self.convergence)
821 return parent.add_file(childname, u)
822 d2.addCallback(_add_file)
825 if noisy: self.log("_committed(%r)" % (res,), level=NOISY)
827 self.consumer.close()
829 # We must close_notify before re-firing self.async.
830 if self.close_notify:
831 self.close_notify(self.userpath, self.parent, self.childname, self)
833 d2.addBoth(_committed)
836 self.async.addCallback(_close)
839 self.async.addCallbacks(eventually_callback(d), eventually_errback(d))
840 d.addBoth(_convert_error, request)
844 request = ".getAttrs()"
845 self.log(request, level=OPERATIONAL)
848 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
849 return defer.execute(_closed)
851 # Optimization for read-only handles, when we already know the metadata.
852 if not(self.flags & (FXF_WRITE | FXF_CREAT)) and self.metadata and self.filenode and not self.filenode.is_mutable():
853 return defer.succeed(_populate_attrs(self.filenode, self.metadata))
857 # self.filenode might be None, but that's ok.
858 attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size())
859 eventually_callback(d)(attrs)
861 self.async.addCallbacks(_get, eventually_errback(d))
862 d.addBoth(_convert_error, request)
865 def setAttrs(self, attrs):
866 request = ".setAttrs(attrs) %r" % (attrs,)
867 self.log(request, level=OPERATIONAL)
869 if not (self.flags & FXF_WRITE):
870 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
871 return defer.execute(_denied)
874 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
875 return defer.execute(_closed)
877 if not "size" in attrs:
878 return defer.succeed(None)
881 if not isinstance(size, (int, long)) or size < 0:
882 def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
883 return defer.execute(_bad)
887 self.consumer.set_current_size(size)
888 eventually_callback(d)(None)
890 self.async.addCallbacks(_resize, eventually_errback(d))
891 d.addBoth(_convert_error, request)
896 def __init__(self, items):
906 def __init__(self, value):
910 # A "heisenfile" is a file that has been opened with write flags
911 # (FXF_WRITE and/or FXF_CREAT) and not yet close-notified.
912 # 'all_heisenfiles' maps from a direntry string to
913 # (list_of_GeneralSFTPFile, open_time_utc).
914 # A direntry string is parent_write_uri + "/" + childname_utf8 for
915 # an immutable file, or file_write_uri for a mutable file.
916 # Updates to this dict are single-threaded.
921 class SFTPUserHandler(ConchUser, PrefixingLogMixin):
922 implements(ISFTPServer)
923 def __init__(self, client, rootnode, username):
924 ConchUser.__init__(self)
925 PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=username)
926 if noisy: self.log(".__init__(%r, %r, %r)" % (client, rootnode, username), level=NOISY)
928 self.channelLookup["session"] = session.SSHSession
929 self.subsystemLookup["sftp"] = FileTransferServer
931 self._client = client
932 self._root = rootnode
933 self._username = username
934 self._convergence = client.convergence
936 # maps from UTF-8 paths for this user, to files written and still open
937 self._heisenfiles = {}
939 def gotVersion(self, otherVersion, extData):
940 self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL)
942 # advertise the same extensions as the OpenSSH SFTP server
943 # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
944 return {'posix-rename@openssh.com': '1',
945 'statvfs@openssh.com': '2',
946 'fstatvfs@openssh.com': '2',
950 self.log(".logout()", level=OPERATIONAL)
952 for files in self._heisenfiles.itervalues():
956 def _add_heisenfiles_by_path(self, userpath, files):
957 if noisy: self.log("._add_heisenfiles_by_path(%r, %r)" % (userpath, files), level=NOISY)
959 if userpath in self._heisenfiles:
960 self._heisenfiles[userpath] += files
962 self._heisenfiles[userpath] = files
964 def _add_heisenfiles_by_direntry(self, direntry, files_to_add):
965 if noisy: self.log("._add_heisenfiles_by_direntry(%r, %r)" % (direntry, files_to_add), level=NOISY)
968 if direntry in all_heisenfiles:
969 (old_files, opentime) = all_heisenfiles[direntry]
970 all_heisenfiles[direntry] = (old_files + files_to_add, opentime)
972 all_heisenfiles[direntry] = (files_to_add, time())
# Abandon (and then sync) all open heisenfiles matching either the per-user
# path or the user-independent direntry; used by remove operations.
974 def _abandon_any_heisenfiles(self, userpath, direntry):
975 if noisy: self.log("._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry), level=NOISY)
977 # First we synchronously mark all heisenfiles matching the userpath or direntry
978 # as abandoned, and remove them from the two heisenfile dicts. Then we .sync()
979 # each file that we abandoned.
981 # For each file, the call to .abandon() occurs:
982 # * before the file is closed, in which case it will never be committed
983 # (uploaded+linked or published); or
984 # * after it is closed but before it has been close_notified, in which case the
985 # .sync() ensures that it has been committed (successfully or not) before we
988 # This avoids a race that might otherwise cause the file to be committed after
989 # the remove operation has completed.
991 # We return a Deferred that fires with True if any files were abandoned (this
992 # does not mean that they were not committed; it is used to determine whether
993 # a NoSuchChildError from the attempt to delete the file should be suppressed).
# Collect matching files from both dicts and drop their entries synchronously.
996 if direntry in all_heisenfiles:
997 (files, opentime) = all_heisenfiles[direntry]
998 del all_heisenfiles[direntry]
999 if userpath in self._heisenfiles:
1000 files += self._heisenfiles[userpath]
1001 del self._heisenfiles[userpath]
# Chain a sync of each abandoned file; the result reports whether any matched.
1006 d = defer.succeed(None)
# NOTE(review): the "for f in files:" header (original 1007) is missing here.
# If the loop variable is f, this lambda captures it late-bound — every queued
# callback would sync the *last* file only. Confirm whether upstream binds it
# as a default argument (lambda ign, f=f: ...).
1008 d.addBoth(lambda ign: f.sync())
1010 d.addBoth(lambda ign: len(files) > 0)
# Re-key all open heisenfiles from the 'from' userpath/direntry to the 'to'
# userpath/direntry, then sync each renamed file; used by renameFile.
1013 def _rename_heisenfiles(self, from_userpath, from_parent, from_childname,
1014 to_userpath, to_parent, to_childname, overwrite=True):
1015 if noisy: self.log("._rename_heisenfiles(%r, %r, %r, %r, %r, %r, overwrite=%r)" %
1016 (from_userpath, from_parent, from_childname,
1017 to_userpath, to_parent, to_childname, overwrite), level=NOISY)
1019 # First we synchronously rename all heisenfiles matching the userpath or direntry.
1020 # Then we .sync() each file that we renamed.
1022 # For each file, the call to .rename occurs:
1023 # * before the file is closed, in which case it will be committed at the
1025 # * after it is closed but before it has been close_notified, in which case the
1026 # .sync() ensures that it has been committed (successfully or not) before we
1029 # This avoids a race that might otherwise cause the file to be committed at the
1030 # old name after the rename operation has completed.
1032 # Note that if overwrite is False, the caller should already have checked
1033 # whether a real direntry exists at the destination. It is possible that another
1034 # direntry (heisen or real) comes to exist at the destination after that check,
1035 # but in that case it is correct for the rename to succeed (and for the commit
1036 # of the heisenfile at the destination to possibly clobber the other entry, since
1037 # that can happen anyway when we have concurrent write handles to the same direntry).
1039 # We return a Deferred that fires with True if any files were renamed (this
1040 # does not mean that they were not committed; it is used to determine whether
1041 # a NoSuchChildError from the rename attempt should be suppressed). If overwrite
1042 # is False and there were already heisenfiles at the destination userpath or
1043 # direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED).
1045 from_direntry = self._direntry_for(from_parent, from_childname)
1046 to_direntry = self._direntry_for(to_parent, to_childname)
# Refuse a non-overwriting rename onto a path that already has open heisenfiles.
1048 if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles):
1049 def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
1050 return defer.execute(_existing)
# Move matching files out of both source keys and re-add them at the destination.
1053 if from_direntry in all_heisenfiles:
1054 (from_files, opentime) = all_heisenfiles[from_direntry]
1055 del all_heisenfiles[from_direntry]
1056 if from_userpath in self._heisenfiles:
1057 from_files += self._heisenfiles[from_userpath]
1058 del self._heisenfiles[from_userpath]
1060 self._add_heisenfiles_by_direntry(to_direntry, from_files)
1061 self._add_heisenfiles_by_path(to_userpath, from_files)
# Tell each file object its new identity, then sync them in sequence.
1063 for f in from_files:
1064 f.rename(to_userpath, to_parent, to_childname)
1066 d = defer.succeed(None)
1067 for f in from_files:
# NOTE(review): this lambda captures the loop variable f late-bound — every
# queued callback would sync the *last* file. Confirm whether upstream uses
# a default-argument binding (lambda ign, f=f: f.sync()).
1068 d.addBoth(lambda ign: f.sync())
1070 d.addBoth(lambda ign: len(from_files) > 0)
# Wait for any open heisenfiles matching the userpath or direntry to finish
# committing (upload/publish), optionally skipping the file given as *ignore*.
# Unlike _abandon/_rename, the dict entries are left in place.
1073 def _sync_heisenfiles(self, userpath, direntry, ignore=None):
1074 if noisy: self.log("._sync_heisenfiles(%r, %r)" % (userpath, direntry), level=NOISY)
1077 if direntry in all_heisenfiles:
1078 (files, opentime) = all_heisenfiles[direntry]
1079 if userpath in self._heisenfiles:
1080 files += self._heisenfiles[userpath]
1082 d = defer.succeed(None)
# NOTE(review): the loop header and the ignore-filter lines (original 1083-1084)
# are missing from this excerpt; the lambda below captures f late-bound if it is
# a loop variable — confirm upstream binds it with a default argument.
1085 d.addCallback(lambda ign: f.sync())
def _remove_heisenfile(self, userpath, parent, childname, file_to_remove):
    """Forget *file_to_remove* from both heisenfile maps.

    Called (as the close_notify callback) when a file has finished closing.
    Removes the file object from the entry for its user-independent direntry
    in the module-global ``all_heisenfiles``, and from this user's
    ``self._heisenfiles`` entry for *userpath*; entries that become empty are
    deleted outright. Comparison is by identity, so other open handles to the
    same path are unaffected.
    """
    # Fixed: the log label previously said "._remove_file", which did not match
    # this method's actual name and made log traces misleading.
    if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY)

    direntry = self._direntry_for(parent, childname)
    if direntry in all_heisenfiles:
        (all_old_files, opentime) = all_heisenfiles[direntry]
        all_new_files = [f for f in all_old_files if f is not file_to_remove]
        if len(all_new_files) > 0:
            all_heisenfiles[direntry] = (all_new_files, opentime)
        else:
            # last open file for this direntry: drop the entry entirely
            del all_heisenfiles[direntry]

    if userpath in self._heisenfiles:
        old_files = self._heisenfiles[userpath]
        new_files = [f for f in old_files if f is not file_to_remove]
        if len(new_files) > 0:
            self._heisenfiles[userpath] = new_files
        else:
            del self._heisenfiles[userpath]
# Compute the user-independent directory-entry key used to index
# all_heisenfiles: the parent's write URI plus "/" plus the UTF-8 child name.
1108 def _direntry_for(self, filenode_or_parent, childname=None):
1109 if filenode_or_parent:
1110 rw_uri = filenode_or_parent.get_write_uri()
1111 if rw_uri and childname:
1112 return rw_uri + "/" + childname.encode('utf-8')
# NOTE(review): the fallback branches (original 1113-1117, presumably returning
# rw_uri alone or None) are missing from this excerpt.
# Choose and open the file-handle implementation for an openFile request:
# a lightweight ShortReadOnlySFTPFile for small immutable read-only opens,
# otherwise a GeneralSFTPFile registered in the heisenfile maps.
1118 def _make_file(self, existing_file, userpath, flags, parent=None, childname=None, filenode=None, metadata=None):
1119 if noisy: self.log("._make_file(%r, %r, %r = %r, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
1120 (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata),
1123 assert metadata is None or 'readonly' in metadata, metadata
1125 writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0
# Key by (parent, childname) when available, otherwise by the filenode itself.
1127 direntry = self._direntry_for(parent, childname)
1129 direntry = self._direntry_for(filenode)
# Wait for other handles on the same entry to commit before opening this one.
1131 d = self._sync_heisenfiles(userpath, direntry, ignore=existing_file)
# Small immutable files opened read-only get the cheap in-memory handle.
1133 if not writing and (flags & FXF_READ) and filenode and not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD:
1134 d.addCallback(lambda ign: ShortReadOnlySFTPFile(userpath, filenode, metadata))
1138 close_notify = self._remove_heisenfile
# Reuse the handle created earlier in openFile when there is one.
1140 d.addCallback(lambda ign: existing_file or GeneralSFTPFile(userpath, flags, close_notify, self._convergence))
1141 def _got_file(file):
1143 self._add_heisenfiles_by_direntry(direntry, [file])
1144 return file.open(parent=parent, childname=childname, filenode=filenode, metadata=metadata)
1145 d.addCallback(_got_file)
# ISFTPServer.openFile: validate the flag combination, then resolve the path
# either as a bare /uri/FILECAP (case 1) or relative to a parent directory
# (case 2), enforcing writability rules before handing off to _make_file.
1148 def openFile(self, pathstring, flags, attrs):
1149 request = ".openFile(%r, %r = %r, %r)" % (pathstring, flags, _repr_flags(flags), attrs)
1150 self.log(request, level=OPERATIONAL)
1152 # This is used for both reading and writing.
1153 # First exclude invalid combinations of flags, and empty paths.
1155 if not (flags & (FXF_READ | FXF_WRITE)):
1156 def _bad_readwrite():
1157 raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
1158 return defer.execute(_bad_readwrite)
1160 if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
1161 def _bad_exclcreat():
1162 raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
1163 return defer.execute(_bad_exclcreat)
1165 path = self._path_from_string(pathstring)
# NOTE(review): the "if not path:" guard (original 1166) is missing here.
1167 def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
1168 return defer.execute(_emptypath)
1170 # The combination of flags is potentially valid.
1172 # To work around clients that have race condition bugs, a getAttr, rename, or
1173 # remove request following an 'open' request with FXF_WRITE or FXF_CREAT flags,
1174 # should succeed even if the 'open' request has not yet completed. So we now
1175 # synchronously add a file object into the self._heisenfiles dict, indexed
1176 # by its UTF-8 userpath. (We can't yet add it to the all_heisenfiles dict,
1177 # because we don't yet have a user-independent path for the file.) The file
1178 # object does not know its filenode, parent, or childname at this point.
1180 userpath = self._path_to_utf8(path)
1182 if flags & (FXF_WRITE | FXF_CREAT):
1183 file = GeneralSFTPFile(userpath, flags, self._remove_heisenfile, self._convergence)
1184 self._add_heisenfiles_by_path(userpath, [file])
# NOTE(review): the "else:" and "file = None" lines (original 1185, 1187) are
# missing from this excerpt; read-only opens leave file unset until _make_file.
1186 # We haven't decided which file implementation to use yet.
1189 # Now there are two major cases:
1191 # 1. The path is specified as /uri/FILECAP, with no parent directory.
1192 # If the FILECAP is mutable and writeable, then we can open it in write-only
1193 # or read/write mode (non-exclusively), otherwise we can only open it in
1194 # read-only mode. The open should succeed immediately as long as FILECAP is
1195 # a valid known filecap that grants the required permission.
1197 # 2. The path is specified relative to a parent. We find the parent dirnode and
1198 # get the child's URI and metadata if it exists. There are four subcases:
1199 # a. the child does not exist: FXF_CREAT must be set, and we must be able
1200 # to write to the parent directory.
1201 # b. the child exists but is not a valid known filecap: fail
1202 # c. the child is mutable: if we are trying to open it write-only or
1203 # read/write, then we must be able to write to the file.
1204 # d. the child is immutable: if we are trying to open it write-only or
1205 # read/write, then we must be able to write to the parent directory.
1207 # To reduce latency, open normally succeeds as soon as these conditions are
1208 # met, even though there might be a failure in downloading the existing file
1209 # or uploading a new one. However, there is an exception: if a file has been
1210 # written, then closed, and is now being reopened, then we have to delay the
1211 # open until the previous upload/publish has completed. This is necessary
1212 # because sshfs does not wait for the result of an FXF_CLOSE message before
1213 # reporting to the client that a file has been closed. It applies both to
1214 # mutable files, and to directory entries linked to an immutable file.
1216 # Note that the permission checks below are for more precise error reporting on
1217 # the open call; later operations would fail even if we did not make these checks.
1219 d = self._get_root(path)
1220 def _got_root( (root, path) ):
1221 if root.is_unknown():
1222 raise SFTPError(FX_PERMISSION_DENIED,
1223 "cannot open an unknown cap (or child of an unknown directory). "
1224 "Upgrading the gateway to a later Tahoe-LAFS version may help")
# Case 1: bare filecap with no parent (the "if not path:" branch header,
# original 1225-1226, is missing from this excerpt).
1227 if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
1228 if not IFileNode.providedBy(root):
1229 raise SFTPError(FX_PERMISSION_DENIED,
1230 "cannot open a directory cap")
1231 if (flags & FXF_WRITE) and root.is_readonly():
1232 raise SFTPError(FX_PERMISSION_DENIED,
1233 "cannot write to a non-writeable filecap without a parent directory")
1234 if flags & FXF_EXCL:
1235 raise SFTPError(FX_FAILURE,
1236 "cannot create a file exclusively when it already exists")
1238 # The file does not need to be added to all_heisenfiles, because it is not
1239 # associated with a directory entry that needs to be updated.
1241 return self._make_file(file, userpath, flags, filenode=root)
# Case 2: path relative to a parent directory ("else:" header missing).
1244 childname = path[-1]
1245 if noisy: self.log("case 2: root = %r, childname = %r, path[:-1] = %r" %
1246 (root, childname, path[:-1]), level=NOISY)
1247 d2 = root.get_child_at_path(path[:-1])
1248 def _got_parent(parent):
1249 if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
1250 if parent.is_unknown():
1251 raise SFTPError(FX_PERMISSION_DENIED,
1252 "cannot open an unknown cap (or child of an unknown directory). "
1253 "Upgrading the gateway to a later Tahoe-LAFS version may help")
1255 parent_readonly = parent.is_readonly()
1256 d3 = defer.succeed(None)
1257 if flags & FXF_EXCL:
1258 # FXF_EXCL means that the link to the file (not the file itself) must
1259 # be created atomically wrt updates by this storage client.
1260 # That is, we need to create the link before returning success to the
1261 # SFTP open request (and not just on close, as would normally be the
1262 # case). We make the link initially point to a zero-length LIT file,
1263 # which is consistent with what might happen on a POSIX filesystem.
# (The "if parent_readonly:" guard, original 1265, is missing here.)
1266 raise SFTPError(FX_FAILURE,
1267 "cannot create a file exclusively when the parent directory is read-only")
1269 # 'overwrite=False' ensures failure if the link already exists.
1270 # FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
1272 zero_length_lit = "URI:LIT:"
1273 if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
1274 (parent, zero_length_lit, childname), level=NOISY)
1275 d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit, overwrite=False))
1276 def _seturi_done(child):
1277 if noisy: self.log("%r.get_metadata_for(%r)" % (parent, childname), level=NOISY)
1278 d4 = parent.get_metadata_for(childname)
1279 d4.addCallback(lambda metadata: (child, metadata))
1281 d3.addCallback(_seturi_done)
# Non-exclusive open: just look up the existing child ("else:" missing).
1283 if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY)
1284 d3.addCallback(lambda ign: parent.get_child_and_metadata(childname))
1286 def _got_child( (filenode, metadata) ):
1287 if noisy: self.log("_got_child( (%r, %r) )" % (filenode, metadata), level=NOISY)
1289 if filenode.is_unknown():
1290 raise SFTPError(FX_PERMISSION_DENIED,
1291 "cannot open an unknown cap. Upgrading the gateway "
1292 "to a later Tahoe-LAFS version may help")
1293 if not IFileNode.providedBy(filenode):
1294 raise SFTPError(FX_PERMISSION_DENIED,
1295 "cannot open a directory as if it were a file")
1296 if (flags & FXF_WRITE) and filenode.is_mutable() and filenode.is_readonly():
1297 raise SFTPError(FX_PERMISSION_DENIED,
1298 "cannot open a read-only mutable file for writing")
1299 if (flags & FXF_WRITE) and parent_readonly:
1300 raise SFTPError(FX_PERMISSION_DENIED,
1301 "cannot open a file for writing when the parent directory is read-only")
1303 metadata['readonly'] = _is_readonly(parent_readonly, filenode)
1304 return self._make_file(file, userpath, flags, parent=parent, childname=childname,
1305 filenode=filenode, metadata=metadata)
# Errback path ("def _no_child(f):" header, original 1306, is missing).
1307 if noisy: self.log("_no_child(%r)" % (f,), level=NOISY)
1308 f.trap(NoSuchChildError)
1310 if not (flags & FXF_CREAT):
1311 raise SFTPError(FX_NO_SUCH_FILE,
1312 "the file does not exist, and was not opened with the creation (CREAT) flag")
# ("if parent_readonly:" guard, original 1313, is missing here.)
1314 raise SFTPError(FX_PERMISSION_DENIED,
1315 "cannot create a file when the parent directory is read-only")
1317 return self._make_file(file, userpath, flags, parent=parent, childname=childname)
1318 d3.addCallbacks(_got_child, _no_child)
1321 d2.addCallback(_got_parent)
1324 d.addCallback(_got_root)
# On any failure, drop the speculatively-registered heisenfile again.
1325 def _remove_on_error(err):
1327 self._remove_heisenfile(userpath, None, None, file)
1329 d.addErrback(_remove_on_error)
1330 d.addBoth(_convert_error, request)
# ISFTPServer.renameFile; also used by the posix-rename@openssh.com extension
# (overwrite=True). Renames both the real directory entry and any open
# heisenfiles at the source path.
1333 def renameFile(self, from_pathstring, to_pathstring, overwrite=False):
1334 request = ".renameFile(%r, %r)" % (from_pathstring, to_pathstring)
1335 self.log(request, level=OPERATIONAL)
1337 from_path = self._path_from_string(from_pathstring)
1338 to_path = self._path_from_string(to_pathstring)
1339 from_userpath = self._path_to_utf8(from_path)
1340 to_userpath = self._path_to_utf8(to_path)
1342 # the target directory must already exist
1343 d = deferredutil.gatherResults([self._get_parent_or_node(from_path),
1344 self._get_parent_or_node(to_path)])
1345 def _got( (from_pair, to_pair) ):
1346 if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r, overwrite=%r)" %
1347 (from_pair, to_pair, from_pathstring, to_pathstring, overwrite), level=NOISY)
1348 (from_parent, from_childname) = from_pair
1349 (to_parent, to_childname) = to_pair
1351 if from_childname is None:
1352 raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
1353 if to_childname is None:
1354 raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
1356 # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
1357 # "It is an error if there already exists a file with the name specified
1359 # OpenSSH's SFTP server returns FX_PERMISSION_DENIED for this error.
1361 # For the standard SSH_FXP_RENAME operation, overwrite=False.
1362 # We also support the posix-rename@openssh.com extension, which uses overwrite=True.
# When not overwriting, probe the destination and require the lookup to fail.
1364 d2 = defer.fail(NoSuchChildError())
# (The "if not overwrite:" guard, original 1365, is missing from this excerpt.)
1366 d2.addCallback(lambda ign: to_parent.get(to_childname))
1367 def _expect_fail(res):
1368 if not isinstance(res, Failure):
1369 raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
1371 # It is OK if we fail for errors other than NoSuchChildError, since that probably
1372 # indicates some problem accessing the destination directory.
1373 res.trap(NoSuchChildError)
1374 d2.addBoth(_expect_fail)
1376 # If there are heisenfiles to be written at the 'from' direntry, then ensure
1377 # they will now be written at the 'to' direntry instead.
1378 d2.addCallback(lambda ign:
1379 self._rename_heisenfiles(from_userpath, from_parent, from_childname,
1380 to_userpath, to_parent, to_childname, overwrite=overwrite))
# Then move the real directory entry (headers for "def _move(renamed):",
# original 1382, and surrounding lines are missing from this excerpt).
1383 # FIXME: use move_child_to_path to avoid possible data loss due to #943
1384 #d3 = from_parent.move_child_to_path(from_childname, to_root, to_path, overwrite=overwrite)
1386 d3 = from_parent.move_child_to(from_childname, to_parent, to_childname, overwrite=overwrite)
# Error post-processing: suppress NoSuchChildError when heisenfiles were
# renamed, and normalize ExistingChildError to FX_PERMISSION_DENIED.
1388 if noisy: self.log("_check(%r) in .renameFile(%r, %r, overwrite=%r)" %
1389 (err, from_pathstring, to_pathstring, overwrite), level=NOISY)
1391 if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)):
1393 if not overwrite and err.check(ExistingChildError):
1394 raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
1399 d2.addCallback(_move)
1402 d.addBoth(_convert_error, request)
# ISFTPServer.makeDirectory: create the directory (and any missing ancestors)
# under the resolved root. (The trailing "return d", original 1415, is not
# visible in this excerpt.)
1405 def makeDirectory(self, pathstring, attrs):
1406 request = ".makeDirectory(%r, %r)" % (pathstring, attrs)
1407 self.log(request, level=OPERATIONAL)
1409 path = self._path_from_string(pathstring)
1410 metadata = self._attrs_to_metadata(attrs)
1411 d = self._get_root(path)
1412 d.addCallback(lambda (root, path):
1413 self._get_or_create_directories(root, path, metadata))
1414 d.addBoth(_convert_error, request)
# Recursively walk/create each component of *path* below *node*, creating
# missing subdirectories on the way (mkdir -p semantics).
1417 def _get_or_create_directories(self, node, path, metadata):
1418 if not IDirectoryNode.providedBy(node):
1419 # TODO: provide the name of the blocking file in the error message.
1420 def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
1421 "is a file in the way") # close enough
1422 return defer.execute(_blocked)
# Base case ("if not path:" header, original 1423-1424, missing here):
# an empty remaining path means *node* is the requested directory.
1425 return defer.succeed(node)
1426 d = node.get(path[0])
1427 def _maybe_create(f):
1428 f.trap(NoSuchChildError)
1429 return node.create_subdirectory(path[0])
1430 d.addErrback(_maybe_create)
1431 d.addCallback(self._get_or_create_directories, path[1:], metadata)
# ISFTPServer.removeFile: delete a file (never a directory) at the given path.
1434 def removeFile(self, pathstring):
1435 request = ".removeFile(%r)" % (pathstring,)
1436 self.log(request, level=OPERATIONAL)
1438 path = self._path_from_string(pathstring)
1439 d = self._remove_object(path, must_be_file=True)
1440 d.addBoth(_convert_error, request)
# ISFTPServer.removeDirectory: delete a directory (never a file) at the path.
1443 def removeDirectory(self, pathstring):
1444 request = ".removeDirectory(%r)" % (pathstring,)
1445 self.log(request, level=OPERATIONAL)
1447 path = self._path_from_string(pathstring)
1448 d = self._remove_object(path, must_be_directory=True)
1449 d.addBoth(_convert_error, request)
# Shared implementation for removeFile/removeDirectory. For file removals,
# first abandons any open heisenfiles at the path; if some were abandoned,
# the delete tolerates a missing real entry (must_exist=False).
1452 def _remove_object(self, path, must_be_directory=False, must_be_file=False):
1453 userpath = self._path_to_utf8(path)
1454 d = self._get_parent_or_node(path)
1455 def _got_parent( (parent, childname) ):
1456 if childname is None:
1457 raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
1459 direntry = self._direntry_for(parent, childname)
1460 d2 = defer.succeed(False)
1461 if not must_be_directory:
1462 d2.addCallback(lambda ign: self._abandon_any_heisenfiles(userpath, direntry))
1464 d2.addCallback(lambda abandoned:
1465 parent.delete(childname, must_exist=not abandoned,
1466 must_be_directory=must_be_directory, must_be_file=must_be_file))
1468 d.addCallback(_got_parent)
# ISFTPServer.openDirectory: resolve the path to a dirnode, list its children,
# and return a StoppableList of (utf8-name, ls-style longname, attrs) tuples.
1471 def openDirectory(self, pathstring):
1472 request = ".openDirectory(%r)" % (pathstring,)
1473 self.log(request, level=OPERATIONAL)
1475 path = self._path_from_string(pathstring)
1476 d = self._get_parent_or_node(path)
1477 def _got_parent_or_node( (parent_or_node, childname) ):
1478 if noisy: self.log("_got_parent_or_node( (%r, %r) ) in openDirectory(%r)" %
1479 (parent_or_node, childname, pathstring), level=NOISY)
1480 if childname is None:
1481 return parent_or_node
# ("else:" header, original 1482, is missing from this excerpt.)
1483 return parent_or_node.get(childname)
1484 d.addCallback(_got_parent_or_node)
# ("def _list(dirnode):" header, original 1485, is missing here.)
1486 if dirnode.is_unknown():
1487 raise SFTPError(FX_PERMISSION_DENIED,
1488 "cannot list an unknown cap as a directory. Upgrading the gateway "
1489 "to a later Tahoe-LAFS version may help")
1490 if not IDirectoryNode.providedBy(dirnode):
1491 raise SFTPError(FX_PERMISSION_DENIED,
1492 "cannot list a file as if it were a directory")
# (The d2 = dirnode.list() and results = [] lines, original 1493-1497, are
# missing from this excerpt.)
1495 def _render(children):
1496 parent_readonly = dirnode.is_readonly()
1498 for filename, (child, metadata) in children.iteritems():
1499 # The file size may be cached or absent.
1500 metadata['readonly'] = _is_readonly(parent_readonly, child)
1501 attrs = _populate_attrs(child, metadata)
1502 filename_utf8 = filename.encode('utf-8')
1503 longname = _lsLine(filename_utf8, attrs)
1504 results.append( (filename_utf8, longname, attrs) )
1505 return StoppableList(results)
1506 d2.addCallback(_render)
1508 d.addCallback(_list)
1509 d.addBoth(_convert_error, request)
# ISFTPServer.getAttrs: stat a path. Syncs pending heisenfiles first so sizes
# are current, and synthesizes attributes for files that are open for writing
# but not yet linked into the directory (client race-condition workaround).
1512 def getAttrs(self, pathstring, followLinks):
1513 request = ".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks)
1514 self.log(request, level=OPERATIONAL)
1516 # When asked about a specific file, report its current size.
1517 # TODO: the modification time for a mutable file should be
1518 # reported as the update time of the best version. But that
1519 # information isn't currently stored in mutable shares, I think.
1521 # Some clients will incorrectly try to get the attributes
1522 # of a file immediately after opening it, before it has been put
1523 # into the all_heisenfiles table. This is a race condition bug in
1524 # the client, but we probably need to handle it anyway.
1526 path = self._path_from_string(pathstring)
1527 userpath = self._path_to_utf8(path)
1528 d = self._get_parent_or_node(path)
1529 def _got_parent_or_node( (parent_or_node, childname) ):
1530 if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
1532 direntry = self._direntry_for(parent_or_node, childname)
1533 d2 = self._sync_heisenfiles(userpath, direntry)
1535 if childname is None:
# Path resolved to a node directly (e.g. /uri/CAP): stat the node itself.
1536 node = parent_or_node
1537 d2.addCallback(lambda ign: node.get_current_size())
1538 d2.addCallback(lambda size:
1539 _populate_attrs(node, {'readonly': node.is_unknown() or node.is_readonly()}, size=size))
# ("else:" header, original 1540, is missing from this excerpt.)
1541 parent = parent_or_node
1542 d2.addCallback(lambda ign: parent.get_child_and_metadata_at_path([childname]))
1543 def _got( (child, metadata) ):
1544 if noisy: self.log("_got( (%r, %r) )" % (child, metadata), level=NOISY)
1545 assert IDirectoryNode.providedBy(parent), parent
1546 metadata['readonly'] = _is_readonly(parent.is_readonly(), child)
1547 d3 = child.get_current_size()
1548 d3.addCallback(lambda size: _populate_attrs(child, metadata, size=size))
# ("def _nosuch(err):" header, original 1550, is missing here.)
1551 if noisy: self.log("_nosuch(%r)" % (err,), level=NOISY)
1552 err.trap(NoSuchChildError)
1553 direntry = self._direntry_for(parent, childname)
1554 if noisy: self.log("checking open files:\nself._heisenfiles = %r\nall_heisenfiles = %r\ndirentry=%r" %
1555 (self._heisenfiles, all_heisenfiles, direntry), level=NOISY)
1556 if direntry in all_heisenfiles:
1557 (files, opentime) = all_heisenfiles[direntry]
1558 sftptime = _to_sftp_time(opentime)
1559 # A file that has been opened for writing necessarily has permissions rw-rw-rw-.
1560 return {'permissions': S_IFREG | 0666,
1562 'createtime': sftptime,
# (Remaining attr entries and the re-raise, original 1561, 1563-1567, are
# missing from this excerpt.)
1568 d2.addCallbacks(_got, _nosuch)
1570 d.addCallback(_got_parent_or_node)
1571 d.addBoth(_convert_error, request)
def setAttrs(self, pathstring, attrs):
    """ISFTPServer.setAttrs: attribute changes are mostly ignored.

    Tahoe cannot truncate or extend a file in place, so a request that
    includes a 'size' attribute fails with FX_OP_UNSUPPORTED; all other
    attribute changes are silently accepted as no-ops.
    """
    self.log(".setAttrs(%r, %r)" % (pathstring, attrs), level=OPERATIONAL)

    if "size" in attrs:
        # this would require us to download and re-upload the truncated/extended
        # file contents
        # Fixed typo in the user-visible error message ("wth" -> "with").
        def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs with size attribute unsupported")
        return defer.execute(_unsupported)
    return defer.succeed(None)
def readLink(self, pathstring):
    """ISFTPServer.readLink: symlinks are not supported by the Tahoe
    filesystem, so this always fails with FX_OP_UNSUPPORTED."""
    self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)

    def _raise_unsupported():
        raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
    return defer.execute(_raise_unsupported)
def makeLink(self, linkPathstring, targetPathstring):
    """ISFTPServer.makeLink: symlink creation is not supported; always fails
    with FX_OP_UNSUPPORTED."""
    self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL)

    # If this is implemented, note the reversal of arguments described in point 7 of
    # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
    def _raise_unsupported():
        raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
    return defer.execute(_raise_unsupported)
# ISFTPServer.extendedRequest: implements the OpenSSH posix-rename and
# (f)statvfs extensions; everything else fails with FX_OP_UNSUPPORTED.
1599 def extendedRequest(self, extensionName, extensionData):
1600 self.log(".extendedRequest(%r, <data of length %r>)" % (extensionName, len(extensionData)), level=OPERATIONAL)
1602 # We implement the three main OpenSSH SFTP extensions; see
1603 # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
1605 if extensionName == 'posix-rename@openssh.com':
1606 def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")
# Wire format: uint32 length + from-path, uint32 length + to-path.
# (A minimum-length check, original 1607, is missing from this excerpt.)
1608 (fromPathLen,) = struct.unpack('>L', extensionData[0:4])
1609 if 8 + fromPathLen > len(extensionData): return defer.execute(_bad)
1611 (toPathLen,) = struct.unpack('>L', extensionData[(4 + fromPathLen):(8 + fromPathLen)])
1612 if 8 + fromPathLen + toPathLen != len(extensionData): return defer.execute(_bad)
1614 fromPathstring = extensionData[4:(4 + fromPathLen)]
1615 toPathstring = extensionData[(8 + fromPathLen):]
1616 d = self.renameFile(fromPathstring, toPathstring, overwrite=True)
1618 # Twisted conch assumes that the response from an extended request is either
1619 # an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing
1620 # (respond with an FXP_STATUS message) if we return a Failure with code FX_OK.
1621 def _succeeded(ign):
1622 raise SFTPError(FX_OK, "request succeeded")
1623 d.addCallback(_succeeded)
# ("return d", original 1624, is missing from this excerpt.)
1626 if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com':
# Report fixed, plausible filesystem statistics; Tahoe has no real quota info.
1627 return defer.succeed(struct.pack('>11Q',
1628 1024, # uint64 f_bsize /* file system block size */
1629 1024, # uint64 f_frsize /* fundamental fs block size */
1630 628318530, # uint64 f_blocks /* number of blocks (unit f_frsize) */
1631 314159265, # uint64 f_bfree /* free blocks in file system */
1632 314159265, # uint64 f_bavail /* free blocks for non-root */
1633 200000000, # uint64 f_files /* total file inodes */
1634 100000000, # uint64 f_ffree /* free file inodes */
1635 100000000, # uint64 f_favail /* free file inodes for non-root */
1636 0x1AF5, # uint64 f_fsid /* file system id */
1637 2, # uint64 f_flag /* bit mask = ST_NOSUID; not ST_RDONLY */
1638 65535, # uint64 f_namemax /* maximum filename length */
# (The closing "))" of this struct.pack call, original 1639, is missing here.)
1641 def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
1642 (extensionName, len(extensionData)))
1643 return defer.execute(_unsupported)
def realPath(self, pathstring):
    """ISFTPServer.realPath: canonicalize *pathstring* (resolving "." and
    "..") and return it as an absolute UTF-8 encoded path."""
    self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL)

    components = self._path_from_string(pathstring)
    return self._path_to_utf8(components)
1650 def _path_to_utf8(self, path):
1651 return (u"/" + u"/".join(path)).encode('utf-8')
# Convert a client-supplied byte path into a list of unicode components,
# treating the home directory as the root and resolving "." and ".."
# per draft-ietf-secsh-filexfer-02 section 6.2.
1653 def _path_from_string(self, pathstring):
1654 if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)
1656 # The home directory is the root directory.
1657 pathstring = pathstring.strip("/")
1658 if pathstring == "" or pathstring == ".":
# (The empty-list assignment and "else:" lines, original 1659-1660, are
# missing from this excerpt.)
1661 path_utf8 = pathstring.split("/")
1663 # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
1664 # "Servers SHOULD interpret a path name component ".." as referring to
1665 # the parent directory, and "." as referring to the current directory."
1667 for p_utf8 in path_utf8:
1669 # ignore excess .. components at the root
# (The ".."/"." handling lines, original 1668, 1670-1673, are missing here.)
1674 p = p_utf8.decode('utf-8', 'strict')
1675 except UnicodeError:
1676 raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
# (The path.append(p) and "return path" lines, original 1677-1681, are
# missing from this excerpt.)
1679 if noisy: self.log(" PATH %r" % (path,), level=NOISY)
# Resolve the first path components: paths of the form /uri/CAP/... are rooted
# at the node for CAP; everything else is rooted at the user's root dirnode.
1682 def _get_root(self, path):
1683 # return Deferred (root, remaining_path)
1684 d = defer.succeed(None)
1685 if path and path[0] == u"uri":
1686 d.addCallback(lambda ign: self._client.create_node_from_uri(path[1].encode('utf-8')))
1687 d.addCallback(lambda root: (root, path[2:]))
# ("else:" header, original 1688, and trailing "return d", original 1690,
# are missing from this excerpt.)
1689 d.addCallback(lambda ign: (self._root, path))
# Resolve *path* to either (parent_dirnode, childname) for a path with at
# least one component, or (node, None) for a bare root/URI.
1692 def _get_parent_or_node(self, path):
1693 # return Deferred (parent, childname) or (node, None)
1694 d = self._get_root(path)
1695 def _got_root( (root, remaining_path) ):
1696 if not remaining_path:
# (The "return (root, None)" / "else:" lines, original 1697-1698, are
# missing from this excerpt.)
1699 d2 = root.get_child_at_path(remaining_path[:-1])
1700 d2.addCallback(lambda parent: (parent, remaining_path[-1]))
# ("return d2", original 1701, and "return d", original 1703, are missing.)
1702 d.addCallback(_got_root)
# Translate SFTP attrs into Tahoe metadata, keeping only the time-related
# keys (as longs) and any "ext_*" extension keys (as strings).
1705 def _attrs_to_metadata(self, attrs):
# (The "metadata = {}" / "for key in attrs:" header lines, original
# 1706-1708, are missing from this excerpt.)
1709 if key == "mtime" or key == "ctime" or key == "createtime":
1710 metadata[key] = long(attrs[key])
1711 elif key.startswith("ext_"):
1712 metadata[key] = str(attrs[key])
# (The "return metadata" line, original 1713-1715, is missing here.)
# Conch avatar for an authenticated SFTP account: wires the "sftp" subsystem
# to FileTransferServer and refuses shell/exec/pty requests.
1717 class SFTPUser(ConchUser, PrefixingLogMixin):
1718 implements(ISession)
1719 def __init__(self, check_abort, client, rootnode, username, convergence):
1720 ConchUser.__init__(self)
1721 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
1723 self.channelLookup["session"] = session.SSHSession
1724 self.subsystemLookup["sftp"] = FileTransferServer
1726 self.check_abort = check_abort
1727 self.client = client
1728 self.root = rootnode
1729 self.username = username
1730 self.convergence = convergence
# ISession methods: only SFTP is supported, so interactive features refuse.
1732 def getPty(self, terminal, windowSize, attrs):
1733 self.log(".getPty(%r, %r, %r)" % (terminal, windowSize, attrs), level=OPERATIONAL)
1734 raise NotImplementedError
1736 def openShell(self, protocol):
1737 self.log(".openShell(%r)" % (protocol,), level=OPERATIONAL)
1738 raise NotImplementedError
1740 def execCommand(self, protocol, cmd):
1741 self.log(".execCommand(%r, %r)" % (protocol, cmd), level=OPERATIONAL)
1742 raise NotImplementedError
1744 def windowChanged(self, newWindowSize):
1745 self.log(".windowChanged(%r)" % (newWindowSize,), level=OPERATIONAL)
# (The "def eofReceived(self):" and "def closed(self):" lines, original
# 1747 and 1750, are missing from this excerpt; both appear to just log.)
1748 self.log(".eofReceived()", level=OPERATIONAL)
1751 self.log(".closed()", level=OPERATIONAL)
1754 # if you have an SFTPUser, and you want something that provides ISFTPServer,
1755 # then you get SFTPHandler(user)
# Register the adapter with twisted.python.components so conch can adapt the
# avatar to an ISFTPServer automatically.
1756 components.registerAdapter(SFTPHandler, SFTPUser, ISFTPServer)
1758 from auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme
# [Dispatcher fragment] — the "class Dispatcher:" line (original 1760) is not
# visible in this excerpt. This realm maps authenticated accounts to avatars.
1761 implements(portal.IRealm)
1762 def __init__(self, client):
# Keep a reference to the Tahoe client, used to build nodes from rootcaps.
1763 self._client = client
def requestAvatar(self, avatarID, mind, interface):
    """IRealm hook: build the SFTP avatar for an authenticated account.

    *avatarID* carries the account's username and rootcap; the returned
    triple is (interface, avatar, logout-callable) as the portal expects.
    """
    assert interface == IConchUser, interface
    client = self._client
    root = client.create_node_from_uri(avatarID.rootcap)
    avatar = SFTPUserHandler(client, root, avatarID.username)
    return (interface, avatar, avatar.logout)
# Twisted service that runs the SFTP frontend: builds a cred portal with the
# configured account checker(s), an SSH factory with the host keys, and
# listens on the configured port.
1772 class SFTPServer(service.MultiService):
1773 def __init__(self, client, accountfile, accounturl,
1774 sftp_portstr, pubkey_file, privkey_file):
1775 service.MultiService.__init__(self)
1777 r = Dispatcher(client)
1778 p = portal.Portal(r)
# Register whichever account-lookup mechanisms were configured. (The
# "if accountfile:" / "if accounturl:" guard lines, original 1779-1780 and
# 1783, are missing from this excerpt.)
1781 c = AccountFileChecker(self, accountfile)
1782 p.registerChecker(c)
1784 c = AccountURLChecker(self, accounturl)
1785 p.registerChecker(c)
1786 if not accountfile and not accounturl:
1787 # we could leave this anonymous, with just the /uri/CAP form
1788 raise NeedRootcapLookupScheme("must provide an account file or URL")
1790 pubkey = keys.Key.fromFile(pubkey_file)
1791 privkey = keys.Key.fromFile(privkey_file)
1792 class SSHFactory(factory.SSHFactory):
1793 publicKeys = {pubkey.sshType(): pubkey}
1794 privateKeys = {privkey.sshType(): privkey}
1795 def getPrimes(self):
1797 # if present, this enables diffie-hellman-group-exchange
1798 return primes.parseModuliFile("/etc/ssh/moduli")
# (The factory instantiation and error handling, original 1799-1804, are
# missing from this excerpt; "f" below is presumably the SSHFactory instance.)
1805 s = strports.service(sftp_portstr, f)
1806 s.setServiceParent(self)