2 import os, tempfile, heapq, binascii, traceback, array, stat, struct
3 from stat import S_IFREG, S_IFDIR
4 from time import time, strftime, localtime
6 from zope.interface import implements
7 from twisted.python import components
8 from twisted.application import service, strports
9 from twisted.conch.ssh import factory, keys, session
10 from twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \
11 FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED, FX_PERMISSION_DENIED, FX_EOF, \
12 FX_BAD_MESSAGE, FX_FAILURE
13 from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, \
14 FXF_CREAT, FXF_TRUNC, FXF_EXCL
15 from twisted.conch.interfaces import ISFTPServer, ISFTPFile, IConchUser, ISession
16 from twisted.conch.avatar import ConchUser
17 from twisted.conch.openssh_compat import primes
18 from twisted.cred import portal
19 from twisted.internet.error import ProcessDone, ProcessTerminated
20 from twisted.python.failure import Failure
21 from twisted.internet.interfaces import ITransport
23 from twisted.internet import defer
24 from twisted.internet.interfaces import IFinishableConsumer
25 from foolscap.api import eventually
26 from allmydata.util import deferredutil
28 from allmydata.util.consumer import download_to_data
29 from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \
31 from allmydata.mutable.common import NotWriteableError
32 from allmydata.immutable.upload import FileHandle
34 from pycryptopp.cipher.aes import AES
# twisted.conch.ssh.filetransfer generates this warning, but not when it is imported,
# so the filter must be installed at module load time, before the server runs.
# NOTE(review): the `import warnings` line appears to be missing from this copy.
warnings.filterwarnings("ignore", category=DeprecationWarning,
    message="BaseException.message has been deprecated as of Python 2.6",
    module=".*filetransfer", append=True)
# Whether to route log messages through the foolscap-based allmydata logging
# framework (the normal case) or through minimal local stubs.
use_foolscap_logging = True

from allmydata.util.log import NOISY, OPERATIONAL, SCARY

if use_foolscap_logging:
    from allmydata.util.log import msg as logmsg, err as logerr, PrefixingLogMixin
else:  # pragma: no cover
    # NOTE(review): this fallback branch was damaged in this copy (the `else:`
    # line and the stub bodies were lost); the stubs below are minimal
    # reconstructions that preserve only the call signatures.
    def logmsg(s, level=None):
        pass
    def logerr(s, level=None):
        pass
    class PrefixingLogMixin:
        def __init__(self, facility=None):
            pass
        def log(self, s, level=None):
            pass
# NOTE(review): the `if noisy:`/`else:` wrapper below, and the `try:` /
# `d.callback(res)` / `d.errback(err)` lines inside the debug variants, were
# lost in this copy and have been reconstructed. Without the wrapper, the
# simple definitions would silently shadow the debug ones.
if noisy:
    def eventually_callback(d):
        """Return a function firing d.callback(res) on a later reactor turn,
        logging the creation stack if the callback itself fails."""
        s = traceback.format_stack()
        def _cb(res):
            try:
                if noisy: logmsg("CALLBACK %r" % (d,), level=NOISY)
                d.callback(res)
            except: # pragma: no cover
                logerr("Failed to callback %r with %r\n"
                       "Original stack:\n!%s" %
                       (d, res, '!'.join(s)), level=SCARY)
        return lambda res: eventually(_cb, res)

    def eventually_errback(d):
        """Return a function firing d.errback(err) on a later reactor turn,
        logging the creation stack if the errback itself fails."""
        s = traceback.format_stack()
        def _eb(err):
            try:
                if noisy: logmsg("ERRBACK %r %r" % (d, err), level=NOISY)
                d.errback(err)
            except: # pragma: no cover
                logerr("Failed to errback %r with %r\n"
                       "Original stack:\n!%s" %
                       (d, err, '!'.join(s)), level=SCARY)
        return lambda err: eventually(_eb, err)
else:
    def eventually_callback(d):
        """Return a function firing d.callback(res) on a later reactor turn."""
        return lambda res: eventually(d.callback, res)

    def eventually_errback(d):
        """Return a function firing d.errback(err) on a later reactor turn."""
        return lambda err: eventually(d.errback, err)
def _convert_error(res, request):
    """Log the outcome of request; pass successes through and convert any
    Failure into an SFTPError with an anonymity-safe message.
    NOTE(review): the `logged_res = res`, `return res`, `err = res` and
    `raise err` lines were lost in this copy and have been restored; each is
    required by the uses that survive below."""
    if not isinstance(res, Failure):
        logged_res = res
        if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
        logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)
        return res

    err = res
    logmsg("RAISE %r %r" % (request, err,), level=OPERATIONAL)
    if noisy and not use_foolscap_logging: traceback.print_exc(err)

    # The message argument to SFTPError must not reveal information that
    # might compromise anonymity.

    if err.check(SFTPError):
        # original raiser of SFTPError has responsibility to ensure anonymity
        raise err
    if err.check(NoSuchChildError):
        childname = err.value.args[0].encode('utf-8')
        raise SFTPError(FX_NO_SUCH_FILE, childname)
    if err.check(NotWriteableError):
        msg = err.value.args[0].encode('utf-8')
        raise SFTPError(FX_PERMISSION_DENIED, msg)
    if err.check(ExistingChildError):
        # Versions of SFTP after v3 (which is what twisted.conch implements)
        # define a specific error code for this case: FX_FILE_ALREADY_EXISTS.
        # However v3 doesn't; instead, other servers such as sshd return
        # FX_FAILURE. The gvfs SFTP backend, for example, depends on this
        # to translate the error to the equivalent of POSIX EEXIST, which is
        # necessary for some picky programs (such as gedit).
        msg = err.value.args[0].encode('utf-8')
        raise SFTPError(FX_FAILURE, msg)
    if err.check(NotImplementedError):
        raise SFTPError(FX_OP_UNSUPPORTED, str(err.value))
    if err.check(EOFError):
        raise SFTPError(FX_EOF, "end of file reached")
    if err.check(defer.FirstError):
        _convert_error(err.value.subFailure, request)

    # We assume that the error message is not anonymity-sensitive.
    raise SFTPError(FX_FAILURE, str(err.value))
def _repr_flags(flags):
    """Render an SFTP open-flags bitmask as a human-readable string such as
    "FXF_READ|FXF_CREAT" (empty string if no known flags are set).
    NOTE(review): the closing `if f])` of the comprehension was lost in this
    copy and has been restored (unset flags produce None and are filtered out)."""
    return "|".join([f for f in
                     [(flags & FXF_READ) and "FXF_READ" or None,
                      (flags & FXF_WRITE) and "FXF_WRITE" or None,
                      (flags & FXF_APPEND) and "FXF_APPEND" or None,
                      (flags & FXF_CREAT) and "FXF_CREAT" or None,
                      (flags & FXF_TRUNC) and "FXF_TRUNC" or None,
                      (flags & FXF_EXCL) and "FXF_EXCL" or None]
                     if f])
def _lsLine(name, attrs):
    """Build one 'ls -l'-style long-listing line for name from an SFTP attrs dict.
    NOTE(review): several lines of this function were lost in this copy — e.g.
    the assignments initialising `mode`, `l` and `st_nlink`, an `else:` in the
    date formatting, and the final return; the surviving lines are kept as-is
    with the suspected gaps marked."""
    st_mtime = attrs.get("mtime", 0)
    st_mode = attrs["permissions"]
    # TODO: check that clients are okay with this being a "?".
    # (They should be, because the longname is intended for human
    # consumption only.)
    st_size = attrs.get("size", "?")
    # We don't know how many links there really are to this object.

    # From <http://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
    # We can't call the version in Twisted because we might have a version earlier than
    # <http://twistedmatrix.com/trac/changeset/25412> (released in Twisted 8.2).
    # NOTE(review): the assignment `mode = st_mode` relied on below appears to
    # be missing in this copy.
    perms = array.array('c', '-'*10)
    ft = stat.S_IFMT(mode)
    # file-type character
    if stat.S_ISDIR(ft): perms[0] = 'd'
    elif stat.S_ISCHR(ft): perms[0] = 'c'
    elif stat.S_ISBLK(ft): perms[0] = 'b'
    elif stat.S_ISREG(ft): perms[0] = '-'
    elif stat.S_ISFIFO(ft): perms[0] = 'f'
    elif stat.S_ISLNK(ft): perms[0] = 'l'
    elif stat.S_ISSOCK(ft): perms[0] = 's'
    # user permission bits
    if mode&stat.S_IRUSR: perms[1] = 'r'
    if mode&stat.S_IWUSR: perms[2] = 'w'
    if mode&stat.S_IXUSR: perms[3] = 'x'
    # group permission bits
    if mode&stat.S_IRGRP: perms[4] = 'r'
    if mode&stat.S_IWGRP: perms[5] = 'w'
    if mode&stat.S_IXGRP: perms[6] = 'x'
    # other permission bits
    if mode&stat.S_IROTH: perms[7] = 'r'
    if mode&stat.S_IWOTH: perms[8] = 'w'
    if mode&stat.S_IXOTH: perms[9] = 'x'
    # suid/sgid never set

    # NOTE(review): the lines that initialise `l` from the perms string and
    # uid/gid/size fields appear to be missing here; `l` and `st_nlink` are
    # never assigned in this copy.
    l += str(st_nlink).rjust(5) + ' '
    # Like GNU ls, show the year instead of hour:minute for old files.
    sixmo = 60 * 60 * 24 * 7 * 26
    if st_mtime + sixmo < time(): # last edited more than 6mo ago
        l += strftime("%b %d %Y ", localtime(st_mtime))
    # NOTE(review): an `else:` line appears to be missing here; as written,
    # both strftime branches would run for old files.
    l += strftime("%b %d %H:%M ", localtime(st_mtime))
211 def _is_readonly(parent_readonly, child):
212 """Whether child should be treated as having read-only permissions when listed
215 if child.is_unknown():
217 elif child.is_mutable():
218 return child.is_readonly()
220 return parent_readonly
223 def _populate_attrs(childnode, metadata, size=None):
226 # The permissions must have the S_IFDIR (040000) or S_IFREG (0100000)
227 # bits, otherwise the client may refuse to open a directory.
228 # Also, sshfs run as a non-root user requires files and directories
229 # to be world-readable/writeable.
231 # Directories and unknown nodes have no size, and SFTP doesn't
232 # require us to make one up.
234 # childnode might be None, meaning that the file doesn't exist yet,
235 # but we're going to write it later.
237 if childnode and childnode.is_unknown():
239 elif childnode and IDirectoryNode.providedBy(childnode):
240 perms = S_IFDIR | 0777
242 # For files, omit the size if we don't immediately know it.
243 if childnode and size is None:
244 size = childnode.get_size()
246 assert isinstance(size, (int, long)) and not isinstance(size, bool), repr(size)
248 perms = S_IFREG | 0666
251 assert 'readonly' in metadata, metadata
252 if metadata['readonly']:
253 perms &= S_IFDIR | S_IFREG | 0555 # clear 'w' bits
255 # see webapi.txt for what these times mean
256 if 'linkmotime' in metadata.get('tahoe', {}):
257 attrs['mtime'] = int(metadata['tahoe']['linkmotime'])
258 elif 'mtime' in metadata:
259 # We would prefer to omit atime, but SFTP version 3 can only
260 # accept mtime if atime is also set.
261 attrs['mtime'] = int(metadata['mtime'])
262 attrs['atime'] = attrs['mtime']
264 if 'linkcrtime' in metadata.get('tahoe', {}):
265 attrs['createtime'] = int(metadata['tahoe']['linkcrtime'])
267 if 'ctime' in metadata:
268 attrs['ctime'] = int(metadata['ctime'])
270 attrs['permissions'] = perms
272 # twisted.conch.ssh.filetransfer only implements SFTP version 3,
273 # which doesn't include SSH_FILEXFER_ATTR_FLAGS.
class EncryptedTemporaryFile(PrefixingLogMixin):
    """A file-like wrapper around a TemporaryFile whose on-disk contents are
    encrypted with AES under a fresh random 16-byte key, keyed by file offset
    (the IV is derived from the 16-byte-block index of the offset).
    NOTE(review): the `def __init__(self):` and `def tell(self):` headers and
    the `return` lines of tell()/read() were lost in this copy and have been
    restored; some methods (e.g. close/flush) may also be missing entirely."""
    # not implemented: next, readline, readlines, xreadlines, writelines

    def __init__(self):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        self.file = tempfile.TemporaryFile()
        self.key = os.urandom(16) # AES-128

    def _crypt(self, offset, data):
        """Encrypt/decrypt data as it would appear at the given file offset
        (the cipher is symmetric, so the same operation does both)."""
        # FIXME: use random-access AES (pycryptopp ticket #18)
        offset_big = offset // 16
        offset_small = offset % 16
        iv = binascii.unhexlify("%032x" % offset_big)
        cipher = AES(self.key, iv=iv)
        # discard keystream up to the intra-block offset
        cipher.process("\x00"*offset_small)
        return cipher.process(data)

    def seek(self, offset, whence=os.SEEK_SET):
        if noisy: self.log(".seek(%r, %r)" % (offset, whence), level=NOISY)
        self.file.seek(offset, whence)

    def tell(self):
        offset = self.file.tell()
        if noisy: self.log(".tell() = %r" % (offset,), level=NOISY)
        return offset

    def read(self, size=-1):
        """Read up to size bytes from the current position, decrypting them."""
        if noisy: self.log(".read(%r)" % (size,), level=NOISY)
        index = self.file.tell()
        ciphertext = self.file.read(size)
        plaintext = self._crypt(index, ciphertext)
        return plaintext

    def write(self, plaintext):
        """Encrypt plaintext for the current position and write it."""
        if noisy: self.log(".write(<data of length %r>)" % (len(plaintext),), level=NOISY)
        index = self.file.tell()
        ciphertext = self._crypt(index, plaintext)
        self.file.write(ciphertext)

    def truncate(self, newsize):
        if noisy: self.log(".truncate(%r)" % (newsize,), level=NOISY)
        self.file.truncate(newsize)
class OverwriteableFileConsumer(PrefixingLogMixin):
    implements(IFinishableConsumer)
    """I act both as a consumer for the download of the original file contents, and as a
    wrapper for a temporary file that records the downloaded data and any overwrites.
    I use a priority queue to keep track of which regions of the file have been overwritten
    but not yet downloaded, so that the download does not clobber overwritten data.
    I use another priority queue to record milestones at which to make callbacks
    indicating that a given number of bytes have been downloaded.

    The temporary file reflects the contents of the file that I represent, except that:
     - regions that have neither been downloaded nor overwritten, if present,
       hold undefined contents;
     - the temporary file may be shorter than the represented file (it is never longer).
       The latter's current size is stored in self.current_size.

    This abstraction is mostly independent of SFTP. Consider moving it, if it is found
    useful for other frontends.

    NOTE(review): this copy of the class has lost a number of lines (the
    `self.downloaded = 0` init, several `break`/`return` statements, and the
    `def finish`/`def _reached` headers among others); each suspected gap is
    marked in place below rather than silently reconstructed."""

    def __init__(self, check_abort, download_size, tempfile_maker):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        if noisy: self.log(".__init__(%r, %r, %r)" % (check_abort, download_size, tempfile_maker), level=NOISY)
        self.check_abort = check_abort
        self.download_size = download_size
        self.current_size = download_size
        self.f = tempfile_maker()
        # NOTE(review): the initialisation of self.downloaded (presumably to 0)
        # appears to be missing here; later code relies on it.
        self.milestones = [] # empty heap of (offset, d)
        self.overwrites = [] # empty heap of (start, end)
        self.done = self.when_reached(download_size) # adds a milestone

        def _signal_done(ign):
            if noisy: self.log("DONE", level=NOISY)
        self.done.addCallback(_signal_done)

    def get_current_size(self):
        """Return the represented file's current size in bytes."""
        return self.current_size

    def set_current_size(self, size):
        """Truncate or extend the represented file to the given size, trimming
        the expected download size if necessary."""
        if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" %
                           (size, self.current_size, self.downloaded), level=NOISY)
        if size < self.current_size or size < self.downloaded:
            self.f.truncate(size)
        self.current_size = size
        if size < self.download_size:
            self.download_size = size
        if self.downloaded >= self.download_size:
            # NOTE(review): the body of this branch (presumably finishing the
            # download early) appears to be missing in this copy.

    def registerProducer(self, p, streaming):
        if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)
        # NOTE(review): the body of this method (storing the producer and
        # starting it) appears to be missing in this copy apart from the
        # comment below.
        # call resumeProducing once to start things off

    def write(self, data):
        """IConsumer hook: receive the next chunk of downloaded data, writing
        it to the temporary file except where it has been overwritten."""
        if noisy: self.log(".write(<data of length %r>)" % (len(data),), level=NOISY)
        if self.check_abort():
            # NOTE(review): the abort-handling body appears to be missing here.

        if self.downloaded >= self.download_size:
            # NOTE(review): an early `return` appears to be missing here.

        next_downloaded = self.downloaded + len(data)
        if next_downloaded > self.download_size:
            data = data[:(self.download_size - self.downloaded)]

        while len(self.overwrites) > 0:
            (start, end) = self.overwrites[0]
            if start >= next_downloaded:
                # This and all remaining overwrites are after the data we just downloaded.
                # NOTE(review): a `break` appears to be missing here.
            if start > self.downloaded:
                # The data we just downloaded has been partially overwritten.
                # Write the prefix of it that precedes the overwritten region.
                self.f.seek(self.downloaded)
                self.f.write(data[:(start - self.downloaded)])

            # This merges consecutive overwrites if possible, which allows us to detect the
            # case where the download can be stopped early because the remaining region
            # to download has already been fully overwritten.
            heapq.heappop(self.overwrites)
            while len(self.overwrites) > 0:
                (start1, end1) = self.overwrites[0]
                # NOTE(review): the merge step (extending `end` to cover the
                # adjacent overwrite, with a `break` when disjoint) appears to
                # be missing here.
                heapq.heappop(self.overwrites)

            if end >= next_downloaded:
                # This overwrite extends past the downloaded data, so there is no
                # more data to consider on this call.
                heapq.heappush(self.overwrites, (next_downloaded, end))
                self._update_downloaded(next_downloaded)
                # NOTE(review): a `return` appears to be missing here.
            elif end >= self.downloaded:
                data = data[(end - self.downloaded):]
                self._update_downloaded(end)

        self.f.seek(self.downloaded)
        # NOTE(review): the `self.f.write(data)` for the remaining suffix
        # appears to be missing here.
        self._update_downloaded(next_downloaded)

    def _update_downloaded(self, new_downloaded):
        """Advance the downloaded-bytes watermark and fire any milestones it
        (or a contiguous overwrite extending it) has reached."""
        self.downloaded = new_downloaded
        milestone = new_downloaded
        if len(self.overwrites) > 0:
            (start, end) = self.overwrites[0]
            if start <= new_downloaded and end > milestone:
                # NOTE(review): `milestone = end` appears to be missing here.

        while len(self.milestones) > 0:
            (next, d) = self.milestones[0]
            # NOTE(review): the `if next > milestone: break` guard appears to
            # be missing here.
            if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
            heapq.heappop(self.milestones)
            eventually_callback(d)(None)

        if milestone >= self.download_size:
            # NOTE(review): the call that finishes the download appears to be
            # missing here.

    def overwrite(self, offset, data):
        """Record a client write at offset, which takes precedence over any
        data later downloaded for the same region."""
        if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
        if offset > self.download_size and offset > self.current_size:
            # Normally writing at an offset beyond the current end-of-file
            # would leave a hole that appears filled with zeroes. However, an
            # EncryptedTemporaryFile doesn't behave like that (if there is a
            # hole in the file on disk, the zeroes that are read back will be
            # XORed with the keystream). So we must explicitly write zeroes in
            # the gap between the current EOF and the offset.
            self.f.seek(self.current_size)
            self.f.write("\x00" * (offset - self.current_size))
        # NOTE(review): the seek/write of `data` at `offset` appears to be
        # missing here.
        end = offset + len(data)
        self.current_size = max(self.current_size, end)
        if end > self.downloaded:
            heapq.heappush(self.overwrites, (offset, end))

    def read(self, offset, length):
        """When the data has been read, callback the Deferred that we return with this data.
        Otherwise errback the Deferred that we return.
        The caller must perform no more overwrites until the Deferred has fired."""
        if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
        if offset >= self.current_size:
            def _eof(): raise EOFError("read past end of file")
            return defer.execute(_eof)

        if offset + length > self.current_size:
            length = self.current_size - offset
            if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY)

        needed = min(offset + length, self.download_size)
        d = self.when_reached(needed)
        # NOTE(review): the `def _reached(ign):` header for the callback whose
        # body follows appears to be missing here, as does a
        # `self.f.seek(offset)` before the read.
        # It is not necessarily the case that self.downloaded >= needed, because
        # the file might have been truncated (thus truncating the download) and
        # then extended.
        assert self.current_size >= offset + length, (self.current_size, offset, length)
        if noisy: self.log("self.f = %r" % (self.f,), level=NOISY)
        return self.f.read(length)
        d.addCallback(_reached)
        # NOTE(review): the final `return d` appears to be missing here.

    def when_reached(self, index):
        """Return a Deferred that fires when `index` bytes have been downloaded
        (immediately if they already have been)."""
        if noisy: self.log(".when_reached(%r)" % (index,), level=NOISY)
        if index <= self.downloaded: # already reached
            if noisy: self.log("already reached %r" % (index,), level=NOISY)
            return defer.succeed(None)
        # NOTE(review): creation of the Deferred `d` and the
        # `def _reached(ign):` header appear to be missing here.
        if noisy: self.log("reached %r" % (index,), level=NOISY)
        d.addCallback(_reached)
        heapq.heappush(self.milestones, (index, d))
        # NOTE(review): a `return d` appears to be missing here.

    # NOTE(review): the `def finish(self):` header (and probably a
    # `when_done()` accessor returning self.done — it is called elsewhere in
    # this file) appear to be missing here.
        while len(self.milestones) > 0:
            (next, d) = self.milestones[0]
            if noisy: self.log("MILESTONE FINISH %r %r" % (next, d), level=NOISY)
            heapq.heappop(self.milestones)
            # The callback means that the milestone has been reached if
            # it is ever going to be. Note that the file may have been
            # truncated to before the milestone.
            eventually_callback(d)(None)

        # FIXME: causes spurious failures
        #self.unregisterProducer()

    def unregisterProducer(self):
        # NOTE(review): a guard checking that self.producer is set appears to
        # be missing here.
        self.producer.stopProducing()
549 SIZE_THRESHOLD = 1000
def _make_sftp_file(close_notify, check_abort, flags, convergence, parent=None, childname=None, filenode=None, metadata=None):
    """Factory for ISFTPFile handles: small immutable files opened read-only
    get the in-memory ShortReadOnlySFTPFile; everything else gets the
    temp-file-backed GeneralSFTPFile."""
    if noisy: logmsg("_make_sftp_file(%r, %r, %r, <convergence censored>, parent=%r, childname=%r, filenode=%r, metadata=%r" %
                     (close_notify, check_abort, flags, parent, childname, filenode, metadata), level=NOISY)

    assert metadata is None or 'readonly' in metadata, metadata

    # Short-circuit order matters: only consult the filenode's mutability and
    # size when the open is read-only and a filenode is present.
    use_short_handle = (not (flags & (FXF_WRITE | FXF_CREAT)) and (flags & FXF_READ) and filenode and
                        not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD)
    if use_short_handle:
        return ShortReadOnlySFTPFile(filenode, metadata)

    return GeneralSFTPFile(close_notify, check_abort, flags, convergence,
                           parent=parent, childname=childname, filenode=filenode, metadata=metadata)
564 class ShortReadOnlySFTPFile(PrefixingLogMixin):
565 implements(ISFTPFile)
566 """I represent a file handle to a particular file on an SFTP connection.
567 I am used only for short immutable files opened in read-only mode.
568 The file contents are downloaded to memory when I am created."""
570 def __init__(self, filenode, metadata):
571 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
572 if noisy: self.log(".__init__(%r, %r)" % (filenode, metadata), level=NOISY)
574 assert IFileNode.providedBy(filenode), filenode
575 self.filenode = filenode
576 self.metadata = metadata
577 self.async = download_to_data(filenode)
580 def readChunk(self, offset, length):
581 request = ".readChunk(%r, %r)" % (offset, length)
582 self.log(request, level=OPERATIONAL)
585 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
586 return defer.execute(_closed)
590 if noisy: self.log("_read(%r) in readChunk(%r, %r)" % (data, offset, length), level=NOISY)
592 # "In response to this request, the server will read as many bytes as it
593 # can from the file (up to 'len'), and return them in a SSH_FXP_DATA
594 # message. If an error occurs or EOF is encountered before reading any
595 # data, the server will respond with SSH_FXP_STATUS. For normal disk
596 # files, it is guaranteed that this will read the specified number of
597 # bytes, or up to end of file."
599 # i.e. we respond with an EOF error iff offset is already at EOF.
601 if offset >= len(data):
602 eventually_errback(d)(SFTPError(FX_EOF, "read at or past end of file"))
604 eventually_callback(d)(data[offset:min(offset+length, len(data))])
606 self.async.addCallbacks(_read, eventually_errback(d))
607 d.addBoth(_convert_error, request)
610 def writeChunk(self, offset, data):
611 self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
613 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
614 return defer.execute(_denied)
617 self.log(".close()", level=OPERATIONAL)
620 return defer.succeed(None)
623 request = ".getAttrs()"
624 self.log(request, level=OPERATIONAL)
627 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
628 return defer.execute(_closed)
630 d = defer.execute(_populate_attrs, self.filenode, self.metadata)
631 d.addBoth(_convert_error, request)
634 def setAttrs(self, attrs):
635 self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
636 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
637 return defer.execute(_denied)
class GeneralSFTPFile(PrefixingLogMixin):
    implements(ISFTPFile)
    """I represent a file handle to a particular file on an SFTP connection.
    I wrap an instance of OverwriteableFileConsumer, which is responsible for
    storing the file contents. In order to allow write requests to be satisfied
    immediately, there is effectively a FIFO queue between requests made to this
    file handle, and requests to my OverwriteableFileConsumer. This queue is
    implemented by the callback chain of self.async.

    NOTE(review): this copy of the class has lost a number of lines
    (assignments such as self.flags/self.parent/self.closed, several `else:`
    lines and `def` headers for inner callbacks, and the trailing `return d`
    statements); each suspected gap is marked in place below."""

    def __init__(self, close_notify, check_abort, flags, convergence, parent=None, childname=None, filenode=None, metadata=None):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        if noisy: self.log(".__init__(%r, %r, %r, <convergence censored>, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
                           (close_notify, check_abort, flags, parent, childname, filenode, metadata), level=NOISY)

        self.close_notify = close_notify
        self.check_abort = check_abort
        # NOTE(review): an assignment of self.flags (used by every method
        # below) appears to be missing here.
        self.convergence = convergence
        # NOTE(review): an assignment of self.parent (used in close) appears
        # to be missing here.
        self.childname = childname
        self.filenode = filenode
        self.metadata = metadata
        # FIFO queue of pending operations, chained on this Deferred.
        self.async = defer.succeed(None)

        # self.consumer should only be relied on in callbacks for self.async, since it might
        # not be set before then.
        tempfile_maker = EncryptedTemporaryFile

        if (flags & FXF_TRUNC) or not filenode:
            # We're either truncating or creating the file, so we don't need the old contents.
            self.consumer = OverwriteableFileConsumer(self.check_abort, 0, tempfile_maker)
            self.consumer.finish()
        # NOTE(review): an `else:` introducing the download-existing-contents
        # path appears to be missing here.
            assert IFileNode.providedBy(filenode), filenode

            # TODO: use download interface described in #993 when implemented.
            if filenode.is_mutable():
                self.async.addCallback(lambda ign: filenode.download_best_version())
                def _downloaded(data):
                    self.consumer = OverwriteableFileConsumer(self.check_abort, len(data), tempfile_maker)
                    self.consumer.write(data)
                    self.consumer.finish()
                self.async.addCallback(_downloaded)
            # NOTE(review): an `else:` for the immutable case appears to be
            # missing here.
                download_size = filenode.get_size()
                assert download_size is not None, "download_size is None"
                self.consumer = OverwriteableFileConsumer(self.check_abort, download_size, tempfile_maker)
                # NOTE(review): a `def _read(ign):` header appears to be
                # missing here.
                if noisy: self.log("_read immutable", level=NOISY)
                filenode.read(self.consumer, 0, None)
                self.async.addCallback(_read)

        if noisy: logmsg("__init__ done", level=NOISY)

    def readChunk(self, offset, length):
        request = ".readChunk(%r, %r)" % (offset, length)
        self.log(request, level=OPERATIONAL)

        if not (self.flags & FXF_READ):
            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
            return defer.execute(_denied)

        # NOTE(review): an `if self.closed:` guard for the two lines below
        # appears to be missing here.
            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
            return defer.execute(_closed)

        # NOTE(review): creation of Deferred `d` and a `def _read(ign):`
        # header for the body below appear to be missing here.
            if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY)
            d2 = self.consumer.read(offset, length)
            d2.addErrback(_convert_error, request)
            d2.addCallbacks(eventually_callback(d), eventually_errback(d))
            # It is correct to drop d2 here.
        self.async.addCallbacks(_read, eventually_errback(d))
        d.addBoth(_convert_error, request)
        # NOTE(review): a `return d` appears to be missing here.

    def writeChunk(self, offset, data):
        self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)

        if not (self.flags & FXF_WRITE):
            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
            return defer.execute(_denied)

        # NOTE(review): an `if self.closed:` guard for the two lines below
        # appears to be missing here.
            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
            return defer.execute(_closed)

        # Note that we return without waiting for the write to occur. Reads and
        # close wait for prior writes, and will fail if any prior operation failed.
        # This is ok because SFTP makes no guarantee that the request completes
        # before the write. In fact it explicitly allows write errors to be delayed
        # until close:
        #   "One should note that on some server platforms even a close can fail.
        #    This can happen e.g. if the server operating system caches writes,
        #    and an error occurs while flushing cached writes during the close."

        # NOTE(review): a `def _write(ign):` header for the body below appears
        # to be missing here.
            if noisy: self.log("_write in .writeChunk(%r, <data of length %r>), current_size = %r" %
                               (offset, len(data), self.consumer.get_current_size()), level=NOISY)
            # FXF_APPEND means that we should always write at the current end of file.
            write_offset = offset
            if self.flags & FXF_APPEND:
                write_offset = self.consumer.get_current_size()

            self.consumer.overwrite(write_offset, data)
            if noisy: self.log("overwrite done", level=NOISY)
        self.async.addCallback(_write)
        # don't addErrback to self.async, just allow subsequent async ops to fail.
        return defer.succeed(None)

    # NOTE(review): the `def close(self):` header and the
    # `request = ".close()"` assignment appear to be missing here.
        self.log(request, level=OPERATIONAL)

        # NOTE(review): an `if self.closed:` guard appears to be missing here.
            return defer.succeed(None)

        # This means that close has been called, not that the close has succeeded.
        # NOTE(review): the `self.closed = True` assignment and notification of
        # self.close_notify appear to be missing here.

        if not (self.flags & (FXF_WRITE | FXF_CREAT)):
            return defer.execute(self.consumer.close)

        # NOTE(review): a `def _close(ign):` header for the body below appears
        # to be missing here.
            d2 = self.consumer.when_done()
            if self.filenode and self.filenode.is_mutable():
                d2.addCallback(lambda ign: self.consumer.get_current_size())
                d2.addCallback(lambda size: self.consumer.read(0, size))
                d2.addCallback(lambda new_contents: self.filenode.overwrite(new_contents))
            elif (self.flags & FXF_EXCL) and self.consumer.get_current_size() == 0:
                # The file will already have been written by the open call, so we can
                # optimize out the extra directory write (useful for zero-length lockfiles).
                # NOTE(review): this branch's body, an `else:`, and the
                # `def _add_file(ign):` header for the upload path below appear
                # to be missing here.
                self.log("_add_file childname=%r" % (self.childname,), level=OPERATIONAL)
                u = FileHandle(self.consumer.get_file(), self.convergence)
                return self.parent.add_file(self.childname, u)
            d2.addCallback(_add_file)

            d2.addCallback(lambda ign: self.consumer.close())
        self.async.addCallback(_close)

        # NOTE(review): creation of Deferred `d` appears to be missing here.
        self.async.addCallbacks(eventually_callback(d), eventually_errback(d))
        d.addBoth(_convert_error, request)
        # NOTE(review): a `return d` appears to be missing here.

    # NOTE(review): the `def getAttrs(self):` header appears to be missing here.
        request = ".getAttrs()"
        self.log(request, level=OPERATIONAL)

        # NOTE(review): an `if self.closed:` guard appears to be missing here.
            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
            return defer.execute(_closed)

        # Optimization for read-only handles, when we already know the metadata.
        if not(self.flags & (FXF_WRITE | FXF_CREAT)) and self.metadata and self.filenode and not self.filenode.is_mutable():
            return defer.succeed(_populate_attrs(self.filenode, self.metadata))

        # NOTE(review): creation of Deferred `d` and a `def _get(ign):` header
        # for the body below appear to be missing here.
            # self.filenode might be None, but that's ok.
            attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size())
            eventually_callback(d)(attrs)
        self.async.addCallbacks(_get, eventually_errback(d))
        d.addBoth(_convert_error, request)
        # NOTE(review): a `return d` appears to be missing here.

    def setAttrs(self, attrs):
        request = ".setAttrs(attrs) %r" % (attrs,)
        self.log(request, level=OPERATIONAL)

        if not (self.flags & FXF_WRITE):
            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
            return defer.execute(_denied)

        # NOTE(review): an `if self.closed:` guard appears to be missing here.
            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
            return defer.execute(_closed)

        if not "size" in attrs:
            return defer.succeed(None)
        # NOTE(review): the `size = attrs["size"]` assignment appears to be
        # missing here.
        if not isinstance(size, (int, long)) or size < 0:
            def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
            return defer.execute(_bad)

        # NOTE(review): creation of Deferred `d` and a `def _resize(ign):`
        # header for the body below appear to be missing here.
            self.consumer.set_current_size(size)
            eventually_callback(d)(None)
        self.async.addCallbacks(_resize, eventually_errback(d))
        d.addBoth(_convert_error, request)
        # NOTE(review): a `return d` appears to be missing here.
853 def __init__(self, items):
863 def __init__(self, value):
867 global_open_files = {}
869 class SFTPUserHandler(ConchUser, PrefixingLogMixin):
870 implements(ISFTPServer)
    def __init__(self, client, rootnode, username):
        """One Conch avatar per authenticated SFTP user, serving the SFTP
        subsystem rooted at the user's rootnode."""
        ConchUser.__init__(self)
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        if noisy: self.log(".__init__(%r, %r, %r)" % (client, rootnode, username), level=NOISY)

        # Register the SSH session channel and SFTP subsystem with Conch.
        self.channelLookup["session"] = session.SSHSession
        self.subsystemLookup["sftp"] = FileTransferServer

        self._client = client
        self._root = rootnode
        self._username = username
        # Convergence secret, used when uploading new file contents.
        self._convergence = client.convergence
        self._logged_out = False
        # Per-user map of canonical path -> number of open handles.
        self._open_files = {}
886 def add_open_file(self, canonpath):
887 if canonpath in self._open_files:
888 count = self._open_files[canonpath]
889 self._open_files[canonpath] = count+1
891 self._open_files[canonpath] = 1
893 if canonpath in global_open_files:
894 (gcount, times) = global_open_files[canonpath]
895 global_open_files[canonpath] = (gcount+1, times)
897 global_open_files[canonpath] = (1, time())
899 def remove_open_file(self, canonpath):
900 if not self._logged_out:
901 assert canonpath in self._open_files, (canonpath, self._open_files)
902 count = self._open_files[canonpath]
904 self._open_files[canonpath] = count-1
906 del self._open_files[canonpath]
908 assert canonpath in global_open_files, (canonpath, global_open_files)
909 (gcount, times) = global_open_files[canonpath]
911 global_open_files[canonpath] = (gcount-1, times)
913 del global_open_files[canonpath]
916 if not self._logged_out:
917 self._logged_out = True
918 for canonpath in self._open_files:
919 assert canonpath in global_open_files, (canonpath, global_open_files)
920 count = self._open_files[canonpath]
921 (gcount, times) = global_open_files[canonpath]
923 global_open_files[canonpath] = (gcount - count, times)
925 del global_open_files[canonpath]
    def check_abort(self):
        """Return True once this user has logged out; passed to file handles,
        which poll it to abort in-progress work."""
        return self._logged_out
930 def gotVersion(self, otherVersion, extData):
931 self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL)
# ISFTPServer.openFile: resolve *pathstring* to a Tahoe file node and return
# an object implementing ISFTPFile (via _make_sftp_file). Handles both the
# /uri/FILECAP form (no parent directory) and the normal parent+childname form.
# NOTE(review): the embedded original line numbers are non-contiguous; several
# statements (e.g. the empty-path guard before line 955, the FXF_EXCL
# pre-checks before line 1000, and the trailing `return` lines) are elided
# from this listing.
934 def openFile(self, pathstring, flags, attrs):
935 request = ".openFile(%r, %r = %r, %r)" % (pathstring, flags, _repr_flags(flags), attrs)
936 self.log(request, level=OPERATIONAL)
938 # This is used for both reading and writing.
939 # First exclude invalid combinations of flags.
941 # /usr/bin/sftp 'get' gives us FXF_READ, while 'put' on a new file
942 # gives FXF_WRITE | FXF_CREAT | FXF_TRUNC. I'm guessing that 'put' on an
943 # existing file gives the same.
# Reject opens that request neither read nor write access.
945 if not (flags & (FXF_READ | FXF_WRITE)):
946 raise SFTPError(FX_BAD_MESSAGE,
947 "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
# Per the SFTP draft, FXF_EXCL is only meaningful together with FXF_CREAT.
949 if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
950 raise SFTPError(FX_BAD_MESSAGE,
951 "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
953 path = self._path_from_string(pathstring)
955 raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
# canonpath is the key used in the global_open_files bookkeeping (see getAttrs).
957 canonpath = u"/" + u"/".join(path)
959 # The combination of flags is potentially valid. Now there are two major cases:
961 # 1. The path is specified as /uri/FILECAP, with no parent directory.
962 # If the FILECAP is mutable and writeable, then we can open it in write-only
963 # or read/write mode (non-exclusively), otherwise we can only open it in
964 # read-only mode. The open should succeed immediately as long as FILECAP is
965 # a valid known filecap that grants the required permission.
967 # 2. The path is specified relative to a parent. We find the parent dirnode and
968 # get the child's URI and metadata if it exists. There are four subcases:
969 # a. the child does not exist: FXF_CREAT must be set, and we must be able
970 # to write to the parent directory.
971 # b. the child exists but is not a valid known filecap: fail
972 # c. the child is mutable: if we are trying to open it write-only or
973 # read/write, then we must be able to write to the file.
974 # d. the child is immutable: if we are trying to open it write-only or
975 # read/write, then we must be able to write to the parent directory.
977 # To reduce latency, open succeeds as soon as these conditions are met, even
978 # though there might be a failure in downloading the existing file or uploading
981 # Note that the permission checks below are for more precise error reporting on
982 # the open call; later operations would fail even if we did not make these checks.
984 d = self._get_root(path)
# Case analysis: if the remaining path is empty, `root` is itself the filecap
# (case 1); otherwise we descend to the parent directory (case 2).
985 def _got_root( (root, path) ):
986 if root.is_unknown():
987 raise SFTPError(FX_PERMISSION_DENIED,
988 "cannot open an unknown cap (or child of an unknown directory). "
989 "Upgrading the gateway to a later Tahoe-LAFS version may help")
992 if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
993 if not IFileNode.providedBy(root):
994 raise SFTPError(FX_PERMISSION_DENIED,
995 "cannot open a directory cap")
996 if (flags & FXF_WRITE) and root.is_readonly():
997 raise SFTPError(FX_PERMISSION_DENIED,
998 "cannot write to a non-writeable filecap without a parent directory")
# FXF_EXCL on a bare filecap cannot succeed: the cap already exists.
1000 raise SFTPError(FX_FAILURE,
1001 "cannot create a file exclusively when it already exists")
# No parent to update, so the close-callback is a no-op lambda.
1003 return _make_sftp_file(lambda: None, self.check_abort, flags, self._convergence, filenode=root)
1006 childname = path[-1]
1007 if noisy: self.log("case 2: root = %r, childname = %r, path[:-1] = %r" %
1008 (root, childname, path[:-1]), level=NOISY)
1009 d2 = root.get_child_at_path(path[:-1])
1010 def _got_parent(parent):
1011 if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
1012 if parent.is_unknown():
1013 raise SFTPError(FX_PERMISSION_DENIED,
1014 "cannot open an unknown cap (or child of an unknown directory). "
1015 "Upgrading the gateway to a later Tahoe-LAFS version may help")
1017 parent_readonly = parent.is_readonly()
1018 d3 = defer.succeed(None)
1019 if flags & FXF_EXCL:
1020 # FXF_EXCL means that the link to the file (not the file itself) must
1021 # be created atomically wrt updates by this storage client.
1022 # That is, we need to create the link before returning success to the
1023 # SFTP open request (and not just on close, as would normally be the
1024 # case). We make the link initially point to a zero-length LIT file,
1025 # which is consistent with what might happen on a POSIX filesystem.
1028 raise SFTPError(FX_FAILURE,
1029 "cannot create a file exclusively when the parent directory is read-only")
1031 # 'overwrite=False' ensures failure if the link already exists.
1032 # FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
# "URI:LIT:" with no payload is the cap for a zero-length immutable file.
1034 zero_length_lit = "URI:LIT:"
1035 if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
1036 (parent, zero_length_lit, childname), level=NOISY)
1037 d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit, overwrite=False))
1038 def _seturi_done(child):
1039 if noisy: self.log("%r.get_metadata_for(%r)" % (parent, childname), level=NOISY)
1040 d4 = parent.get_metadata_for(childname)
1041 d4.addCallback(lambda metadata: (child, metadata))
1043 d3.addCallback(_seturi_done)
# Non-exclusive open: fetch the existing child and its metadata in one call.
1045 if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY)
1046 d3.addCallback(lambda ign: parent.get_child_and_metadata(childname))
# Child exists: enforce the permission subcases (b)-(d) documented above.
1048 def _got_child( (filenode, metadata) ):
1049 if noisy: self.log("_got_child( (%r, %r) )" % (filenode, metadata), level=NOISY)
1051 if filenode.is_unknown():
1052 raise SFTPError(FX_PERMISSION_DENIED,
1053 "cannot open an unknown cap. Upgrading the gateway "
1054 "to a later Tahoe-LAFS version may help")
1055 if not IFileNode.providedBy(filenode):
1056 raise SFTPError(FX_PERMISSION_DENIED,
1057 "cannot open a directory as if it were a file")
1058 if (flags & FXF_WRITE) and filenode.is_mutable() and filenode.is_readonly():
1059 raise SFTPError(FX_PERMISSION_DENIED,
1060 "cannot open a read-only mutable file for writing")
1061 if (flags & FXF_WRITE) and parent_readonly:
1062 raise SFTPError(FX_PERMISSION_DENIED,
1063 "cannot open a file for writing when the parent directory is read-only")
1065 metadata['readonly'] = _is_readonly(parent_readonly, filenode)
1066 return _make_sftp_file(lambda: None, self.check_abort, flags, self._convergence, parent=parent,
1067 childname=childname, filenode=filenode, metadata=metadata)
# Child missing (errback path, subcase (a)): only valid with FXF_CREAT and a
# writeable parent. NOTE(review): the `def _no_child(f):` line itself is
# elided from this listing (numbering jumps 1067 -> 1069).
1069 if noisy: self.log("_no_child(%r)" % (f,), level=NOISY)
1070 f.trap(NoSuchChildError)
1072 if not (flags & FXF_CREAT):
1073 raise SFTPError(FX_NO_SUCH_FILE,
1074 "the file does not exist, and was not opened with the creation (CREAT) flag")
1076 raise SFTPError(FX_PERMISSION_DENIED,
1077 "cannot create a file when the parent directory is read-only")
# Track the newly-created file in the open-files table; the close-callback
# removes it again.
1079 file = _make_sftp_file(lambda: self.remove_open_file(canonpath),
1080 self.check_abort, flags, self._convergence, parent=parent,
1081 childname=childname)
1082 self.add_open_file(canonpath)
1084 d3.addCallbacks(_got_child, _no_child)
1087 d2.addCallback(_got_parent)
1090 d.addCallback(_got_root)
# _convert_error translates Tahoe exceptions into SFTPError codes and logs.
1091 d.addBoth(_convert_error, request)
# ISFTPServer.renameFile: atomically move a directory entry from one
# parent/childname to another via dirnode.move_child_to.
# NOTE(review): several original lines (including the final `return d`) are
# elided from this listing.
1094 def renameFile(self, oldpathstring, newpathstring):
1095 request = ".renameFile(%r, %r)" % (oldpathstring, newpathstring)
1096 self.log(request, level=OPERATIONAL)
1098 fromPath = self._path_from_string(oldpathstring)
1099 toPath = self._path_from_string(newpathstring)
1101 # the target directory must already exist
# Resolve source and destination (parent, childname) pairs in parallel.
1102 d = deferredutil.gatherResults([self._get_parent_or_node(fromPath),
1103 self._get_parent_or_node(toPath)])
1104 def _got( (fromPair, toPair) ):
1105 if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r)" %
1106 (fromPair, toPair, oldpathstring, newpathstring), level=NOISY)
1107 (fromParent, fromChildname) = fromPair
1108 (toParent, toChildname) = toPair
# A childname of None means the path was a bare /uri/CAP, which has no
# parent directory entry to rename.
1110 if fromChildname is None:
1111 raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
1112 if toChildname is None:
1113 raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
1115 # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
1116 # "It is an error if there already exists a file with the name specified
1118 # FIXME: use move_child_to_path to avoid possible data loss due to #943
1119 d = fromParent.move_child_to(fromChildname, toParent, toChildname, overwrite=False)
1120 #d = parent.move_child_to_path(fromChildname, toRoot, toPath[:-1],
1121 # toPath[-1], overwrite=False)
1124 d.addBoth(_convert_error, request)
# ISFTPServer.makeDirectory: create the directory (and any missing ancestor
# directories) named by *pathstring*, attaching metadata derived from *attrs*.
# NOTE(review): the trailing `return d` is elided from this listing.
1127 def makeDirectory(self, pathstring, attrs):
1128 request = ".makeDirectory(%r, %r)" % (pathstring, attrs)
1129 self.log(request, level=OPERATIONAL)
1131 path = self._path_from_string(pathstring)
1132 metadata = self._attrs_to_metadata(attrs)
1133 d = self._get_root(path)
# Recursively create each missing path component under the resolved root.
1134 d.addCallback(lambda (root, path):
1135 self._get_or_create_directories(root, path, metadata))
1136 d.addBoth(_convert_error, request)
# Walk *path* under *node*, creating each missing subdirectory; recurses on
# the remainder of the path. Fails with FX_FAILURE if a non-directory is in
# the way. NOTE(review): the base-case guard (original lines 1145-1146,
# presumably `if not path: ...` before line 1147) and the final `return d`
# are elided from this listing.
1139 def _get_or_create_directories(self, node, path, metadata):
1140 if not IDirectoryNode.providedBy(node):
1141 # TODO: provide the name of the blocking file in the error message.
1142 def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
1143 "is a file in the way") # close enough
1144 return defer.execute(_blocked)
1147 return defer.succeed(node)
1148 d = node.get(path[0])
# On NoSuchChildError, create the missing subdirectory; other failures
# propagate.
1149 def _maybe_create(f):
1150 f.trap(NoSuchChildError)
1151 return node.create_subdirectory(path[0])
1152 d.addErrback(_maybe_create)
1153 d.addCallback(self._get_or_create_directories, path[1:], metadata)
# ISFTPServer.removeFile: delete a file entry; refuses directories via the
# must_be_file check in _remove_object.
# NOTE(review): the trailing `return d` is elided from this listing.
1156 def removeFile(self, pathstring):
1157 request = ".removeFile(%r)" % (pathstring,)
1158 self.log(request, level=OPERATIONAL)
1160 path = self._path_from_string(pathstring)
1161 d = self._remove_object(path, must_be_file=True)
1162 d.addBoth(_convert_error, request)
# ISFTPServer.removeDirectory: delete a directory entry; refuses plain files
# via the must_be_directory check in _remove_object.
# NOTE(review): the trailing `return d` is elided from this listing.
1165 def removeDirectory(self, pathstring):
1166 request = ".removeDirectory(%r)" % (pathstring,)
1167 self.log(request, level=OPERATIONAL)
1169 path = self._path_from_string(pathstring)
1170 d = self._remove_object(path, must_be_directory=True)
1171 d.addBoth(_convert_error, request)
# Shared implementation of removeFile/removeDirectory: resolve the parent,
# type-check the child, then delete the directory entry.
# NOTE(review): some original lines (including `return d2` / `return d`) are
# elided from this listing.
1174 def _remove_object(self, path, must_be_directory=False, must_be_file=False):
1175 d = defer.maybeDeferred(self._get_parent_or_node, path)
1176 def _got_parent( (parent, childname) ):
1177 # FIXME (minor): there is a race condition between the 'get' and 'delete',
1178 # so it is possible that the must_be_directory or must_be_file restrictions
1179 # might not be enforced correctly if the type has just changed.
# childname is None for a bare /uri/CAP path, which has no parent entry.
1181 if childname is None:
1182 raise SFTPError(FX_NO_SUCH_FILE, "cannot delete an object specified by URI")
1184 d2 = parent.get(childname)
1185 def _got_child(child):
1186 # Unknown children can be removed by either removeFile or removeDirectory.
1187 if must_be_directory and IFileNode.providedBy(child):
1188 raise SFTPError(FX_PERMISSION_DENIED, "rmdir called on a file")
1189 if must_be_file and IDirectoryNode.providedBy(child):
1190 raise SFTPError(FX_PERMISSION_DENIED, "rmfile called on a directory")
1191 return parent.delete(childname)
1192 d2.addCallback(_got_child)
1194 d.addCallback(_got_parent)
# ISFTPServer.openDirectory: list a directory, producing (name, longname,
# attrs) triples wrapped in a StoppableList.
# NOTE(review): this listing elides some original lines — notably the call
# that produces `d2` (presumably `d2 = dirnode.list()`), the `results = []`
# initializer used at line 1222, and the trailing returns.
1197 def openDirectory(self, pathstring):
1198 request = ".openDirectory(%r)" % (pathstring,)
1199 self.log(request, level=OPERATIONAL)
1201 path = self._path_from_string(pathstring)
1202 d = self._get_node_and_metadata_for_path(path)
1203 def _list( (dirnode, metadata) ):
1204 if dirnode.is_unknown():
1205 raise SFTPError(FX_PERMISSION_DENIED,
1206 "cannot list an unknown cap as a directory. Upgrading the gateway "
1207 "to a later Tahoe-LAFS version may help")
1208 if not IDirectoryNode.providedBy(dirnode):
1209 raise SFTPError(FX_PERMISSION_DENIED,
1210 "cannot list a file as if it were a directory")
# Build one ls-style row per child; readonly status combines the parent's
# and the child's own writability (_is_readonly).
1213 def _render(children):
1214 parent_readonly = dirnode.is_readonly()
1216 for filename, (child, metadata) in children.iteritems():
1217 # The file size may be cached or absent.
1218 metadata['readonly'] = _is_readonly(parent_readonly, child)
1219 attrs = _populate_attrs(child, metadata)
1220 filename_utf8 = filename.encode('utf-8')
1221 longname = _lsLine(filename_utf8, attrs)
1222 results.append( (filename_utf8, longname, attrs) )
1223 return StoppableList(results)
1224 d2.addCallback(_render)
1226 d.addCallback(_list)
1227 d.addBoth(_convert_error, request)
# ISFTPServer.getAttrs: stat a path. If the node exists, report its current
# size; if it does not exist but is listed in global_open_files (a file
# created by an open that has not yet been uploaded), synthesize attrs.
# NOTE(review): several original lines are elided — the `def _noexist(err):`
# header before line 1248, the remaining keys of the dict literal started at
# line 1252, and the trailing `return`.
1230 def getAttrs(self, pathstring, followLinks):
1231 request = ".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks)
1232 self.log(request, level=OPERATIONAL)
1234 path = self._path_from_string(pathstring)
# Same canonical-path key that openFile uses for global_open_files.
1235 canonpath = u"/" + u"/".join(path)
1237 d = self._get_node_and_metadata_for_path(path)
1238 def _render( (node, metadata) ):
1239 # When asked about a specific file, report its current size.
1240 # TODO: the modification time for a mutable file should be
1241 # reported as the update time of the best version. But that
1242 # information isn't currently stored in mutable shares, I think.
1244 d2 = node.get_current_size()
1245 d2.addCallback(lambda size: _populate_attrs(node, metadata, size=size))
# Errback path: only NoSuchChildError is handled here; check the
# open-files table before reporting non-existence.
1248 err.trap(NoSuchChildError)
1249 if canonpath in global_open_files:
1250 (count, times) = global_open_files[canonpath]
1251 # A file that has been opened for creation necessarily has permissions rw-rw-rw-.
1252 return {'permissions': S_IFREG | 0666,
1254 'createtime': times,
1260 d.addCallbacks(_render, _noexist)
1261 d.addBoth(_convert_error, request)
# ISFTPServer.setAttrs: attribute changes are mostly ignored; truncation via
# a 'size' attribute is explicitly unsupported.
# NOTE(review): the `if "size" in attrs:`-style guard (original lines
# 1266-1269) is elided from this listing, so the branch structure here is
# incomplete. Also, "wth" in the error message at line 1270 looks like a
# typo for "with" — left unchanged here since it is a runtime string.
1264 def setAttrs(self, pathstring, attrs):
1265 self.log(".setAttrs(%r, %r)" % (pathstring, attrs), level=OPERATIONAL)
1268 # this would require us to download and re-upload the truncated/extended
1270 def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute")
1271 return defer.execute(_unsupported)
# All other attribute changes are silently accepted as no-ops.
1272 return defer.succeed(None)
def readLink(self, pathstring):
    """Symbolic links are not supported; always fail with FX_OP_UNSUPPORTED.

    defer.execute is used so the error is delivered asynchronously, as an
    errback on the returned Deferred.
    """
    self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)

    def _fail():
        raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
    return defer.execute(_fail)
def makeLink(self, linkPathstring, targetPathstring):
    """Creating symbolic links is not supported; always fail with
    FX_OP_UNSUPPORTED, delivered as an errback on the returned Deferred."""
    self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL)

    def _fail():
        raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
    return defer.execute(_fail)
# ISFTPServer.extendedRequest: answer the OpenSSH statvfs extensions with a
# fixed, made-up filesystem description (Tahoe has no real block counts);
# every other extension gets FX_OP_UNSUPPORTED.
# NOTE(review): the closing `))` of the struct.pack call (original line
# ~1303) is elided from this listing.
1286 def extendedRequest(self, extendedName, extendedData):
1287 self.log(".extendedRequest(%r, %r)" % (extendedName, extendedData), level=OPERATIONAL)
1289 if extendedName == 'statvfs@openssh.com' or extendedName == 'fstatvfs@openssh.com':
1290 # <http://dev.libssh.org/ticket/11>
# Eleven big-endian uint64 fields, per the statvfs@openssh.com layout.
1291 return defer.succeed(struct.pack('>11Q',
1292 1024, # uint64 f_bsize /* file system block size */
1293 1024, # uint64 f_frsize /* fundamental fs block size */
1294 628318530, # uint64 f_blocks /* number of blocks (unit f_frsize) */
1295 314159265, # uint64 f_bfree /* free blocks in file system */
1296 314159265, # uint64 f_bavail /* free blocks for non-root */
1297 200000000, # uint64 f_files /* total file inodes */
1298 100000000, # uint64 f_ffree /* free file inodes */
1299 100000000, # uint64 f_favail /* free file inodes for non-root */
1300 0x1AF5, # uint64 f_fsid /* file system id */
1301 2, # uint64 f_flag /* bit mask = ST_NOSUID; not ST_RDONLY */
1302 65535, # uint64 f_namemax /* maximum filename length */
1305 def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "extendedRequest %r" % extendedName)
1306 return defer.execute(_unsupported)
def realPath(self, pathstring):
    """Canonicalize *pathstring* and return it as an absolute, UTF-8-encoded
    path string ("/"-joined, rooted at "/")."""
    self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL)

    encoded = []
    for component in self._path_from_string(pathstring):
        encoded.append(component.encode('utf-8'))
    return "/" + "/".join(encoded)
# Convert an SFTP path string into a list of unicode path components,
# resolving "." and ".." per the SFTP draft and decoding each component as
# strict UTF-8.
# NOTE(review): this listing elides several original lines — the empty-path
# early return, the ".."/"." handling inside the loop, the `try:` that pairs
# with the `except UnicodeError:` at line 1336, the append to `path`, and
# the final `return path`.
1314 def _path_from_string(self, pathstring):
1315 if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)
1317 # The home directory is the root directory.
1318 pathstring = pathstring.strip("/")
1319 if pathstring == "" or pathstring == ".":
1322 path_utf8 = pathstring.split("/")
1324 # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
1325 # "Servers SHOULD interpret a path name component ".." as referring to
1326 # the parent directory, and "." as referring to the current directory."
1328 for p_utf8 in path_utf8:
1330 # ignore excess .. components at the root
# Reject components that are not valid UTF-8 rather than guessing.
1335 p = p_utf8.decode('utf-8', 'strict')
1336 except UnicodeError:
1337 raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
1340 if noisy: self.log(" PATH %r" % (path,), level=NOISY)
# Resolve the root for *path*: a leading u"uri" component means the next
# component is a cap to instantiate directly; otherwise the session root is
# used. NOTE(review): the `else:` before line 1349 and the final `return d`
# are elided from this listing.
1343 def _get_root(self, path):
1344 # return Deferred (root, remaining_path)
1345 if path and path[0] == u"uri":
1346 d = defer.maybeDeferred(self._client.create_node_from_uri, path[1].encode('utf-8'))
1347 d.addCallback(lambda root: (root, path[2:]))
1349 d = defer.succeed((self._root, path))
# Resolve *path* to either (parent_dirnode, childname) for a normal path or
# (node, None) for a bare root/URI path.
# NOTE(review): the empty-remaining-path return (after line 1356) and the
# trailing `return` statements are elided from this listing.
1352 def _get_parent_or_node(self, path):
1353 # return Deferred (parent, childname) or (node, None)
1354 d = self._get_root(path)
1355 def _got_root( (root, remaining_path) ):
1356 if not remaining_path:
# Descend to the second-to-last component; the last is the childname.
1359 d2 = root.get_child_at_path(remaining_path[:-1])
1360 d2.addCallback(lambda parent: (parent, remaining_path[-1]))
1362 d.addCallback(_got_root)
# Resolve *path* to (node, metadata), guaranteeing a 'readonly' key in the
# metadata dict.
# NOTE(review): the `else:` before line 1375 and the trailing `return`
# statements are elided from this listing.
1365 def _get_node_and_metadata_for_path(self, path):
1366 # return Deferred (node, metadata)
1367 # where metadata always has a 'readonly' key
1368 d = self._get_parent_or_node(path)
1369 def _got_parent_or_node( (parent_or_node, childname) ):
1370 if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
# childname is None for the root or a bare URI: synthesize metadata.
1371 if childname is None:
1372 node = parent_or_node
1373 return (node, {'readonly': node.is_unknown() or node.is_readonly()})
1375 parent = parent_or_node
1376 d2 = parent.get_child_and_metadata_at_path([childname])
1377 def _got( (child, metadata) ):
1378 assert IDirectoryNode.providedBy(parent), parent
1379 metadata['readonly'] = _is_readonly(parent.is_readonly(), child)
1380 return (child, metadata)
1381 d2.addCallback(_got)
1383 d.addCallback(_got_parent_or_node)
# Convert an SFTP attrs dict into Tahoe metadata: time-related keys become
# longs, "ext_*" keys are kept as strings, everything else is dropped.
# NOTE(review): the dict initializer and `for key in attrs:` loop header
# (original lines ~1387-1389) and the final `return metadata` are elided
# from this listing.
1386 def _attrs_to_metadata(self, attrs):
1390 if key == "mtime" or key == "ctime" or key == "createtime":
1391 metadata[key] = long(attrs[key])
1392 elif key.startswith("ext_"):
1393 metadata[key] = str(attrs[key])
# Conch avatar for one authenticated SFTP account: wires the "sftp"
# subsystem to FileTransferServer and rejects interactive session features
# (pty/shell/exec). NOTE(review): this listing elides the `def
# eofReceived(self):` and `def closed(self):` headers (lines 1428 and 1431)
# whose log-call bodies appear at lines 1429 and 1432.
1398 class SFTPUser(ConchUser, PrefixingLogMixin):
1399 implements(ISession)
1400 def __init__(self, check_abort, client, rootnode, username, convergence):
1401 ConchUser.__init__(self)
1402 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
# Route the SSH "session" channel and its "sftp" subsystem to Conch's
# standard handlers.
1404 self.channelLookup["session"] = session.SSHSession
1405 self.subsystemLookup["sftp"] = FileTransferServer
1407 self.check_abort = check_abort
1408 self.client = client
1409 self.root = rootnode
1410 self.username = username
1411 self.convergence = convergence
# ISession methods: SFTP-only server, so interactive features are refused.
1413 def getPty(self, terminal, windowSize, attrs):
1414 self.log(".getPty(%r, %r, %r)" % (terminal, windowSize, attrs), level=OPERATIONAL)
1415 raise NotImplementedError
1417 def openShell(self, protocol):
1418 self.log(".openShell(%r)" % (protocol,), level=OPERATIONAL)
1419 raise NotImplementedError
1421 def execCommand(self, protocol, cmd):
1422 self.log(".execCommand(%r, %r)" % (protocol, cmd), level=OPERATIONAL)
1423 raise NotImplementedError
1425 def windowChanged(self, newWindowSize):
1426 self.log(".windowChanged(%r)" % (newWindowSize,), level=OPERATIONAL)
1429 self.log(".eofReceived()", level=OPERATIONAL)
1432 self.log(".closed()", level=OPERATIONAL)
1435 # if you have an SFTPUser, and you want something that provides ISFTPServer,
1436 # then you get SFTPHandler(user)
# Twisted component registration: adapting an SFTPUser to ISFTPServer (as
# FileTransferServer does) constructs SFTPHandler(user).
1437 components.registerAdapter(SFTPHandler, SFTPUser, ISFTPServer)
1439 from auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme
# Body of the Dispatcher realm class (the `class Dispatcher:` header is not
# visible in this listing): it holds the client used to build avatars.
1442 implements(portal.IRealm)
1443 def __init__(self, client):
# The Tahoe client node, used to create nodes from rootcaps.
1444 self._client = client
def requestAvatar(self, avatarID, mind, interface):
    """portal.IRealm hook: build the SFTP avatar for an authenticated account.

    *avatarID* supplies the account's rootcap and username; the returned
    handler is rooted at a node created from that cap. The third element of
    the returned tuple is the logout callable, per the IRealm contract.
    """
    assert interface == IConchUser, interface
    root = self._client.create_node_from_uri(avatarID.rootcap)
    user = SFTPUserHandler(self._client, root, avatarID.username)
    return (interface, user, user.logout)
# Twisted service that runs the SFTP frontend: builds a cred Portal over the
# Dispatcher realm, registers account checkers, and listens with an SSH
# factory using the given host keys.
# NOTE(review): this listing elides several original lines — the `if
# accountfile:` / `if accounturl:` guards around the checker registrations,
# the try/except presumably wrapping getPrimes, and the construction of `f`
# (the SSHFactory instance) used at line 1486.
1453 class SFTPServer(service.MultiService):
1454 def __init__(self, client, accountfile, accounturl,
1455 sftp_portstr, pubkey_file, privkey_file):
1456 service.MultiService.__init__(self)
1458 r = Dispatcher(client)
1459 p = portal.Portal(r)
1462 c = AccountFileChecker(self, accountfile)
1463 p.registerChecker(c)
1465 c = AccountURLChecker(self, accounturl)
1466 p.registerChecker(c)
# Without some account lookup scheme there is no way to map logins to
# rootcaps, so refuse to start.
1467 if not accountfile and not accounturl:
1468 # we could leave this anonymous, with just the /uri/CAP form
1469 raise NeedRootcapLookupScheme("must provide an account file or URL")
1471 pubkey = keys.Key.fromFile(pubkey_file)
1472 privkey = keys.Key.fromFile(privkey_file)
# Factory closure captures the loaded host keys.
1473 class SSHFactory(factory.SSHFactory):
1474 publicKeys = {pubkey.sshType(): pubkey}
1475 privateKeys = {privkey.sshType(): privkey}
1476 def getPrimes(self):
1478 # if present, this enables diffie-hellman-group-exchange
1479 return primes.parseModuliFile("/etc/ssh/moduli")
# Bind the SSH factory to the configured port via strports and attach the
# listener to this MultiService.
1486 s = strports.service(sftp_portstr, f)
1487 s.setServiceParent(self)