import os, tempfile, heapq, binascii, traceback, array, stat, struct, warnings
from stat import S_IFREG, S_IFDIR
from time import time, strftime, localtime

from zope.interface import implements

from twisted.python import components
from twisted.application import service, strports
from twisted.conch.ssh import factory, keys, session
from twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \
     FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED, FX_PERMISSION_DENIED, FX_EOF, \
     FX_BAD_MESSAGE, FX_FAILURE
from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, \
     FXF_CREAT, FXF_TRUNC, FXF_EXCL
from twisted.conch.interfaces import ISFTPServer, ISFTPFile, IConchUser, ISession
from twisted.conch.avatar import ConchUser
from twisted.conch.openssh_compat import primes
from twisted.cred import portal
from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.python.failure import Failure
from twisted.internet.interfaces import ITransport
from twisted.internet import defer
from twisted.internet.interfaces import IFinishableConsumer

from foolscap.api import eventually
from allmydata.util import deferredutil
from allmydata.util.consumer import download_to_data
from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \
     NoSuchChildError
from allmydata.mutable.common import NotWriteableError
from allmydata.immutable.upload import FileHandle
from pycryptopp.cipher.aes import AES
# twisted.conch.ssh.filetransfer generates this warning, but not when it is imported,
# so we have to install the filter here, before any connection is attempted.
# (NOTE(review): the tail of the original explanatory comment was lost in this copy.)
warnings.filterwarnings("ignore", category=DeprecationWarning,
    message="BaseException.message has been deprecated as of Python 2.6",
    module=".*filetransfer", append=True)
44 use_foolscap_logging = True
46 from allmydata.util.log import NOISY, OPERATIONAL, SCARY
48 if use_foolscap_logging:
49 from allmydata.util.log import msg as logmsg, err as logerr, PrefixingLogMixin
51 def logmsg(s, level=None):
53 def logerr(s, level=None):
55 class PrefixingLogMixin:
56 def __init__(self, facility=None):
58 def log(self, s, level=None):
# Helpers that fire a Deferred on a later reactor turn (via foolscap's
# `eventually`). Two variants exist: the noisy one records the creation
# stack and logs every callback/errback; the plain one is minimal.
# NOTE(review): the guard line selecting between them was lost in this copy;
# guarding on `noisy` matches the surviving bodies — confirm against upstream.
if noisy:
    def eventually_callback(d):
        """Return a callable that fires d.callback(res) on a later turn, logging it."""
        s = traceback.format_stack()
        def _cb(res):
            try:
                if noisy: logmsg("CALLBACK %r" % (d,), level=NOISY)
                d.callback(res)
            except: # pragma: no cover
                logerr("Failed to callback %r with %r\n"
                       "Original stack:\n!%s" %
                       (d, res, '!'.join(s)), level=SCARY)
        return lambda res: eventually(_cb, res)

    def eventually_errback(d):
        """Return a callable that fires d.errback(err) on a later turn, logging it."""
        s = traceback.format_stack()
        def _eb(err):
            try:
                if noisy: logmsg("ERRBACK %r %r" % (d, err), level=NOISY)
                d.errback(err)
            except: # pragma: no cover
                logerr("Failed to errback %r with %r\n"
                       "Original stack:\n!%s" %
                       (d, err, '!'.join(s)), level=SCARY)
        return lambda err: eventually(_eb, err)
else:
    def eventually_callback(d):
        return lambda res: eventually(d.callback, res)

    def eventually_errback(d):
        return lambda err: eventually(d.errback, err)
def _convert_error(res, request):
    """Log the outcome of `request` and translate internal failures to SFTPError.

    On success, logs and returns `res` unchanged. On a Failure, re-raises it as
    the appropriate SFTPError (FX_* code) so twisted.conch can report it to the
    SFTP client. Intended for use with Deferred.addBoth."""
    if not isinstance(res, Failure):
        logged_res = res
        if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
        logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)
        return res

    err = res
    logmsg("RAISE %r %r" % (request, err,), level=OPERATIONAL)
    if noisy and not use_foolscap_logging: traceback.print_exc(err)

    # The message argument to SFTPError must not reveal information that
    # might compromise anonymity.

    if err.check(SFTPError):
        # original raiser of SFTPError has responsibility to ensure anonymity
        raise err
    if err.check(NoSuchChildError):
        childname = err.value.args[0].encode('utf-8')
        raise SFTPError(FX_NO_SUCH_FILE, childname)
    if err.check(NotWriteableError):
        msg = err.value.args[0].encode('utf-8')
        raise SFTPError(FX_PERMISSION_DENIED, msg)
    if err.check(ExistingChildError):
        # Versions of SFTP after v3 (which is what twisted.conch implements)
        # define a specific error code for this case: FX_FILE_ALREADY_EXISTS.
        # However v3 doesn't; instead, other servers such as sshd return
        # FX_FAILURE. The gvfs SFTP backend, for example, depends on this
        # to translate the error to the equivalent of POSIX EEXIST, which is
        # necessary for some picky programs (such as gedit).
        msg = err.value.args[0].encode('utf-8')
        raise SFTPError(FX_FAILURE, msg)
    if err.check(NotImplementedError):
        raise SFTPError(FX_OP_UNSUPPORTED, str(err.value))
    if err.check(EOFError):
        raise SFTPError(FX_EOF, "end of file reached")
    if err.check(defer.FirstError):
        _convert_error(err.value.subFailure, request)

    # We assume that the error message is not anonymity-sensitive.
    raise SFTPError(FX_FAILURE, str(err.value))
def _repr_flags(flags):
    """Return a human-readable 'FXF_READ|FXF_CREAT'-style rendering of SFTP open flags."""
    return "|".join([f for f in
                     [(flags & FXF_READ) and "FXF_READ" or None,
                      (flags & FXF_WRITE) and "FXF_WRITE" or None,
                      (flags & FXF_APPEND) and "FXF_APPEND" or None,
                      (flags & FXF_CREAT) and "FXF_CREAT" or None,
                      (flags & FXF_TRUNC) and "FXF_TRUNC" or None,
                      (flags & FXF_EXCL) and "FXF_EXCL" or None,
                     ]
                     if f])
def _lsLine(name, attrs):
    """Return an 'ls -l'-style longname line for `name` given its SFTP attrs dict.

    Only 'permissions' is required in attrs; 'mtime' defaults to 0 and 'size'
    to '?'. Owner/group are faked as 'tahoe' and the link count as 1, since
    Tahoe has no equivalent concepts."""
    st_uid = "tahoe"
    st_gid = "tahoe"
    st_mtime = attrs.get("mtime", 0)
    st_mode = attrs["permissions"]
    # TODO: check that clients are okay with this being a "?".
    # (They should be because the longname is intended for human
    # consumption.)
    st_size = attrs.get("size", "?")
    # We don't know how many links there really are to this object.
    st_nlink = 1

    # From <http://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
    # We can't call the version in Twisted because we might have a version earlier than
    # <http://twistedmatrix.com/trac/changeset/25412> (released in Twisted 8.2).

    mode = st_mode
    perms = array.array('c', '-'*10)
    ft = stat.S_IFMT(mode)
    if stat.S_ISDIR(ft): perms[0] = 'd'
    elif stat.S_ISCHR(ft): perms[0] = 'c'
    elif stat.S_ISBLK(ft): perms[0] = 'b'
    elif stat.S_ISREG(ft): perms[0] = '-'
    elif stat.S_ISFIFO(ft): perms[0] = 'f'
    elif stat.S_ISLNK(ft): perms[0] = 'l'
    elif stat.S_ISSOCK(ft): perms[0] = 's'
    else: perms[0] = '?'
    # user
    if mode&stat.S_IRUSR: perms[1] = 'r'
    if mode&stat.S_IWUSR: perms[2] = 'w'
    if mode&stat.S_IXUSR: perms[3] = 'x'
    # group
    if mode&stat.S_IRGRP: perms[4] = 'r'
    if mode&stat.S_IWGRP: perms[5] = 'w'
    if mode&stat.S_IXGRP: perms[6] = 'x'
    # other
    if mode&stat.S_IROTH: perms[7] = 'r'
    if mode&stat.S_IWOTH: perms[8] = 'w'
    if mode&stat.S_IXOTH: perms[9] = 'x'
    # suid/sgid never set

    l = perms.tostring() + ' '
    l += str(st_nlink).rjust(5) + ' '
    un = str(st_uid)
    l += un.ljust(9)
    gr = str(st_gid)
    l += gr.ljust(9)
    sz = str(st_size)
    l += sz.rjust(8)
    l += ' '
    sixmo = 60 * 60 * 24 * 7 * 26
    if st_mtime + sixmo < time(): # last edited more than 6mo ago
        l += strftime("%b %d %Y ", localtime(st_mtime))
    else:
        l += strftime("%b %d %H:%M ", localtime(st_mtime))
    l += name
    return l
211 def _is_readonly(parent_readonly, child):
212 """Whether child should be treated as having read-only permissions when listed
215 if child.is_unknown():
217 elif child.is_mutable():
218 return child.is_readonly()
220 return parent_readonly
223 def _populate_attrs(childnode, metadata, size=None):
226 # The permissions must have the S_IFDIR (040000) or S_IFREG (0100000)
227 # bits, otherwise the client may refuse to open a directory.
228 # Also, sshfs run as a non-root user requires files and directories
229 # to be world-readable/writeable.
231 # Directories and unknown nodes have no size, and SFTP doesn't
232 # require us to make one up.
234 # childnode might be None, meaning that the file doesn't exist yet,
235 # but we're going to write it later.
237 if childnode and childnode.is_unknown():
239 elif childnode and IDirectoryNode.providedBy(childnode):
240 perms = S_IFDIR | 0777
242 # For files, omit the size if we don't immediately know it.
243 if childnode and size is None:
244 size = childnode.get_size()
246 assert isinstance(size, (int, long)) and not isinstance(size, bool), repr(size)
248 perms = S_IFREG | 0666
251 assert 'readonly' in metadata, metadata
252 if metadata['readonly']:
253 perms &= S_IFDIR | S_IFREG | 0555 # clear 'w' bits
255 # see webapi.txt for what these times mean
256 if 'linkmotime' in metadata.get('tahoe', {}):
257 attrs['mtime'] = int(metadata['tahoe']['linkmotime'])
258 elif 'mtime' in metadata:
259 # We would prefer to omit atime, but SFTP version 3 can only
260 # accept mtime if atime is also set.
261 attrs['mtime'] = int(metadata['mtime'])
262 attrs['atime'] = attrs['mtime']
264 if 'linkcrtime' in metadata.get('tahoe', {}):
265 attrs['createtime'] = int(metadata['tahoe']['linkcrtime'])
267 if 'ctime' in metadata:
268 attrs['ctime'] = int(metadata['ctime'])
270 attrs['permissions'] = perms
272 # twisted.conch.ssh.filetransfer only implements SFTP version 3,
273 # which doesn't include SSH_FILEXFER_ATTR_FLAGS.
class EncryptedTemporaryFile(PrefixingLogMixin):
    """A file-like object backed by a TemporaryFile, transparently encrypting
    on write and decrypting on read with a random per-instance AES-128 key,
    so plaintext never touches disk. The keystream position is derived from
    the byte offset, allowing random access."""
    # not implemented: next, readline, readlines, xreadlines, writelines

    def __init__(self):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        self.file = tempfile.TemporaryFile()
        self.key = os.urandom(16) # AES-128

    def _crypt(self, offset, data):
        """XOR `data` with the keystream starting at byte `offset` (encrypt and
        decrypt are the same operation)."""
        # FIXME: use random-access AES (pycryptopp ticket #18)
        offset_big = offset // 16
        offset_small = offset % 16
        # Seed the counter with the block index, then discard `offset_small`
        # keystream bytes to align within the block.
        iv = binascii.unhexlify("%032x" % offset_big)
        cipher = AES(self.key, iv=iv)
        cipher.process("\x00"*offset_small)
        return cipher.process(data)

    def close(self):
        self.file.close()

    def flush(self):
        self.file.flush()

    def seek(self, offset, whence=os.SEEK_SET):
        if noisy: self.log(".seek(%r, %r)" % (offset, whence), level=NOISY)
        self.file.seek(offset, whence)

    def tell(self):
        offset = self.file.tell()
        if noisy: self.log(".tell() = %r" % (offset,), level=NOISY)
        return offset

    def read(self, size=-1):
        """Read up to `size` bytes from the current position, decrypted."""
        if noisy: self.log(".read(%r)" % (size,), level=NOISY)
        index = self.file.tell()
        ciphertext = self.file.read(size)
        plaintext = self._crypt(index, ciphertext)
        return plaintext

    def write(self, plaintext):
        """Encrypt `plaintext` for the current position and write it."""
        if noisy: self.log(".write(<data of length %r>)" % (len(plaintext),), level=NOISY)
        index = self.file.tell()
        ciphertext = self._crypt(index, plaintext)
        self.file.write(ciphertext)

    def truncate(self, newsize):
        # Note: if this *extends* the file, the underlying zero bytes read back
        # XORed with the keystream, not as zeroes; callers must write explicit
        # zeroes for gaps (see OverwriteableFileConsumer.overwrite).
        if noisy: self.log(".truncate(%r)" % (newsize,), level=NOISY)
        self.file.truncate(newsize)
class OverwriteableFileConsumer(PrefixingLogMixin):
    implements(IFinishableConsumer)
    """I act both as a consumer for the download of the original file contents, and as a
    wrapper for a temporary file that records the downloaded data and any overwrites.
    I use a priority queue to keep track of which regions of the file have been overwritten
    but not yet downloaded, so that the download does not clobber overwritten data.
    I use another priority queue to record milestones at which to make callbacks
    indicating that a given number of bytes have been downloaded.

    The temporary file reflects the contents of the file that I represent, except that:
    - regions that have neither been downloaded nor overwritten, if present,
      have unspecified contents.
    - the temporary file may be shorter than the represented file (it is never longer).
    The latter's current size is stored in self.current_size.

    This abstraction is mostly independent of SFTP. Consider moving it, if it is found
    useful for other frontends."""

    def __init__(self, check_abort, download_size, tempfile_maker):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        if noisy: self.log(".__init__(%r, %r, %r)" % (check_abort, download_size, tempfile_maker), level=NOISY)
        self.check_abort = check_abort
        self.download_size = download_size
        self.current_size = download_size
        self.f = tempfile_maker()
        self.downloaded = 0
        self.milestones = [] # empty heap of (offset, d)
        self.overwrites = [] # empty heap of (start, end)
        self.is_done = False
        self.done = self.when_reached(download_size) # adds a milestone
        def _signal_done(ign):
            if noisy: self.log("DONE", level=NOISY)
            self.is_done = True
        self.done.addCallback(_signal_done)
        self.producer = None

    def get_file(self):
        return self.f

    def get_current_size(self):
        return self.current_size

    def set_current_size(self, size):
        """Truncate or extend the represented file to `size`, trimming the
        download target if the file shrank below it."""
        if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" %
                           (size, self.current_size, self.downloaded), level=NOISY)
        if size < self.current_size or size < self.downloaded:
            self.f.truncate(size)
        self.current_size = size
        if size < self.download_size:
            self.download_size = size
        if self.downloaded >= self.download_size:
            self.finish()

    def registerProducer(self, p, streaming):
        if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)
        self.producer = p
        if streaming:
            # call resumeProducing once to start things off
            p.resumeProducing()
        else:
            # Pull-mode producer: keep prodding it on successive reactor turns
            # until the download is done.
            def _iterate():
                if not self.is_done:
                    p.resumeProducing()
                    eventually(_iterate)
            _iterate()

    def write(self, data):
        """Consumer interface: receive the next chunk of the download, writing it
        to the temporary file except where it has already been overwritten."""
        if noisy: self.log(".write(<data of length %r>)" % (len(data),), level=NOISY)
        if self.check_abort():
            self.close()
            return

        if self.downloaded >= self.download_size:
            return

        next_downloaded = self.downloaded + len(data)
        if next_downloaded > self.download_size:
            data = data[:(self.download_size - self.downloaded)]

        while len(self.overwrites) > 0:
            (start, end) = self.overwrites[0]
            if start >= next_downloaded:
                # This and all remaining overwrites are after the data we just downloaded.
                break
            if start > self.downloaded:
                # The data we just downloaded has been partially overwritten.
                # Write the prefix of it that precedes the overwritten region.
                self.f.seek(self.downloaded)
                self.f.write(data[:(start - self.downloaded)])

            # This merges consecutive overwrites if possible, which allows us to detect the
            # case where the download can be stopped early because the remaining region
            # to download has already been fully overwritten.
            heapq.heappop(self.overwrites)
            while len(self.overwrites) > 0:
                (start1, end1) = self.overwrites[0]
                if start1 > end:
                    break
                end = end1
                heapq.heappop(self.overwrites)

            if end >= next_downloaded:
                # This overwrite extends past the downloaded data, so there is no
                # more data to consider on this call.
                heapq.heappush(self.overwrites, (next_downloaded, end))
                self._update_downloaded(next_downloaded)
                return
            elif end >= self.downloaded:
                data = data[(end - self.downloaded):]
                self._update_downloaded(end)

        self.f.seek(self.downloaded)
        self.f.write(data)
        self._update_downloaded(next_downloaded)

    def _update_downloaded(self, new_downloaded):
        """Advance the downloaded-bytes high-water mark and fire any milestones
        it passes (counting a contiguous overwrite as already-present data)."""
        self.downloaded = new_downloaded
        milestone = new_downloaded
        if len(self.overwrites) > 0:
            (start, end) = self.overwrites[0]
            if start <= new_downloaded and end > milestone:
                milestone = end

        while len(self.milestones) > 0:
            (next, d) = self.milestones[0]
            if next > milestone:
                return
            if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
            heapq.heappop(self.milestones)
            eventually_callback(d)(None)

        if milestone >= self.download_size:
            self.finish()

    def overwrite(self, offset, data):
        """Record a client write at `offset`, extending the file if necessary."""
        if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
        if offset > self.download_size and offset > self.current_size:
            # Normally writing at an offset beyond the current end-of-file
            # would leave a hole that appears filled with zeroes. However, an
            # EncryptedTemporaryFile doesn't behave like that (if there is a
            # hole in the file on disk, the zeroes that are read back will be
            # XORed with the keystream). So we must explicitly write zeroes in
            # the gap between the current EOF and the offset.
            self.f.seek(self.current_size)
            self.f.write("\x00" * (offset - self.current_size))
        else:
            self.f.seek(offset)
        self.f.write(data)
        end = offset + len(data)
        self.current_size = max(self.current_size, end)
        if end > self.downloaded:
            heapq.heappush(self.overwrites, (offset, end))

    def read(self, offset, length):
        """When the data has been read, callback the Deferred that we return with this data.
        Otherwise errback the Deferred that we return.
        The caller must perform no more overwrites until the Deferred has fired."""
        if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
        if offset >= self.current_size:
            def _eof(): raise EOFError("read past end of file")
            return defer.execute(_eof)

        if offset + length > self.current_size:
            length = self.current_size - offset
            if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY)

        needed = min(offset + length, self.download_size)
        d = self.when_reached(needed)
        def _reached(ign):
            # It is not necessarily the case that self.downloaded >= needed, because
            # the file might have been truncated (thus truncating the download) and
            # then extended.

            assert self.current_size >= offset + length, (self.current_size, offset, length)
            if noisy: self.log("self.f = %r" % (self.f,), level=NOISY)
            self.f.seek(offset)
            return self.f.read(length)
        d.addCallback(_reached)
        return d

    def when_reached(self, index):
        """Return a Deferred that fires when at least `index` bytes have been
        downloaded (or the download has finished)."""
        if noisy: self.log(".when_reached(%r)" % (index,), level=NOISY)
        if index <= self.downloaded: # already reached
            if noisy: self.log("already reached %r" % (index,), level=NOISY)
            return defer.succeed(None)

        d = defer.Deferred()
        def _reached(ign):
            if noisy: self.log("reached %r" % (index,), level=NOISY)
            return ign
        d.addCallback(_reached)
        heapq.heappush(self.milestones, (index, d))
        return d

    def when_done(self):
        return self.done

    def finish(self):
        """Fire every outstanding milestone: no more data will arrive."""
        while len(self.milestones) > 0:
            (next, d) = self.milestones[0]
            if noisy: self.log("MILESTONE FINISH %r %r" % (next, d), level=NOISY)
            heapq.heappop(self.milestones)
            # The callback means that the milestone has been reached if
            # it is ever going to be. Note that the file may have been
            # truncated to before the milestone.
            eventually_callback(d)(None)

        # FIXME: causes spurious failures
        #self.unregisterProducer()

    def close(self):
        self.is_done = True
        self.finish()
        self.f.close()

    def unregisterProducer(self):
        if self.producer:
            self.producer.stopProducing()
            self.producer = None
# Files at or below this size, opened read-only, are downloaded to memory
# whole rather than streamed through a temporary file.
SIZE_THRESHOLD = 1000

def _make_sftp_file(check_abort, flags, convergence, parent=None, childname=None, filenode=None, metadata=None):
    """Return the appropriate ISFTPFile implementation for an open request:
    ShortReadOnlySFTPFile for small immutable files opened read-only,
    GeneralSFTPFile for everything else."""
    if noisy: logmsg("_make_sftp_file(%r, %r, <convergence censored>, parent=%r, childname=%r, filenode=%r, metadata=%r" %
                     (check_abort, flags, parent, childname, filenode, metadata), level=NOISY)

    assert metadata is None or 'readonly' in metadata, metadata
    if not (flags & (FXF_WRITE | FXF_CREAT)) and (flags & FXF_READ) and filenode and \
       not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD:
        return ShortReadOnlySFTPFile(filenode, metadata)
    else:
        return GeneralSFTPFile(check_abort, flags, convergence,
                               parent=parent, childname=childname, filenode=filenode, metadata=metadata)
564 class ShortReadOnlySFTPFile(PrefixingLogMixin):
565 implements(ISFTPFile)
566 """I represent a file handle to a particular file on an SFTP connection.
567 I am used only for short immutable files opened in read-only mode.
568 The file contents are downloaded to memory when I am created."""
570 def __init__(self, filenode, metadata):
571 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
572 if noisy: self.log(".__init__(%r, %r)" % (filenode, metadata), level=NOISY)
574 assert IFileNode.providedBy(filenode), filenode
575 self.filenode = filenode
576 self.metadata = metadata
577 self.async = download_to_data(filenode)
580 def readChunk(self, offset, length):
581 request = ".readChunk(%r, %r)" % (offset, length)
582 self.log(request, level=OPERATIONAL)
585 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
586 return defer.execute(_closed)
590 if noisy: self.log("_read(%r) in readChunk(%r, %r)" % (data, offset, length), level=NOISY)
592 # "In response to this request, the server will read as many bytes as it
593 # can from the file (up to 'len'), and return them in a SSH_FXP_DATA
594 # message. If an error occurs or EOF is encountered before reading any
595 # data, the server will respond with SSH_FXP_STATUS. For normal disk
596 # files, it is guaranteed that this will read the specified number of
597 # bytes, or up to end of file."
599 # i.e. we respond with an EOF error iff offset is already at EOF.
601 if offset >= len(data):
602 eventually_errback(d)(SFTPError(FX_EOF, "read at or past end of file"))
604 eventually_callback(d)(data[offset:min(offset+length, len(data))])
606 self.async.addCallbacks(_read, eventually_errback(d))
607 d.addBoth(_convert_error, request)
610 def writeChunk(self, offset, data):
611 self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
613 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
614 return defer.execute(_denied)
617 self.log(".close()", level=OPERATIONAL)
620 return defer.succeed(None)
623 request = ".getAttrs()"
624 self.log(request, level=OPERATIONAL)
627 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
628 return defer.execute(_closed)
630 d = defer.execute(_populate_attrs, self.filenode, self.metadata)
631 d.addBoth(_convert_error, request)
634 def setAttrs(self, attrs):
635 self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
636 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
637 return defer.execute(_denied)
640 class GeneralSFTPFile(PrefixingLogMixin):
641 implements(ISFTPFile)
642 """I represent a file handle to a particular file on an SFTP connection.
643 I wrap an instance of OverwriteableFileConsumer, which is responsible for
644 storing the file contents. In order to allow write requests to be satisfied
645 immediately, there is effectively a FIFO queue between requests made to this
646 file handle, and requests to my OverwriteableFileConsumer. This queue is
647 implemented by the callback chain of self.async."""
649 def __init__(self, check_abort, flags, convergence, parent=None, childname=None, filenode=None, metadata=None):
650 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
651 if noisy: self.log(".__init__(%r, %r, <convergence censored>, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
652 (check_abort, flags, parent, childname, filenode, metadata), level=NOISY)
654 self.check_abort = check_abort
656 self.convergence = convergence
658 self.childname = childname
659 self.filenode = filenode
660 self.metadata = metadata
661 self.async = defer.succeed(None)
664 # self.consumer should only be relied on in callbacks for self.async, since it might
665 # not be set before then.
667 tempfile_maker = EncryptedTemporaryFile
669 if (flags & FXF_TRUNC) or not filenode:
670 # We're either truncating or creating the file, so we don't need the old contents.
671 self.consumer = OverwriteableFileConsumer(self.check_abort, 0, tempfile_maker)
672 self.consumer.finish()
674 assert IFileNode.providedBy(filenode), filenode
676 # TODO: use download interface described in #993 when implemented.
677 if filenode.is_mutable():
678 self.async.addCallback(lambda ign: filenode.download_best_version())
679 def _downloaded(data):
680 self.consumer = OverwriteableFileConsumer(self.check_abort, len(data), tempfile_maker)
681 self.consumer.write(data)
682 self.consumer.finish()
684 self.async.addCallback(_downloaded)
686 download_size = filenode.get_size()
687 assert download_size is not None, "download_size is None"
688 self.consumer = OverwriteableFileConsumer(self.check_abort, download_size, tempfile_maker)
690 if noisy: self.log("_read immutable", level=NOISY)
691 filenode.read(self.consumer, 0, None)
692 self.async.addCallback(_read)
694 if noisy: logmsg("__init__ done", level=NOISY)
696 def readChunk(self, offset, length):
697 request = ".readChunk(%r, %r)" % (offset, length)
698 self.log(request, level=OPERATIONAL)
700 if not (self.flags & FXF_READ):
701 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
702 return defer.execute(_denied)
705 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
706 return defer.execute(_closed)
710 if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY)
711 d2 = self.consumer.read(offset, length)
712 d2.addErrback(_convert_error, request)
713 d2.addCallbacks(eventually_callback(d), eventually_errback(d))
714 # It is correct to drop d2 here.
716 self.async.addCallbacks(_read, eventually_errback(d))
717 d.addBoth(_convert_error, request)
720 def writeChunk(self, offset, data):
721 self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
723 if not (self.flags & FXF_WRITE):
724 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
725 return defer.execute(_denied)
728 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
729 return defer.execute(_closed)
731 # Note that we return without waiting for the write to occur. Reads and
732 # close wait for prior writes, and will fail if any prior operation failed.
733 # This is ok because SFTP makes no guarantee that the request completes
734 # before the write. In fact it explicitly allows write errors to be delayed
736 # "One should note that on some server platforms even a close can fail.
737 # This can happen e.g. if the server operating system caches writes,
738 # and an error occurs while flushing cached writes during the close."
741 if noisy: self.log("_write in .writeChunk(%r, <data of length %r>), current_size = %r" %
742 (offset, len(data), self.consumer.get_current_size()), level=NOISY)
743 # FXF_APPEND means that we should always write at the current end of file.
744 write_offset = offset
745 if self.flags & FXF_APPEND:
746 write_offset = self.consumer.get_current_size()
748 self.consumer.overwrite(write_offset, data)
749 if noisy: self.log("overwrite done", level=NOISY)
751 self.async.addCallback(_write)
752 # don't addErrback to self.async, just allow subsequent async ops to fail.
753 return defer.succeed(None)
757 self.log(request, level=OPERATIONAL)
760 return defer.succeed(None)
762 # This means that close has been called, not that the close has succeeded.
765 if not (self.flags & (FXF_WRITE | FXF_CREAT)):
766 return defer.execute(self.consumer.close)
769 d2 = self.consumer.when_done()
770 if self.filenode and self.filenode.is_mutable():
771 d2.addCallback(lambda ign: self.consumer.get_current_size())
772 d2.addCallback(lambda size: self.consumer.read(0, size))
773 d2.addCallback(lambda new_contents: self.filenode.overwrite(new_contents))
774 elif (self.flags & FXF_EXCL) and self.consumer.get_current_size() == 0:
775 # The file will already have been written by the open call, so we can
776 # optimize out the extra directory write (useful for zero-length lockfiles).
780 self.log("_add_file childname=%r" % (self.childname,), level=OPERATIONAL)
781 u = FileHandle(self.consumer.get_file(), self.convergence)
782 return self.parent.add_file(self.childname, u)
783 d2.addCallback(_add_file)
785 d2.addCallback(lambda ign: self.consumer.close())
787 self.async.addCallback(_close)
790 self.async.addCallbacks(eventually_callback(d), eventually_errback(d))
791 d.addBoth(_convert_error, request)
795 request = ".getAttrs()"
796 self.log(request, level=OPERATIONAL)
799 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
800 return defer.execute(_closed)
802 # Optimization for read-only handles, when we already know the metadata.
803 if not(self.flags & (FXF_WRITE | FXF_CREAT)) and self.metadata and self.filenode and not self.filenode.is_mutable():
804 return defer.succeed(_populate_attrs(self.filenode, self.metadata))
808 # self.filenode might be None, but that's ok.
809 attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size())
810 eventually_callback(d)(attrs)
812 self.async.addCallbacks(_get, eventually_errback(d))
813 d.addBoth(_convert_error, request)
816 def setAttrs(self, attrs):
817 request = ".setAttrs(attrs) %r" % (attrs,)
818 self.log(request, level=OPERATIONAL)
820 if not (self.flags & FXF_WRITE):
821 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
822 return defer.execute(_denied)
825 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
826 return defer.execute(_closed)
828 if not "size" in attrs:
829 return defer.succeed(None)
832 if not isinstance(size, (int, long)) or size < 0:
833 def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
834 return defer.execute(_bad)
838 self.consumer.set_current_size(size)
839 eventually_callback(d)(None)
841 self.async.addCallbacks(_resize, eventually_errback(d))
842 d.addBoth(_convert_error, request)
847 def __init__(self, items):
857 def __init__(self, value):
861 class SFTPUserHandler(ConchUser, PrefixingLogMixin):
862 implements(ISFTPServer)
863 def __init__(self, client, rootnode, username):
864 ConchUser.__init__(self)
865 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
866 if noisy: self.log(".__init__(%r, %r, %r)" % (client, rootnode, username), level=NOISY)
868 self.channelLookup["session"] = session.SSHSession
869 self.subsystemLookup["sftp"] = FileTransferServer
871 self._client = client
872 self._root = rootnode
873 self._username = username
874 self._convergence = client.convergence
875 self._logged_out = False
878 self._logged_out = True
880 def check_abort(self):
881 return self._logged_out
883 def gotVersion(self, otherVersion, extData):
884 self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL)
887 def openFile(self, pathstring, flags, attrs):
888 request = ".openFile(%r, %r = %r, %r)" % (pathstring, flags, _repr_flags(flags), attrs)
889 self.log(request, level=OPERATIONAL)
891 # This is used for both reading and writing.
892 # First exclude invalid combinations of flags.
894 # /usr/bin/sftp 'get' gives us FXF_READ, while 'put' on a new file
895 # gives FXF_WRITE | FXF_CREAT | FXF_TRUNC. I'm guessing that 'put' on an
896 # existing file gives the same.
898 if not (flags & (FXF_READ | FXF_WRITE)):
899 raise SFTPError(FX_BAD_MESSAGE,
900 "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
902 if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
903 raise SFTPError(FX_BAD_MESSAGE,
904 "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
906 path = self._path_from_string(pathstring)
908 raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
910 # The combination of flags is potentially valid. Now there are two major cases:
912 # 1. The path is specified as /uri/FILECAP, with no parent directory.
913 # If the FILECAP is mutable and writeable, then we can open it in write-only
914 # or read/write mode (non-exclusively), otherwise we can only open it in
915 # read-only mode. The open should succeed immediately as long as FILECAP is
916 # a valid known filecap that grants the required permission.
918 # 2. The path is specified relative to a parent. We find the parent dirnode and
919 # get the child's URI and metadata if it exists. There are four subcases:
920 # a. the child does not exist: FXF_CREAT must be set, and we must be able
921 # to write to the parent directory.
922 # b. the child exists but is not a valid known filecap: fail
923 # c. the child is mutable: if we are trying to open it write-only or
924 # read/write, then we must be able to write to the file.
925 # d. the child is immutable: if we are trying to open it write-only or
926 # read/write, then we must be able to write to the parent directory.
928 # To reduce latency, open succeeds as soon as these conditions are met, even
929 # though there might be a failure in downloading the existing file or uploading
932 # Note that the permission checks below are for more precise error reporting on
933 # the open call; later operations would fail even if we did not make these checks.
935 d = self._get_root(path)
936 def _got_root( (root, path) ):
937 if root.is_unknown():
938 raise SFTPError(FX_PERMISSION_DENIED,
939 "cannot open an unknown cap (or child of an unknown directory). "
940 "Upgrading the gateway to a later Tahoe-LAFS version may help")
943 if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
944 if not IFileNode.providedBy(root):
945 raise SFTPError(FX_PERMISSION_DENIED,
946 "cannot open a directory cap")
947 if (flags & FXF_WRITE) and root.is_readonly():
948 raise SFTPError(FX_PERMISSION_DENIED,
949 "cannot write to a non-writeable filecap without a parent directory")
951 raise SFTPError(FX_FAILURE,
952 "cannot create a file exclusively when it already exists")
954 return _make_sftp_file(self.check_abort, flags, self._convergence, filenode=root)
958 if noisy: self.log("case 2: root = %r, childname = %r, path[:-1] = %r" %
959 (root, childname, path[:-1]), level=NOISY)
960 d2 = root.get_child_at_path(path[:-1])
961 def _got_parent(parent):
962 if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
963 if parent.is_unknown():
964 raise SFTPError(FX_PERMISSION_DENIED,
965 "cannot open an unknown cap (or child of an unknown directory). "
966 "Upgrading the gateway to a later Tahoe-LAFS version may help")
968 parent_readonly = parent.is_readonly()
969 d3 = defer.succeed(None)
971 # FXF_EXCL means that the link to the file (not the file itself) must
972 # be created atomically wrt updates by this storage client.
973 # That is, we need to create the link before returning success to the
974 # SFTP open request (and not just on close, as would normally be the
975 # case). We make the link initially point to a zero-length LIT file,
976 # which is consistent with what might happen on a POSIX filesystem.
979 raise SFTPError(FX_FAILURE,
980 "cannot create a file exclusively when the parent directory is read-only")
982 # 'overwrite=False' ensures failure if the link already exists.
983 # FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
985 zero_length_lit = "URI:LIT:"
986 if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
987 (parent, zero_length_lit, childname), level=NOISY)
988 d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit, overwrite=False))
989 def _seturi_done(child):
990 if noisy: self.log("%r.get_metadata_for(%r)" % (parent, childname), level=NOISY)
991 d4 = parent.get_metadata_for(childname)
992 d4.addCallback(lambda metadata: (child, metadata))
994 d3.addCallback(_seturi_done)
996 if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY)
997 d3.addCallback(lambda ign: parent.get_child_and_metadata(childname))
999 def _got_child( (filenode, metadata) ):
1000 if noisy: self.log("_got_child( (%r, %r) )" % (filenode, metadata), level=NOISY)
1002 if filenode.is_unknown():
1003 raise SFTPError(FX_PERMISSION_DENIED,
1004 "cannot open an unknown cap. Upgrading the gateway "
1005 "to a later Tahoe-LAFS version may help")
1006 if not IFileNode.providedBy(filenode):
1007 raise SFTPError(FX_PERMISSION_DENIED,
1008 "cannot open a directory as if it were a file")
1009 if (flags & FXF_WRITE) and filenode.is_mutable() and filenode.is_readonly():
1010 raise SFTPError(FX_PERMISSION_DENIED,
1011 "cannot open a read-only mutable file for writing")
1012 if (flags & FXF_WRITE) and parent_readonly:
1013 raise SFTPError(FX_PERMISSION_DENIED,
1014 "cannot open a file for writing when the parent directory is read-only")
1016 metadata['readonly'] = _is_readonly(parent_readonly, filenode)
1017 return _make_sftp_file(self.check_abort, flags, self._convergence, parent=parent,
1018 childname=childname, filenode=filenode, metadata=metadata)
1020 if noisy: self.log("_no_child(%r)" % (f,), level=NOISY)
1021 f.trap(NoSuchChildError)
1023 if not (flags & FXF_CREAT):
1024 raise SFTPError(FX_NO_SUCH_FILE,
1025 "the file does not exist, and was not opened with the creation (CREAT) flag")
1027 raise SFTPError(FX_PERMISSION_DENIED,
1028 "cannot create a file when the parent directory is read-only")
1030 return _make_sftp_file(self.check_abort, flags, self._convergence, parent=parent,
1031 childname=childname)
1032 d3.addCallbacks(_got_child, _no_child)
1035 d2.addCallback(_got_parent)
1037 d.addCallback(_got_root)
1038 d.addBoth(_convert_error, request)
1041 def renameFile(self, oldpathstring, newpathstring):
1042 request = ".renameFile(%r, %r)" % (oldpathstring, newpathstring)
1043 self.log(request, level=OPERATIONAL)
1045 fromPath = self._path_from_string(oldpathstring)
1046 toPath = self._path_from_string(newpathstring)
1048 # the target directory must already exist
1049 d = deferredutil.gatherResults([self._get_parent_or_node(fromPath),
1050 self._get_parent_or_node(toPath)])
1051 def _got( (fromPair, toPair) ):
1052 if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r)" %
1053 (fromPair, toPair, oldpathstring, newpathstring), level=NOISY)
1054 (fromParent, fromChildname) = fromPair
1055 (toParent, toChildname) = toPair
1057 if fromChildname is None:
1058 raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
1059 if toChildname is None:
1060 raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
1062 # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
1063 # "It is an error if there already exists a file with the name specified
1065 # FIXME: use move_child_to_path to avoid possible data loss due to #943
1066 d = fromParent.move_child_to(fromChildname, toParent, toChildname, overwrite=False)
1067 #d = parent.move_child_to_path(fromChildname, toRoot, toPath[:-1],
1068 # toPath[-1], overwrite=False)
1071 d.addBoth(_convert_error, request)
1074 def makeDirectory(self, pathstring, attrs):
1075 request = ".makeDirectory(%r, %r)" % (pathstring, attrs)
1076 self.log(request, level=OPERATIONAL)
1078 path = self._path_from_string(pathstring)
1079 metadata = self._attrs_to_metadata(attrs)
1080 d = self._get_root(path)
1081 d.addCallback(lambda (root, path):
1082 self._get_or_create_directories(root, path, metadata))
1083 d.addBoth(_convert_error, request)
def _get_or_create_directories(self, node, path, metadata):
    """Walk 'path' (a list of child names) below 'node', creating any
    missing subdirectories, and return a Deferred that fires with the
    final directory node. Fails with FX_FAILURE if a non-directory is
    in the way."""
    if not IDirectoryNode.providedBy(node):
        # TODO: provide the name of the blocking file in the error message.
        def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
                                                    "is a file in the way") # close enough
        return defer.execute(_blocked)

    # BUG FIX: restore the recursion base case; without it, path[0]
    # below would raise IndexError once the path is exhausted.
    if not path:
        return defer.succeed(node)
    d = node.get(path[0])
    def _maybe_create(f):
        # only a missing child is recoverable (by creating it); any
        # other failure propagates.
        f.trap(NoSuchChildError)
        return node.create_subdirectory(path[0])
    d.addErrback(_maybe_create)
    d.addCallback(self._get_or_create_directories, path[1:], metadata)
    # BUG FIX: return the Deferred so callers can chain on the result.
    return d
def removeFile(self, pathstring):
    """Delete the file at pathstring; fails if it is a known directory."""
    request = ".removeFile(%r)" % (pathstring,)
    self.log(request, level=OPERATIONAL)

    path = self._path_from_string(pathstring)
    d = self._remove_object(path, must_be_file=True)
    d.addBoth(_convert_error, request)
    # BUG FIX: return the Deferred so the caller can wait on the result.
    return d
def removeDirectory(self, pathstring):
    """Delete the directory at pathstring; fails if it is a known file."""
    request = ".removeDirectory(%r)" % (pathstring,)
    self.log(request, level=OPERATIONAL)

    path = self._path_from_string(pathstring)
    d = self._remove_object(path, must_be_directory=True)
    d.addBoth(_convert_error, request)
    # BUG FIX: return the Deferred so the caller can wait on the result.
    return d
1121 def _remove_object(self, path, must_be_directory=False, must_be_file=False):
1122 d = defer.maybeDeferred(self._get_parent_or_node, path)
1123 def _got_parent( (parent, childname) ):
1124 # FIXME (minor): there is a race condition between the 'get' and 'delete',
1125 # so it is possible that the must_be_directory or must_be_file restrictions
1126 # might not be enforced correctly if the type has just changed.
1128 if childname is None:
1129 raise SFTPError(FX_NO_SUCH_FILE, "cannot delete an object specified by URI")
1131 d2 = parent.get(childname)
1132 def _got_child(child):
1133 # Unknown children can be removed by either removeFile or removeDirectory.
1134 if must_be_directory and IFileNode.providedBy(child):
1135 raise SFTPError(FX_PERMISSION_DENIED, "rmdir called on a file")
1136 if must_be_file and IDirectoryNode.providedBy(child):
1137 raise SFTPError(FX_PERMISSION_DENIED, "rmfile called on a directory")
1138 return parent.delete(childname)
1139 d2.addCallback(_got_child)
1141 d.addCallback(_got_parent)
1144 def openDirectory(self, pathstring):
1145 request = ".openDirectory(%r)" % (pathstring,)
1146 self.log(request, level=OPERATIONAL)
1148 path = self._path_from_string(pathstring)
1149 d = self._get_node_and_metadata_for_path(path)
1150 def _list( (dirnode, metadata) ):
1151 if dirnode.is_unknown():
1152 raise SFTPError(FX_PERMISSION_DENIED,
1153 "cannot list an unknown cap as a directory. Upgrading the gateway "
1154 "to a later Tahoe-LAFS version may help")
1155 if not IDirectoryNode.providedBy(dirnode):
1156 raise SFTPError(FX_PERMISSION_DENIED,
1157 "cannot list a file as if it were a directory")
1160 def _render(children):
1161 parent_readonly = dirnode.is_readonly()
1163 for filename, (child, metadata) in children.iteritems():
1164 # The file size may be cached or absent.
1165 metadata['readonly'] = _is_readonly(parent_readonly, child)
1166 attrs = _populate_attrs(child, metadata)
1167 filename_utf8 = filename.encode('utf-8')
1168 longname = _lsLine(filename_utf8, attrs)
1169 results.append( (filename_utf8, longname, attrs) )
1170 return StoppableList(results)
1171 d2.addCallback(_render)
1173 d.addCallback(_list)
1174 d.addBoth(_convert_error, request)
1177 def getAttrs(self, pathstring, followLinks):
1178 request = ".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks)
1179 self.log(request, level=OPERATIONAL)
1181 d = self._get_node_and_metadata_for_path(self._path_from_string(pathstring))
1182 def _render( (node, metadata) ):
1183 # When asked about a specific file, report its current size.
1184 # TODO: the modification time for a mutable file should be
1185 # reported as the update time of the best version. But that
1186 # information isn't currently stored in mutable shares, I think.
1188 d2 = node.get_current_size()
1189 d2.addCallback(lambda size: _populate_attrs(node, metadata, size=size))
1191 d.addCallback(_render)
1192 d.addBoth(_convert_error, request)
def setAttrs(self, pathstring, attrs):
    """Accept and ignore attribute changes, except that a 'size'
    attribute (truncate/extend) is explicitly unsupported."""
    self.log(".setAttrs(%r, %r)" % (pathstring, attrs), level=OPERATIONAL)

    # BUG FIX: restore the guard — only a size change is rejected; all
    # other attribute changes are silently accepted below.
    if "size" in attrs:
        # this would require us to download and re-upload the truncated/extended
        # file contents
        # (also fixed the "wth" typo in the error message)
        def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs with size attribute")
        return defer.execute(_unsupported)
    return defer.succeed(None)
def readLink(self, pathstring):
    """Symbolic links are not supported; always fails with FX_OP_UNSUPPORTED."""
    self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)

    def _raise_unsupported():
        raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
    return defer.execute(_raise_unsupported)
def makeLink(self, linkPathstring, targetPathstring):
    """Symbolic links are not supported; always fails with FX_OP_UNSUPPORTED."""
    self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL)

    def _raise_unsupported():
        raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
    return defer.execute(_raise_unsupported)
def extendedRequest(self, extendedName, extendedData):
    """Handle SFTP protocol extensions.

    statvfs@openssh.com / fstatvfs@openssh.com get a fixed, fabricated
    filesystem-statistics reply (Tahoe has no real block counts); any
    other extension fails with FX_OP_UNSUPPORTED.
    """
    self.log(".extendedRequest(%r, %r)" % (extendedName, extendedData), level=OPERATIONAL)

    if extendedName == 'statvfs@openssh.com' or extendedName == 'fstatvfs@openssh.com':
        # <http://dev.libssh.org/ticket/11>
        # BUG FIX: the struct.pack call was left unterminated; restore
        # the closing parentheses so the reply is actually built.
        return defer.succeed(struct.pack('>11Q',
            1024,       # uint64 f_bsize     /* file system block size */
            1024,       # uint64 f_frsize    /* fundamental fs block size */
            628318530,  # uint64 f_blocks    /* number of blocks (unit f_frsize) */
            314159265,  # uint64 f_bfree     /* free blocks in file system */
            314159265,  # uint64 f_bavail    /* free blocks for non-root */
            200000000,  # uint64 f_files     /* total file inodes */
            100000000,  # uint64 f_ffree     /* free file inodes */
            100000000,  # uint64 f_favail    /* free file inodes for non-root */
            0x1AF5,     # uint64 f_fsid      /* file system id */
            2,          # uint64 f_flag      /* bit mask = ST_NOSUID; not ST_RDONLY */
            65535,      # uint64 f_namemax   /* maximum filename length */
            ))

    def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "extendedRequest %r" % extendedName)
    return defer.execute(_unsupported)
def realPath(self, pathstring):
    """Canonicalize pathstring into an absolute, '/'-separated UTF-8 path."""
    self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL)

    segments = self._path_from_string(pathstring)
    encoded = [segment.encode('utf-8') for segment in segments]
    return "/" + "/".join(encoded)
def _path_from_string(self, pathstring):
    """Convert an SFTP pathstring into a list of unicode path components.

    The home directory is the root directory. Per
    draft-ietf-secsh-filexfer-02 section 6.2, '.' components are
    dropped and '..' pops the previous component (excess '..' at the
    root are ignored). Raises SFTPError(FX_NO_SUCH_FILE) if a
    component is not valid UTF-8.

    BUG FIX: the empty-path branch, the accumulator initialization, the
    '..'/'.' handling, the append, and the return were missing.
    """
    if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)

    # The home directory is the root directory.
    pathstring = pathstring.strip("/")
    if pathstring == "" or pathstring == ".":
        path_utf8 = []
    else:
        path_utf8 = pathstring.split("/")

    # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
    # "Servers SHOULD interpret a path name component ".." as referring to
    # the parent directory, and "." as referring to the current directory."
    path = []
    for p_utf8 in path_utf8:
        if p_utf8 == "..":
            # ignore excess .. components at the root
            if len(path) > 0:
                path = path[:-1]
        elif p_utf8 != ".":
            try:
                p = p_utf8.decode('utf-8', 'strict')
            except UnicodeError:
                raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
            path.append(p)

    if noisy: self.log(" PATH %r" % (path,), level=NOISY)
    return path
def _get_root(self, path):
    """Return a Deferred firing with (root_node, remaining_path).

    A path beginning with u"uri" addresses a cap directly (path[1] is
    the cap string); otherwise the user's root dirnode is used.
    """
    if path and path[0] == u"uri":
        d = defer.maybeDeferred(self._client.create_node_from_uri, path[1].encode('utf-8'))
        d.addCallback(lambda root: (root, path[2:]))
    else:
        # BUG FIX: restore the else-branch and the return; without them
        # the non-URI case clobbered the URI case and nothing was returned.
        d = defer.succeed((self._root, path))
    return d
1283 def _get_parent_or_node(self, path):
1284 # return Deferred (parent, childname) or (node, None)
1285 d = self._get_root(path)
1286 def _got_root( (root, remaining_path) ):
1287 if not remaining_path:
1290 d2 = root.get_child_at_path(remaining_path[:-1])
1291 d2.addCallback(lambda parent: (parent, remaining_path[-1]))
1293 d.addCallback(_got_root)
1296 def _get_node_and_metadata_for_path(self, path):
1297 # return Deferred (node, metadata)
1298 # where metadata always has a 'readonly' key
1299 d = self._get_parent_or_node(path)
1300 def _got_parent_or_node( (parent_or_node, childname) ):
1301 if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
1302 if childname is None:
1303 node = parent_or_node
1304 return (node, {'readonly': node.is_unknown() or node.is_readonly()})
1306 parent = parent_or_node
1307 d2 = parent.get_child_and_metadata_at_path([childname])
1308 def _got( (child, metadata) ):
1309 assert IDirectoryNode.providedBy(parent), parent
1310 metadata['readonly'] = _is_readonly(parent.is_readonly(), child)
1311 return (child, metadata)
1312 d2.addCallback(_got)
1314 d.addCallback(_got_parent_or_node)
def _attrs_to_metadata(self, attrs):
    """Translate an SFTP attrs dict into Tahoe metadata.

    Only time attributes (mtime/ctime/createtime, coerced to long) and
    'ext_*' extension attributes (coerced to str) are preserved; all
    other keys are dropped.
    """
    # BUG FIX: the accumulator, the loop header, and the return were missing.
    metadata = {}
    for key in attrs:
        if key == "mtime" or key == "ctime" or key == "createtime":
            metadata[key] = long(attrs[key])
        elif key.startswith("ext_"):
            metadata[key] = str(attrs[key])
    return metadata
class SFTPUser(ConchUser, PrefixingLogMixin):
    """A Conch avatar for an authenticated SFTP user.

    Only the "sftp" subsystem is wired up; the shell/exec/pty parts of
    ISession are not implemented and raise NotImplementedError.
    """
    implements(ISession)
    def __init__(self, check_abort, client, rootnode, username, convergence):
        ConchUser.__init__(self)
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")

        self.channelLookup["session"] = session.SSHSession
        self.subsystemLookup["sftp"] = FileTransferServer

        self.check_abort = check_abort
        self.client = client
        self.root = rootnode
        self.username = username
        self.convergence = convergence

    def getPty(self, terminal, windowSize, attrs):
        self.log(".getPty(%r, %r, %r)" % (terminal, windowSize, attrs), level=OPERATIONAL)
        raise NotImplementedError

    def openShell(self, protocol):
        self.log(".openShell(%r)" % (protocol,), level=OPERATIONAL)
        raise NotImplementedError

    def execCommand(self, protocol, cmd):
        self.log(".execCommand(%r, %r)" % (protocol, cmd), level=OPERATIONAL)
        raise NotImplementedError

    def windowChanged(self, newWindowSize):
        self.log(".windowChanged(%r)" % (newWindowSize,), level=OPERATIONAL)

    # BUG FIX: the eofReceived and closed method headers were missing,
    # leaving their log calls dangling inside windowChanged.
    def eofReceived(self):
        self.log(".eofReceived()", level=OPERATIONAL)

    def closed(self):
        self.log(".closed()", level=OPERATIONAL)
# if you have an SFTPUser, and you want something that provides ISFTPServer,
# then you get SFTPHandler(user)
# (registered with twisted.python.components so the adaptation happens
# automatically when the SFTP subsystem asks for ISFTPServer)
components.registerAdapter(SFTPHandler, SFTPUser, ISFTPServer)
1370 from auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme
# zope.interface declaration: the enclosing class (Dispatcher, instantiated
# by SFTPServer below) provides twisted.cred's portal.IRealm.
implements(portal.IRealm)
def __init__(self, client):
    """Keep a reference to the Tahoe client for use by requestAvatar."""
    self._client = client
def requestAvatar(self, avatarID, mind, interface):
    """IRealm hook: build the SFTP avatar for an authenticated account.

    avatarID carries the account's rootcap and username. Returns the
    (interface, avatar, logout-callable) triple that twisted.cred's
    Portal expects.
    """
    assert interface == IConchUser, interface
    root = self._client.create_node_from_uri(avatarID.rootcap)
    handler = SFTPUserHandler(self._client, root, avatarID.username)
    return (interface, handler, handler.logout)
class SFTPServer(service.MultiService):
    """Service wrapping a Conch SSH server that speaks only SFTP.

    Credentials are checked against an account file and/or an account
    URL; at least one must be provided.
    """
    def __init__(self, client, accountfile, accounturl,
                 sftp_portstr, pubkey_file, privkey_file):
        service.MultiService.__init__(self)

        r = Dispatcher(client)
        p = portal.Portal(r)

        # BUG FIX: restore the guards — each checker is only registered
        # when its configuration source was actually provided (otherwise
        # the 'if not accountfile and not accounturl' check below could
        # never be reached with valid arguments).
        if accountfile:
            c = AccountFileChecker(self, accountfile)
            p.registerChecker(c)
        if accounturl:
            c = AccountURLChecker(self, accounturl)
            p.registerChecker(c)
        if not accountfile and not accounturl:
            # we could leave this anonymous, with just the /uri/CAP form
            raise NeedRootcapLookupScheme("must provide an account file or URL")

        pubkey = keys.Key.fromFile(pubkey_file)
        privkey = keys.Key.fromFile(privkey_file)
        class SSHFactory(factory.SSHFactory):
            publicKeys = {pubkey.sshType(): pubkey}
            privateKeys = {privkey.sshType(): privkey}
            def getPrimes(self):
                # BUG FIX: restore the try/except — a host without
                # /etc/ssh/moduli must not break server startup.
                try:
                    # if present, this enables diffie-hellman-group-exchange
                    return primes.parseModuliFile("/etc/ssh/moduli")
                except IOError:
                    return None

        # BUG FIX: the factory instance ('f') passed to strports below
        # was never constructed.
        f = SSHFactory()

        s = strports.service(sftp_portstr, f)
        s.setServiceParent(self)