2 import os, tempfile, heapq, binascii, traceback, array, stat, struct
3 from stat import S_IFREG, S_IFDIR
4 from time import time, strftime, localtime
6 from zope.interface import implements
7 from twisted.python import components
8 from twisted.application import service, strports
9 from twisted.conch.ssh import factory, keys, session
10 from twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \
11 FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED, FX_PERMISSION_DENIED, FX_EOF, \
12 FX_BAD_MESSAGE, FX_FAILURE, FX_OK
13 from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, \
14 FXF_CREAT, FXF_TRUNC, FXF_EXCL
15 from twisted.conch.interfaces import ISFTPServer, ISFTPFile, IConchUser, ISession
16 from twisted.conch.avatar import ConchUser
17 from twisted.conch.openssh_compat import primes
18 from twisted.cred import portal
19 from twisted.internet.error import ProcessDone, ProcessTerminated
20 from twisted.python.failure import Failure
21 from twisted.internet.interfaces import ITransport
23 from twisted.internet import defer
24 from twisted.internet.interfaces import IFinishableConsumer
25 from foolscap.api import eventually
26 from allmydata.util import deferredutil
28 from allmydata.util.consumer import download_to_data
29 from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \
30 NoSuchChildError, ChildOfWrongTypeError
31 from allmydata.mutable.common import NotWriteableError
32 from allmydata.immutable.upload import FileHandle
33 from allmydata.dirnode import update_metadata
35 from pycryptopp.cipher.aes import AES
38 use_foolscap_logging = True
40 from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \
41 msg as _msg, err as _err, PrefixingLogMixin as _PrefixingLogMixin
43 if use_foolscap_logging:
44 (logmsg, logerr, PrefixingLogMixin) = (_msg, _err, _PrefixingLogMixin)
45 else: # pragma: no cover
46 def logmsg(s, level=None):
48 def logerr(s, level=None):
50 class PrefixingLogMixin:
51 def __init__(self, facility=None, prefix=''):
53 def log(self, s, level=None):
54 print "%r %s" % (self.prefix, s)
def eventually_callback(d):
    """Return a one-argument callable that fires d.callback(res) on a later
    turn, via foolscap's eventual-send queue."""
    def _fire(res):
        return eventually(d.callback, res)
    return _fire
def eventually_errback(d):
    """Return a one-argument callable that fires d.errback(err) on a later
    turn, via foolscap's eventual-send queue."""
    def _fire(err):
        return eventually(d.errback, err)
    return _fire
65 if isinstance(x, unicode):
66 return x.encode('utf-8')
67 if isinstance(x, str):
73 """SFTP times are unsigned 32-bit integers representing UTC seconds
74 (ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC.
75 A Tahoe time is the corresponding float."""
76 return long(t) & 0xFFFFFFFFL
79 def _convert_error(res, request):
80 if not isinstance(res, Failure):
82 if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
83 logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)
87 logmsg("RAISE %r %r" % (request, err.value), level=OPERATIONAL)
89 if noisy: logmsg(traceback.format_exc(err.value), level=NOISY)
90 except: # pragma: no cover
93 # The message argument to SFTPError must not reveal information that
94 # might compromise anonymity.
96 if err.check(SFTPError):
97 # original raiser of SFTPError has responsibility to ensure anonymity
99 if err.check(NoSuchChildError):
100 childname = _utf8(err.value.args[0])
101 raise SFTPError(FX_NO_SUCH_FILE, childname)
102 if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError):
103 msg = _utf8(err.value.args[0])
104 raise SFTPError(FX_PERMISSION_DENIED, msg)
105 if err.check(ExistingChildError):
106 # Versions of SFTP after v3 (which is what twisted.conch implements)
107 # define a specific error code for this case: FX_FILE_ALREADY_EXISTS.
108 # However v3 doesn't; instead, other servers such as sshd return
109 # FX_FAILURE. The gvfs SFTP backend, for example, depends on this
110 # to translate the error to the equivalent of POSIX EEXIST, which is
111 # necessary for some picky programs (such as gedit).
112 msg = _utf8(err.value.args[0])
113 raise SFTPError(FX_FAILURE, msg)
114 if err.check(NotImplementedError):
115 raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
116 if err.check(EOFError):
117 raise SFTPError(FX_EOF, "end of file reached")
118 if err.check(defer.FirstError):
119 _convert_error(err.value.subFailure, request)
121 # We assume that the error message is not anonymity-sensitive.
122 raise SFTPError(FX_FAILURE, _utf8(err.value))
125 def _repr_flags(flags):
126 return "|".join([f for f in
127 [(flags & FXF_READ) and "FXF_READ" or None,
128 (flags & FXF_WRITE) and "FXF_WRITE" or None,
129 (flags & FXF_APPEND) and "FXF_APPEND" or None,
130 (flags & FXF_CREAT) and "FXF_CREAT" or None,
131 (flags & FXF_TRUNC) and "FXF_TRUNC" or None,
132 (flags & FXF_EXCL) and "FXF_EXCL" or None,
137 def _lsLine(name, attrs):
140 st_mtime = attrs.get("mtime", 0)
141 st_mode = attrs["permissions"]
142 # TODO: check that clients are okay with this being a "?".
143 # (They should be because the longname is intended for human
145 st_size = attrs.get("size", "?")
146 # We don't know how many links there really are to this object.
149 # Based on <http://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
150 # We can't call the version in Twisted because we might have a version earlier than
151 # <http://twistedmatrix.com/trac/changeset/25412> (released in Twisted 8.2).
154 perms = array.array('c', '-'*10)
155 ft = stat.S_IFMT(mode)
156 if stat.S_ISDIR(ft): perms[0] = 'd'
157 elif stat.S_ISREG(ft): perms[0] = '-'
160 if mode&stat.S_IRUSR: perms[1] = 'r'
161 if mode&stat.S_IWUSR: perms[2] = 'w'
162 if mode&stat.S_IXUSR: perms[3] = 'x'
164 if mode&stat.S_IRGRP: perms[4] = 'r'
165 if mode&stat.S_IWGRP: perms[5] = 'w'
166 if mode&stat.S_IXGRP: perms[6] = 'x'
168 if mode&stat.S_IROTH: perms[7] = 'r'
169 if mode&stat.S_IWOTH: perms[8] = 'w'
170 if mode&stat.S_IXOTH: perms[9] = 'x'
171 # suid/sgid never set
174 l += str(st_nlink).rjust(5) + ' '
185 if st_mtime + sixmo < now or st_mtime > now + day:
186 # mtime is more than 6 months ago, or more than one day in the future
187 l += strftime("%b %d %Y ", localtime(st_mtime))
189 l += strftime("%b %d %H:%M ", localtime(st_mtime))
194 def _no_write(parent_readonly, child, metadata):
195 """Whether child should be listed as having read-only permissions in parent."""
197 if child.is_unknown():
199 elif child.is_mutable():
200 return child.is_readonly()
201 elif parent_readonly or IDirectoryNode.providedBy(child):
204 return metadata.get('no-write', False)
207 def _populate_attrs(childnode, metadata, size=None):
210 # The permissions must have the S_IFDIR (040000) or S_IFREG (0100000)
211 # bits, otherwise the client may refuse to open a directory.
212 # Also, sshfs run as a non-root user requires files and directories
213 # to be world-readable/writeable.
214 # It is important that we never set the executable bits on files.
216 # Directories and unknown nodes have no size, and SFTP doesn't
217 # require us to make one up.
219 # childnode might be None, meaning that the file doesn't exist yet,
220 # but we're going to write it later.
222 if childnode and childnode.is_unknown():
224 elif childnode and IDirectoryNode.providedBy(childnode):
225 perms = S_IFDIR | 0777
227 # For files, omit the size if we don't immediately know it.
228 if childnode and size is None:
229 size = childnode.get_size()
231 assert isinstance(size, (int, long)) and not isinstance(size, bool), repr(size)
233 perms = S_IFREG | 0666
236 if metadata.get('no-write', False):
237 perms &= S_IFDIR | S_IFREG | 0555 # clear 'w' bits
239 # See webapi.txt for what these times mean.
240 # We would prefer to omit atime, but SFTP version 3 can only
241 # accept mtime if atime is also set.
242 if 'linkmotime' in metadata.get('tahoe', {}):
243 attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['tahoe']['linkmotime'])
244 elif 'mtime' in metadata:
245 attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['mtime'])
247 if 'linkcrtime' in metadata.get('tahoe', {}):
248 attrs['createtime'] = _to_sftp_time(metadata['tahoe']['linkcrtime'])
250 if 'ctime' in metadata:
251 attrs['ctime'] = _to_sftp_time(metadata['ctime'])
253 attrs['permissions'] = perms
255 # twisted.conch.ssh.filetransfer only implements SFTP version 3,
256 # which doesn't include SSH_FILEXFER_ATTR_FLAGS.
261 def _attrs_to_metadata(attrs):
265 if key == "mtime" or key == "ctime" or key == "createtime":
266 metadata[key] = long(attrs[key])
267 elif key.startswith("ext_"):
268 metadata[key] = str(attrs[key])
270 perms = attrs.get('permissions', stat.S_IWUSR)
271 if not (perms & stat.S_IWUSR):
272 metadata['no-write'] = True
277 def _direntry_for(filenode_or_parent, childname, filenode=None):
278 if childname is None:
279 filenode_or_parent = filenode
281 if filenode_or_parent:
282 rw_uri = filenode_or_parent.get_write_uri()
283 if rw_uri and childname:
284 return rw_uri + "/" + childname.encode('utf-8')
291 class EncryptedTemporaryFile(PrefixingLogMixin):
292 # not implemented: next, readline, readlines, xreadlines, writelines
295 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
296 self.file = tempfile.TemporaryFile()
297 self.key = os.urandom(16) # AES-128
299 def _crypt(self, offset, data):
300 # TODO: use random-access AES (pycryptopp ticket #18)
301 offset_big = offset // 16
302 offset_small = offset % 16
303 iv = binascii.unhexlify("%032x" % offset_big)
304 cipher = AES(self.key, iv=iv)
305 cipher.process("\x00"*offset_small)
306 return cipher.process(data)
314 def seek(self, offset, whence=os.SEEK_SET):
315 if noisy: self.log(".seek(%r, %r)" % (offset, whence), level=NOISY)
316 self.file.seek(offset, whence)
319 offset = self.file.tell()
320 if noisy: self.log(".tell() = %r" % (offset,), level=NOISY)
323 def read(self, size=-1):
324 if noisy: self.log(".read(%r)" % (size,), level=NOISY)
325 index = self.file.tell()
326 ciphertext = self.file.read(size)
327 plaintext = self._crypt(index, ciphertext)
330 def write(self, plaintext):
331 if noisy: self.log(".write(<data of length %r>)" % (len(plaintext),), level=NOISY)
332 index = self.file.tell()
333 ciphertext = self._crypt(index, plaintext)
334 self.file.write(ciphertext)
336 def truncate(self, newsize):
337 if noisy: self.log(".truncate(%r)" % (newsize,), level=NOISY)
338 self.file.truncate(newsize)
341 class OverwriteableFileConsumer(PrefixingLogMixin):
342 implements(IFinishableConsumer)
343 """I act both as a consumer for the download of the original file contents, and as a
344 wrapper for a temporary file that records the downloaded data and any overwrites.
345 I use a priority queue to keep track of which regions of the file have been overwritten
346 but not yet downloaded, so that the download does not clobber overwritten data.
347 I use another priority queue to record milestones at which to make callbacks
348 indicating that a given number of bytes have been downloaded.
350 The temporary file reflects the contents of the file that I represent, except that:
351 - regions that have neither been downloaded nor overwritten, if present,
353 - the temporary file may be shorter than the represented file (it is never longer).
354 The latter's current size is stored in self.current_size.
356 This abstraction is mostly independent of SFTP. Consider moving it, if it is found
357 useful for other frontends."""
359 def __init__(self, download_size, tempfile_maker):
360 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
361 if noisy: self.log(".__init__(%r, %r)" % (download_size, tempfile_maker), level=NOISY)
362 self.download_size = download_size
363 self.current_size = download_size
364 self.f = tempfile_maker()
366 self.milestones = [] # empty heap of (offset, d)
367 self.overwrites = [] # empty heap of (start, end)
368 self.is_closed = False
369 self.done = self.when_reached(download_size) # adds a milestone
371 def _signal_done(ign):
372 if noisy: self.log("DONE", level=NOISY)
374 self.done.addCallback(_signal_done)
380 def get_current_size(self):
381 return self.current_size
383 def set_current_size(self, size):
384 if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" %
385 (size, self.current_size, self.downloaded), level=NOISY)
386 if size < self.current_size or size < self.downloaded:
387 self.f.truncate(size)
388 if size > self.current_size:
389 self.overwrite(self.current_size, "\x00" * (size - self.current_size))
390 self.current_size = size
392 # invariant: self.download_size <= self.current_size
393 if size < self.download_size:
394 self.download_size = size
395 if self.downloaded >= self.download_size:
398 def registerProducer(self, p, streaming):
399 if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)
402 # call resumeProducing once to start things off
411 def write(self, data):
412 if noisy: self.log(".write(<data of length %r>)" % (len(data),), level=NOISY)
416 if self.downloaded >= self.download_size:
419 next_downloaded = self.downloaded + len(data)
420 if next_downloaded > self.download_size:
421 data = data[:(self.download_size - self.downloaded)]
423 while len(self.overwrites) > 0:
424 (start, end) = self.overwrites[0]
425 if start >= next_downloaded:
426 # This and all remaining overwrites are after the data we just downloaded.
428 if start > self.downloaded:
429 # The data we just downloaded has been partially overwritten.
430 # Write the prefix of it that precedes the overwritten region.
431 self.f.seek(self.downloaded)
432 self.f.write(data[:(start - self.downloaded)])
434 # This merges consecutive overwrites if possible, which allows us to detect the
435 # case where the download can be stopped early because the remaining region
436 # to download has already been fully overwritten.
437 heapq.heappop(self.overwrites)
438 while len(self.overwrites) > 0:
439 (start1, end1) = self.overwrites[0]
443 heapq.heappop(self.overwrites)
445 if end >= next_downloaded:
446 # This overwrite extends past the downloaded data, so there is no
447 # more data to consider on this call.
448 heapq.heappush(self.overwrites, (next_downloaded, end))
449 self._update_downloaded(next_downloaded)
451 elif end >= self.downloaded:
452 data = data[(end - self.downloaded):]
453 self._update_downloaded(end)
455 self.f.seek(self.downloaded)
457 self._update_downloaded(next_downloaded)
459 def _update_downloaded(self, new_downloaded):
460 self.downloaded = new_downloaded
461 milestone = new_downloaded
462 if len(self.overwrites) > 0:
463 (start, end) = self.overwrites[0]
464 if start <= new_downloaded and end > milestone:
467 while len(self.milestones) > 0:
468 (next, d) = self.milestones[0]
471 if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
472 heapq.heappop(self.milestones)
473 eventually_callback(d)(None)
475 if milestone >= self.download_size:
478 def overwrite(self, offset, data):
479 if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
480 if offset > self.current_size:
481 # Normally writing at an offset beyond the current end-of-file
482 # would leave a hole that appears filled with zeroes. However, an
483 # EncryptedTemporaryFile doesn't behave like that (if there is a
484 # hole in the file on disk, the zeroes that are read back will be
485 # XORed with the keystream). So we must explicitly write zeroes in
486 # the gap between the current EOF and the offset.
488 self.f.seek(self.current_size)
489 self.f.write("\x00" * (offset - self.current_size))
490 start = self.current_size
496 end = offset + len(data)
497 self.current_size = max(self.current_size, end)
498 if end > self.downloaded:
499 heapq.heappush(self.overwrites, (start, end))
501 def read(self, offset, length):
502 """When the data has been read, callback the Deferred that we return with this data.
503 Otherwise errback the Deferred that we return.
504 The caller must perform no more overwrites until the Deferred has fired."""
506 if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
507 if offset >= self.current_size:
508 def _eof(): raise EOFError("read past end of file")
509 return defer.execute(_eof)
511 if offset + length > self.current_size:
512 length = self.current_size - offset
513 if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY)
515 needed = min(offset + length, self.download_size)
516 d = self.when_reached(needed)
518 # It is not necessarily the case that self.downloaded >= needed, because
519 # the file might have been truncated (thus truncating the download) and
522 assert self.current_size >= offset + length, (self.current_size, offset, length)
523 if noisy: self.log("self.f = %r" % (self.f,), level=NOISY)
525 return self.f.read(length)
526 d.addCallback(_reached)
529 def when_reached(self, index):
530 if noisy: self.log(".when_reached(%r)" % (index,), level=NOISY)
531 if index <= self.downloaded: # already reached
532 if noisy: self.log("already reached %r" % (index,), level=NOISY)
533 return defer.succeed(None)
536 if noisy: self.log("reached %r" % (index,), level=NOISY)
538 d.addCallback(_reached)
539 heapq.heappush(self.milestones, (index, d))
546 while len(self.milestones) > 0:
547 (next, d) = self.milestones[0]
548 if noisy: self.log("MILESTONE FINISH %r %r" % (next, d), level=NOISY)
549 heapq.heappop(self.milestones)
550 # The callback means that the milestone has been reached if
551 # it is ever going to be. Note that the file may have been
552 # truncated to before the milestone.
553 eventually_callback(d)(None)
555 # FIXME: causes spurious failures
556 #self.unregisterProducer()
559 if not self.is_closed:
560 self.is_closed = True
563 except BaseException, e:
564 self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD)
567 def unregisterProducer(self):
569 self.producer.stopProducing()
573 SIZE_THRESHOLD = 1000
576 class ShortReadOnlySFTPFile(PrefixingLogMixin):
577 implements(ISFTPFile)
578 """I represent a file handle to a particular file on an SFTP connection.
579 I am used only for short immutable files opened in read-only mode.
580 The file contents are downloaded to memory when I am created."""
582 def __init__(self, userpath, filenode, metadata):
583 PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
584 if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY)
586 assert IFileNode.providedBy(filenode), filenode
587 self.filenode = filenode
588 self.metadata = metadata
589 self.async = download_to_data(filenode)
592 def readChunk(self, offset, length):
593 request = ".readChunk(%r, %r)" % (offset, length)
594 self.log(request, level=OPERATIONAL)
597 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
598 return defer.execute(_closed)
602 if noisy: self.log("_read(<data of length %r>) in readChunk(%r, %r)" % (len(data), offset, length), level=NOISY)
604 # "In response to this request, the server will read as many bytes as it
605 # can from the file (up to 'len'), and return them in a SSH_FXP_DATA
606 # message. If an error occurs or EOF is encountered before reading any
607 # data, the server will respond with SSH_FXP_STATUS. For normal disk
608 # files, it is guaranteed that this will read the specified number of
609 # bytes, or up to end of file."
611 # i.e. we respond with an EOF error iff offset is already at EOF.
613 if offset >= len(data):
614 eventually_errback(d)(SFTPError(FX_EOF, "read at or past end of file"))
616 eventually_callback(d)(data[offset:min(offset+length, len(data))])
618 self.async.addCallbacks(_read, eventually_errback(d))
619 d.addBoth(_convert_error, request)
622 def writeChunk(self, offset, data):
623 self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
625 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
626 return defer.execute(_denied)
629 self.log(".close()", level=OPERATIONAL)
632 return defer.succeed(None)
635 request = ".getAttrs()"
636 self.log(request, level=OPERATIONAL)
639 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
640 return defer.execute(_closed)
642 d = defer.execute(_populate_attrs, self.filenode, self.metadata)
643 d.addBoth(_convert_error, request)
646 def setAttrs(self, attrs):
647 self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
648 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
649 return defer.execute(_denied)
652 class GeneralSFTPFile(PrefixingLogMixin):
653 implements(ISFTPFile)
654 """I represent a file handle to a particular file on an SFTP connection.
655 I wrap an instance of OverwriteableFileConsumer, which is responsible for
656 storing the file contents. In order to allow write requests to be satisfied
657 immediately, there is effectively a FIFO queue between requests made to this
658 file handle, and requests to my OverwriteableFileConsumer. This queue is
659 implemented by the callback chain of self.async.
661 When first constructed, I am in an 'unopened' state that causes most
662 operations to be delayed until 'open' is called."""
664 def __init__(self, userpath, flags, close_notify, convergence):
665 PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
666 if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" %
667 (userpath, flags, _repr_flags(flags), close_notify), level=NOISY)
669 self.userpath = userpath
671 self.close_notify = close_notify
672 self.convergence = convergence
673 self.async = defer.Deferred()
674 # Creating or truncating the file is a change, but if FXF_EXCL is set, a zero-length file has already been created.
675 self.has_changed = (flags & (FXF_CREAT | FXF_TRUNC)) and not (flags & FXF_EXCL)
677 self.abandoned = False
679 self.childname = None
683 # self.consumer should only be relied on in callbacks for self.async, since it might
684 # not be set before then.
687 def open(self, parent=None, childname=None, filenode=None, metadata=None):
688 self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" %
689 (parent, childname, filenode, metadata), level=OPERATIONAL)
691 # If the file has been renamed, the new (parent, childname) takes precedence.
692 if self.parent is None:
694 if self.childname is None:
695 self.childname = childname
696 self.filenode = filenode
697 self.metadata = metadata
699 assert not self.closed
700 tempfile_maker = EncryptedTemporaryFile
702 if (self.flags & FXF_TRUNC) or not filenode:
703 # We're either truncating or creating the file, so we don't need the old contents.
704 self.consumer = OverwriteableFileConsumer(0, tempfile_maker)
705 self.consumer.finish()
707 assert IFileNode.providedBy(filenode), filenode
709 # TODO: use download interface described in #993 when implemented.
710 if filenode.is_mutable():
711 self.async.addCallback(lambda ign: filenode.download_best_version())
712 def _downloaded(data):
713 self.consumer = OverwriteableFileConsumer(len(data), tempfile_maker)
714 self.consumer.write(data)
715 self.consumer.finish()
717 self.async.addCallback(_downloaded)
719 download_size = filenode.get_size()
720 assert download_size is not None, "download_size is None"
721 self.consumer = OverwriteableFileConsumer(download_size, tempfile_maker)
723 if noisy: self.log("_read immutable", level=NOISY)
724 filenode.read(self.consumer, 0, None)
725 self.async.addCallback(_read)
727 eventually_callback(self.async)(None)
729 if noisy: self.log("open done", level=NOISY)
732 def rename(self, new_userpath, new_parent, new_childname):
733 self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL)
735 self.userpath = new_userpath
736 self.parent = new_parent
737 self.childname = new_childname
740 self.log(".abandon()", level=OPERATIONAL)
742 self.abandoned = True
744 def sync(self, ign=None):
745 # The ign argument allows some_file.sync to be used as a callback.
746 self.log(".sync()", level=OPERATIONAL)
749 self.async.addBoth(eventually_callback(d))
751 if noisy: self.log("_done(%r) in .sync()" % (res,), level=NOISY)
756 def get_metadata(self):
759 def readChunk(self, offset, length):
760 request = ".readChunk(%r, %r)" % (offset, length)
761 self.log(request, level=OPERATIONAL)
763 if not (self.flags & FXF_READ):
764 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
765 return defer.execute(_denied)
768 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
769 return defer.execute(_closed)
773 if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY)
774 d2 = self.consumer.read(offset, length)
775 d2.addCallbacks(eventually_callback(d), eventually_errback(d))
776 # It is correct to drop d2 here.
778 self.async.addCallbacks(_read, eventually_errback(d))
779 d.addBoth(_convert_error, request)
782 def writeChunk(self, offset, data):
783 self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
785 if not (self.flags & FXF_WRITE):
786 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
787 return defer.execute(_denied)
790 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
791 return defer.execute(_closed)
793 self.has_changed = True
795 # Note that we return without waiting for the write to occur. Reads and
796 # close wait for prior writes, and will fail if any prior operation failed.
797 # This is ok because SFTP makes no guarantee that the write completes
798 # before the request does. In fact it explicitly allows write errors to be
799 # delayed until close:
800 # "One should note that on some server platforms even a close can fail.
801 # This can happen e.g. if the server operating system caches writes,
802 # and an error occurs while flushing cached writes during the close."
805 if noisy: self.log("_write in .writeChunk(%r, <data of length %r>), current_size = %r" %
806 (offset, len(data), self.consumer.get_current_size()), level=NOISY)
807 # FXF_APPEND means that we should always write at the current end of file.
808 write_offset = offset
809 if self.flags & FXF_APPEND:
810 write_offset = self.consumer.get_current_size()
812 self.consumer.overwrite(write_offset, data)
813 if noisy: self.log("overwrite done", level=NOISY)
815 self.async.addCallback(_write)
816 # don't addErrback to self.async, just allow subsequent async ops to fail.
817 return defer.succeed(None)
821 self.log(request, level=OPERATIONAL)
824 return defer.succeed(None)
826 # This means that close has been called, not that the close has succeeded.
829 if not (self.flags & (FXF_WRITE | FXF_CREAT)):
830 def _readonly_close():
832 self.consumer.close()
833 return defer.execute(_readonly_close)
835 # We must capture the abandoned, parent, and childname variables synchronously
836 # at the close call. This is needed by the correctness arguments in the comments
837 # for _abandon_any_heisenfiles and _rename_heisenfiles.
838 # Note that the file must have been opened before it can be closed.
839 abandoned = self.abandoned
841 childname = self.childname
843 # has_changed is set when writeChunk is called, not when the write occurs, so
844 # it is correct to optimize out the commit if it is False at the close call.
845 has_changed = self.has_changed
848 if noisy: self.log("_committed(%r)" % (res,), level=NOISY)
850 self.consumer.close()
852 # We must close_notify before re-firing self.async.
853 if self.close_notify:
854 self.close_notify(self.userpath, self.parent, self.childname, self)
858 d2 = self.consumer.when_done()
859 if self.filenode and self.filenode.is_mutable():
860 self.log("update mutable file %r childname=%r" % (self.filenode, childname), level=OPERATIONAL)
861 if self.metadata.get('no-write', False) and not self.filenode.is_readonly():
862 assert parent and childname, (parent, childname, self.metadata)
863 d2.addCallback(lambda ign: parent.set_metadata_for(childname, self.metadata))
865 d2.addCallback(lambda ign: self.consumer.get_current_size())
866 d2.addCallback(lambda size: self.consumer.read(0, size))
867 d2.addCallback(lambda new_contents: self.filenode.overwrite(new_contents))
870 self.log("_add_file childname=%r" % (childname,), level=OPERATIONAL)
871 u = FileHandle(self.consumer.get_file(), self.convergence)
872 return parent.add_file(childname, u, metadata=self.metadata)
873 d2.addCallback(_add_file)
875 d2.addBoth(_committed)
880 # If the file has been abandoned, we don't want the close operation to get "stuck",
881 # even if self.async fails to re-fire. Doing the close independently of self.async
882 # in that case ensures that dropping an ssh connection is sufficient to abandon
883 # any heisenfiles that were not explicitly closed in that connection.
884 if abandoned or not has_changed:
885 d.addCallback(_committed)
887 self.async.addCallback(_close)
889 self.async.addCallbacks(eventually_callback(d), eventually_errback(d))
890 d.addBoth(_convert_error, request)
894 request = ".getAttrs()"
895 self.log(request, level=OPERATIONAL)
898 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
899 return defer.execute(_closed)
901 # Optimization for read-only handles, when we already know the metadata.
902 if not (self.flags & (FXF_WRITE | FXF_CREAT)) and self.metadata and self.filenode and not self.filenode.is_mutable():
903 return defer.succeed(_populate_attrs(self.filenode, self.metadata))
907 if noisy: self.log("_get(%r) in %r, filenode = %r, metadata = %r" % (ign, request, self.filenode, self.metadata), level=NOISY)
909 # self.filenode might be None, but that's ok.
910 attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size())
911 eventually_callback(d)(attrs)
913 self.async.addCallbacks(_get, eventually_errback(d))
914 d.addBoth(_convert_error, request)
917 def setAttrs(self, attrs, only_if_at=None):
918 request = ".setAttrs(%r, only_if_at=%r)" % (attrs, only_if_at)
919 self.log(request, level=OPERATIONAL)
921 if not (self.flags & FXF_WRITE):
922 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
923 return defer.execute(_denied)
926 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
927 return defer.execute(_closed)
929 size = attrs.get("size", None)
930 if size is not None and (not isinstance(size, (int, long)) or size < 0):
931 def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
932 return defer.execute(_bad)
936 if noisy: self.log("_set(%r) in %r" % (ign, request), level=NOISY)
937 if only_if_at and only_if_at != _direntry_for(self.parent, self.childname, self.filenode):
941 self.metadata = update_metadata(self.metadata, _attrs_to_metadata(attrs), now)
943 self.consumer.set_current_size(size)
944 eventually_callback(d)(None)
946 self.async.addCallbacks(_set, eventually_errback(d))
947 d.addBoth(_convert_error, request)
952 def __init__(self, items):
962 def __init__(self, value):
966 # A "heisenfile" is a file that has been opened with write flags
967 # (FXF_WRITE and/or FXF_CREAT) and not yet close-notified.
968 # 'all_heisenfiles' maps from a direntry string to a list of
971 # A direntry string is parent_write_uri + "/" + childname_utf8 for
972 # an immutable file, or file_write_uri for a mutable file.
973 # Updates to this dict are single-threaded.
978 class SFTPUserHandler(ConchUser, PrefixingLogMixin):
979 implements(ISFTPServer)
    def __init__(self, client, rootnode, username):
        """Handle all SFTP requests for one authenticated user, rooted at 'rootnode'."""
        ConchUser.__init__(self)
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=username)
        if noisy: self.log(".__init__(%r, %r, %r)" % (client, rootnode, username), level=NOISY)
        # Register the SSH session channel and the SFTP subsystem with Conch.
        self.channelLookup["session"] = session.SSHSession
        self.subsystemLookup["sftp"] = FileTransferServer
        self._client = client
        self._root = rootnode
        self._username = username
        # convergence secret from the client, used when uploading immutable files
        self._convergence = client.convergence
        # maps from UTF-8 paths for this user, to files written and still open
        self._heisenfiles = {}
    def gotVersion(self, otherVersion, extData):
        """Called on SFTP version negotiation; returns our supported-extension map."""
        self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL)
        # advertise the same extensions as the OpenSSH SFTP server
        # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
        return {'posix-rename@openssh.com': '1',
                'statvfs@openssh.com': '2',
                'fstatvfs@openssh.com': '2',
        self.log(".logout()", level=OPERATIONAL)
        # walk every list of still-open written files tracked for this session
        for files in self._heisenfiles.itervalues():
    def _add_heisenfiles_by_path(self, userpath, files_to_add):
        """Track open-for-write file objects under this user's UTF-8 path."""
        self.log("._add_heisenfiles_by_path(%r, %r)" % (userpath, files_to_add), level=OPERATIONAL)
        if userpath in self._heisenfiles:
            self._heisenfiles[userpath] += files_to_add
            self._heisenfiles[userpath] = files_to_add
    def _add_heisenfiles_by_direntry(self, direntry, files_to_add):
        """Track open-for-write file objects in the global all_heisenfiles map."""
        self.log("._add_heisenfiles_by_direntry(%r, %r)" % (direntry, files_to_add), level=OPERATIONAL)
        if direntry in all_heisenfiles:
            all_heisenfiles[direntry] += files_to_add
            all_heisenfiles[direntry] = files_to_add
    def _abandon_any_heisenfiles(self, userpath, direntry):
        """Abandon all heisenfiles matching 'userpath' or 'direntry'.

        Returns (via Deferred) True if any files were abandoned.
        """
        request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry)
        self.log(request, level=OPERATIONAL)
        # First we synchronously mark all heisenfiles matching the userpath or direntry
        # as abandoned, and remove them from the two heisenfile dicts. Then we .sync()
        # each file that we abandoned.
        # For each file, the call to .abandon() occurs:
        #  * before the file is closed, in which case it will never be committed
        #    (uploaded+linked or published); or
        #  * after it is closed but before it has been close_notified, in which case the
        #    .sync() ensures that it has been committed (successfully or not) before we
        # This avoids a race that might otherwise cause the file to be committed after
        # the remove operation has completed.
        # We return a Deferred that fires with True if any files were abandoned (this
        # does not mean that they were not committed; it is used to determine whether
        # a NoSuchChildError from the attempt to delete the file should be suppressed).
        if direntry in all_heisenfiles:
            files = all_heisenfiles[direntry]
            del all_heisenfiles[direntry]
        if userpath in self._heisenfiles:
            files += self._heisenfiles[userpath]
            del self._heisenfiles[userpath]
        if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
        d = defer.succeed(None)
            self.log("done %r" % (request,), level=OPERATIONAL)
            return len(files) > 0
    def _rename_heisenfiles(self, from_userpath, from_parent, from_childname,
                            to_userpath, to_parent, to_childname, overwrite=True):
        """Re-target heisenfiles open at the 'from' path/direntry to the 'to' location.

        Returns (via Deferred) True if any files were renamed; with overwrite=False,
        fails with SFTPError(FX_PERMISSION_DENIED) if heisenfiles already exist at
        the destination.
        """
        request = ("._rename_heisenfiles(%r, %r, %r, %r, %r, %r, overwrite=%r)" %
                   (from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite))
        self.log(request, level=OPERATIONAL)
        # First we synchronously rename all heisenfiles matching the userpath or direntry.
        # Then we .sync() each file that we renamed.
        # For each file, the call to .rename occurs:
        #  * before the file is closed, in which case it will be committed at the
        #  * after it is closed but before it has been close_notified, in which case the
        #    .sync() ensures that it has been committed (successfully or not) before we
        # This avoids a race that might otherwise cause the file to be committed at the
        # old name after the rename operation has completed.
        # Note that if overwrite is False, the caller should already have checked
        # whether a real direntry exists at the destination. It is possible that another
        # direntry (heisen or real) comes to exist at the destination after that check,
        # but in that case it is correct for the rename to succeed (and for the commit
        # of the heisenfile at the destination to possibly clobber the other entry, since
        # that can happen anyway when we have concurrent write handles to the same direntry).
        # We return a Deferred that fires with True if any files were renamed (this
        # does not mean that they were not committed; it is used to determine whether
        # a NoSuchChildError from the rename attempt should be suppressed). If overwrite
        # is False and there were already heisenfiles at the destination userpath or
        # direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED).
        from_direntry = _direntry_for(from_parent, from_childname)
        to_direntry = _direntry_for(to_parent, to_childname)
        if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles):
            def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
            return defer.execute(_existing)
        if from_direntry in all_heisenfiles:
            from_files = all_heisenfiles[from_direntry]
            del all_heisenfiles[from_direntry]
        if from_userpath in self._heisenfiles:
            from_files += self._heisenfiles[from_userpath]
            del self._heisenfiles[from_userpath]
        if noisy: self.log("from_files = %r in %r" % (from_files, request), level=NOISY)
        # re-index the moved files under the destination path and direntry
        self._add_heisenfiles_by_direntry(to_direntry, from_files)
        self._add_heisenfiles_by_path(to_userpath, from_files)
        # tell each open file object about its new location
        for f in from_files:
            f.rename(to_userpath, to_parent, to_childname)
        d = defer.succeed(None)
        for f in from_files:
            self.log("done %r" % (request,), level=OPERATIONAL)
            return len(from_files) > 0
    def _update_attrs_for_heisenfiles(self, userpath, direntry, attrs):
        """Apply 'attrs' to open heisenfiles at this path/direntry.

        Returns (via Deferred) True if any matching heisenfiles existed.
        """
        request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs)
        self.log(request, level=OPERATIONAL)
        if direntry in all_heisenfiles:
            files = all_heisenfiles[direntry]
        if userpath in self._heisenfiles:
            # NOTE(review): '+=' extends the same list object stored in all_heisenfiles
            # when 'files' was taken from it above — confirm this aliasing mutation is
            # intended.
            files += self._heisenfiles[userpath]
        if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
        # We set the metadata for all heisenfiles at this path or direntry.
        # Since a direntry includes a write URI, we must have authority to
        # change the metadata of heisenfiles found in the all_heisenfiles dict.
        # However that's not necessarily the case for heisenfiles found by
        # path. Therefore we tell the setAttrs method of each file to only
        # perform the update if the file is at the correct direntry.
        d = defer.succeed(None)
            d.addBoth(f.setAttrs, attrs, only_if_at=direntry)
            self.log("done %r" % (request,), level=OPERATIONAL)
            return len(files) > 0
    def _sync_heisenfiles(self, userpath, direntry, ignore=None):
        """Wait for commits of open heisenfiles at this path/direntry (except 'ignore')."""
        request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore)
        self.log(request, level=OPERATIONAL)
        if direntry in all_heisenfiles:
            files = all_heisenfiles[direntry]
        if userpath in self._heisenfiles:
            # NOTE(review): '+=' mutates the list aliased in all_heisenfiles when
            # 'files' was taken from it above — confirm intended.
            files += self._heisenfiles[userpath]
        if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
        d = defer.succeed(None)
            self.log("done %r" % (request,), level=OPERATIONAL)
    def _remove_heisenfile(self, userpath, parent, childname, file_to_remove):
        """Forget one heisenfile from both the per-user and global tables."""
        if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY)
        direntry = _direntry_for(parent, childname)
        if direntry in all_heisenfiles:
            all_old_files = all_heisenfiles[direntry]
            all_new_files = [f for f in all_old_files if f is not file_to_remove]
            if len(all_new_files) > 0:
                all_heisenfiles[direntry] = all_new_files
                # no files left at this direntry: drop the entry entirely
                del all_heisenfiles[direntry]
        if userpath in self._heisenfiles:
            old_files = self._heisenfiles[userpath]
            new_files = [f for f in old_files if f is not file_to_remove]
            if len(new_files) > 0:
                self._heisenfiles[userpath] = new_files
                del self._heisenfiles[userpath]
    def _make_file(self, existing_file, userpath, flags, parent=None, childname=None, filenode=None, metadata=None):
        """Choose and open the file-handle implementation for an SFTP open request."""
        if noisy: self.log("._make_file(%r, %r, %r = %r, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
                           (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata),
        assert metadata is None or 'no-write' in metadata, metadata
        writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0
        direntry = _direntry_for(parent, childname, filenode)
        # make sure any previously-written data at this entry is committed first
        d = self._sync_heisenfiles(userpath, direntry, ignore=existing_file)
        if not writing and (flags & FXF_READ) and filenode and not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD:
            # small immutable file opened read-only: use the lightweight handle
            d.addCallback(lambda ign: ShortReadOnlySFTPFile(userpath, filenode, metadata))
                close_notify = self._remove_heisenfile
            d.addCallback(lambda ign: existing_file or GeneralSFTPFile(userpath, flags, close_notify, self._convergence))
            def _got_file(file):
                file.open(parent=parent, childname=childname, filenode=filenode, metadata=metadata)
                    self._add_heisenfiles_by_direntry(direntry, [file])
            d.addCallback(_got_file)
    def openFile(self, pathstring, flags, attrs, delay=None):
        """Open 'pathstring' for reading and/or writing, returning an ISFTPFile via Deferred.

        'delay', if given, is a Deferred that postpones the whole operation until it fires.
        """
        request = ".openFile(%r, %r = %r, %r, delay=%r)" % (pathstring, flags, _repr_flags(flags), attrs, delay)
        self.log(request, level=OPERATIONAL)
        # This is used for both reading and writing.
        # First exclude invalid combinations of flags, and empty paths.
        if not (flags & (FXF_READ | FXF_WRITE)):
            def _bad_readwrite():
                raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
            return defer.execute(_bad_readwrite)
        if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
            def _bad_exclcreat():
                raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
            return defer.execute(_bad_exclcreat)
        path = self._path_from_string(pathstring)
            def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
            return defer.execute(_emptypath)
        # The combination of flags is potentially valid.
        # To work around clients that have race condition bugs, a getAttr, rename, or
        # remove request following an 'open' request with FXF_WRITE or FXF_CREAT flags,
        # should succeed even if the 'open' request has not yet completed. So we now
        # synchronously add a file object into the self._heisenfiles dict, indexed
        # by its UTF-8 userpath. (We can't yet add it to the all_heisenfiles dict,
        # because we don't yet have a user-independent path for the file.) The file
        # object does not know its filenode, parent, or childname at this point.
        userpath = self._path_to_utf8(path)
        if flags & (FXF_WRITE | FXF_CREAT):
            file = GeneralSFTPFile(userpath, flags, self._remove_heisenfile, self._convergence)
            self._add_heisenfiles_by_path(userpath, [file])
            # We haven't decided which file implementation to use yet.
        desired_metadata = _attrs_to_metadata(attrs)
        # Now there are two major cases:
        #
        #  1. The path is specified as /uri/FILECAP, with no parent directory.
        #     If the FILECAP is mutable and writeable, then we can open it in write-only
        #     or read/write mode (non-exclusively), otherwise we can only open it in
        #     read-only mode. The open should succeed immediately as long as FILECAP is
        #     a valid known filecap that grants the required permission.
        #
        #  2. The path is specified relative to a parent. We find the parent dirnode and
        #     get the child's URI and metadata if it exists. There are four subcases:
        #       a. the child does not exist: FXF_CREAT must be set, and we must be able
        #          to write to the parent directory.
        #       b. the child exists but is not a valid known filecap: fail
        #       c. the child is mutable: if we are trying to open it write-only or
        #          read/write, then we must be able to write to the file.
        #       d. the child is immutable: if we are trying to open it write-only or
        #          read/write, then we must be able to write to the parent directory.
        #
        # To reduce latency, open normally succeeds as soon as these conditions are
        # met, even though there might be a failure in downloading the existing file
        # or uploading a new one. However, there is an exception: if a file has been
        # written, then closed, and is now being reopened, then we have to delay the
        # open until the previous upload/publish has completed. This is necessary
        # because sshfs does not wait for the result of an FXF_CLOSE message before
        # reporting to the client that a file has been closed. It applies both to
        # mutable files, and to directory entries linked to an immutable file.
        #
        # Note that the permission checks below are for more precise error reporting on
        # the open call; later operations would fail even if we did not make these checks.
        d = delay or defer.succeed(None)
        d.addCallback(lambda ign: self._get_root(path))
        def _got_root( (root, path) ):
            if root.is_unknown():
                raise SFTPError(FX_PERMISSION_DENIED,
                                "cannot open an unknown cap (or child of an unknown object). "
                                "Upgrading the gateway to a later Tahoe-LAFS version may help")
                # case 1: the root itself is the file to open
                if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
                if not IFileNode.providedBy(root):
                    raise SFTPError(FX_PERMISSION_DENIED,
                                    "cannot open a directory cap")
                if (flags & FXF_WRITE) and root.is_readonly():
                    raise SFTPError(FX_PERMISSION_DENIED,
                                    "cannot write to a non-writeable filecap without a parent directory")
                if (flags & FXF_WRITE) and root.is_mutable() and desired_metadata.get('no-write', False):
                    raise SFTPError(FX_PERMISSION_DENIED,
                                    "cannot write to a mutable filecap without a parent directory, when the "
                                    "specified permissions would require the link from the parent to be made read-only")
                if flags & FXF_EXCL:
                    raise SFTPError(FX_FAILURE,
                                    "cannot create a file exclusively when it already exists")
                # The file does not need to be added to all_heisenfiles, because it is not
                # associated with a directory entry that needs to be updated.
                metadata = update_metadata(None, desired_metadata, time())
                # We have to decide what to pass for the 'parent_readonly' argument to _no_write,
                # given that we don't actually have a parent. This only affects the permissions
                # reported by a getAttrs on this file handle in the case of an immutable file.
                # We choose 'parent_readonly=True' since that will cause the permissions to be
                # reported as r--r--r--, which is appropriate because an immutable file can't be
                # written via this path.
                metadata['no-write'] = _no_write(True, root, metadata)
                return self._make_file(file, userpath, flags, filenode=root, metadata=metadata)
                # case 2: open a child of a parent directory
                childname = path[-1]
                if noisy: self.log("case 2: root = %r, childname = %r, desired_metadata = %r, path[:-1] = %r" %
                                   (root, childname, desired_metadata, path[:-1]), level=NOISY)
                d2 = root.get_child_at_path(path[:-1])
                def _got_parent(parent):
                    if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
                    if parent.is_unknown():
                        raise SFTPError(FX_PERMISSION_DENIED,
                                        "cannot open a child of an unknown object. "
                                        "Upgrading the gateway to a later Tahoe-LAFS version may help")
                    parent_readonly = parent.is_readonly()
                    d3 = defer.succeed(None)
                    if flags & FXF_EXCL:
                        # FXF_EXCL means that the link to the file (not the file itself) must
                        # be created atomically wrt updates by this storage client.
                        # That is, we need to create the link before returning success to the
                        # SFTP open request (and not just on close, as would normally be the
                        # case). We make the link initially point to a zero-length LIT file,
                        # which is consistent with what might happen on a POSIX filesystem.
                            raise SFTPError(FX_FAILURE,
                                            "cannot create a file exclusively when the parent directory is read-only")
                        # 'overwrite=False' ensures failure if the link already exists.
                        # FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
                        zero_length_lit = "URI:LIT:"
                        if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
                                           (parent, zero_length_lit, childname), level=NOISY)
                        d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit,
                                                                  metadata=desired_metadata, overwrite=False))
                        def _seturi_done(child):
                            if noisy: self.log("%r.get_metadata_for(%r)" % (parent, childname), level=NOISY)
                            d4 = parent.get_metadata_for(childname)
                            d4.addCallback(lambda metadata: (child, metadata))
                        d3.addCallback(_seturi_done)
                        if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY)
                        d3.addCallback(lambda ign: parent.get_child_and_metadata(childname))
                    def _got_child( (filenode, current_metadata) ):
                        if noisy: self.log("_got_child( (%r, %r) )" % (filenode, current_metadata), level=NOISY)
                        metadata = update_metadata(current_metadata, desired_metadata, time())
                        metadata['no-write'] = _no_write(parent_readonly, filenode, metadata)
                        if filenode.is_unknown():
                            raise SFTPError(FX_PERMISSION_DENIED,
                                            "cannot open an unknown cap. Upgrading the gateway "
                                            "to a later Tahoe-LAFS version may help")
                        if not IFileNode.providedBy(filenode):
                            raise SFTPError(FX_PERMISSION_DENIED,
                                            "cannot open a directory as if it were a file")
                        if (flags & FXF_WRITE) and metadata['no-write']:
                            raise SFTPError(FX_PERMISSION_DENIED,
                                            "cannot open a non-writeable file for writing")
                        return self._make_file(file, userpath, flags, parent=parent, childname=childname,
                                               filenode=filenode, metadata=metadata)
                        if noisy: self.log("_no_child(%r)" % (f,), level=NOISY)
                        f.trap(NoSuchChildError)
                        if not (flags & FXF_CREAT):
                            raise SFTPError(FX_NO_SUCH_FILE,
                                            "the file does not exist, and was not opened with the creation (CREAT) flag")
                            raise SFTPError(FX_PERMISSION_DENIED,
                                            "cannot create a file when the parent directory is read-only")
                        return self._make_file(file, userpath, flags, parent=parent, childname=childname)
                    d3.addCallbacks(_got_child, _no_child)
                d2.addCallback(_got_parent)
        d.addCallback(_got_root)
        def _remove_on_error(err):
                # undo the speculative heisenfile registration made above
                self._remove_heisenfile(userpath, None, None, file)
        d.addErrback(_remove_on_error)
        d.addBoth(_convert_error, request)
    def renameFile(self, from_pathstring, to_pathstring, overwrite=False):
        """Rename 'from_pathstring' to 'to_pathstring'.

        overwrite=False implements standard SSH_FXP_RENAME semantics;
        overwrite=True is used by the posix-rename@openssh.com extension.
        """
        request = ".renameFile(%r, %r)" % (from_pathstring, to_pathstring)
        self.log(request, level=OPERATIONAL)
        from_path = self._path_from_string(from_pathstring)
        to_path = self._path_from_string(to_pathstring)
        from_userpath = self._path_to_utf8(from_path)
        to_userpath = self._path_to_utf8(to_path)
        # the target directory must already exist
        d = deferredutil.gatherResults([self._get_parent_or_node(from_path),
                                        self._get_parent_or_node(to_path)])
        def _got( (from_pair, to_pair) ):
            if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r, overwrite=%r)" %
                               (from_pair, to_pair, from_pathstring, to_pathstring, overwrite), level=NOISY)
            (from_parent, from_childname) = from_pair
            (to_parent, to_childname) = to_pair
            if from_childname is None:
                raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
            if to_childname is None:
                raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
            # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
            # "It is an error if there already exists a file with the name specified
            # OpenSSH's SFTP server returns FX_PERMISSION_DENIED for this error.
            # For the standard SSH_FXP_RENAME operation, overwrite=False.
            # We also support the posix-rename@openssh.com extension, which uses overwrite=True.
            d2 = defer.fail(NoSuchChildError())
                d2.addCallback(lambda ign: to_parent.get(to_childname))
                def _expect_fail(res):
                    if not isinstance(res, Failure):
                        raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
                    # It is OK if we fail for errors other than NoSuchChildError, since that probably
                    # indicates some problem accessing the destination directory.
                    res.trap(NoSuchChildError)
                d2.addBoth(_expect_fail)
            # If there are heisenfiles to be written at the 'from' direntry, then ensure
            # they will now be written at the 'to' direntry instead.
            d2.addCallback(lambda ign:
                           self._rename_heisenfiles(from_userpath, from_parent, from_childname,
                                                    to_userpath, to_parent, to_childname, overwrite=overwrite))
                # FIXME: use move_child_to_path to avoid possible data loss due to #943
                #d3 = from_parent.move_child_to_path(from_childname, to_root, to_path, overwrite=overwrite)
                d3 = from_parent.move_child_to(from_childname, to_parent, to_childname, overwrite=overwrite)
                    if noisy: self.log("_check(%r) in .renameFile(%r, %r, overwrite=%r)" %
                                       (err, from_pathstring, to_pathstring, overwrite), level=NOISY)
                    if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)):
                    if not overwrite and err.check(ExistingChildError):
                        raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
            d2.addCallback(_move)
        d.addBoth(_convert_error, request)
    def makeDirectory(self, pathstring, attrs):
        """Create a directory (and any missing ancestors) at 'pathstring'."""
        request = ".makeDirectory(%r, %r)" % (pathstring, attrs)
        self.log(request, level=OPERATIONAL)
        path = self._path_from_string(pathstring)
        metadata = _attrs_to_metadata(attrs)
        if 'no-write' in metadata:
            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only")
            return defer.execute(_denied)
        d = self._get_root(path)
        d.addCallback(lambda (root, path):
                      self._get_or_create_directories(root, path, metadata))
        d.addBoth(_convert_error, request)
    def _get_or_create_directories(self, node, path, metadata):
        """Recursively descend 'path' from 'node', creating subdirectories as needed."""
        if not IDirectoryNode.providedBy(node):
            # TODO: provide the name of the blocking file in the error message.
            def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
                                                        "is a file in the way") # close enough
            return defer.execute(_blocked)
            return defer.succeed(node)
        d = node.get(path[0])
        def _maybe_create(f):
            # only a missing child triggers creation; other errors propagate
            f.trap(NoSuchChildError)
            return node.create_subdirectory(path[0])
        d.addErrback(_maybe_create)
        d.addCallback(self._get_or_create_directories, path[1:], metadata)
    def removeFile(self, pathstring):
        """Remove the file at 'pathstring'."""
        request = ".removeFile(%r)" % (pathstring,)
        self.log(request, level=OPERATIONAL)
        path = self._path_from_string(pathstring)
        d = self._remove_object(path, must_be_file=True)
        d.addBoth(_convert_error, request)
    def removeDirectory(self, pathstring):
        """Remove the directory at 'pathstring'."""
        request = ".removeDirectory(%r)" % (pathstring,)
        self.log(request, level=OPERATIONAL)
        path = self._path_from_string(pathstring)
        d = self._remove_object(path, must_be_directory=True)
        d.addBoth(_convert_error, request)
    def _remove_object(self, path, must_be_directory=False, must_be_file=False):
        """Unlink the object at 'path', first abandoning any open heisenfiles for it."""
        userpath = self._path_to_utf8(path)
        d = self._get_parent_or_node(path)
        def _got_parent( (parent, childname) ):
            if childname is None:
                raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
            direntry = _direntry_for(parent, childname)
            d2 = defer.succeed(False)
            if not must_be_directory:
                # directories cannot have heisenfiles, so only check when removing a file
                d2.addCallback(lambda ign: self._abandon_any_heisenfiles(userpath, direntry))
            # if we abandoned open files, it's OK for the real child to be missing
            d2.addCallback(lambda abandoned:
                           parent.delete(childname, must_exist=not abandoned,
                                         must_be_directory=must_be_directory, must_be_file=must_be_file))
        d.addCallback(_got_parent)
    def openDirectory(self, pathstring):
        """List the directory at 'pathstring'; returns an iterable of (name, longname, attrs)."""
        request = ".openDirectory(%r)" % (pathstring,)
        self.log(request, level=OPERATIONAL)
        path = self._path_from_string(pathstring)
        d = self._get_parent_or_node(path)
        def _got_parent_or_node( (parent_or_node, childname) ):
            if noisy: self.log("_got_parent_or_node( (%r, %r) ) in openDirectory(%r)" %
                               (parent_or_node, childname, pathstring), level=NOISY)
            if childname is None:
                return parent_or_node
                return parent_or_node.get(childname)
        d.addCallback(_got_parent_or_node)
            if dirnode.is_unknown():
                raise SFTPError(FX_PERMISSION_DENIED,
                                "cannot list an unknown cap as a directory. Upgrading the gateway "
                                "to a later Tahoe-LAFS version may help")
            if not IDirectoryNode.providedBy(dirnode):
                raise SFTPError(FX_PERMISSION_DENIED,
                                "cannot list a file as if it were a directory")
            def _render(children):
                parent_readonly = dirnode.is_readonly()
                for filename, (child, metadata) in children.iteritems():
                    # The file size may be cached or absent.
                    metadata['no-write'] = _no_write(parent_readonly, child, metadata)
                    attrs = _populate_attrs(child, metadata)
                    filename_utf8 = filename.encode('utf-8')
                    longname = _lsLine(filename_utf8, attrs)
                    results.append( (filename_utf8, longname, attrs) )
                return StoppableList(results)
            d2.addCallback(_render)
        d.addCallback(_list)
        d.addBoth(_convert_error, request)
    def getAttrs(self, pathstring, followLinks):
        """Return SFTP attributes for the object at 'pathstring'."""
        request = ".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks)
        self.log(request, level=OPERATIONAL)
        # When asked about a specific file, report its current size.
        # TODO: the modification time for a mutable file should be
        # reported as the update time of the best version. But that
        # information isn't currently stored in mutable shares, I think.
        path = self._path_from_string(pathstring)
        userpath = self._path_to_utf8(path)
        d = self._get_parent_or_node(path)
        def _got_parent_or_node( (parent_or_node, childname) ):
            if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
            # Some clients will incorrectly try to get the attributes
            # of a file immediately after opening it, before it has been put
            # into the all_heisenfiles table. This is a race condition bug in
            # the client, but we handle it anyway by calling .sync() on all
            # files matching either the path or the direntry.
            direntry = _direntry_for(parent_or_node, childname)
            d2 = self._sync_heisenfiles(userpath, direntry)
            if childname is None:
                node = parent_or_node
                d2.addCallback(lambda ign: node.get_current_size())
                d2.addCallback(lambda size:
                               _populate_attrs(node, {'no-write': node.is_unknown() or node.is_readonly()}, size=size))
                parent = parent_or_node
                d2.addCallback(lambda ign: parent.get_child_and_metadata_at_path([childname]))
                def _got( (child, metadata) ):
                    if noisy: self.log("_got( (%r, %r) )" % (child, metadata), level=NOISY)
                    assert IDirectoryNode.providedBy(parent), parent
                    metadata['no-write'] = _no_write(parent.is_readonly(), child, metadata)
                    d3 = child.get_current_size()
                    d3.addCallback(lambda size: _populate_attrs(child, metadata, size=size))
                    if noisy: self.log("_nosuch(%r)" % (err,), level=NOISY)
                    err.trap(NoSuchChildError)
                    if noisy: self.log("checking open files:\nself._heisenfiles = %r\nall_heisenfiles = %r\ndirentry=%r" %
                                       (self._heisenfiles, all_heisenfiles, direntry), level=NOISY)
                    if direntry in all_heisenfiles:
                        files = all_heisenfiles[direntry]
                        if len(files) == 0: # pragma: no cover
                        # use the heisenfile that was most recently opened
                        return files[-1].getAttrs()
                d2.addCallbacks(_got, _nosuch)
        d.addCallback(_got_parent_or_node)
        d.addBoth(_convert_error, request)
1683 def setAttrs(self, pathstring, attrs):
1684 request = ".setAttrs(%r, %r)" % (pathstring, attrs)
1685 self.log(request, level=OPERATIONAL)
1688 # this would require us to download and re-upload the truncated/extended
1690 def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported")
1691 return defer.execute(_unsupported)
1693 path = self._path_from_string(pathstring)
1694 userpath = self._path_to_utf8(path)
1695 d = self._get_parent_or_node(path)
1696 def _got_parent_or_node( (parent_or_node, childname) ):
1697 if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
1699 direntry = _direntry_for(parent_or_node, childname)
1700 d2 = self._update_attrs_for_heisenfiles(userpath, direntry, attrs)
1702 def _update(updated_heisenfiles):
1703 if childname is None:
1704 if updated_heisenfiles:
1706 raise SFTPError(FX_NO_SUCH_FILE, userpath)
1708 desired_metadata = _attrs_to_metadata(attrs)
1709 if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY)
1711 return parent_or_node.set_metadata_for(childname, desired_metadata)
1712 d2.addCallback(_update)
1713 d2.addCallback(lambda ign: None)
1715 d.addCallback(_got_parent_or_node)
1716 d.addBoth(_convert_error, request)
    def readLink(self, pathstring):
        """Symlinks are not supported; always fails with FX_OP_UNSUPPORTED."""
        self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)
        def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
        return defer.execute(_unsupported)
    def makeLink(self, linkPathstring, targetPathstring):
        """Symlink creation is not supported; always fails with FX_OP_UNSUPPORTED."""
        self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL)
        # If this is implemented, note the reversal of arguments described in point 7 of
        # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
        def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
        return defer.execute(_unsupported)
    def extendedRequest(self, extensionName, extensionData):
        """Dispatch the OpenSSH extensions: posix-rename, statvfs, fstatvfs."""
        self.log(".extendedRequest(%r, <data of length %r>)" % (extensionName, len(extensionData)), level=OPERATIONAL)
        # We implement the three main OpenSSH SFTP extensions; see
        # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
        if extensionName == 'posix-rename@openssh.com':
            def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")
            # payload is two SSH strings: <uint32 len><fromPath><uint32 len><toPath>
            (fromPathLen,) = struct.unpack('>L', extensionData[0:4])
            if 8 + fromPathLen > len(extensionData): return defer.execute(_bad)
            (toPathLen,) = struct.unpack('>L', extensionData[(4 + fromPathLen):(8 + fromPathLen)])
            if 8 + fromPathLen + toPathLen != len(extensionData): return defer.execute(_bad)
            fromPathstring = extensionData[4:(4 + fromPathLen)]
            toPathstring = extensionData[(8 + fromPathLen):]
            d = self.renameFile(fromPathstring, toPathstring, overwrite=True)
            # Twisted conch assumes that the response from an extended request is either
            # an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing
            # (respond with an FXP_STATUS message) if we return a Failure with code FX_OK.
            def _succeeded(ign):
                raise SFTPError(FX_OK, "request succeeded")
            d.addCallback(_succeeded)
        if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com':
            # f_bsize and f_frsize should be the same to avoid a bug in 'df'
            return defer.succeed(struct.pack('>11Q',
                1024,         # uint64  f_bsize     /* file system block size */
                1024,         # uint64  f_frsize    /* fundamental fs block size */
                628318530,    # uint64  f_blocks    /* number of blocks (unit f_frsize) */
                314159265,    # uint64  f_bfree     /* free blocks in file system */
                314159265,    # uint64  f_bavail    /* free blocks for non-root */
                200000000,    # uint64  f_files     /* total file inodes */
                100000000,    # uint64  f_ffree     /* free file inodes */
                100000000,    # uint64  f_favail    /* free file inodes for non-root */
                0x1AF5,       # uint64  f_fsid      /* file system id */
                2,            # uint64  f_flag      /* bit mask = ST_NOSUID; not ST_RDONLY */
                65535,        # uint64  f_namemax   /* maximum filename length */
        def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
                                            (extensionName, len(extensionData)))
        return defer.execute(_unsupported)
def realPath(self, pathstring):
    """Canonicalize *pathstring* and return it as an absolute,
    slash-separated UTF-8 path (per the SFTP REALPATH request)."""
    self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL)

    canonical = self._path_from_string(pathstring)
    return self._path_to_utf8(canonical)
1786 def _path_to_utf8(self, path):
1787 return (u"/" + u"/".join(path)).encode('utf-8')
def _path_from_string(self, pathstring):
    """Convert a client-supplied SFTP pathstring (UTF-8 bytes) into a list
    of unicode path components.

    The home directory is treated as the root, "." components are dropped,
    and ".." components pop the previous component (excess ".." at the
    root is ignored, per draft-ietf-secsh-filexfer-02 section 6.2).
    Raises SFTPError(FX_NO_SUCH_FILE) if a component is not valid UTF-8.
    """
    if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)

    # The home directory is the root directory.
    pathstring = pathstring.strip("/")
    if pathstring == "" or pathstring == ".":
        path_utf8 = []
    else:
        path_utf8 = pathstring.split("/")

    # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
    # "Servers SHOULD interpret a path name component ".." as referring to
    # the parent directory, and "." as referring to the current directory."
    path = []
    for p_utf8 in path_utf8:
        if p_utf8 == "..":
            # ignore excess .. components at the root
            if len(path) > 0:
                path = path[:-1]
        elif p_utf8 != ".":
            try:
                p = p_utf8.decode('utf-8', 'strict')
            except UnicodeError:
                raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
            path.append(p)

    if noisy: self.log(" PATH %r" % (path,), level=NOISY)
    return path
def _get_root(self, path):
    """Return a Deferred that fires with (root_node, remaining_path).

    A path beginning with the magic u"uri" component addresses an
    arbitrary cap directly (path[1] is the cap); otherwise the user's
    root directory is used and the whole path remains to be traversed.
    """
    d = defer.succeed(None)
    if path and path[0] == u"uri":
        d.addCallback(lambda ign: self._client.create_node_from_uri(path[1].encode('utf-8')))
        d.addCallback(lambda root: (root, path[2:]))
    else:
        d.addCallback(lambda ign: (self._root, path))
    return d
def _get_parent_or_node(self, path):
    """Return a Deferred firing with (parent_node, childname) for a
    non-empty path, or (node, None) when the path addresses a root
    directly (empty remaining path)."""
    d = self._get_root(path)
    # NOTE: unpack inside the function body rather than using a py2-only
    # tuple parameter, so the file stays parseable under Python 3 tools.
    def _got_root(root_and_path):
        (root, remaining_path) = root_and_path
        if not remaining_path:
            return (root, None)
        else:
            d2 = root.get_child_at_path(remaining_path[:-1])
            d2.addCallback(lambda parent: (parent, remaining_path[-1]))
            return d2
    d.addCallback(_got_root)
    return d
class FakeTransport:
    """A stub ITransport that logs and discards all writes.

    Substituted for a protocol's missing transport in ShellSession to
    work around a Twisted bug (see openShell/execCommand)."""
    implements(ITransport)

    def write(self, data):
        note = "FakeTransport.write(<data of length %r>)" % (len(data),)
        logmsg(note, level=NOISY)

    def writeSequence(self, data):
        logmsg("FakeTransport.writeSequence(...)", level=NOISY)

    def loseConnection(self):
        logmsg("FakeTransport.loseConnection()", level=NOISY)

    # getPeer and getHost can just raise errors, since we don't know what to return
class ShellSession(PrefixingLogMixin):
    """An ISession that politely refuses shell sessions and exec requests,
    with one exception: it answers "df -P -k /" with fake filesystem
    statistics (some SFTP clients, e.g. sshfs, probe free space this way).
    """
    implements(ISession)
    def __init__(self, userHandler):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        if noisy: self.log(".__init__(%r)" % (userHandler), level=NOISY)

    def getPty(self, terminal, windowSize, attrs):
        # No PTY support; just log the request.
        self.log(".getPty(%r, %r, %r)" % (terminal, windowSize, attrs), level=OPERATIONAL)

    def openShell(self, protocol):
        self.log(".openShell(%r)" % (protocol,), level=OPERATIONAL)
        if hasattr(protocol, 'transport') and protocol.transport is None:
            protocol.transport = FakeTransport() # work around Twisted bug

        # Tell the user why there is no shell, then end the "process" with
        # a nonzero exit code.
        d = defer.succeed(None)
        d.addCallback(lambda ign: protocol.write("This server supports only SFTP, not shell sessions.\n"))
        d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessTerminated(exitCode=1))))
        return d

    def execCommand(self, protocol, cmd):
        self.log(".execCommand(%r, %r)" % (protocol, cmd), level=OPERATIONAL)
        if hasattr(protocol, 'transport') and protocol.transport is None:
            protocol.transport = FakeTransport() # work around Twisted bug

        d = defer.succeed(None)
        if cmd == "df -P -k /":
            # Made-up but plausible numbers; clients only need something parseable.
            d.addCallback(lambda ign: protocol.write(
                "Filesystem 1024-blocks Used Available Capacity Mounted on\n"
                "tahoe 628318530 314159265 314159265 50% /\n"))
            d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessDone(None))))
        else:
            # Any other command fails with exit code 1.
            d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessTerminated(exitCode=1))))
        return d

    def windowChanged(self, newWindowSize):
        self.log(".windowChanged(%r)" % (newWindowSize,), level=OPERATIONAL)

    def eofReceived(self):
        self.log(".eofReceived()", level=OPERATIONAL)

    def closed(self):
        self.log(".closed()", level=OPERATIONAL)
# If you have an SFTPUserHandler and want something that provides ISession, you get
# ShellSession(userHandler).
# We use adaptation because this must be a different object to the SFTPUserHandler.
# (Twisted's adapter registry constructs the ShellSession on demand when
# ISession(handler) is requested.)
components.registerAdapter(ShellSession, SFTPUserHandler, ISession)
1906 from allmydata.frontends.auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme
class Dispatcher:
    """An IRealm that creates an SFTPUserHandler avatar for each
    authenticated account (instantiated as Dispatcher(client) in
    SFTPServer below)."""
    implements(portal.IRealm)
    def __init__(self, client):
        self._client = client

    def requestAvatar(self, avatarID, mind, interface):
        """Return (interface, avatar, logout_callable) for *avatarID*.

        avatarID is expected to carry .rootcap and .username attributes
        — presumably set by the account checkers; confirm against
        allmydata.frontends.auth."""
        assert interface == IConchUser, interface
        rootnode = self._client.create_node_from_uri(avatarID.rootcap)
        handler = SFTPUserHandler(self._client, rootnode, avatarID.username)
        return (interface, handler, handler.logout)
class SFTPServer(service.MultiService):
    """A MultiService running an SSH server that speaks only SFTP.

    Accounts are authenticated via an account file and/or an account URL;
    at least one must be supplied, otherwise NeedRootcapLookupScheme is
    raised. The server listens on *sftp_portstr* (a strports description)
    using the host keys in *pubkey_file*/*privkey_file*.
    """
    def __init__(self, client, accountfile, accounturl,
                 sftp_portstr, pubkey_file, privkey_file):
        service.MultiService.__init__(self)

        r = Dispatcher(client)
        p = portal.Portal(r)

        if accountfile:
            c = AccountFileChecker(self, accountfile)
            p.registerChecker(c)
        if accounturl:
            c = AccountURLChecker(self, accounturl)
            p.registerChecker(c)
        if not accountfile and not accounturl:
            # we could leave this anonymous, with just the /uri/CAP form
            raise NeedRootcapLookupScheme("must provide an account file or URL")

        pubkey = keys.Key.fromFile(pubkey_file)
        privkey = keys.Key.fromFile(privkey_file)
        class SSHFactory(factory.SSHFactory):
            publicKeys = {pubkey.sshType(): pubkey}
            privateKeys = {privkey.sshType(): privkey}
            def getPrimes(self):
                try:
                    # if present, this enables diffie-hellman-group-exchange
                    return primes.parseModuliFile("/etc/ssh/moduli")
                except IOError:
                    # no moduli file: group-exchange key methods are
                    # simply not offered
                    return None

        f = SSHFactory()
        f.portal = p

        s = strports.service(sftp_portstr, f)
        s.setServiceParent(self)