2 import os, tempfile, heapq, binascii, traceback, array, stat, struct
3 from types import NoneType
4 from stat import S_IFREG, S_IFDIR
5 from time import time, strftime, localtime
7 from zope.interface import implements
8 from twisted.python import components
9 from twisted.application import service, strports
10 from twisted.conch.ssh import factory, keys, session
11 from twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \
12 FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED, FX_PERMISSION_DENIED, FX_EOF, \
13 FX_BAD_MESSAGE, FX_FAILURE, FX_OK
14 from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, \
15 FXF_CREAT, FXF_TRUNC, FXF_EXCL
16 from twisted.conch.interfaces import ISFTPServer, ISFTPFile, IConchUser, ISession
17 from twisted.conch.avatar import ConchUser
18 from twisted.conch.openssh_compat import primes
19 from twisted.cred import portal
20 from twisted.internet.error import ProcessDone, ProcessTerminated
21 from twisted.python.failure import Failure
22 from twisted.internet.interfaces import ITransport
24 from twisted.internet import defer
25 from twisted.internet.interfaces import IFinishableConsumer
26 from foolscap.api import eventually
27 from allmydata.util import deferredutil
29 from allmydata.util.consumer import download_to_data
30 from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \
31 NoSuchChildError, ChildOfWrongTypeError
32 from allmydata.mutable.common import NotWriteableError
33 from allmydata.immutable.upload import FileHandle
34 from allmydata.dirnode import update_metadata
36 from pycryptopp.cipher.aes import AES
39 use_foolscap_logging = True
41 from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \
42 msg as _msg, err as _err, PrefixingLogMixin as _PrefixingLogMixin
44 if use_foolscap_logging:
45 (logmsg, logerr, PrefixingLogMixin) = (_msg, _err, _PrefixingLogMixin)
46 else: # pragma: no cover
47 def logmsg(s, level=None):
49 def logerr(s, level=None):
51 class PrefixingLogMixin:
52 def __init__(self, facility=None, prefix=''):
54 def log(self, s, level=None):
55 print "%r %s" % (self.prefix, s)
def eventually_callback(d):
    """Return a one-argument callable that fires d.callback(res) on a later
    reactor turn, via foolscap's eventual-send queue."""
    def _fire(res):
        return eventually(d.callback, res)
    return _fire
def eventually_errback(d):
    """Return a one-argument callable that fires d.errback(err) on a later
    reactor turn, via foolscap's eventual-send queue."""
    def _fire(err):
        return eventually(d.errback, err)
    return _fire
def _utf8(x):
    """Return a UTF-8-encoded str for x: unicode is encoded, str is passed
    through unchanged (assumed to already be UTF-8), and anything else is
    rendered with repr() so the result is always a str."""
    if isinstance(x, unicode):
        return x.encode('utf-8')
    if isinstance(x, str):
        return x
    return repr(x)
74 """SFTP times are unsigned 32-bit integers representing UTC seconds
75 (ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC.
76 A Tahoe time is the corresponding float."""
77 return long(t) & 0xFFFFFFFFL
def _convert_error(res, request):
    """Log the outcome of request. A non-Failure res is passed through
    unchanged; a Failure is converted to (and raised as) an appropriate
    SFTPError, taking care that the message does not reveal
    anonymity-compromising information."""
    if not isinstance(res, Failure):
        logged_res = res
        if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
        logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)
        return res

    err = res
    logmsg("RAISE %r %r" % (request, err.value), level=OPERATIONAL)
    try:
        if noisy: logmsg(traceback.format_exc(err.value), level=NOISY)
    except:  # pragma: no cover
        pass

    # The message argument to SFTPError must not reveal information that
    # might compromise anonymity.

    if err.check(SFTPError):
        # original raiser of SFTPError has responsibility to ensure anonymity
        raise err
    if err.check(NoSuchChildError):
        childname = _utf8(err.value.args[0])
        raise SFTPError(FX_NO_SUCH_FILE, childname)
    if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError):
        msg = _utf8(err.value.args[0])
        raise SFTPError(FX_PERMISSION_DENIED, msg)
    if err.check(ExistingChildError):
        # Versions of SFTP after v3 (which is what twisted.conch implements)
        # define a specific error code for this case: FX_FILE_ALREADY_EXISTS.
        # However v3 doesn't; instead, other servers such as sshd return
        # FX_FAILURE. The gvfs SFTP backend, for example, depends on this
        # to translate the error to the equivalent of POSIX EEXIST, which is
        # necessary for some picky programs (such as gedit).
        msg = _utf8(err.value.args[0])
        raise SFTPError(FX_FAILURE, msg)
    if err.check(NotImplementedError):
        raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
    if err.check(EOFError):
        raise SFTPError(FX_EOF, "end of file reached")
    if err.check(defer.FirstError):
        _convert_error(err.value.subFailure, request)

    # We assume that the error message is not anonymity-sensitive.
    raise SFTPError(FX_FAILURE, _utf8(err.value))
def _repr_flags(flags):
    """Return a human-readable "|"-joined rendering of the FXF_* open-mode
    bits set in flags (for logging only)."""
    return "|".join([f for f in
                     [(flags & FXF_READ)   and "FXF_READ"   or None,
                      (flags & FXF_WRITE)  and "FXF_WRITE"  or None,
                      (flags & FXF_APPEND) and "FXF_APPEND" or None,
                      (flags & FXF_CREAT)  and "FXF_CREAT"  or None,
                      (flags & FXF_TRUNC)  and "FXF_TRUNC"  or None,
                      (flags & FXF_EXCL)   and "FXF_EXCL"   or None,
                     ]
                     if f])
def _lsLine(name, attrs):
    """Return an ls -l style "longname" line for a directory entry with the
    given SFTP attrs dict, as required by SFTP v3 SSH_FXP_NAME responses."""
    st_uid = "tahoe"
    st_gid = "tahoe"
    st_mtime = attrs.get("mtime", 0)
    st_mode = attrs["permissions"]

    # TODO: check that clients are okay with this being a "?".
    # (They should be because the longname is intended for human
    # consumption.)
    st_size = attrs.get("size", "?")
    # We don't know how many links there really are to this object.
    st_nlink = 1

    # Based on <http://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
    # We can't call the version in Twisted because we might have a version earlier than
    # <http://twistedmatrix.com/trac/changeset/25412> (released in Twisted 8.2).

    mode = st_mode
    perms = array.array('c', '-'*10)
    ft = stat.S_IFMT(mode)
    if   stat.S_ISDIR(ft): perms[0] = 'd'
    elif stat.S_ISREG(ft): perms[0] = '-'
    else: perms[0] = '?'
    # user
    if mode&stat.S_IRUSR: perms[1] = 'r'
    if mode&stat.S_IWUSR: perms[2] = 'w'
    if mode&stat.S_IXUSR: perms[3] = 'x'
    # group
    if mode&stat.S_IRGRP: perms[4] = 'r'
    if mode&stat.S_IWGRP: perms[5] = 'w'
    if mode&stat.S_IXGRP: perms[6] = 'x'
    # other
    if mode&stat.S_IROTH: perms[7] = 'r'
    if mode&stat.S_IWOTH: perms[8] = 'w'
    if mode&stat.S_IXOTH: perms[9] = 'x'
    # suid/sgid never set

    l = perms.tostring() + ' '
    l += str(st_nlink).rjust(5) + ' '
    un = str(st_uid)
    l += un.ljust(9)
    gr = str(st_gid)
    l += gr.ljust(9)
    sz = str(st_size)
    l += sz.rjust(8)
    l += ' '

    day = 60 * 60 * 24
    sixmo = day * 7 * 26
    now = time()
    if st_mtime + sixmo < now or st_mtime > now + day:
        # mtime is more than 6 months ago, or more than one day in the future
        l += strftime("%b %d %Y ", localtime(st_mtime))
    else:
        l += strftime("%b %d %H:%M ", localtime(st_mtime))
    l += name
    return l
195 def _no_write(parent_readonly, child, metadata=None):
196 """Whether child should be listed as having read-only permissions in parent."""
198 if child.is_unknown():
200 elif child.is_mutable():
201 return child.is_readonly()
202 elif parent_readonly or IDirectoryNode.providedBy(child):
205 return metadata is not None and metadata.get('no-write', False)
208 def _populate_attrs(childnode, metadata, size=None):
211 # The permissions must have the S_IFDIR (040000) or S_IFREG (0100000)
212 # bits, otherwise the client may refuse to open a directory.
213 # Also, sshfs run as a non-root user requires files and directories
214 # to be world-readable/writeable.
215 # It is important that we never set the executable bits on files.
217 # Directories and unknown nodes have no size, and SFTP doesn't
218 # require us to make one up.
220 # childnode might be None, meaning that the file doesn't exist yet,
221 # but we're going to write it later.
223 if childnode and childnode.is_unknown():
225 elif childnode and IDirectoryNode.providedBy(childnode):
226 perms = S_IFDIR | 0777
228 # For files, omit the size if we don't immediately know it.
229 if childnode and size is None:
230 size = childnode.get_size()
232 assert isinstance(size, (int, long)) and not isinstance(size, bool), repr(size)
234 perms = S_IFREG | 0666
237 if metadata.get('no-write', False):
238 perms &= S_IFDIR | S_IFREG | 0555 # clear 'w' bits
240 # See webapi.txt for what these times mean.
241 # We would prefer to omit atime, but SFTP version 3 can only
242 # accept mtime if atime is also set.
243 if 'linkmotime' in metadata.get('tahoe', {}):
244 attrs['ctime'] = attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['tahoe']['linkmotime'])
245 elif 'mtime' in metadata:
246 attrs['ctime'] = attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['mtime'])
248 if 'linkcrtime' in metadata.get('tahoe', {}):
249 attrs['createtime'] = _to_sftp_time(metadata['tahoe']['linkcrtime'])
251 attrs['permissions'] = perms
253 # twisted.conch.ssh.filetransfer only implements SFTP version 3,
254 # which doesn't include SSH_FILEXFER_ATTR_FLAGS.
259 def _attrs_to_metadata(attrs):
263 if key == "mtime" or key == "ctime" or key == "createtime":
264 metadata[key] = long(attrs[key])
265 elif key.startswith("ext_"):
266 metadata[key] = str(attrs[key])
268 perms = attrs.get('permissions', stat.S_IWUSR)
269 if not (perms & stat.S_IWUSR):
270 metadata['no-write'] = True
def _direntry_for(filenode_or_parent, childname, filenode=None):
    """Return the direntry string identifying a writeable file: the parent's
    write URI + "/" + UTF-8 childname for a link in a directory, or the file's
    own write URI when childname is None (a mutable file). Returns None when
    no write URI is available."""
    assert isinstance(childname, (unicode, NoneType)), childname

    if childname is None:
        filenode_or_parent = filenode

    if filenode_or_parent:
        rw_uri = filenode_or_parent.get_write_uri()
        if rw_uri and childname:
            return rw_uri + "/" + childname.encode('utf-8')
        else:
            return rw_uri

    return None
class EncryptedTemporaryFile(PrefixingLogMixin):
    """A file-like wrapper around a tempfile.TemporaryFile whose on-disk
    contents are encrypted with a throwaway AES-128 key (AES-CTR, with the
    counter derived from the byte offset), so plaintext never hits disk."""
    # not implemented: next, readline, readlines, xreadlines, writelines

    def __init__(self):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        self.file = tempfile.TemporaryFile()
        self.key = os.urandom(16)  # AES-128

    def _crypt(self, offset, data):
        """Encrypt/decrypt data as it would appear at the given byte offset
        (CTR mode makes the two operations identical)."""
        # TODO: use random-access AES (pycryptopp ticket #18)
        offset_big = offset // 16
        offset_small = offset % 16
        iv = binascii.unhexlify("%032x" % offset_big)
        cipher = AES(self.key, iv=iv)
        # Advance the keystream to the intra-block position before processing.
        cipher.process("\x00"*offset_small)
        return cipher.process(data)

    def close(self):
        self.file.close()

    def flush(self):
        self.file.flush()

    def seek(self, offset, whence=0):  # 0 = SEEK_SET
        if noisy: self.log(".seek(%r, %r)" % (offset, whence), level=NOISY)
        self.file.seek(offset, whence)

    def tell(self):
        offset = self.file.tell()
        if noisy: self.log(".tell() = %r" % (offset,), level=NOISY)
        return offset

    def read(self, size=-1):
        if noisy: self.log(".read(%r)" % (size,), level=NOISY)
        index = self.file.tell()
        ciphertext = self.file.read(size)
        plaintext = self._crypt(index, ciphertext)
        return plaintext

    def write(self, plaintext):
        if noisy: self.log(".write(<data of length %r>)" % (len(plaintext),), level=NOISY)
        index = self.file.tell()
        ciphertext = self._crypt(index, plaintext)
        self.file.write(ciphertext)

    def truncate(self, newsize):
        # NOTE(review): truncating to a larger size leaves a hole whose
        # zeroes will decrypt to keystream garbage; callers compensate by
        # explicitly writing zeroes (see OverwriteableFileConsumer.overwrite).
        if noisy: self.log(".truncate(%r)" % (newsize,), level=NOISY)
        self.file.truncate(newsize)
341 class OverwriteableFileConsumer(PrefixingLogMixin):
342 implements(IFinishableConsumer)
343 """I act both as a consumer for the download of the original file contents, and as a
344 wrapper for a temporary file that records the downloaded data and any overwrites.
345 I use a priority queue to keep track of which regions of the file have been overwritten
346 but not yet downloaded, so that the download does not clobber overwritten data.
347 I use another priority queue to record milestones at which to make callbacks
348 indicating that a given number of bytes have been downloaded.
350 The temporary file reflects the contents of the file that I represent, except that:
351 - regions that have neither been downloaded nor overwritten, if present,
353 - the temporary file may be shorter than the represented file (it is never longer).
354 The latter's current size is stored in self.current_size.
356 This abstraction is mostly independent of SFTP. Consider moving it, if it is found
357 useful for other frontends."""
359 def __init__(self, download_size, tempfile_maker):
360 PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
361 if noisy: self.log(".__init__(%r, %r)" % (download_size, tempfile_maker), level=NOISY)
362 self.download_size = download_size
363 self.current_size = download_size
364 self.f = tempfile_maker()
366 self.milestones = [] # empty heap of (offset, d)
367 self.overwrites = [] # empty heap of (start, end)
368 self.is_closed = False
369 self.done = self.when_reached(download_size) # adds a milestone
371 def _signal_done(ign):
372 if noisy: self.log("DONE", level=NOISY)
374 self.done.addCallback(_signal_done)
380 def get_current_size(self):
381 return self.current_size
383 def set_current_size(self, size):
384 if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" %
385 (size, self.current_size, self.downloaded), level=NOISY)
386 if size < self.current_size or size < self.downloaded:
387 self.f.truncate(size)
388 if size > self.current_size:
389 self.overwrite(self.current_size, "\x00" * (size - self.current_size))
390 self.current_size = size
392 # invariant: self.download_size <= self.current_size
393 if size < self.download_size:
394 self.download_size = size
395 if self.downloaded >= self.download_size:
398 def registerProducer(self, p, streaming):
399 if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)
402 # call resumeProducing once to start things off
411 def write(self, data):
412 if noisy: self.log(".write(<data of length %r>)" % (len(data),), level=NOISY)
416 if self.downloaded >= self.download_size:
419 next_downloaded = self.downloaded + len(data)
420 if next_downloaded > self.download_size:
421 data = data[:(self.download_size - self.downloaded)]
423 while len(self.overwrites) > 0:
424 (start, end) = self.overwrites[0]
425 if start >= next_downloaded:
426 # This and all remaining overwrites are after the data we just downloaded.
428 if start > self.downloaded:
429 # The data we just downloaded has been partially overwritten.
430 # Write the prefix of it that precedes the overwritten region.
431 self.f.seek(self.downloaded)
432 self.f.write(data[:(start - self.downloaded)])
434 # This merges consecutive overwrites if possible, which allows us to detect the
435 # case where the download can be stopped early because the remaining region
436 # to download has already been fully overwritten.
437 heapq.heappop(self.overwrites)
438 while len(self.overwrites) > 0:
439 (start1, end1) = self.overwrites[0]
443 heapq.heappop(self.overwrites)
445 if end >= next_downloaded:
446 # This overwrite extends past the downloaded data, so there is no
447 # more data to consider on this call.
448 heapq.heappush(self.overwrites, (next_downloaded, end))
449 self._update_downloaded(next_downloaded)
451 elif end >= self.downloaded:
452 data = data[(end - self.downloaded):]
453 self._update_downloaded(end)
455 self.f.seek(self.downloaded)
457 self._update_downloaded(next_downloaded)
459 def _update_downloaded(self, new_downloaded):
460 self.downloaded = new_downloaded
461 milestone = new_downloaded
462 if len(self.overwrites) > 0:
463 (start, end) = self.overwrites[0]
464 if start <= new_downloaded and end > milestone:
467 while len(self.milestones) > 0:
468 (next, d) = self.milestones[0]
471 if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
472 heapq.heappop(self.milestones)
473 eventually_callback(d)(None)
475 if milestone >= self.download_size:
478 def overwrite(self, offset, data):
479 if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
480 if offset > self.current_size:
481 # Normally writing at an offset beyond the current end-of-file
482 # would leave a hole that appears filled with zeroes. However, an
483 # EncryptedTemporaryFile doesn't behave like that (if there is a
484 # hole in the file on disk, the zeroes that are read back will be
485 # XORed with the keystream). So we must explicitly write zeroes in
486 # the gap between the current EOF and the offset.
488 self.f.seek(self.current_size)
489 self.f.write("\x00" * (offset - self.current_size))
490 start = self.current_size
496 end = offset + len(data)
497 self.current_size = max(self.current_size, end)
498 if end > self.downloaded:
499 heapq.heappush(self.overwrites, (start, end))
501 def read(self, offset, length):
502 """When the data has been read, callback the Deferred that we return with this data.
503 Otherwise errback the Deferred that we return.
504 The caller must perform no more overwrites until the Deferred has fired."""
506 if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
507 if offset >= self.current_size:
508 def _eof(): raise EOFError("read past end of file")
509 return defer.execute(_eof)
511 if offset + length > self.current_size:
512 length = self.current_size - offset
513 if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY)
515 needed = min(offset + length, self.download_size)
516 d = self.when_reached(needed)
518 # It is not necessarily the case that self.downloaded >= needed, because
519 # the file might have been truncated (thus truncating the download) and
522 assert self.current_size >= offset + length, (self.current_size, offset, length)
523 if noisy: self.log("self.f = %r" % (self.f,), level=NOISY)
525 return self.f.read(length)
526 d.addCallback(_reached)
529 def when_reached(self, index):
530 if noisy: self.log(".when_reached(%r)" % (index,), level=NOISY)
531 if index <= self.downloaded: # already reached
532 if noisy: self.log("already reached %r" % (index,), level=NOISY)
533 return defer.succeed(None)
536 if noisy: self.log("reached %r" % (index,), level=NOISY)
538 d.addCallback(_reached)
539 heapq.heappush(self.milestones, (index, d))
546 while len(self.milestones) > 0:
547 (next, d) = self.milestones[0]
548 if noisy: self.log("MILESTONE FINISH %r %r" % (next, d), level=NOISY)
549 heapq.heappop(self.milestones)
550 # The callback means that the milestone has been reached if
551 # it is ever going to be. Note that the file may have been
552 # truncated to before the milestone.
553 eventually_callback(d)(None)
556 if not self.is_closed:
557 self.is_closed = True
560 except BaseException, e:
561 self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD)
564 def unregisterProducer(self):
568 SIZE_THRESHOLD = 1000
571 class ShortReadOnlySFTPFile(PrefixingLogMixin):
572 implements(ISFTPFile)
573 """I represent a file handle to a particular file on an SFTP connection.
574 I am used only for short immutable files opened in read-only mode.
575 The file contents are downloaded to memory when I am created."""
577 def __init__(self, userpath, filenode, metadata):
578 PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
579 if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY)
581 assert isinstance(userpath, str) and IFileNode.providedBy(filenode), (userpath, filenode)
582 self.filenode = filenode
583 self.metadata = metadata
584 self.async = download_to_data(filenode)
587 def readChunk(self, offset, length):
588 request = ".readChunk(%r, %r)" % (offset, length)
589 self.log(request, level=OPERATIONAL)
592 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
593 return defer.execute(_closed)
597 if noisy: self.log("_read(<data of length %r>) in readChunk(%r, %r)" % (len(data), offset, length), level=NOISY)
599 # "In response to this request, the server will read as many bytes as it
600 # can from the file (up to 'len'), and return them in a SSH_FXP_DATA
601 # message. If an error occurs or EOF is encountered before reading any
602 # data, the server will respond with SSH_FXP_STATUS. For normal disk
603 # files, it is guaranteed that this will read the specified number of
604 # bytes, or up to end of file."
606 # i.e. we respond with an EOF error iff offset is already at EOF.
608 if offset >= len(data):
609 eventually_errback(d)(SFTPError(FX_EOF, "read at or past end of file"))
611 eventually_callback(d)(data[offset:min(offset+length, len(data))])
613 self.async.addCallbacks(_read, eventually_errback(d))
614 d.addBoth(_convert_error, request)
617 def writeChunk(self, offset, data):
618 self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
620 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
621 return defer.execute(_denied)
624 self.log(".close()", level=OPERATIONAL)
627 return defer.succeed(None)
630 request = ".getAttrs()"
631 self.log(request, level=OPERATIONAL)
634 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
635 return defer.execute(_closed)
637 d = defer.execute(_populate_attrs, self.filenode, self.metadata)
638 d.addBoth(_convert_error, request)
641 def setAttrs(self, attrs):
642 self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
643 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
644 return defer.execute(_denied)
647 class GeneralSFTPFile(PrefixingLogMixin):
648 implements(ISFTPFile)
649 """I represent a file handle to a particular file on an SFTP connection.
650 I wrap an instance of OverwriteableFileConsumer, which is responsible for
651 storing the file contents. In order to allow write requests to be satisfied
652 immediately, there is effectively a FIFO queue between requests made to this
653 file handle, and requests to my OverwriteableFileConsumer. This queue is
654 implemented by the callback chain of self.async.
656 When first constructed, I am in an 'unopened' state that causes most
657 operations to be delayed until 'open' is called."""
659 def __init__(self, userpath, flags, close_notify, convergence):
660 PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
661 if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" %
662 (userpath, flags, _repr_flags(flags), close_notify), level=NOISY)
664 assert isinstance(userpath, str), userpath
665 self.userpath = userpath
667 self.close_notify = close_notify
668 self.convergence = convergence
669 self.async = defer.Deferred()
670 # Creating or truncating the file is a change, but if FXF_EXCL is set, a zero-length file has already been created.
671 self.has_changed = (flags & (FXF_CREAT | FXF_TRUNC)) and not (flags & FXF_EXCL)
673 self.abandoned = False
675 self.childname = None
679 # self.consumer should only be relied on in callbacks for self.async, since it might
680 # not be set before then.
683 def open(self, parent=None, childname=None, filenode=None, metadata=None):
684 self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" %
685 (parent, childname, filenode, metadata), level=OPERATIONAL)
687 assert isinstance(childname, (unicode, NoneType)), childname
688 # If the file has been renamed, the new (parent, childname) takes precedence.
689 if self.parent is None:
691 if self.childname is None:
692 self.childname = childname
693 self.filenode = filenode
694 self.metadata = metadata
696 assert not self.closed, self
697 tempfile_maker = EncryptedTemporaryFile
699 if (self.flags & FXF_TRUNC) or not filenode:
700 # We're either truncating or creating the file, so we don't need the old contents.
701 self.consumer = OverwriteableFileConsumer(0, tempfile_maker)
702 self.consumer.finish()
704 assert IFileNode.providedBy(filenode), filenode
706 # TODO: use download interface described in #993 when implemented.
707 if filenode.is_mutable():
708 self.async.addCallback(lambda ign: filenode.download_best_version())
709 def _downloaded(data):
710 self.consumer = OverwriteableFileConsumer(len(data), tempfile_maker)
711 self.consumer.write(data)
712 self.consumer.finish()
714 self.async.addCallback(_downloaded)
716 download_size = filenode.get_size()
717 assert download_size is not None, "download_size is None"
718 self.consumer = OverwriteableFileConsumer(download_size, tempfile_maker)
720 if noisy: self.log("_read immutable", level=NOISY)
721 filenode.read(self.consumer, 0, None)
722 self.async.addCallback(_read)
724 eventually_callback(self.async)(None)
726 if noisy: self.log("open done", level=NOISY)
729 def get_userpath(self):
732 def get_direntry(self):
733 return _direntry_for(self.parent, self.childname)
735 def rename(self, new_userpath, new_parent, new_childname):
736 self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL)
738 assert isinstance(new_userpath, str) and isinstance(new_childname, unicode), (new_userpath, new_childname)
739 self.userpath = new_userpath
740 self.parent = new_parent
741 self.childname = new_childname
744 self.log(".abandon()", level=OPERATIONAL)
746 self.abandoned = True
748 def sync(self, ign=None):
749 # The ign argument allows some_file.sync to be used as a callback.
750 self.log(".sync()", level=OPERATIONAL)
753 self.async.addBoth(eventually_callback(d))
755 if noisy: self.log("_done(%r) in .sync()" % (res,), level=NOISY)
760 def readChunk(self, offset, length):
761 request = ".readChunk(%r, %r)" % (offset, length)
762 self.log(request, level=OPERATIONAL)
764 if not (self.flags & FXF_READ):
765 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
766 return defer.execute(_denied)
769 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
770 return defer.execute(_closed)
774 if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY)
775 d2 = self.consumer.read(offset, length)
776 d2.addCallbacks(eventually_callback(d), eventually_errback(d))
777 # It is correct to drop d2 here.
779 self.async.addCallbacks(_read, eventually_errback(d))
780 d.addBoth(_convert_error, request)
783 def writeChunk(self, offset, data):
784 self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)
786 if not (self.flags & FXF_WRITE):
787 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
788 return defer.execute(_denied)
791 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
792 return defer.execute(_closed)
794 self.has_changed = True
796 # Note that we return without waiting for the write to occur. Reads and
797 # close wait for prior writes, and will fail if any prior operation failed.
798 # This is ok because SFTP makes no guarantee that the write completes
799 # before the request does. In fact it explicitly allows write errors to be
800 # delayed until close:
801 # "One should note that on some server platforms even a close can fail.
802 # This can happen e.g. if the server operating system caches writes,
803 # and an error occurs while flushing cached writes during the close."
806 if noisy: self.log("_write in .writeChunk(%r, <data of length %r>), current_size = %r" %
807 (offset, len(data), self.consumer.get_current_size()), level=NOISY)
808 # FXF_APPEND means that we should always write at the current end of file.
809 write_offset = offset
810 if self.flags & FXF_APPEND:
811 write_offset = self.consumer.get_current_size()
813 self.consumer.overwrite(write_offset, data)
814 if noisy: self.log("overwrite done", level=NOISY)
816 self.async.addCallback(_write)
817 # don't addErrback to self.async, just allow subsequent async ops to fail.
818 return defer.succeed(None)
822 self.log(request, level=OPERATIONAL)
825 return defer.succeed(None)
827 # This means that close has been called, not that the close has succeeded.
830 if not (self.flags & (FXF_WRITE | FXF_CREAT)):
831 def _readonly_close():
833 self.consumer.close()
834 return defer.execute(_readonly_close)
836 # We must capture the abandoned, parent, and childname variables synchronously
837 # at the close call. This is needed by the correctness arguments in the comments
838 # for _abandon_any_heisenfiles and _rename_heisenfiles.
839 # Note that the file must have been opened before it can be closed.
840 abandoned = self.abandoned
842 childname = self.childname
844 # has_changed is set when writeChunk is called, not when the write occurs, so
845 # it is correct to optimize out the commit if it is False at the close call.
846 has_changed = self.has_changed
849 if noisy: self.log("_committed(%r)" % (res,), level=NOISY)
851 self.consumer.close()
853 # We must close_notify before re-firing self.async.
854 if self.close_notify:
855 self.close_notify(self.userpath, self.parent, self.childname, self)
859 d2 = self.consumer.when_done()
860 if self.filenode and self.filenode.is_mutable():
861 self.log("update mutable file %r childname=%r metadata=%r" % (self.filenode, childname, self.metadata), level=OPERATIONAL)
862 if self.metadata.get('no-write', False) and not self.filenode.is_readonly():
863 assert parent and childname, (parent, childname, self.metadata)
864 d2.addCallback(lambda ign: parent.set_metadata_for(childname, self.metadata))
866 d2.addCallback(lambda ign: self.consumer.get_current_size())
867 d2.addCallback(lambda size: self.consumer.read(0, size))
868 d2.addCallback(lambda new_contents: self.filenode.overwrite(new_contents))
871 self.log("_add_file childname=%r" % (childname,), level=OPERATIONAL)
872 u = FileHandle(self.consumer.get_file(), self.convergence)
873 return parent.add_file(childname, u, metadata=self.metadata)
874 d2.addCallback(_add_file)
876 d2.addBoth(_committed)
881 # If the file has been abandoned, we don't want the close operation to get "stuck",
882 # even if self.async fails to re-fire. Doing the close independently of self.async
883 # in that case ensures that dropping an ssh connection is sufficient to abandon
884 # any heisenfiles that were not explicitly closed in that connection.
885 if abandoned or not has_changed:
886 d.addCallback(_committed)
888 self.async.addCallback(_close)
890 self.async.addCallbacks(eventually_callback(d), eventually_errback(d))
891 d.addBoth(_convert_error, request)
895 request = ".getAttrs()"
896 self.log(request, level=OPERATIONAL)
899 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
900 return defer.execute(_closed)
902 # Optimization for read-only handles, when we already know the metadata.
903 if not (self.flags & (FXF_WRITE | FXF_CREAT)) and self.metadata and self.filenode and not self.filenode.is_mutable():
904 return defer.succeed(_populate_attrs(self.filenode, self.metadata))
908 if noisy: self.log("_get(%r) in %r, filenode = %r, metadata = %r" % (ign, request, self.filenode, self.metadata), level=NOISY)
910 # self.filenode might be None, but that's ok.
911 attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size())
912 eventually_callback(d)(attrs)
914 self.async.addCallbacks(_get, eventually_errback(d))
915 d.addBoth(_convert_error, request)
918 def setAttrs(self, attrs, only_if_at=None):
919 request = ".setAttrs(%r, only_if_at=%r)" % (attrs, only_if_at)
920 self.log(request, level=OPERATIONAL)
922 if not (self.flags & FXF_WRITE):
923 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
924 return defer.execute(_denied)
927 def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
928 return defer.execute(_closed)
930 size = attrs.get("size", None)
931 if size is not None and (not isinstance(size, (int, long)) or size < 0):
932 def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
933 return defer.execute(_bad)
937 if noisy: self.log("_set(%r) in %r" % (ign, request), level=NOISY)
938 current_direntry = _direntry_for(self.parent, self.childname, self.filenode)
939 if only_if_at and only_if_at != current_direntry:
940 if noisy: self.log("not setting attributes: current_direntry=%r in %r" %
941 (current_direntry, request), level=NOISY)
945 self.metadata = update_metadata(self.metadata, _attrs_to_metadata(attrs), now)
947 # TODO: should we refuse to truncate a file opened with FXF_APPEND?
948 # <http://allmydata.org/trac/tahoe-lafs/ticket/1037#comment:20>
949 self.consumer.set_current_size(size)
950 eventually_callback(d)(None)
952 self.async.addCallbacks(_set, eventually_errback(d))
953 d.addBoth(_convert_error, request)
958 def __init__(self, items):
968 def __init__(self, value):
972 # A "heisenfile" is a file that has been opened with write flags
973 # (FXF_WRITE and/or FXF_CREAT) and not yet close-notified.
974 # 'all_heisenfiles' maps from a direntry string to a list of
977 # A direntry string is parent_write_uri + "/" + childname_utf8 for
978 # an immutable file, or file_write_uri for a mutable file.
979 # Updates to this dict are single-threaded.
984 global all_heisenfiles
987 class SFTPUserHandler(ConchUser, PrefixingLogMixin):
988 implements(ISFTPServer)
989 def __init__(self, client, rootnode, username):
# One SFTPUserHandler exists per authenticated user session. 'client' is
# the Tahoe client object (supplies the convergence secret), 'rootnode' is
# this user's root dirnode, and 'username' is used for log prefixing.
990 ConchUser.__init__(self)
991 PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=username)
992 if noisy: self.log(".__init__(%r, %r, %r)" % (client, rootnode, username), level=NOISY)
# Wire up Conch dispatch: an SSH "session" channel requesting the "sftp"
# subsystem is served by Twisted's FileTransferServer, which calls back
# into the ISFTPServer methods below.
994 self.channelLookup["session"] = session.SSHSession
995 self.subsystemLookup["sftp"] = FileTransferServer
997 self._client = client
998 self._root = rootnode
999 self._username = username
1000 self._convergence = client.convergence
1002 # maps from UTF-8 paths for this user, to files written and still open
1003 self._heisenfiles = {}
def gotVersion(self, otherVersion, extData):
    """Answer the client's SFTP version/extension negotiation.

    We advertise the same extensions as the OpenSSH SFTP server; see
    <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
    """
    self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL)

    supported_extensions = [
        ('posix-rename@openssh.com', '1'),
        ('statvfs@openssh.com', '2'),
        ('fstatvfs@openssh.com', '2'),
    ]
    return dict(supported_extensions)
1016 self.log(".logout()", level=OPERATIONAL)
1018 for files in self._heisenfiles.itervalues():
def _add_heisenfile_by_path(self, file):
    """Record an open-for-writing file under its UTF-8 userpath.

    Several handles may be open at the same path simultaneously, so each
    userpath maps to a list of heisenfiles.
    """
    self.log("._add_heisenfile_by_path(%r)" % (file,), level=OPERATIONAL)

    self._heisenfiles.setdefault(file.get_userpath(), []).append(file)
1031 def _add_heisenfile_by_direntry(self, file):
# Record an open-for-writing file in the module-global all_heisenfiles
# dict, keyed by its direntry string, so that operations from any session
# on the same directory entry can find it to sync/abandon/rename it.
1032 self.log("._add_heisenfile_by_direntry(%r)" % (file,), level=OPERATIONAL)
1034 direntry = file.get_direntry()
# NOTE(review): the numbering gap at 1035 suggests an elided guard line
# (presumably 'if direntry:', since a file without a parent entry may have
# no direntry) and the gap at 1038 an elided 'else:' — confirm against the
# full source.
1036 if direntry in all_heisenfiles:
1037 all_heisenfiles[direntry] += [file]
1039 all_heisenfiles[direntry] = [file]
1041 def _abandon_any_heisenfiles(self, userpath, direntry):
# Abandon every open-for-writing file matching this userpath or direntry:
# mark it so it will not be committed, remove it from both heisenfile
# dicts, then sync it. Returns a Deferred firing with True iff any files
# were abandoned (see the explanation below).
1042 request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry)
1043 self.log(request, level=OPERATIONAL)
1045 assert isinstance(userpath, str), userpath
1047 # First we synchronously mark all heisenfiles matching the userpath or direntry
1048 # as abandoned, and remove them from the two heisenfile dicts. Then we .sync()
1049 # each file that we abandoned.
1051 # For each file, the call to .abandon() occurs:
1052 # * before the file is closed, in which case it will never be committed
1053 # (uploaded+linked or published); or
1054 # * after it is closed but before it has been close_notified, in which case the
1055 # .sync() ensures that it has been committed (successfully or not) before we
1058 # This avoids a race that might otherwise cause the file to be committed after
1059 # the remove operation has completed.
1061 # We return a Deferred that fires with True if any files were abandoned (this
1062 # does not mean that they were not committed; it is used to determine whether
1063 # a NoSuchChildError from the attempt to delete the file should be suppressed).
# NOTE(review): numbering gaps (1064-1065, 1074-1082, 1085+) show elided
# lines — the initialization of 'files', the loop that calls
# .abandon()/.sync() on each file, and the final return of 'd'. Verify
# against the full source.
1066 if direntry in all_heisenfiles:
1067 files = all_heisenfiles[direntry]
1068 del all_heisenfiles[direntry]
1069 if userpath in self._heisenfiles:
1070 files += self._heisenfiles[userpath]
1071 del self._heisenfiles[userpath]
1073 if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
1078 d = defer.succeed(None)
1083 self.log("done %r" % (request,), level=OPERATIONAL)
1084 return len(files) > 0
1088 def _rename_heisenfiles(self, from_userpath, from_parent, from_childname,
1089 to_userpath, to_parent, to_childname, overwrite=True):
# Synchronously re-key every open-for-writing file matching the 'from'
# userpath or direntry to the 'to' location, then sync each renamed file.
# Returns a Deferred firing with True iff any files were renamed, or
# failing with FX_PERMISSION_DENIED if overwrite=False and heisenfiles
# already exist at the destination (see explanation below).
1090 request = ("._rename_heisenfiles(%r, %r, %r, %r, %r, %r, overwrite=%r)" %
1091 (from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite))
1092 self.log(request, level=OPERATIONAL)
1094 assert (isinstance(from_userpath, str) and isinstance(from_childname, unicode) and
1095 isinstance(to_userpath, str) and isinstance(to_childname, unicode)), \
1096 (from_userpath, from_childname, to_userpath, to_childname)
1098 if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY)
1100 # First we synchronously rename all heisenfiles matching the userpath or direntry.
1101 # Then we .sync() each file that we renamed.
1103 # For each file, the call to .rename occurs:
1104 # * before the file is closed, in which case it will be committed at the
1106 # * after it is closed but before it has been close_notified, in which case the
1107 # .sync() ensures that it has been committed (successfully or not) before we
1110 # This avoids a race that might otherwise cause the file to be committed at the
1111 # old name after the rename operation has completed.
1113 # Note that if overwrite is False, the caller should already have checked
1114 # whether a real direntry exists at the destination. It is possible that another
1115 # direntry (heisen or real) comes to exist at the destination after that check,
1116 # but in that case it is correct for the rename to succeed (and for the commit
1117 # of the heisenfile at the destination to possibly clobber the other entry, since
1118 # that can happen anyway when we have concurrent write handles to the same direntry).
1120 # We return a Deferred that fires with True if any files were renamed (this
1121 # does not mean that they were not committed; it is used to determine whether
1122 # a NoSuchChildError from the rename attempt should be suppressed). If overwrite
1123 # is False and there were already heisenfiles at the destination userpath or
1124 # direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED).
1126 from_direntry = _direntry_for(from_parent, from_childname)
1127 to_direntry = _direntry_for(to_parent, to_childname)
1129 if noisy: self.log("from_direntry = %r, to_direntry = %r, len(all_heisenfiles) = %r, len(self._heisenfiles) = %r in %r" %
1130 (from_direntry, to_direntry, len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY)
# Refuse the rename up front if a non-overwriting rename would collide
# with heisenfiles already at the destination.
1132 if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles):
1133 def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
1134 if noisy: self.log("existing", level=NOISY)
1135 return defer.execute(_existing)
# NOTE(review): numbering gaps (1137, 1144, 1146, 1151, 1154-1156, 1160+)
# show elided lines — the initialization of 'from_files', the .sync() loop
# body, and the final return of 'd'. Verify against the full source.
1138 if from_direntry in all_heisenfiles:
1139 from_files = all_heisenfiles[from_direntry]
1140 del all_heisenfiles[from_direntry]
1141 if from_userpath in self._heisenfiles:
1142 from_files += self._heisenfiles[from_userpath]
1143 del self._heisenfiles[from_userpath]
1145 if noisy: self.log("from_files = %r in %r" % (from_files, request), level=NOISY)
# Re-key each file to the destination and re-register it in both dicts.
1147 for f in from_files:
1148 f.rename(to_userpath, to_parent, to_childname)
1149 self._add_heisenfile_by_path(f)
1150 self._add_heisenfile_by_direntry(f)
1152 d = defer.succeed(None)
1153 for f in from_files:
1157 if noisy: self.log("done: len(all_heisenfiles) = %r, len(self._heisenfiles) = %r in %r" %
1158 (len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY)
1159 return len(from_files) > 0
1163 def _update_attrs_for_heisenfiles(self, userpath, direntry, attrs):
# Apply 'attrs' to every open-for-writing file matching this userpath or
# direntry, using each file's setAttrs(only_if_at=direntry) so that
# path-matched files are only updated if they are really at this direntry.
# Returns a Deferred firing with True iff any matching files were found.
1164 request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs)
1165 self.log(request, level=OPERATIONAL)
1167 assert isinstance(userpath, str) and isinstance(direntry, str), (userpath, direntry)
# NOTE(review): 'files += self._heisenfiles[userpath]' below would mutate
# the list object stored in all_heisenfiles[direntry] when both branches
# are taken, unless an elided line (gap at 1168-1169) first rebinds
# 'files' to a fresh list — confirm against the full source.
1170 if direntry in all_heisenfiles:
1171 files = all_heisenfiles[direntry]
1172 if userpath in self._heisenfiles:
1173 files += self._heisenfiles[userpath]
1175 if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
1177 # We set the metadata for all heisenfiles at this path or direntry.
1178 # Since a direntry includes a write URI, we must have authority to
1179 # change the metadata of heisenfiles found in the all_heisenfiles dict.
1180 # However that's not necessarily the case for heisenfiles found by
1181 # path. Therefore we tell the setAttrs method of each file to only
1182 # perform the update if the file is at the correct direntry.
1184 d = defer.succeed(None)
# NOTE(review): the 'for f in files:' line (gap at 1185) and the trailing
# 'return d' (gap at 1192+) appear elided from this listing.
1186 d.addBoth(f.setAttrs, attrs, only_if_at=direntry)
1189 self.log("done %r" % (request,), level=OPERATIONAL)
1190 # TODO: this should not return True if only_if_at caused all files to be skipped.
1191 return len(files) > 0
1195 def _sync_heisenfiles(self, userpath, direntry, ignore=None):
# Wait for any pending commit (upload/publish) of every open-for-writing
# file matching this userpath or direntry, excluding 'ignore'. Used to
# serialize operations that must observe the committed state.
1196 request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore)
1197 self.log(request, level=OPERATIONAL)
# direntry may be None here (unlike _update_attrs_for_heisenfiles).
1199 assert isinstance(userpath, str) and isinstance(direntry, (str, NoneType)), (userpath, direntry)
1202 if direntry in all_heisenfiles:
1203 files = all_heisenfiles[direntry]
1204 if userpath in self._heisenfiles:
1205 files += self._heisenfiles[userpath]
1207 if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)
1209 d = defer.succeed(None)
# NOTE(review): numbering gaps (1200-1201, 1210-1214, 1216+) show elided
# lines — the initialization of 'files', the loop that chains f.sync()
# for each file not 'ignore', and the trailing 'return d'. Verify against
# the full source.
1215 self.log("done %r" % (request,), level=OPERATIONAL)
def _remove_heisenfile(self, userpath, parent, childname, file_to_remove):
    """Forget one heisenfile: drop it from the global all_heisenfiles dict
    (keyed by direntry) and from this user's _heisenfiles dict (keyed by
    userpath), deleting either dict entry when its list becomes empty."""
    if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY)

    assert isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)), (userpath, childname)

    direntry = _direntry_for(parent, childname)

    def _drop(mapping, key):
        # Remove file_to_remove from mapping[key]; delete the entry entirely
        # when no other heisenfiles remain at that key.
        if key in mapping:
            remaining = [f for f in mapping[key] if f is not file_to_remove]
            if remaining:
                mapping[key] = remaining
            else:
                del mapping[key]

    _drop(all_heisenfiles, direntry)
    _drop(self._heisenfiles, userpath)

    if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY)
1244 def _make_file(self, existing_file, userpath, flags, parent=None, childname=None, filenode=None, metadata=None):
# Choose and open the concrete file implementation for an open request:
# a ShortReadOnlySFTPFile for small immutable read-only opens, otherwise
# the given existing_file or a new GeneralSFTPFile. The opened file is
# registered in all_heisenfiles by its direntry.
1245 if noisy: self.log("._make_file(%r, %r, %r = %r, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
1246 (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata),
1249 assert (isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)) and
1250 (metadata is None or 'no-write' in metadata)), (userpath, childname, metadata)
1252 writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0
1253 direntry = _direntry_for(parent, childname, filenode)
# First wait for any pending commit of other handles at this location,
# excluding the handle we are (re)opening.
1255 d = self._sync_heisenfiles(userpath, direntry, ignore=existing_file)
# Small immutable file opened read-only: use the cheap short-read class.
1257 if not writing and (flags & FXF_READ) and filenode and not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD:
1258 d.addCallback(lambda ign: ShortReadOnlySFTPFile(userpath, filenode, metadata))
# NOTE(review): numbering gaps (1247-1248, 1251, 1259-1261, 1263, 1267,
# 1269, 1271+) show elided lines — including the 'else:' for the general
# case, 'file.open(...)' return plumbing, and the trailing 'return d'.
# Verify against the full source.
1262 close_notify = self._remove_heisenfile
1264 d.addCallback(lambda ign: existing_file or GeneralSFTPFile(userpath, flags, close_notify, self._convergence))
1265 def _got_file(file):
1266 file.open(parent=parent, childname=childname, filenode=filenode, metadata=metadata)
1268 self._add_heisenfile_by_direntry(file)
1270 d.addCallback(_got_file)
1273 def openFile(self, pathstring, flags, attrs, delay=None):
# Implement SSH_FXP_OPEN for both reading and writing. Validates the flag
# combination, pre-registers a heisenfile for write/create opens (so that
# getAttrs/rename/remove racing with this open can find it), then resolves
# the path into either case 1 (a bare /uri/FILECAP) or case 2 (a child of
# a parent directory) and hands off to _make_file. 'delay', if given, is a
# Deferred to wait on before starting (used when reopening a file whose
# previous commit is still in flight).
1274 request = ".openFile(%r, %r = %r, %r, delay=%r)" % (pathstring, flags, _repr_flags(flags), attrs, delay)
1275 self.log(request, level=OPERATIONAL)
1277 # This is used for both reading and writing.
1278 # First exclude invalid combinations of flags, and empty paths.
1280 if not (flags & (FXF_READ | FXF_WRITE)):
1281 def _bad_readwrite():
1282 raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
1283 return defer.execute(_bad_readwrite)
1285 if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
1286 def _bad_exclcreat():
1287 raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
1288 return defer.execute(_bad_exclcreat)
1290 path = self._path_from_string(pathstring)
# NOTE(review): the guard line that triggers _emptypath (gap at 1291,
# presumably 'if not path:') is elided from this listing; other gaps
# (1310/1312 'else: file = None', 1353-1354, 1380-1381, 1403-1404, 1420,
# 1422, 1448, 1455, 1461, 1464, 1468, 1470, 1473) similarly elide 'else:'
# branches, inner 'def' lines, and 'return' statements. Verify against
# the full source.
1292 def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
1293 return defer.execute(_emptypath)
1295 # The combination of flags is potentially valid.
1297 # To work around clients that have race condition bugs, a getAttr, rename, or
1298 # remove request following an 'open' request with FXF_WRITE or FXF_CREAT flags,
1299 # should succeed even if the 'open' request has not yet completed. So we now
1300 # synchronously add a file object into the self._heisenfiles dict, indexed
1301 # by its UTF-8 userpath. (We can't yet add it to the all_heisenfiles dict,
1302 # because we don't yet have a user-independent path for the file.) The file
1303 # object does not know its filenode, parent, or childname at this point.
1305 userpath = self._path_to_utf8(path)
1307 if flags & (FXF_WRITE | FXF_CREAT):
1308 file = GeneralSFTPFile(userpath, flags, self._remove_heisenfile, self._convergence)
1309 self._add_heisenfile_by_path(file)
1311 # We haven't decided which file implementation to use yet.
1314 desired_metadata = _attrs_to_metadata(attrs)
1316 # Now there are two major cases:
1318 # 1. The path is specified as /uri/FILECAP, with no parent directory.
1319 # If the FILECAP is mutable and writeable, then we can open it in write-only
1320 # or read/write mode (non-exclusively), otherwise we can only open it in
1321 # read-only mode. The open should succeed immediately as long as FILECAP is
1322 # a valid known filecap that grants the required permission.
1324 # 2. The path is specified relative to a parent. We find the parent dirnode and
1325 # get the child's URI and metadata if it exists. There are four subcases:
1326 # a. the child does not exist: FXF_CREAT must be set, and we must be able
1327 # to write to the parent directory.
1328 # b. the child exists but is not a valid known filecap: fail
1329 # c. the child is mutable: if we are trying to open it write-only or
1330 # read/write, then we must be able to write to the file.
1331 # d. the child is immutable: if we are trying to open it write-only or
1332 # read/write, then we must be able to write to the parent directory.
1334 # To reduce latency, open normally succeeds as soon as these conditions are
1335 # met, even though there might be a failure in downloading the existing file
1336 # or uploading a new one. However, there is an exception: if a file has been
1337 # written, then closed, and is now being reopened, then we have to delay the
1338 # open until the previous upload/publish has completed. This is necessary
1339 # because sshfs does not wait for the result of an FXF_CLOSE message before
1340 # reporting to the client that a file has been closed. It applies both to
1341 # mutable files, and to directory entries linked to an immutable file.
1343 # Note that the permission checks below are for more precise error reporting on
1344 # the open call; later operations would fail even if we did not make these checks.
1346 d = delay or defer.succeed(None)
1347 d.addCallback(lambda ign: self._get_root(path))
1348 def _got_root( (root, path) ):
1349 if root.is_unknown():
1350 raise SFTPError(FX_PERMISSION_DENIED,
1351 "cannot open an unknown cap (or child of an unknown object). "
1352 "Upgrading the gateway to a later Tahoe-LAFS version may help")
# Case 1: the path names a bare filecap with no parent directory.
1355 if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
1356 if not IFileNode.providedBy(root):
1357 raise SFTPError(FX_PERMISSION_DENIED,
1358 "cannot open a directory cap")
1359 if (flags & FXF_WRITE) and root.is_readonly():
1360 raise SFTPError(FX_PERMISSION_DENIED,
1361 "cannot write to a non-writeable filecap without a parent directory")
1362 if flags & FXF_EXCL:
1363 raise SFTPError(FX_FAILURE,
1364 "cannot create a file exclusively when it already exists")
1366 # The file does not need to be added to all_heisenfiles, because it is not
1367 # associated with a directory entry that needs to be updated.
1369 metadata = update_metadata(None, desired_metadata, time())
1371 # We have to decide what to pass for the 'parent_readonly' argument to _no_write,
1372 # given that we don't actually have a parent. This only affects the permissions
1373 # reported by a getAttrs on this file handle in the case of an immutable file.
1374 # We choose 'parent_readonly=True' since that will cause the permissions to be
1375 # reported as r--r--r--, which is appropriate because an immutable file can't be
1376 # written via this path.
1378 metadata['no-write'] = _no_write(True, root)
1379 return self._make_file(file, userpath, flags, filenode=root, metadata=metadata)
# Case 2: the path names a child of a parent directory.
1382 childname = path[-1]
1384 if noisy: self.log("case 2: root = %r, childname = %r, desired_metadata = %r, path[:-1] = %r" %
1385 (root, childname, desired_metadata, path[:-1]), level=NOISY)
1386 d2 = root.get_child_at_path(path[:-1])
1387 def _got_parent(parent):
1388 if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
1389 if parent.is_unknown():
1390 raise SFTPError(FX_PERMISSION_DENIED,
1391 "cannot open a child of an unknown object. "
1392 "Upgrading the gateway to a later Tahoe-LAFS version may help")
1394 parent_readonly = parent.is_readonly()
1395 d3 = defer.succeed(None)
1396 if flags & FXF_EXCL:
1397 # FXF_EXCL means that the link to the file (not the file itself) must
1398 # be created atomically wrt updates by this storage client.
1399 # That is, we need to create the link before returning success to the
1400 # SFTP open request (and not just on close, as would normally be the
1401 # case). We make the link initially point to a zero-length LIT file,
1402 # which is consistent with what might happen on a POSIX filesystem.
1405 raise SFTPError(FX_FAILURE,
1406 "cannot create a file exclusively when the parent directory is read-only")
1408 # 'overwrite=False' ensures failure if the link already exists.
1409 # FIXME: should use a single call to set_uri and return (child, metadata) (#1035)
1411 zero_length_lit = "URI:LIT:"
1412 if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
1413 (parent, zero_length_lit, childname), level=NOISY)
1414 d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit,
1415 metadata=desired_metadata, overwrite=False))
1416 def _seturi_done(child):
1417 if noisy: self.log("%r.get_metadata_for(%r)" % (parent, childname), level=NOISY)
1418 d4 = parent.get_metadata_for(childname)
1419 d4.addCallback(lambda metadata: (child, metadata))
1421 d3.addCallback(_seturi_done)
1423 if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY)
1424 d3.addCallback(lambda ign: parent.get_child_and_metadata(childname))
1426 def _got_child( (filenode, current_metadata) ):
1427 if noisy: self.log("_got_child( (%r, %r) )" % (filenode, current_metadata), level=NOISY)
1429 metadata = update_metadata(current_metadata, desired_metadata, time())
1431 # Ignore the permissions of the desired_metadata in an open call. The permissions
1432 # can only be set by setAttrs.
1433 metadata['no-write'] = _no_write(parent_readonly, filenode, current_metadata)
1435 if filenode.is_unknown():
1436 raise SFTPError(FX_PERMISSION_DENIED,
1437 "cannot open an unknown cap. Upgrading the gateway "
1438 "to a later Tahoe-LAFS version may help")
1439 if not IFileNode.providedBy(filenode):
1440 raise SFTPError(FX_PERMISSION_DENIED,
1441 "cannot open a directory as if it were a file")
1442 if (flags & FXF_WRITE) and metadata['no-write']:
1443 raise SFTPError(FX_PERMISSION_DENIED,
1444 "cannot open a non-writeable file for writing")
1446 return self._make_file(file, userpath, flags, parent=parent, childname=childname,
1447 filenode=filenode, metadata=metadata)
# Subcase 2a: the child does not exist; create it if FXF_CREAT was given.
1449 if noisy: self.log("_no_child(%r)" % (f,), level=NOISY)
1450 f.trap(NoSuchChildError)
1452 if not (flags & FXF_CREAT):
1453 raise SFTPError(FX_NO_SUCH_FILE,
1454 "the file does not exist, and was not opened with the creation (CREAT) flag")
1456 raise SFTPError(FX_PERMISSION_DENIED,
1457 "cannot create a file when the parent directory is read-only")
1459 return self._make_file(file, userpath, flags, parent=parent, childname=childname)
1460 d3.addCallbacks(_got_child, _no_child)
1463 d2.addCallback(_got_parent)
1466 d.addCallback(_got_root)
# On any failure, undo the provisional self._heisenfiles registration made
# above for write/create opens.
1467 def _remove_on_error(err):
1469 self._remove_heisenfile(userpath, None, None, file)
1471 d.addErrback(_remove_on_error)
1472 d.addBoth(_convert_error, request)
1475 def renameFile(self, from_pathstring, to_pathstring, overwrite=False):
# Implement SSH_FXP_RENAME (overwrite=False) and the
# posix-rename@openssh.com extension (overwrite=True): re-key any
# heisenfiles open at the source, then move the real directory entry.
1476 request = ".renameFile(%r, %r)" % (from_pathstring, to_pathstring)
1477 self.log(request, level=OPERATIONAL)
1479 from_path = self._path_from_string(from_pathstring)
1480 to_path = self._path_from_string(to_pathstring)
1481 from_userpath = self._path_to_utf8(from_path)
1482 to_userpath = self._path_to_utf8(to_path)
1484 # the target directory must already exist
1485 d = deferredutil.gatherResults([self._get_parent_or_node(from_path),
1486 self._get_parent_or_node(to_path)])
1487 def _got( (from_pair, to_pair) ):
1488 if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r, overwrite=%r)" %
1489 (from_pair, to_pair, from_pathstring, to_pathstring, overwrite), level=NOISY)
1490 (from_parent, from_childname) = from_pair
1491 (to_parent, to_childname) = to_pair
# Both endpoints must be named children, not bare URIs.
1493 if from_childname is None:
1494 raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
1495 if to_childname is None:
1496 raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")
1498 # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
1499 # "It is an error if there already exists a file with the name specified
1501 # OpenSSH's SFTP server returns FX_PERMISSION_DENIED for this error.
1503 # For the standard SSH_FXP_RENAME operation, overwrite=False.
1504 # We also support the posix-rename@openssh.com extension, which uses overwrite=True.
1506 d2 = defer.succeed(None)
# NOTE(review): numbering gaps (1507, 1523-1524, 1529, 1534, 1537-1540,
# 1542-1543, 1545) show elided lines — including the 'if not overwrite:'
# guard around the existence probe below, the 'def _move(renamed):' and
# 'def _check(err):' lines, and the final 'return d'. Verify against the
# full source.
1508 d2.addCallback(lambda ign: to_parent.get(to_childname))
1509 def _expect_fail(res):
1510 if not isinstance(res, Failure):
1511 raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
1513 # It is OK if we fail for errors other than NoSuchChildError, since that probably
1514 # indicates some problem accessing the destination directory.
1515 res.trap(NoSuchChildError)
1516 d2.addBoth(_expect_fail)
1518 # If there are heisenfiles to be written at the 'from' direntry, then ensure
1519 # they will now be written at the 'to' direntry instead.
1520 d2.addCallback(lambda ign:
1521 self._rename_heisenfiles(from_userpath, from_parent, from_childname,
1522 to_userpath, to_parent, to_childname, overwrite=overwrite))
1525 # FIXME: use move_child_to_path to avoid possible data loss due to #943
1526 #d3 = from_parent.move_child_to_path(from_childname, to_root, to_path, overwrite=overwrite)
1528 d3 = from_parent.move_child_to(from_childname, to_parent, to_childname, overwrite=overwrite)
1530 if noisy: self.log("_check(%r) in .renameFile(%r, %r, overwrite=%r)" %
1531 (err, from_pathstring, to_pathstring, overwrite), level=NOISY)
# A NoSuchChildError from the real move is suppressed when heisenfiles
# were renamed, since the entry may not have been committed yet.
1533 if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)):
1535 if not overwrite and err.check(ExistingChildError):
1536 raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
1541 d2.addCallback(_move)
1544 d.addBoth(_convert_error, request)
1547 def makeDirectory(self, pathstring, attrs):
1548 request = ".makeDirectory(%r, %r)" % (pathstring, attrs)
1549 self.log(request, level=OPERATIONAL)
1551 path = self._path_from_string(pathstring)
1552 metadata = _attrs_to_metadata(attrs)
1553 if 'no-write' in metadata:
1554 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only")
1555 return defer.execute(_denied)
1557 d = self._get_root(path)
1558 d.addCallback(lambda (root, path):
1559 self._get_or_create_directories(root, path, metadata))
1560 d.addBoth(_convert_error, request)
def _get_or_create_directories(self, node, path, metadata):
    """Recursively descend 'path' (a list of unicode components) below
    'node', creating any missing subdirectories, and return a Deferred
    firing with the final directory node.

    Fails with SFTPError(FX_FAILURE) if a non-directory is in the way.
    """
    if not IDirectoryNode.providedBy(node):
        # TODO: provide the name of the blocking file in the error message.
        def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
                                                    "is a file in the way") # close enough
        return defer.execute(_blocked)

    # Base case: no more components to descend — reconstructed from the
    # listing gap at lines 1569-1570 (recursion on path[1:] below requires
    # a termination guard here).
    if not path:
        return defer.succeed(node)
    d = node.get(path[0])
    def _maybe_create(f):
        # Only a missing child triggers creation; other failures propagate.
        f.trap(NoSuchChildError)
        return node.create_subdirectory(path[0])
    d.addErrback(_maybe_create)
    d.addCallback(self._get_or_create_directories, path[1:], metadata)
    # Elided in the numbered listing: the recursion's Deferred must be
    # returned for the caller's chain to work.
    return d
def removeFile(self, pathstring):
    """Implement SSH_FXP_REMOVE: delete the file at pathstring.

    Returns a Deferred; errors are converted by _convert_error.
    """
    request = ".removeFile(%r)" % (pathstring,)
    self.log(request, level=OPERATIONAL)

    path = self._path_from_string(pathstring)
    d = self._remove_object(path, must_be_file=True)
    d.addBoth(_convert_error, request)
    # The trailing 'return d' is elided in the numbered listing; without it
    # the SFTP layer would receive None instead of the Deferred.
    return d
def removeDirectory(self, pathstring):
    """Implement SSH_FXP_RMDIR: delete the directory at pathstring.

    Returns a Deferred; errors are converted by _convert_error.
    """
    request = ".removeDirectory(%r)" % (pathstring,)
    self.log(request, level=OPERATIONAL)

    path = self._path_from_string(pathstring)
    d = self._remove_object(path, must_be_directory=True)
    d.addBoth(_convert_error, request)
    # The trailing 'return d' is elided in the numbered listing; without it
    # the SFTP layer would receive None instead of the Deferred.
    return d
1598 def _remove_object(self, path, must_be_directory=False, must_be_file=False):
# Shared implementation for removeFile/removeDirectory: abandon any
# heisenfiles open at the target (unless removing a directory), then
# delete the real directory entry. When heisenfiles were abandoned, the
# entry is allowed not to exist yet (must_exist=False), since the file
# may never have been committed.
1599 userpath = self._path_to_utf8(path)
1600 d = self._get_parent_or_node(path)
1601 def _got_parent( (parent, childname) ):
1602 if childname is None:
1603 raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")
1605 direntry = _direntry_for(parent, childname)
1606 d2 = defer.succeed(False)
1607 if not must_be_directory:
1608 d2.addCallback(lambda ign: self._abandon_any_heisenfiles(userpath, direntry))
1610 d2.addCallback(lambda abandoned:
1611 parent.delete(childname, must_exist=not abandoned,
1612 must_be_directory=must_be_directory, must_be_file=must_be_file))
# NOTE(review): numbering gaps (1613, 1615) show elided lines — the inner
# 'return d2' and the trailing 'return d'. Verify against the full source.
1614 d.addCallback(_got_parent)
1617 def openDirectory(self, pathstring):
# Implement SSH_FXP_OPENDIR/READDIR: resolve pathstring to a dirnode,
# list its children, and render each as the (utf-8 name, ls-style
# longname, attrs) triple the SFTP layer expects, wrapped in a
# StoppableList.
1618 request = ".openDirectory(%r)" % (pathstring,)
1619 self.log(request, level=OPERATIONAL)
1621 path = self._path_from_string(pathstring)
1622 d = self._get_parent_or_node(path)
1623 def _got_parent_or_node( (parent_or_node, childname) ):
1624 if noisy: self.log("_got_parent_or_node( (%r, %r) ) in openDirectory(%r)" %
1625 (parent_or_node, childname, pathstring), level=NOISY)
# A bare URI path resolves directly to the node; otherwise fetch the child.
1626 if childname is None:
1627 return parent_or_node
1629 return parent_or_node.get(childname)
1630 d.addCallback(_got_parent_or_node)
# NOTE(review): numbering gaps (1628, 1631, 1639-1640, 1643, 1653, 1656)
# show elided lines — including the 'def _list(dirnode):' line that binds
# 'dirnode' used below, the dirnode.list() call creating 'd2', the
# 'results = []' initialization, and the trailing returns. Verify against
# the full source.
1632 if dirnode.is_unknown():
1633 raise SFTPError(FX_PERMISSION_DENIED,
1634 "cannot list an unknown cap as a directory. Upgrading the gateway "
1635 "to a later Tahoe-LAFS version may help")
1636 if not IDirectoryNode.providedBy(dirnode):
1637 raise SFTPError(FX_PERMISSION_DENIED,
1638 "cannot list a file as if it were a directory")
1641 def _render(children):
1642 parent_readonly = dirnode.is_readonly()
1644 for filename, (child, metadata) in children.iteritems():
1645 # The file size may be cached or absent.
1646 metadata['no-write'] = _no_write(parent_readonly, child, metadata)
1647 attrs = _populate_attrs(child, metadata)
1648 filename_utf8 = filename.encode('utf-8')
1649 longname = _lsLine(filename_utf8, attrs)
1650 results.append( (filename_utf8, longname, attrs) )
1651 return StoppableList(results)
1652 d2.addCallback(_render)
1654 d.addCallback(_list)
1655 d.addBoth(_convert_error, request)
1658 def getAttrs(self, pathstring, followLinks):
# Implement SSH_FXP_STAT/LSTAT: return the attributes of the object at
# pathstring. Heisenfiles matching the path are synced first, and if the
# child does not exist yet but an open heisenfile does, its getAttrs()
# result is used instead (see race-condition note below). 'followLinks'
# is accepted for interface compatibility; symlinks are unsupported here.
1659 request = ".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks)
1660 self.log(request, level=OPERATIONAL)
1662 # When asked about a specific file, report its current size.
1663 # TODO: the modification time for a mutable file should be
1664 # reported as the update time of the best version. But that
1665 # information isn't currently stored in mutable shares, I think.
1667 path = self._path_from_string(pathstring)
1668 userpath = self._path_to_utf8(path)
1669 d = self._get_parent_or_node(path)
1670 def _got_parent_or_node( (parent_or_node, childname) ):
1671 if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
1673 # Some clients will incorrectly try to get the attributes
1674 # of a file immediately after opening it, before it has been put
1675 # into the all_heisenfiles table. This is a race condition bug in
1676 # the client, but we handle it anyway by calling .sync() on all
1677 # files matching either the path or the direntry.
1679 direntry = _direntry_for(parent_or_node, childname)
1680 d2 = self._sync_heisenfiles(userpath, direntry)
# Bare-URI path: stat the node itself.
1682 if childname is None:
1683 node = parent_or_node
1684 d2.addCallback(lambda ign: node.get_current_size())
1685 d2.addCallback(lambda size:
1686 _populate_attrs(node, {'no-write': node.is_unknown() or node.is_readonly()}, size=size))
# NOTE(review): numbering gaps (1687, 1696-1697, 1705, 1708, 1710, 1713)
# show elided lines — the 'else:' introducing this branch, the inner
# 'return d3', the 'def _nosuch(err):' line binding 'err' below, the
# fallthrough 'return err' statements, and the trailing returns. Verify
# against the full source.
1688 parent = parent_or_node
1689 d2.addCallback(lambda ign: parent.get_child_and_metadata_at_path([childname]))
1690 def _got( (child, metadata) ):
1691 if noisy: self.log("_got( (%r, %r) )" % (child, metadata), level=NOISY)
1692 assert IDirectoryNode.providedBy(parent), parent
1693 metadata['no-write'] = _no_write(parent.is_readonly(), child, metadata)
1694 d3 = child.get_current_size()
1695 d3.addCallback(lambda size: _populate_attrs(child, metadata, size=size))
1698 if noisy: self.log("_nosuch(%r)" % (err,), level=NOISY)
1699 err.trap(NoSuchChildError)
1700 if noisy: self.log("checking open files:\nself._heisenfiles = %r\nall_heisenfiles = %r\ndirentry=%r" %
1701 (self._heisenfiles, all_heisenfiles, direntry), level=NOISY)
# The child isn't linked yet: fall back to any open heisenfile at this
# direntry so clients with the open/stat race still get attributes.
1702 if direntry in all_heisenfiles:
1703 files = all_heisenfiles[direntry]
1704 if len(files) == 0: # pragma: no cover
1706 # use the heisenfile that was most recently opened
1707 return files[-1].getAttrs()
1709 d2.addCallbacks(_got, _nosuch)
1711 d.addCallback(_got_parent_or_node)
1712 d.addBoth(_convert_error, request)
1715 def setAttrs(self, pathstring, attrs):
1716 request = ".setAttrs(%r, %r)" % (pathstring, attrs)
1717 self.log(request, level=OPERATIONAL)
1720 # this would require us to download and re-upload the truncated/extended
1722 def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported")
1723 return defer.execute(_unsupported)
1725 path = self._path_from_string(pathstring)
1726 userpath = self._path_to_utf8(path)
1727 d = self._get_parent_or_node(path)
1728 def _got_parent_or_node( (parent_or_node, childname) ):
1729 if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
1731 direntry = _direntry_for(parent_or_node, childname)
1732 d2 = self._update_attrs_for_heisenfiles(userpath, direntry, attrs)
1734 def _update(updated_heisenfiles):
1735 if childname is None:
1736 if updated_heisenfiles:
1738 raise SFTPError(FX_NO_SUCH_FILE, userpath)
1740 desired_metadata = _attrs_to_metadata(attrs)
1741 if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY)
1743 d3 = parent_or_node.set_metadata_for(childname, desired_metadata)
1745 if updated_heisenfiles:
1746 err.trap(NoSuchChildError)
1749 d3.addErrback(_nosuch)
1751 d2.addCallback(_update)
1752 d2.addCallback(lambda ign: None)
1754 d.addCallback(_got_parent_or_node)
1755 d.addBoth(_convert_error, request)
def readLink(self, pathstring):
    """Symbolic links are not supported by the Tahoe filesystem; always
    fail with FX_OP_UNSUPPORTED."""
    self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)

    def _fail():
        raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
    return defer.execute(_fail)
def makeLink(self, linkPathstring, targetPathstring):
    """Symlink creation is not supported; always fail with
    FX_OP_UNSUPPORTED.

    If this is implemented, note the reversal of arguments described in point 7 of
    <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
    """
    self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL)

    def _fail():
        raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
    return defer.execute(_fail)
def extendedRequest(self, extensionName, extensionData):
    """Handle an SFTP extended request.

    Implements the three main OpenSSH SFTP extensions
    (posix-rename@openssh.com, statvfs@openssh.com, fstatvfs@openssh.com); see
    <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
    Any other extension fails with FX_OP_UNSUPPORTED.
    """
    self.log(".extendedRequest(%r, <data of length %r>)" % (extensionName, len(extensionData)), level=OPERATIONAL)

    if extensionName == 'posix-rename@openssh.com':
        def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")

        # The payload is two SSH strings: <uint32 len><from-path><uint32 len><to-path>.
        # Validate each length field before slicing.
        if 4 > len(extensionData): return defer.execute(_bad)
        (fromPathLen,) = struct.unpack('>L', extensionData[0:4])
        if 8 + fromPathLen > len(extensionData): return defer.execute(_bad)

        (toPathLen,) = struct.unpack('>L', extensionData[(4 + fromPathLen):(8 + fromPathLen)])
        if 8 + fromPathLen + toPathLen != len(extensionData): return defer.execute(_bad)

        fromPathstring = extensionData[4:(4 + fromPathLen)]
        toPathstring = extensionData[(8 + fromPathLen):]
        d = self.renameFile(fromPathstring, toPathstring, overwrite=True)

        # Twisted conch assumes that the response from an extended request is either
        # an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing
        # (respond with an FXP_STATUS message) if we return a Failure with code FX_OK.
        def _succeeded(ign):
            raise SFTPError(FX_OK, "request succeeded")
        d.addCallback(_succeeded)
        return d  # BUGFIX: this return was missing, so posix-rename responses were dropped

    if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com':
        # Fixed, fake filesystem statistics (Tahoe has no single underlying filesystem).
        # f_bsize and f_frsize should be the same to avoid a bug in 'df'
        return defer.succeed(struct.pack('>11Q',
            1024,       # uint64 f_bsize    /* file system block size */
            1024,       # uint64 f_frsize   /* fundamental fs block size */
            628318530,  # uint64 f_blocks   /* number of blocks (unit f_frsize) */
            314159265,  # uint64 f_bfree    /* free blocks in file system */
            314159265,  # uint64 f_bavail   /* free blocks for non-root */
            200000000,  # uint64 f_files    /* total file inodes */
            100000000,  # uint64 f_ffree    /* free file inodes */
            100000000,  # uint64 f_favail   /* free file inodes for non-root */
            0x1AF5,     # uint64 f_fsid     /* file system id */
            2,          # uint64 f_flag     /* bit mask = ST_NOSUID; not ST_RDONLY */
            65535,      # uint64 f_namemax  /* maximum filename length */
            ))

    def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
                                        (extensionName, len(extensionData)))
    return defer.execute(_unsupported)
def realPath(self, pathstring):
    """Canonicalize pathstring (resolving '.' and '..') and return it as an absolute UTF-8 path."""
    self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL)

    canonical = self._path_from_string(pathstring)
    return self._path_to_utf8(canonical)
1826 def _path_to_utf8(self, path):
1827 return (u"/" + u"/".join(path)).encode('utf-8')
def _path_from_string(self, pathstring):
    """Convert an SFTP path bytestring into a canonical list of unicode components.

    '.' components are dropped and '..' removes the preceding component
    (excess '..' at the root is ignored, per the SFTP draft).  Raises
    SFTPError(FX_NO_SUCH_FILE) if a component is not valid UTF-8.

    NOTE(review): lines elided from this block (empty-path branch, '..'
    handling, try/append/return) have been reconstructed to restore valid
    syntax.
    """
    if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)

    assert isinstance(pathstring, str), pathstring

    # The home directory is the root directory.
    pathstring = pathstring.strip("/")
    if pathstring == "" or pathstring == ".":
        path_utf8 = []
    else:
        path_utf8 = pathstring.split("/")

    # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
    # "Servers SHOULD interpret a path name component ".." as referring to
    # the parent directory, and "." as referring to the current directory."
    path = []
    for p_utf8 in path_utf8:
        if p_utf8 == "..":
            # ignore excess .. components at the root
            if len(path) > 0:
                path = path[:-1]
        elif p_utf8 != ".":
            try:
                p = p_utf8.decode('utf-8', 'strict')
            except UnicodeError:
                raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
            path.append(p)

    if noisy: self.log(" PATH %r" % (path,), level=NOISY)
    return path
def _get_root(self, path):
    """Return a Deferred firing with (root node, remaining_path).

    A path whose first component is u"uri" addresses an arbitrary cap: the
    second component is the cap URI and the rest is interpreted relative to
    it.  Otherwise the user's own root directory is used unchanged.

    NOTE(review): the elided 'else:' branch and final 'return d' have been
    reconstructed to restore valid syntax.
    """
    d = defer.succeed(None)
    if path and path[0] == u"uri":
        d.addCallback(lambda ign: self._client.create_node_from_uri(path[1].encode('utf-8')))
        d.addCallback(lambda root: (root, path[2:]))
    else:
        d.addCallback(lambda ign: (self._root, path))
    return d
1870 def _get_parent_or_node(self, path):
1871 # return Deferred (parent, childname) or (node, None)
1872 d = self._get_root(path)
1873 def _got_root( (root, remaining_path) ):
1874 if not remaining_path:
1877 d2 = root.get_child_at_path(remaining_path[:-1])
1878 d2.addCallback(lambda parent: (parent, remaining_path[-1]))
1880 d.addCallback(_got_root)
class FakeTransport:
    """Minimal ITransport stand-in that logs and discards everything written to it."""
    implements(ITransport)

    def write(self, data):
        # Log only the length; the payload itself is discarded.
        logmsg("FakeTransport.write(<data of length %r>)" % (len(data),), level=NOISY)

    def writeSequence(self, data):
        logmsg("FakeTransport.writeSequence(...)", level=NOISY)

    def loseConnection(self):
        logmsg("FakeTransport.loseConnection()", level=NOISY)

    # getPeer and getHost can just raise errors, since we don't know what to return
class ShellSession(PrefixingLogMixin):
    """ISession that refuses shell and exec access, since this server speaks only SFTP.

    Opening a shell prints an explanatory message and exits with status 1.
    'df -P -k /' is special-cased with a canned reply (presumably for clients
    that probe free space before transferring -- TODO confirm motivation).

    NOTE(review): the elided 'return d' statements, 'else:' branch and the
    'def closed(self):' header have been reconstructed to restore valid syntax.
    """
    implements(ISession)

    def __init__(self, userHandler):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        if noisy: self.log(".__init__(%r)" % (userHandler), level=NOISY)

    def getPty(self, terminal, windowSize, attrs):
        # No PTY support; just log the request.
        self.log(".getPty(%r, %r, %r)" % (terminal, windowSize, attrs), level=OPERATIONAL)

    def openShell(self, protocol):
        self.log(".openShell(%r)" % (protocol,), level=OPERATIONAL)
        if hasattr(protocol, 'transport') and protocol.transport is None:
            protocol.transport = FakeTransport()  # work around Twisted bug

        d = defer.succeed(None)
        d.addCallback(lambda ign: protocol.write("This server supports only SFTP, not shell sessions.\n"))
        d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessTerminated(exitCode=1))))
        return d

    def execCommand(self, protocol, cmd):
        self.log(".execCommand(%r, %r)" % (protocol, cmd), level=OPERATIONAL)
        if hasattr(protocol, 'transport') and protocol.transport is None:
            protocol.transport = FakeTransport()  # work around Twisted bug

        d = defer.succeed(None)
        if cmd == "df -P -k /":
            # Canned, fixed figures; any other command fails with exit status 1.
            d.addCallback(lambda ign: protocol.write(
                "Filesystem 1024-blocks Used Available Capacity Mounted on\n"
                "tahoe 628318530 314159265 314159265 50% /\n"))
            d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessDone(None))))
        else:
            d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessTerminated(exitCode=1))))
        return d

    def windowChanged(self, newWindowSize):
        self.log(".windowChanged(%r)" % (newWindowSize,), level=OPERATIONAL)

    def eofReceived(self):
        self.log(".eofReceived()", level=OPERATIONAL)

    def closed(self):
        self.log(".closed()", level=OPERATIONAL)
# If you have an SFTPUserHandler and want something that provides ISession, you get
# ShellSession(userHandler).
# We use adaptation because this must be a different object to the SFTPUserHandler.
# (Twisted's component system will construct the ShellSession on demand.)
components.registerAdapter(ShellSession, SFTPUserHandler, ISession)
1948 from allmydata.frontends.auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme
class Dispatcher:
    """portal.IRealm that hands each authenticated account an SFTPUserHandler avatar.

    NOTE(review): the elided 'class Dispatcher:' header line has been
    reconstructed to restore valid syntax.
    """
    implements(portal.IRealm)

    def __init__(self, client):
        self._client = client

    def requestAvatar(self, avatarID, mind, interface):
        """Build the IConchUser avatar; avatarID carries .rootcap and .username."""
        assert interface == IConchUser, interface
        rootnode = self._client.create_node_from_uri(avatarID.rootcap)
        handler = SFTPUserHandler(self._client, rootnode, avatarID.username)
        return (interface, handler, handler.logout)
class SFTPServer(service.MultiService):
    """MultiService running an SFTP-over-SSH frontend for a Tahoe client.

    Authentication is via an account file and/or an account-lookup URL; at
    least one must be supplied, otherwise NeedRootcapLookupScheme is raised.

    NOTE(review): the elided 'if accountfile:'/'if accounturl:' guards, the
    try/except around getPrimes, and 'f = SSHFactory(); f.portal = p' have
    been reconstructed to restore valid syntax.
    """
    def __init__(self, client, accountfile, accounturl,
                 sftp_portstr, pubkey_file, privkey_file):
        service.MultiService.__init__(self)

        r = Dispatcher(client)
        p = portal.Portal(r)

        if accountfile:
            c = AccountFileChecker(self, accountfile)
            p.registerChecker(c)
        if accounturl:
            c = AccountURLChecker(self, accounturl)
            p.registerChecker(c)
        if not accountfile and not accounturl:
            # we could leave this anonymous, with just the /uri/CAP form
            raise NeedRootcapLookupScheme("must provide an account file or URL")

        pubkey = keys.Key.fromFile(pubkey_file)
        privkey = keys.Key.fromFile(privkey_file)
        class SSHFactory(factory.SSHFactory):
            publicKeys = {pubkey.sshType(): pubkey}
            privateKeys = {privkey.sshType(): privkey}
            def getPrimes(self):
                try:
                    # if present, this enables diffie-hellman-group-exchange
                    return primes.parseModuliFile("/etc/ssh/moduli")
                except IOError:
                    # no moduli file; group-exchange key-exchange is disabled
                    return None

        f = SSHFactory()
        f.portal = p

        s = strports.service(sftp_portstr, f)
        s.setServiceParent(self)