2 import heapq, traceback, array, stat, struct
3 from types import NoneType
4 from stat import S_IFREG, S_IFDIR
5 from time import time, strftime, localtime
7 from zope.interface import implements
8 from twisted.python import components
9 from twisted.application import service, strports
10 from twisted.conch.ssh import factory, keys, session
11 from twisted.conch.ssh.filetransfer import FileTransferServer, SFTPError, \
12 FX_NO_SUCH_FILE, FX_OP_UNSUPPORTED, FX_PERMISSION_DENIED, FX_EOF, \
13 FX_BAD_MESSAGE, FX_FAILURE, FX_OK
14 from twisted.conch.ssh.filetransfer import FXF_READ, FXF_WRITE, FXF_APPEND, \
15 FXF_CREAT, FXF_TRUNC, FXF_EXCL
16 from twisted.conch.interfaces import ISFTPServer, ISFTPFile, IConchUser, ISession
17 from twisted.conch.avatar import ConchUser
18 from twisted.conch.openssh_compat import primes
19 from twisted.cred import portal
20 from twisted.internet.error import ProcessDone, ProcessTerminated
21 from twisted.python.failure import Failure
22 from twisted.internet.interfaces import ITransport
24 from twisted.internet import defer
25 from twisted.internet.interfaces import IFinishableConsumer
26 from foolscap.api import eventually
27 from allmydata.util import deferredutil
29 from allmydata.util.consumer import download_to_data
30 from allmydata.interfaces import IFileNode, IDirectoryNode, ExistingChildError, \
31 NoSuchChildError, ChildOfWrongTypeError
32 from allmydata.mutable.common import NotWriteableError
33 from allmydata.immutable.upload import FileHandle
34 from allmydata.dirnode import update_metadata
35 from allmydata.util.fileutil import EncryptedTemporaryFile
# Whether to route log messages through foolscap's logging (the normal
# case) or fall back to simple print-based stubs (debugging only).
use_foolscap_logging = True

from allmydata.util.log import NOISY, OPERATIONAL, WEIRD, \
     msg as _msg, err as _err, PrefixingLogMixin as _PrefixingLogMixin

if use_foolscap_logging:
    (logmsg, logerr, PrefixingLogMixin) = (_msg, _err, _PrefixingLogMixin)
else:  # pragma: no cover
    # Fallback stubs with the same signatures as the foolscap versions.
    # (The bodies of logmsg/logerr/__init__ are elided in this view.)
    def logmsg(s, level=None):
    def logerr(s, level=None):
    class PrefixingLogMixin:
        def __init__(self, facility=None, prefix=''):
        def log(self, s, level=None):
            print "%r %s" % (self.prefix, s)
def eventually_callback(d):
    """Return a one-argument callable that fires d.callback(res) on a
    later reactor turn, via foolscap's eventual-send queue."""
    def _fire(res):
        return eventually(d.callback, res)
    return _fire
def eventually_errback(d):
    """Return a one-argument callable that fires d.errback(err) on a
    later reactor turn, via foolscap's eventual-send queue."""
    def _fail(err):
        return eventually(d.errback, err)
    return _fail
    # _utf8 body (its def line is elided in this view): encode unicode as
    # UTF-8 bytes; a str presumably passes through unchanged — the return
    # on the str branch is also elided.
    if isinstance(x, unicode):
        return x.encode('utf-8')
    if isinstance(x, str):

    # _to_sftp_time body (def line elided in this view):
    """SFTP times are unsigned 32-bit integers representing UTC seconds
    (ignoring leap seconds) since the Unix epoch, January 1 1970 00:00 UTC.
    A Tahoe time is the corresponding float."""
    # Truncate to an unsigned 32-bit value as required by SFTP v3.
    return long(t) & 0xFFFFFFFFL
def _convert_error(res, request):
    """If res is not a Failure, return it, otherwise reraise the appropriate
    error as an SFTPError, mapping Tahoe/Twisted failure types onto SFTP v3
    status codes.  (Several lines are elided in this view.)"""
    if not isinstance(res, Failure):
        # success path: log the result, abbreviating raw data
        if isinstance(res, str): logged_res = "<data of length %r>" % (len(res),)
        logmsg("SUCCESS %r %r" % (request, logged_res,), level=OPERATIONAL)

    logmsg("RAISE %r %r" % (request, err.value), level=OPERATIONAL)
    # best-effort traceback logging; a try: line is elided above this
    if noisy: logmsg(traceback.format_exc(err.value), level=NOISY)
    except Exception: # pragma: no cover

    # The message argument to SFTPError must not reveal information that
    # might compromise anonymity, if we are running over an anonymous network.

    if err.check(SFTPError):
        # original raiser of SFTPError has responsibility to ensure anonymity

    if err.check(NoSuchChildError):
        childname = _utf8(err.value.args[0])
        raise SFTPError(FX_NO_SUCH_FILE, childname)
    if err.check(NotWriteableError) or err.check(ChildOfWrongTypeError):
        msg = _utf8(err.value.args[0])
        raise SFTPError(FX_PERMISSION_DENIED, msg)
    if err.check(ExistingChildError):
        # Versions of SFTP after v3 (which is what twisted.conch implements)
        # define a specific error code for this case: FX_FILE_ALREADY_EXISTS.
        # However v3 doesn't; instead, other servers such as sshd return
        # FX_FAILURE. The gvfs SFTP backend, for example, depends on this
        # to translate the error to the equivalent of POSIX EEXIST, which is
        # necessary for some picky programs (such as gedit).
        msg = _utf8(err.value.args[0])
        raise SFTPError(FX_FAILURE, msg)
    if err.check(NotImplementedError):
        raise SFTPError(FX_OP_UNSUPPORTED, _utf8(err.value))
    if err.check(EOFError):
        raise SFTPError(FX_EOF, "end of file reached")
    if err.check(defer.FirstError):
        # unwrap a DeferredList-style wrapper and convert the inner failure
        _convert_error(err.value.subFailure, request)

    # We assume that the error message is not anonymity-sensitive.
    raise SFTPError(FX_FAILURE, _utf8(err.value))
def _repr_flags(flags):
    # Render an SFTP open-flags bitmask as a human-readable "A|B|C" string
    # for logging.  (The tail of this expression is elided in this view.)
    return "|".join([f for f in
                     [(flags & FXF_READ) and "FXF_READ" or None,
                      (flags & FXF_WRITE) and "FXF_WRITE" or None,
                      (flags & FXF_APPEND) and "FXF_APPEND" or None,
                      (flags & FXF_CREAT) and "FXF_CREAT" or None,
                      (flags & FXF_TRUNC) and "FXF_TRUNC" or None,
                      (flags & FXF_EXCL) and "FXF_EXCL" or None,
def _lsLine(name, attrs):
    # Build one "ls -l"-style directory-listing line for name from an SFTP
    # attrs dict.  (Several lines are elided in this view; 'mode', 'l',
    # 'st_nlink', 'now', 'day' and 'sixmo' are bound on elided lines.)
    st_mtime = attrs.get("mtime", 0)
    st_mode = attrs["permissions"]
    st_size = attrs.get("size", "?")
    # We don't know how many links there really are to this object.

    # Based on <http://twistedmatrix.com/trac/browser/trunk/twisted/conch/ls.py?rev=25412>.
    # We can't call the version in Twisted because we might have a version earlier than
    # <http://twistedmatrix.com/trac/changeset/25412> (released in Twisted 8.2).

    # ten-character permissions field, e.g. "drwxr-xr-x"
    perms = array.array('c', '-'*10)
    ft = stat.S_IFMT(mode)
    if stat.S_ISDIR(ft): perms[0] = 'd'
    elif stat.S_ISREG(ft): perms[0] = '-'

    # user bits
    if mode&stat.S_IRUSR: perms[1] = 'r'
    if mode&stat.S_IWUSR: perms[2] = 'w'
    if mode&stat.S_IXUSR: perms[3] = 'x'
    # group bits
    if mode&stat.S_IRGRP: perms[4] = 'r'
    if mode&stat.S_IWGRP: perms[5] = 'w'
    if mode&stat.S_IXGRP: perms[6] = 'x'
    # other bits
    if mode&stat.S_IROTH: perms[7] = 'r'
    if mode&stat.S_IWOTH: perms[8] = 'w'
    if mode&stat.S_IXOTH: perms[9] = 'x'
    # suid/sgid never set

    l += str(st_nlink).rjust(5) + ' '

    if st_mtime + sixmo < now or st_mtime > now + day:
        # mtime is more than 6 months ago, or more than one day in the future
        l += strftime("%b %d %Y ", localtime(st_mtime))
        l += strftime("%b %d %H:%M ", localtime(st_mtime))
def _no_write(parent_readonly, child, metadata=None):
    """Whether child should be listed as having read-only permissions in parent."""
    # (The bodies of the first and third branches are elided in this view.)

    if child.is_unknown():
    elif child.is_mutable():
        return child.is_readonly()
    elif parent_readonly or IDirectoryNode.providedBy(child):

    # fall through: consult the explicit 'no-write' metadata flag
    return metadata is not None and metadata.get('no-write', False)
def _populate_attrs(childnode, metadata, size=None):
    # Build an SFTP attrs dict for childnode (which may be None, meaning a
    # file that doesn't exist yet but will be written).  Several lines are
    # elided in this view, including the creation of 'attrs'.

    # The permissions must have the S_IFDIR (040000) or S_IFREG (0100000)
    # bits, otherwise the client may refuse to open a directory.
    # Also, sshfs run as a non-root user requires files and directories
    # to be world-readable/writeable.
    # It is important that we never set the executable bits on files.

    # Directories and unknown nodes have no size, and SFTP doesn't
    # require us to make one up.

    # childnode might be None, meaning that the file doesn't exist yet,
    # but we're going to write it later.

    if childnode and childnode.is_unknown():
    elif childnode and IDirectoryNode.providedBy(childnode):
        perms = S_IFDIR | 0777

        # For files, omit the size if we don't immediately know it.
        if childnode and size is None:
            size = childnode.get_size()

        assert isinstance(size, (int, long)) and not isinstance(size, bool), repr(size)

        perms = S_IFREG | 0666

    if metadata.get('no-write', False):
        perms &= S_IFDIR | S_IFREG | 0555 # clear 'w' bits

    # See webapi.txt for what these times mean.
    # We would prefer to omit atime, but SFTP version 3 can only
    # accept mtime if atime is also set.
    if 'linkmotime' in metadata.get('tahoe', {}):
        attrs['ctime'] = attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['tahoe']['linkmotime'])
    elif 'mtime' in metadata:
        attrs['ctime'] = attrs['mtime'] = attrs['atime'] = _to_sftp_time(metadata['mtime'])

    if 'linkcrtime' in metadata.get('tahoe', {}):
        attrs['createtime'] = _to_sftp_time(metadata['tahoe']['linkcrtime'])

    attrs['permissions'] = perms

    # twisted.conch.ssh.filetransfer only implements SFTP version 3,
    # which doesn't include SSH_FILEXFER_ATTR_FLAGS.
def _attrs_to_metadata(attrs):
    # Convert an SFTP attrs dict into Tahoe link metadata.  (The loop
    # header, the creation of 'metadata', and the return are elided in
    # this view.)

        if key == "mtime" or key == "ctime" or key == "createtime":
            metadata[key] = long(attrs[key])
        elif key.startswith("ext_"):
            metadata[key] = str(attrs[key])

    # A clear owner-write bit marks the entry read-only in metadata.
    perms = attrs.get('permissions', stat.S_IWUSR)
    if not (perms & stat.S_IWUSR):
        metadata['no-write'] = True
def _direntry_for(filenode_or_parent, childname, filenode=None):
    # Compute the "direntry" key string used to index heisenfiles:
    # parent_write_uri + "/" + childname_utf8, or the file's own write URI
    # when childname is None.  (The tail of this function is elided in
    # this view.)
    assert isinstance(childname, (unicode, NoneType)), childname

    if childname is None:
        # no childname: key on the file itself
        filenode_or_parent = filenode

    if filenode_or_parent:
        rw_uri = filenode_or_parent.get_write_uri()
        if rw_uri and childname:
            return rw_uri + "/" + childname.encode('utf-8')
class OverwriteableFileConsumer(PrefixingLogMixin):
    implements(IFinishableConsumer)
    """I act both as a consumer for the download of the original file contents, and as a
    wrapper for a temporary file that records the downloaded data and any overwrites.
    I use a priority queue to keep track of which regions of the file have been overwritten
    but not yet downloaded, so that the download does not clobber overwritten data.
    I use another priority queue to record milestones at which to make callbacks
    indicating that a given number of bytes have been downloaded.

    The temporary file reflects the contents of the file that I represent, except that:
     - regions that have neither been downloaded nor overwritten, if present,
     - the temporary file may be shorter than the represented file (it is never longer).
    The latter's current size is stored in self.current_size.

    This abstraction is mostly independent of SFTP. Consider moving it, if it is found
    useful for other frontends."""

    def __init__(self, download_size, tempfile_maker):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        if noisy: self.log(".__init__(%r, %r)" % (download_size, tempfile_maker), level=NOISY)
        # download_size: bytes expected from the download; current_size:
        # the represented file's size (overwrites/truncates can change it).
        self.download_size = download_size
        self.current_size = download_size
        self.f = tempfile_maker()

        self.milestones = [] # empty heap of (offset, d)
        self.overwrites = [] # empty heap of (start, end)
        self.is_closed = False
        self.done = self.when_reached(download_size) # adds a milestone

        def _signal_done(ign):
            if noisy: self.log("DONE", level=NOISY)
        self.done.addCallback(_signal_done)
    def get_current_size(self):
        # Current size of the represented file; may differ from download_size.
        return self.current_size

    def set_current_size(self, size):
        # Truncate or zero-extend the represented file to exactly 'size'
        # bytes.  (The tail of this method is elided in this view.)
        if noisy: self.log(".set_current_size(%r), current_size = %r, downloaded = %r" %
                           (size, self.current_size, self.downloaded), level=NOISY)
        if size < self.current_size or size < self.downloaded:
            self.f.truncate(size)
        if size > self.current_size:
            # growing: record the gap as an explicit zero-filled overwrite
            self.overwrite(self.current_size, "\x00" * (size - self.current_size))
        self.current_size = size

        # make the invariant self.download_size <= self.current_size be true again
        if size < self.download_size:
            self.download_size = size

        if self.downloaded >= self.download_size:

    def registerProducer(self, p, streaming):
        # IConsumer: attach the download producer.  (Tail elided in this view.)
        if noisy: self.log(".registerProducer(%r, streaming=%r)" % (p, streaming), level=NOISY)
        if self.producer is not None:
            raise RuntimeError("producer is already registered")

        # call resumeProducing once to start things off
    def write(self, data):
        # IConsumer.write: accept downloaded bytes, skipping over any
        # regions already overwritten by the client.  (Several lines are
        # elided in this view.)
        if noisy: self.log(".write(<data of length %r>)" % (len(data),), level=NOISY)

        if self.downloaded >= self.download_size:

        next_downloaded = self.downloaded + len(data)
        if next_downloaded > self.download_size:
            # clip to the expected download size
            data = data[:(self.download_size - self.downloaded)]

        while len(self.overwrites) > 0:
            (start, end) = self.overwrites[0]
            if start >= next_downloaded:
                # This and all remaining overwrites are after the data we just downloaded.
            if start > self.downloaded:
                # The data we just downloaded has been partially overwritten.
                # Write the prefix of it that precedes the overwritten region.
                self.f.seek(self.downloaded)
                self.f.write(data[:(start - self.downloaded)])

            # This merges consecutive overwrites if possible, which allows us to detect the
            # case where the download can be stopped early because the remaining region
            # to download has already been fully overwritten.
            heapq.heappop(self.overwrites)
            while len(self.overwrites) > 0:
                (start1, end1) = self.overwrites[0]
                heapq.heappop(self.overwrites)

            if end >= next_downloaded:
                # This overwrite extends past the downloaded data, so there is no
                # more data to consider on this call.
                heapq.heappush(self.overwrites, (next_downloaded, end))
                self._update_downloaded(next_downloaded)
            elif end >= self.downloaded:
                # skip the overwritten span of the incoming data
                data = data[(end - self.downloaded):]
                self._update_downloaded(end)

        self.f.seek(self.downloaded)
        self._update_downloaded(next_downloaded)

    def _update_downloaded(self, new_downloaded):
        # Advance the downloaded-bytes counter and fire any milestones that
        # have now been reached.  (Several lines are elided in this view.)
        self.downloaded = new_downloaded
        milestone = new_downloaded
        if len(self.overwrites) > 0:
            (start, end) = self.overwrites[0]
            if start <= new_downloaded and end > milestone:

        while len(self.milestones) > 0:
            (next, d) = self.milestones[0]
            if noisy: self.log("MILESTONE %r %r" % (next, d), level=NOISY)
            heapq.heappop(self.milestones)
            eventually(d.callback, None)

        if milestone >= self.download_size:
    def overwrite(self, offset, data):
        # Record a client write of [offset, offset+len(data)) into the
        # temporary file.  (Several lines are elided in this view.)
        if noisy: self.log(".overwrite(%r, <data of length %r>)" % (offset, len(data)), level=NOISY)
        if offset > self.current_size:
            # Normally writing at an offset beyond the current end-of-file
            # would leave a hole that appears filled with zeroes. However, an
            # EncryptedTemporaryFile doesn't behave like that (if there is a
            # hole in the file on disk, the zeroes that are read back will be
            # XORed with the keystream). So we must explicitly write zeroes in
            # the gap between the current EOF and the offset.

            self.f.seek(self.current_size)
            self.f.write("\x00" * (offset - self.current_size))
            start = self.current_size

        end = offset + len(data)
        self.current_size = max(self.current_size, end)
        if end > self.downloaded:
            # the download hasn't reached here yet: remember this overwrite
            # so later downloaded bytes do not clobber it
            heapq.heappush(self.overwrites, (start, end))

    def read(self, offset, length):
        """When the data has been read, callback the Deferred that we return with this data.
        Otherwise errback the Deferred that we return.
        The caller must perform no more overwrites until the Deferred has fired."""

        if noisy: self.log(".read(%r, %r), current_size = %r" % (offset, length, self.current_size), level=NOISY)
        if offset >= self.current_size:
            def _eof(): raise EOFError("read past end of file")
            return defer.execute(_eof)

        if offset + length > self.current_size:
            length = self.current_size - offset
            if noisy: self.log("truncating read to %r bytes" % (length,), level=NOISY)

        # wait until enough of the file has been downloaded to satisfy the read
        needed = min(offset + length, self.download_size)
        d = self.when_reached(needed)

        # It is not necessarily the case that self.downloaded >= needed, because
        # the file might have been truncated (thus truncating the download) and
        # (body of the local _reached callback; its def line is elided here)
            assert self.current_size >= offset + length, (self.current_size, offset, length)
            if noisy: self.log("self.f = %r" % (self.f,), level=NOISY)
            return self.f.read(length)
        d.addCallback(_reached)

    def when_reached(self, index):
        # Return a Deferred that fires once self.downloaded >= index.
        # (Several lines are elided in this view, including the creation
        # of 'd'.)
        if noisy: self.log(".when_reached(%r)" % (index,), level=NOISY)
        if index <= self.downloaded: # already reached
            if noisy: self.log("already reached %r" % (index,), level=NOISY)
            return defer.succeed(None)

            if noisy: self.log("reached %r" % (index,), level=NOISY)
        d.addCallback(_reached)
        heapq.heappush(self.milestones, (index, d))
        """Called by the producer when it has finished producing, or when we have
        received enough bytes, or as a result of a close. Defined by IFinishableConsumer."""

        # Flush all outstanding milestones: no more bytes will arrive.
        while len(self.milestones) > 0:
            (next, d) = self.milestones[0]
            if noisy: self.log("MILESTONE FINISH %r %r" % (next, d), level=NOISY)
            heapq.heappop(self.milestones)
            # The callback means that the milestone has been reached if
            # it is ever going to be. Note that the file may have been
            # truncated to before the milestone.
            eventually(d.callback, None)

        # close() fragment (def line elided): close the temp file exactly
        # once, logging rather than propagating any error from the close.
        if not self.is_closed:
            self.is_closed = True
                self.log("suppressed %r from close of temporary file %r" % (e, self.f), level=WEIRD)

    def unregisterProducer(self):

# Immutable files no larger than this are downloaded whole to memory and
# served by ShortReadOnlySFTPFile when opened read-only.
SIZE_THRESHOLD = 1000
class ShortReadOnlySFTPFile(PrefixingLogMixin):
    implements(ISFTPFile)
    """I represent a file handle to a particular file on an SFTP connection.
    I am used only for short immutable files opened in read-only mode.
    When I am created, the file contents start to be downloaded to memory.
    self.async is used to delay read requests until the download has finished."""

    def __init__(self, userpath, filenode, metadata):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
        if noisy: self.log(".__init__(%r, %r, %r)" % (userpath, filenode, metadata), level=NOISY)

        assert isinstance(userpath, str) and IFileNode.providedBy(filenode), (userpath, filenode)
        self.filenode = filenode
        self.metadata = metadata
        # start the whole-file download now; reads wait on this Deferred
        self.async = download_to_data(filenode)
    def readChunk(self, offset, length):
        request = ".readChunk(%r, %r)" % (offset, length)
        self.log(request, level=OPERATIONAL)

        # refuse reads on a closed handle (the guarding 'if' is elided here)
        def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
        return defer.execute(_closed)

        # body of the local _read(data) callback (its def line is elided):
            if noisy: self.log("_read(<data of length %r>) in readChunk(%r, %r)" % (len(data), offset, length), level=NOISY)

            # "In response to this request, the server will read as many bytes as it
            # can from the file (up to 'len'), and return them in a SSH_FXP_DATA
            # message. If an error occurs or EOF is encountered before reading any
            # data, the server will respond with SSH_FXP_STATUS. For normal disk
            # files, it is guaranteed that this will read the specified number of
            # bytes, or up to end of file."
            #
            # i.e. we respond with an EOF error iff offset is already at EOF.

            if offset >= len(data):
                eventually(d.errback, SFTPError(FX_EOF, "read at or past end of file"))
            eventually(d.callback, data[offset:offset+length]) # truncated if offset+length > len(data)
        self.async.addCallbacks(_read, eventually_errback(d))
        d.addBoth(_convert_error, request)
574 def writeChunk(self, offset, data):
575 self.log(".writeChunk(%r, <data of length %r>) denied" % (offset, len(data)), level=OPERATIONAL)
577 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
578 return defer.execute(_denied)
        # close() fragment (its def line is elided in this view)
        self.log(".close()", level=OPERATIONAL)

        return defer.succeed(None)

        # getAttrs() fragment (its def line is elided in this view)
        request = ".getAttrs()"
        self.log(request, level=OPERATIONAL)

        # refuse on a closed handle (the guarding 'if' is elided here)
        def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
        return defer.execute(_closed)

        d = defer.execute(_populate_attrs, self.filenode, self.metadata)
        d.addBoth(_convert_error, request)
598 def setAttrs(self, attrs):
599 self.log(".setAttrs(%r) denied" % (attrs,), level=OPERATIONAL)
600 def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
601 return defer.execute(_denied)
class GeneralSFTPFile(PrefixingLogMixin):
    implements(ISFTPFile)
    """I represent a file handle to a particular file on an SFTP connection.
    I wrap an instance of OverwriteableFileConsumer, which is responsible for
    storing the file contents. In order to allow write requests to be satisfied
    immediately, there is effectively a FIFO queue between requests made to this
    file handle, and requests to my OverwriteableFileConsumer. This queue is
    implemented by the callback chain of self.async.

    When first constructed, I am in an 'unopened' state that causes most
    operations to be delayed until 'open' is called."""

    def __init__(self, userpath, flags, close_notify, convergence):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=userpath)
        if noisy: self.log(".__init__(%r, %r = %r, %r, <convergence censored>)" %
                           (userpath, flags, _repr_flags(flags), close_notify), level=NOISY)

        assert isinstance(userpath, str), userpath
        self.userpath = userpath

        self.close_notify = close_notify
        self.convergence = convergence
        # FIFO of pending operations; fires once 'open' has set things up
        self.async = defer.Deferred()
        # Creating or truncating the file is a change, but if FXF_EXCL is set, a zero-length file has already been created.
        self.has_changed = (flags & (FXF_CREAT | FXF_TRUNC)) and not (flags & FXF_EXCL)

        self.abandoned = False

        self.childname = None

        # self.consumer should only be relied on in callbacks for self.async, since it might
        # not be set before then.
    def open(self, parent=None, childname=None, filenode=None, metadata=None):
        # Transition out of the 'unopened' state and start populating the
        # consumer.  (Several lines are elided in this view.)
        self.log(".open(parent=%r, childname=%r, filenode=%r, metadata=%r)" %
                 (parent, childname, filenode, metadata), level=OPERATIONAL)

        assert isinstance(childname, (unicode, NoneType)), childname
        # If the file has been renamed, the new (parent, childname) takes precedence.
        if self.parent is None:
        if self.childname is None:
            self.childname = childname
        self.filenode = filenode
        self.metadata = metadata

        assert not self.closed, self
        tempfile_maker = EncryptedTemporaryFile

        if (self.flags & FXF_TRUNC) or not filenode:
            # We're either truncating or creating the file, so we don't need the old contents.
            self.consumer = OverwriteableFileConsumer(0, tempfile_maker)
            self.consumer.finish()

            assert IFileNode.providedBy(filenode), filenode

            if filenode.is_mutable():
                # mutable: download the whole best version, then serve from it
                self.async.addCallback(lambda ign: filenode.download_best_version())
                def _downloaded(data):
                    self.consumer = OverwriteableFileConsumer(len(data), tempfile_maker)
                    self.consumer.write(data)
                    self.consumer.finish()
                self.async.addCallback(_downloaded)

                # immutable branch: stream the file into the consumer
                download_size = filenode.get_size()
                assert download_size is not None, "download_size is None"
                self.consumer = OverwriteableFileConsumer(download_size, tempfile_maker)

                # body of the local _read callback (its def line is elided):
                    if noisy: self.log("_read immutable", level=NOISY)
                    filenode.read(self.consumer, 0, None)
                self.async.addCallback(_read)

        eventually(self.async.callback, None)

        if noisy: self.log("open done", level=NOISY)

    def get_userpath(self):

    def get_direntry(self):
        # direntry key for the heisenfile tables (see _direntry_for)
        return _direntry_for(self.parent, self.childname)

    def rename(self, new_userpath, new_parent, new_childname):
        # Repoint this open handle at a new location; open() and close()
        # honour the new (parent, childname).
        self.log(".rename(%r, %r, %r)" % (new_userpath, new_parent, new_childname), level=OPERATIONAL)

        assert isinstance(new_userpath, str) and isinstance(new_childname, unicode), (new_userpath, new_childname)
        self.userpath = new_userpath
        self.parent = new_parent
        self.childname = new_childname
        # abandon() fragment (def line elided): mark this handle so that
        # close will not commit its contents.
        self.log(".abandon()", level=OPERATIONAL)

        self.abandoned = True

    def sync(self, ign=None):
        # The ign argument allows some_file.sync to be used as a callback.
        self.log(".sync()", level=OPERATIONAL)

        # queue behind all pending operations; fire 'd' when they settle
        self.async.addBoth(eventually_callback(d))
            if noisy: self.log("_done(%r) in .sync()" % (res,), level=NOISY)

    def readChunk(self, offset, length):
        # (Several lines are elided in this view, including the closed-handle
        # guard and the def line of the local _read callback.)
        request = ".readChunk(%r, %r)" % (offset, length)
        self.log(request, level=OPERATIONAL)

        if not (self.flags & FXF_READ):
            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for reading")
            return defer.execute(_denied)

            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot read from a closed file handle")
            return defer.execute(_closed)

            if noisy: self.log("_read in readChunk(%r, %r)" % (offset, length), level=NOISY)
            d2 = self.consumer.read(offset, length)
            d2.addCallbacks(eventually_callback(d), eventually_errback(d))
            # It is correct to drop d2 here.
        self.async.addCallbacks(_read, eventually_errback(d))
        d.addBoth(_convert_error, request)
    def writeChunk(self, offset, data):
        # (Several lines are elided in this view, including the closed-handle
        # guard and the def line of the local _write callback.)
        self.log(".writeChunk(%r, <data of length %r>)" % (offset, len(data)), level=OPERATIONAL)

        if not (self.flags & FXF_WRITE):
            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
            return defer.execute(_denied)

            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot write to a closed file handle")
            return defer.execute(_closed)

        # mark dirty before queueing, so close knows a commit is needed
        self.has_changed = True

        # Note that we return without waiting for the write to occur. Reads and
        # close wait for prior writes, and will fail if any prior operation failed.
        # This is ok because SFTP makes no guarantee that the write completes
        # before the request does. In fact it explicitly allows write errors to be
        # delayed until close:
        #   "One should note that on some server platforms even a close can fail.
        #    This can happen e.g. if the server operating system caches writes,
        #    and an error occurs while flushing cached writes during the close."

            if noisy: self.log("_write in .writeChunk(%r, <data of length %r>), current_size = %r" %
                               (offset, len(data), self.consumer.get_current_size()), level=NOISY)
            # FXF_APPEND means that we should always write at the current end of file.
            write_offset = offset
            if self.flags & FXF_APPEND:
                write_offset = self.consumer.get_current_size()

            self.consumer.overwrite(write_offset, data)
            if noisy: self.log("overwrite done", level=NOISY)
        self.async.addCallback(_write)
        # don't addErrback to self.async, just allow subsequent async ops to fail.
        return defer.succeed(None)
        # close() body (its def line and the request string are elided in
        # this view, as are several other lines).
        self.log(request, level=OPERATIONAL)

        return defer.succeed(None)

        # This means that close has been called, not that the close has succeeded.

        if not (self.flags & (FXF_WRITE | FXF_CREAT)):
            # read-only handle: nothing to commit, just close the consumer
            def _readonly_close():
                self.consumer.close()
            return defer.execute(_readonly_close)

        # We must capture the abandoned, parent, and childname variables synchronously
        # at the close call. This is needed by the correctness arguments in the comments
        # for _abandon_any_heisenfiles and _rename_heisenfiles.
        # Note that the file must have been opened before it can be closed.
        abandoned = self.abandoned
        childname = self.childname

        # has_changed is set when writeChunk is called, not when the write occurs, so
        # it is correct to optimize out the commit if it is False at the close call.
        has_changed = self.has_changed

        # body of the local _committed callback (its def line is elided):
            if noisy: self.log("_committed(%r)" % (res,), level=NOISY)

            self.consumer.close()

            # We must close_notify before re-firing self.async.
            if self.close_notify:
                self.close_notify(self.userpath, self.parent, self.childname, self)

            # commit path: wait for the download, then publish or upload
            d2 = self.consumer.when_done()
            if self.filenode and self.filenode.is_mutable():
                self.log("update mutable file %r childname=%r metadata=%r" % (self.filenode, childname, self.metadata), level=OPERATIONAL)
                if self.metadata.get('no-write', False) and not self.filenode.is_readonly():
                    assert parent and childname, (parent, childname, self.metadata)
                    d2.addCallback(lambda ign: parent.set_metadata_for(childname, self.metadata))

                d2.addCallback(lambda ign: self.consumer.get_current_size())
                d2.addCallback(lambda size: self.consumer.read(0, size))
                d2.addCallback(lambda new_contents: self.filenode.overwrite(new_contents))

                # immutable: upload the temp-file contents, link into parent
                self.log("_add_file childname=%r" % (childname,), level=OPERATIONAL)
                u = FileHandle(self.consumer.get_file(), self.convergence)
                return parent.add_file(childname, u, metadata=self.metadata)
            d2.addCallback(_add_file)

            d2.addBoth(_committed)

        # If the file has been abandoned, we don't want the close operation to get "stuck",
        # even if self.async fails to re-fire. Doing the close independently of self.async
        # in that case ensures that dropping an ssh connection is sufficient to abandon
        # any heisenfiles that were not explicitly closed in that connection.
        if abandoned or not has_changed:
            d.addCallback(_committed)

        self.async.addCallback(_close)

        self.async.addCallbacks(eventually_callback(d), eventually_errback(d))
        d.addBoth(_convert_error, request)
        # getAttrs() body (its def line is elided in this view)
        request = ".getAttrs()"
        self.log(request, level=OPERATIONAL)

        def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot get attributes for a closed file handle")
        return defer.execute(_closed)

        # Optimization for read-only handles, when we already know the metadata.
        if not (self.flags & (FXF_WRITE | FXF_CREAT)) and self.metadata and self.filenode and not self.filenode.is_mutable():
            return defer.succeed(_populate_attrs(self.filenode, self.metadata))

        # body of the local _get callback (its def line is elided):
            if noisy: self.log("_get(%r) in %r, filenode = %r, metadata = %r" % (ign, request, self.filenode, self.metadata), level=NOISY)

            # self.filenode might be None, but that's ok.
            attrs = _populate_attrs(self.filenode, self.metadata, size=self.consumer.get_current_size())
            eventually(d.callback, attrs)
        self.async.addCallbacks(_get, eventually_errback(d))
        d.addBoth(_convert_error, request)

    def setAttrs(self, attrs, only_if_at=None):
        # (Several lines are elided in this view, including the closed-handle
        # guard and the def line of the local _set callback.)
        request = ".setAttrs(%r, only_if_at=%r)" % (attrs, only_if_at)
        self.log(request, level=OPERATIONAL)

        if not (self.flags & FXF_WRITE):
            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "file handle was not opened for writing")
            return defer.execute(_denied)

            def _closed(): raise SFTPError(FX_BAD_MESSAGE, "cannot set attributes for a closed file handle")
            return defer.execute(_closed)

        # validate any requested size before queueing the operation
        size = attrs.get("size", None)
        if size is not None and (not isinstance(size, (int, long)) or size < 0):
            def _bad(): raise SFTPError(FX_BAD_MESSAGE, "new size is not a valid nonnegative integer")
            return defer.execute(_bad)

            if noisy: self.log("_set(%r) in %r" % (ign, request), level=NOISY)
            current_direntry = _direntry_for(self.parent, self.childname, self.filenode)
            if only_if_at and only_if_at != current_direntry:
                if noisy: self.log("not setting attributes: current_direntry=%r in %r" %
                                   (current_direntry, request), level=NOISY)

            self.metadata = update_metadata(self.metadata, _attrs_to_metadata(attrs), now)

            # TODO: should we refuse to truncate a file opened with FXF_APPEND?
            # <http://allmydata.org/trac/tahoe-lafs/ticket/1037#comment:20>
            self.consumer.set_current_size(size)
            eventually(d.callback, None)
        self.async.addCallbacks(_set, eventually_errback(d))
        d.addBoth(_convert_error, request)
914 def __init__(self, items):
924 def __init__(self, value):
928 # A "heisenfile" is a file that has been opened with write flags
929 # (FXF_WRITE and/or FXF_CREAT) and not yet close-notified.
930 # 'all_heisenfiles' maps from a direntry string to a list of
933 # A direntry string is parent_write_uri + "/" + childname_utf8 for
934 # an immutable file, or file_write_uri for a mutable file.
935 # Updates to this dict are single-threaded.
940 global all_heisenfiles
class SFTPUserHandler(ConchUser, PrefixingLogMixin):
    implements(ISFTPServer)
    # One instance per authenticated SFTP user session; serves that user's
    # requests against their Tahoe root directory node.
    def __init__(self, client, rootnode, username):
        ConchUser.__init__(self)
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp", prefix=username)
        if noisy: self.log(".__init__(%r, %r, %r)" % (client, rootnode, username), level=NOISY)

        # wire this user into conch's session/subsystem dispatch
        self.channelLookup["session"] = session.SSHSession
        self.subsystemLookup["sftp"] = FileTransferServer

        self._client = client
        self._root = rootnode
        self._username = username
        self._convergence = client.convergence

        # maps from UTF-8 paths for this user, to files written and still open
        self._heisenfiles = {}
    def gotVersion(self, otherVersion, extData):
        """Called by the SFTP machinery with the client's protocol version;
        returns the dict of extension names to versions that we advertise."""
        self.log(".gotVersion(%r, %r)" % (otherVersion, extData), level=OPERATIONAL)

        # advertise the same extensions as the OpenSSH SFTP server
        # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>
        return {'posix-rename@openssh.com': '1',
                'statvfs@openssh.com': '2',
                'fstatvfs@openssh.com': '2',
972 self.log(".logout()", level=OPERATIONAL)
974 for files in self._heisenfiles.itervalues():
    def _add_heisenfile_by_path(self, file):
        """Index an open heisenfile in this user's table, keyed by its UTF-8 userpath."""
        self.log("._add_heisenfile_by_path(%r)" % (file,), level=OPERATIONAL)

        userpath = file.get_userpath()
        # append to the list of files already open at this path, if any
        if userpath in self._heisenfiles:
            self._heisenfiles[userpath] += [file]
            self._heisenfiles[userpath] = [file]
    def _add_heisenfile_by_direntry(self, file):
        """Index an open heisenfile in the global all_heisenfiles dict,
        keyed by its direntry string."""
        self.log("._add_heisenfile_by_direntry(%r)" % (file,), level=OPERATIONAL)

        direntry = file.get_direntry()
        # append to the list of files already open at this direntry, if any
        if direntry in all_heisenfiles:
            all_heisenfiles[direntry] += [file]
            all_heisenfiles[direntry] = [file]
    def _abandon_any_heisenfiles(self, userpath, direntry):
        """Abandon and de-index every open heisenfile matching userpath or
        direntry; report whether any were found."""
        request = "._abandon_any_heisenfiles(%r, %r)" % (userpath, direntry)
        self.log(request, level=OPERATIONAL)

        assert isinstance(userpath, str), userpath

        # First we synchronously mark all heisenfiles matching the userpath or direntry
        # as abandoned, and remove them from the two heisenfile dicts. Then we .sync()
        # each file that we abandoned.
        # For each file, the call to .abandon() occurs:
        # * before the file is closed, in which case it will never be committed
        #   (uploaded+linked or published); or
        # * after it is closed but before it has been close_notified, in which case the
        #   .sync() ensures that it has been committed (successfully or not) before we
        # This avoids a race that might otherwise cause the file to be committed after
        # the remove operation has completed.
        # We return a Deferred that fires with True if any files were abandoned (this
        # does not mean that they were not committed; it is used to determine whether
        # a NoSuchChildError from the attempt to delete the file should be suppressed).

        if direntry in all_heisenfiles:
            files = all_heisenfiles[direntry]
            del all_heisenfiles[direntry]
        if userpath in self._heisenfiles:
            files += self._heisenfiles[userpath]
            del self._heisenfiles[userpath]

        if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)

        d = defer.succeed(None)
        self.log("done %r" % (request,), level=OPERATIONAL)
        return len(files) > 0
    def _rename_heisenfiles(self, from_userpath, from_parent, from_childname,
                            to_userpath, to_parent, to_childname, overwrite=True):
        """Re-key every open heisenfile matching the 'from' userpath or
        direntry so it will be committed at the 'to' location instead;
        report whether any were renamed."""
        request = ("._rename_heisenfiles(%r, %r, %r, %r, %r, %r, overwrite=%r)" %
                   (from_userpath, from_parent, from_childname, to_userpath, to_parent, to_childname, overwrite))
        self.log(request, level=OPERATIONAL)

        assert (isinstance(from_userpath, str) and isinstance(from_childname, unicode) and
                isinstance(to_userpath, str) and isinstance(to_childname, unicode)), \
               (from_userpath, from_childname, to_userpath, to_childname)

        if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY)

        # First we synchronously rename all heisenfiles matching the userpath or direntry.
        # Then we .sync() each file that we renamed.
        # For each file, the call to .rename occurs:
        # * before the file is closed, in which case it will be committed at the
        # * after it is closed but before it has been close_notified, in which case the
        #   .sync() ensures that it has been committed (successfully or not) before we
        # This avoids a race that might otherwise cause the file to be committed at the
        # old name after the rename operation has completed.
        # Note that if overwrite is False, the caller should already have checked
        # whether a real direntry exists at the destination. It is possible that another
        # direntry (heisen or real) comes to exist at the destination after that check,
        # but in that case it is correct for the rename to succeed (and for the commit
        # of the heisenfile at the destination to possibly clobber the other entry, since
        # that can happen anyway when we have concurrent write handles to the same direntry).
        # We return a Deferred that fires with True if any files were renamed (this
        # does not mean that they were not committed; it is used to determine whether
        # a NoSuchChildError from the rename attempt should be suppressed). If overwrite
        # is False and there were already heisenfiles at the destination userpath or
        # direntry, we return a Deferred that fails with SFTPError(FX_PERMISSION_DENIED).

        from_direntry = _direntry_for(from_parent, from_childname)
        to_direntry = _direntry_for(to_parent, to_childname)

        if noisy: self.log("from_direntry = %r, to_direntry = %r, len(all_heisenfiles) = %r, len(self._heisenfiles) = %r in %r" %
                           (from_direntry, to_direntry, len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY)

        if not overwrite and (to_userpath in self._heisenfiles or to_direntry in all_heisenfiles):
            def _existing(): raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
            if noisy: self.log("existing", level=NOISY)
            return defer.execute(_existing)

        if from_direntry in all_heisenfiles:
            from_files = all_heisenfiles[from_direntry]
            del all_heisenfiles[from_direntry]
        if from_userpath in self._heisenfiles:
            from_files += self._heisenfiles[from_userpath]
            del self._heisenfiles[from_userpath]

        if noisy: self.log("from_files = %r in %r" % (from_files, request), level=NOISY)

        # re-key each moved file under both indexes at its new location
        for f in from_files:
            f.rename(to_userpath, to_parent, to_childname)
            self._add_heisenfile_by_path(f)
            self._add_heisenfile_by_direntry(f)

        d = defer.succeed(None)
        for f in from_files:
        if noisy: self.log("done: len(all_heisenfiles) = %r, len(self._heisenfiles) = %r in %r" %
                           (len(all_heisenfiles), len(self._heisenfiles), request), level=NOISY)
        return len(from_files) > 0
    def _update_attrs_for_heisenfiles(self, userpath, direntry, attrs):
        """Apply attrs to every open heisenfile matching userpath or direntry;
        report whether any such files were found."""
        request = "._update_attrs_for_heisenfiles(%r, %r, %r)" % (userpath, direntry, attrs)
        self.log(request, level=OPERATIONAL)

        assert isinstance(userpath, str) and isinstance(direntry, str), (userpath, direntry)

        if direntry in all_heisenfiles:
            files = all_heisenfiles[direntry]
        if userpath in self._heisenfiles:
            files += self._heisenfiles[userpath]

        if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)

        # We set the metadata for all heisenfiles at this path or direntry.
        # Since a direntry includes a write URI, we must have authority to
        # change the metadata of heisenfiles found in the all_heisenfiles dict.
        # However that's not necessarily the case for heisenfiles found by
        # path. Therefore we tell the setAttrs method of each file to only
        # perform the update if the file is at the correct direntry.

        d = defer.succeed(None)
        d.addBoth(f.setAttrs, attrs, only_if_at=direntry)
        self.log("done %r" % (request,), level=OPERATIONAL)
        # TODO: this should not return True if only_if_at caused all files to be skipped.
        return len(files) > 0
    def _sync_heisenfiles(self, userpath, direntry, ignore=None):
        """Sync every open heisenfile matching userpath or direntry, so that
        any pending commit completes before the caller proceeds; 'ignore' is
        a file to exclude from the sync."""
        request = "._sync_heisenfiles(%r, %r, ignore=%r)" % (userpath, direntry, ignore)
        self.log(request, level=OPERATIONAL)

        assert isinstance(userpath, str) and isinstance(direntry, (str, NoneType)), (userpath, direntry)

        if direntry in all_heisenfiles:
            files = all_heisenfiles[direntry]
        if userpath in self._heisenfiles:
            files += self._heisenfiles[userpath]

        if noisy: self.log("files = %r in %r" % (files, request), level=NOISY)

        d = defer.succeed(None)
        self.log("done %r" % (request,), level=OPERATIONAL)
    def _remove_heisenfile(self, userpath, parent, childname, file_to_remove):
        """Drop file_to_remove from both heisenfile indexes (used as the
        close-notify callback for GeneralSFTPFile)."""
        if noisy: self.log("._remove_heisenfile(%r, %r, %r, %r)" % (userpath, parent, childname, file_to_remove), level=NOISY)

        assert isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)), (userpath, childname)

        direntry = _direntry_for(parent, childname)
        if direntry in all_heisenfiles:
            all_old_files = all_heisenfiles[direntry]
            # filter by identity, not equality, so only this exact handle is removed
            all_new_files = [f for f in all_old_files if f is not file_to_remove]
            if len(all_new_files) > 0:
                all_heisenfiles[direntry] = all_new_files
                del all_heisenfiles[direntry]

        if userpath in self._heisenfiles:
            old_files = self._heisenfiles[userpath]
            new_files = [f for f in old_files if f is not file_to_remove]
            if len(new_files) > 0:
                self._heisenfiles[userpath] = new_files
                del self._heisenfiles[userpath]

        if noisy: self.log("all_heisenfiles = %r\nself._heisenfiles = %r" % (all_heisenfiles, self._heisenfiles), level=NOISY)
    def _make_file(self, existing_file, userpath, flags, parent=None, childname=None, filenode=None, metadata=None):
        """Choose a file implementation for an open request and open it:
        a ShortReadOnlySFTPFile for small immutable read-only opens,
        otherwise a GeneralSFTPFile (or the existing_file already created)."""
        if noisy: self.log("._make_file(%r, %r, %r = %r, parent=%r, childname=%r, filenode=%r, metadata=%r)" %
                           (existing_file, userpath, flags, _repr_flags(flags), parent, childname, filenode, metadata),
        assert (isinstance(userpath, str) and isinstance(childname, (unicode, NoneType)) and
                (metadata is None or 'no-write' in metadata)), (userpath, childname, metadata)

        writing = (flags & (FXF_WRITE | FXF_CREAT)) != 0
        direntry = _direntry_for(parent, childname, filenode)

        # make sure any previously-open heisenfiles for this file have committed first
        d = self._sync_heisenfiles(userpath, direntry, ignore=existing_file)

        if not writing and (flags & FXF_READ) and filenode and not filenode.is_mutable() and filenode.get_size() <= SIZE_THRESHOLD:
            d.addCallback(lambda ign: ShortReadOnlySFTPFile(userpath, filenode, metadata))
            close_notify = self._remove_heisenfile
            d.addCallback(lambda ign: existing_file or GeneralSFTPFile(userpath, flags, close_notify, self._convergence))
        def _got_file(file):
            file.open(parent=parent, childname=childname, filenode=filenode, metadata=metadata)
            self._add_heisenfile_by_direntry(file)
        d.addCallback(_got_file)
    def openFile(self, pathstring, flags, attrs, delay=None):
        """Handle SSH_FXP_OPEN for both reading and writing; returns (via
        Deferred) an ISFTPFile implementation for the requested path."""
        request = ".openFile(%r, %r = %r, %r, delay=%r)" % (pathstring, flags, _repr_flags(flags), attrs, delay)
        self.log(request, level=OPERATIONAL)

        # This is used for both reading and writing.
        # First exclude invalid combinations of flags, and empty paths.

        if not (flags & (FXF_READ | FXF_WRITE)):
            def _bad_readwrite():
                raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: at least one of FXF_READ and FXF_WRITE must be set")
            return defer.execute(_bad_readwrite)

        if (flags & FXF_EXCL) and not (flags & FXF_CREAT):
            def _bad_exclcreat():
                raise SFTPError(FX_BAD_MESSAGE, "invalid file open flags: FXF_EXCL cannot be set without FXF_CREAT")
            return defer.execute(_bad_exclcreat)

        path = self._path_from_string(pathstring)
        def _emptypath(): raise SFTPError(FX_NO_SUCH_FILE, "path cannot be empty")
        return defer.execute(_emptypath)

        # The combination of flags is potentially valid.

        # To work around clients that have race condition bugs, a getAttr, rename, or
        # remove request following an 'open' request with FXF_WRITE or FXF_CREAT flags,
        # should succeed even if the 'open' request has not yet completed. So we now
        # synchronously add a file object into the self._heisenfiles dict, indexed
        # by its UTF-8 userpath. (We can't yet add it to the all_heisenfiles dict,
        # because we don't yet have a user-independent path for the file.) The file
        # object does not know its filenode, parent, or childname at this point.

        userpath = self._path_to_utf8(path)

        if flags & (FXF_WRITE | FXF_CREAT):
            file = GeneralSFTPFile(userpath, flags, self._remove_heisenfile, self._convergence)
            self._add_heisenfile_by_path(file)
            # We haven't decided which file implementation to use yet.

        desired_metadata = _attrs_to_metadata(attrs)

        # Now there are two major cases:
        #
        #  1. The path is specified as /uri/FILECAP, with no parent directory.
        #     If the FILECAP is mutable and writeable, then we can open it in write-only
        #     or read/write mode (non-exclusively), otherwise we can only open it in
        #     read-only mode. The open should succeed immediately as long as FILECAP is
        #     a valid known filecap that grants the required permission.
        #
        #  2. The path is specified relative to a parent. We find the parent dirnode and
        #     get the child's URI and metadata if it exists. There are four subcases:
        #       a. the child does not exist: FXF_CREAT must be set, and we must be able
        #          to write to the parent directory.
        #       b. the child exists but is not a valid known filecap: fail
        #       c. the child is mutable: if we are trying to open it write-only or
        #          read/write, then we must be able to write to the file.
        #       d. the child is immutable: if we are trying to open it write-only or
        #          read/write, then we must be able to write to the parent directory.
        #
        # To reduce latency, open normally succeeds as soon as these conditions are
        # met, even though there might be a failure in downloading the existing file
        # or uploading a new one. However, there is an exception: if a file has been
        # written, then closed, and is now being reopened, then we have to delay the
        # open until the previous upload/publish has completed. This is necessary
        # because sshfs does not wait for the result of an FXF_CLOSE message before
        # reporting to the client that a file has been closed. It applies both to
        # mutable files, and to directory entries linked to an immutable file.
        #
        # Note that the permission checks below are for more precise error reporting on
        # the open call; later operations would fail even if we did not make these checks.

        d = delay or defer.succeed(None)
        d.addCallback(lambda ign: self._get_root(path))
        def _got_root( (root, path) ):
            if root.is_unknown():
                raise SFTPError(FX_PERMISSION_DENIED,
                                "cannot open an unknown cap (or child of an unknown object). "
                                "Upgrading the gateway to a later Tahoe-LAFS version may help")

            # case 1: the path names a filecap directly, with no parent directory
            if noisy: self.log("case 1: root = %r, path[:-1] = %r" % (root, path[:-1]), level=NOISY)
            if not IFileNode.providedBy(root):
                raise SFTPError(FX_PERMISSION_DENIED,
                                "cannot open a directory cap")
            if (flags & FXF_WRITE) and root.is_readonly():
                raise SFTPError(FX_PERMISSION_DENIED,
                                "cannot write to a non-writeable filecap without a parent directory")
            if flags & FXF_EXCL:
                raise SFTPError(FX_FAILURE,
                                "cannot create a file exclusively when it already exists")

            # The file does not need to be added to all_heisenfiles, because it is not
            # associated with a directory entry that needs to be updated.

            metadata = update_metadata(None, desired_metadata, time())

            # We have to decide what to pass for the 'parent_readonly' argument to _no_write,
            # given that we don't actually have a parent. This only affects the permissions
            # reported by a getAttrs on this file handle in the case of an immutable file.
            # We choose 'parent_readonly=True' since that will cause the permissions to be
            # reported as r--r--r--, which is appropriate because an immutable file can't be
            # written via this path.

            metadata['no-write'] = _no_write(True, root)
            return self._make_file(file, userpath, flags, filenode=root, metadata=metadata)

            # case 2: the path is relative to a parent directory
            childname = path[-1]

            if noisy: self.log("case 2: root = %r, childname = %r, desired_metadata = %r, path[:-1] = %r" %
                               (root, childname, desired_metadata, path[:-1]), level=NOISY)
            d2 = root.get_child_at_path(path[:-1])
            def _got_parent(parent):
                if noisy: self.log("_got_parent(%r)" % (parent,), level=NOISY)
                if parent.is_unknown():
                    raise SFTPError(FX_PERMISSION_DENIED,
                                    "cannot open a child of an unknown object. "
                                    "Upgrading the gateway to a later Tahoe-LAFS version may help")

                parent_readonly = parent.is_readonly()
                d3 = defer.succeed(None)
                if flags & FXF_EXCL:
                    # FXF_EXCL means that the link to the file (not the file itself) must
                    # be created atomically wrt updates by this storage client.
                    # That is, we need to create the link before returning success to the
                    # SFTP open request (and not just on close, as would normally be the
                    # case). We make the link initially point to a zero-length LIT file,
                    # which is consistent with what might happen on a POSIX filesystem.

                    raise SFTPError(FX_FAILURE,
                                    "cannot create a file exclusively when the parent directory is read-only")

                    # 'overwrite=False' ensures failure if the link already exists.
                    # FIXME: should use a single call to set_uri and return (child, metadata) (#1035)

                    zero_length_lit = "URI:LIT:"
                    if noisy: self.log("%r.set_uri(%r, None, readcap=%r, overwrite=False)" %
                                       (parent, zero_length_lit, childname), level=NOISY)
                    d3.addCallback(lambda ign: parent.set_uri(childname, None, readcap=zero_length_lit,
                                                              metadata=desired_metadata, overwrite=False))
                    def _seturi_done(child):
                        if noisy: self.log("%r.get_metadata_for(%r)" % (parent, childname), level=NOISY)
                        d4 = parent.get_metadata_for(childname)
                        d4.addCallback(lambda metadata: (child, metadata))
                    d3.addCallback(_seturi_done)
                    if noisy: self.log("%r.get_child_and_metadata(%r)" % (parent, childname), level=NOISY)
                    d3.addCallback(lambda ign: parent.get_child_and_metadata(childname))

                def _got_child( (filenode, current_metadata) ):
                    if noisy: self.log("_got_child( (%r, %r) )" % (filenode, current_metadata), level=NOISY)

                    metadata = update_metadata(current_metadata, desired_metadata, time())

                    # Ignore the permissions of the desired_metadata in an open call. The permissions
                    # can only be set by setAttrs.
                    metadata['no-write'] = _no_write(parent_readonly, filenode, current_metadata)

                    if filenode.is_unknown():
                        raise SFTPError(FX_PERMISSION_DENIED,
                                        "cannot open an unknown cap. Upgrading the gateway "
                                        "to a later Tahoe-LAFS version may help")
                    if not IFileNode.providedBy(filenode):
                        raise SFTPError(FX_PERMISSION_DENIED,
                                        "cannot open a directory as if it were a file")
                    if (flags & FXF_WRITE) and metadata['no-write']:
                        raise SFTPError(FX_PERMISSION_DENIED,
                                        "cannot open a non-writeable file for writing")

                    return self._make_file(file, userpath, flags, parent=parent, childname=childname,
                                           filenode=filenode, metadata=metadata)
                    if noisy: self.log("_no_child(%r)" % (f,), level=NOISY)
                    f.trap(NoSuchChildError)

                    if not (flags & FXF_CREAT):
                        raise SFTPError(FX_NO_SUCH_FILE,
                                        "the file does not exist, and was not opened with the creation (CREAT) flag")
                        raise SFTPError(FX_PERMISSION_DENIED,
                                        "cannot create a file when the parent directory is read-only")

                    return self._make_file(file, userpath, flags, parent=parent, childname=childname)
                d3.addCallbacks(_got_child, _no_child)
            d2.addCallback(_got_parent)
        d.addCallback(_got_root)
        def _remove_on_error(err):
            # undo the speculative heisenfile registration made above for write opens
            self._remove_heisenfile(userpath, None, None, file)
        d.addErrback(_remove_on_error)
        d.addBoth(_convert_error, request)
    def renameFile(self, from_pathstring, to_pathstring, overwrite=False):
        """Handle SSH_FXP_RENAME (overwrite=False) and the
        posix-rename@openssh.com extension (overwrite=True)."""
        request = ".renameFile(%r, %r)" % (from_pathstring, to_pathstring)
        self.log(request, level=OPERATIONAL)

        from_path = self._path_from_string(from_pathstring)
        to_path = self._path_from_string(to_pathstring)
        from_userpath = self._path_to_utf8(from_path)
        to_userpath = self._path_to_utf8(to_path)

        # the target directory must already exist
        d = deferredutil.gatherResults([self._get_parent_or_node(from_path),
                                        self._get_parent_or_node(to_path)])
        def _got( (from_pair, to_pair) ):
            if noisy: self.log("_got( (%r, %r) ) in .renameFile(%r, %r, overwrite=%r)" %
                               (from_pair, to_pair, from_pathstring, to_pathstring, overwrite), level=NOISY)
            (from_parent, from_childname) = from_pair
            (to_parent, to_childname) = to_pair

            if from_childname is None:
                raise SFTPError(FX_NO_SUCH_FILE, "cannot rename a source object specified by URI")
            if to_childname is None:
                raise SFTPError(FX_NO_SUCH_FILE, "cannot rename to a destination specified by URI")

            # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.5>
            # "It is an error if there already exists a file with the name specified
            # OpenSSH's SFTP server returns FX_PERMISSION_DENIED for this error.
            # For the standard SSH_FXP_RENAME operation, overwrite=False.
            # We also support the posix-rename@openssh.com extension, which uses overwrite=True.

            d2 = defer.succeed(None)
            d2.addCallback(lambda ign: to_parent.get(to_childname))
            def _expect_fail(res):
                # a successful 'get' here means the destination already exists
                if not isinstance(res, Failure):
                    raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)

                # It is OK if we fail for errors other than NoSuchChildError, since that probably
                # indicates some problem accessing the destination directory.
                res.trap(NoSuchChildError)
            d2.addBoth(_expect_fail)

            # If there are heisenfiles to be written at the 'from' direntry, then ensure
            # they will now be written at the 'to' direntry instead.
            d2.addCallback(lambda ign:
                           self._rename_heisenfiles(from_userpath, from_parent, from_childname,
                                                    to_userpath, to_parent, to_childname, overwrite=overwrite))

            # FIXME: use move_child_to_path to avoid possible data loss due to #943
            #d3 = from_parent.move_child_to_path(from_childname, to_root, to_path, overwrite=overwrite)

            d3 = from_parent.move_child_to(from_childname, to_parent, to_childname, overwrite=overwrite)
            if noisy: self.log("_check(%r) in .renameFile(%r, %r, overwrite=%r)" %
                               (err, from_pathstring, to_pathstring, overwrite), level=NOISY)

            if not isinstance(err, Failure) or (renamed and err.check(NoSuchChildError)):
            if not overwrite and err.check(ExistingChildError):
                raise SFTPError(FX_PERMISSION_DENIED, "cannot rename to existing path " + to_userpath)
        d2.addCallback(_move)
        d.addBoth(_convert_error, request)
    def makeDirectory(self, pathstring, attrs):
        """Handle SSH_FXP_MKDIR: create the directory at pathstring,
        creating missing intermediate directories as needed."""
        request = ".makeDirectory(%r, %r)" % (pathstring, attrs)
        self.log(request, level=OPERATIONAL)

        path = self._path_from_string(pathstring)
        metadata = _attrs_to_metadata(attrs)
        if 'no-write' in metadata:
            def _denied(): raise SFTPError(FX_PERMISSION_DENIED, "cannot create a directory that is initially read-only")
            return defer.execute(_denied)

        d = self._get_root(path)
        d.addCallback(lambda (root, path):
                      self._get_or_create_directories(root, path, metadata))
        d.addBoth(_convert_error, request)
    def _get_or_create_directories(self, node, path, metadata):
        """Recursively walk 'path' starting from dirnode 'node', creating
        each missing subdirectory along the way."""
        if not IDirectoryNode.providedBy(node):
            # TODO: provide the name of the blocking file in the error message.
            def _blocked(): raise SFTPError(FX_FAILURE, "cannot create directory because there "
                                                        "is a file in the way") # close enough
            return defer.execute(_blocked)

            return defer.succeed(node)
        d = node.get(path[0])
        def _maybe_create(f):
            # only a missing child justifies creating it; other errors propagate
            f.trap(NoSuchChildError)
            return node.create_subdirectory(path[0])
        d.addErrback(_maybe_create)
        d.addCallback(self._get_or_create_directories, path[1:], metadata)
    def removeFile(self, pathstring):
        """Handle SSH_FXP_REMOVE: delete the file at pathstring."""
        request = ".removeFile(%r)" % (pathstring,)
        self.log(request, level=OPERATIONAL)

        path = self._path_from_string(pathstring)
        d = self._remove_object(path, must_be_file=True)
        d.addBoth(_convert_error, request)
    def removeDirectory(self, pathstring):
        """Handle SSH_FXP_RMDIR: delete the directory at pathstring."""
        request = ".removeDirectory(%r)" % (pathstring,)
        self.log(request, level=OPERATIONAL)

        path = self._path_from_string(pathstring)
        d = self._remove_object(path, must_be_directory=True)
        d.addBoth(_convert_error, request)
    def _remove_object(self, path, must_be_directory=False, must_be_file=False):
        """Unlink the object at 'path' from its parent directory, first
        abandoning any open heisenfiles for it (unless a directory was
        required, in which case heisenfiles cannot apply)."""
        userpath = self._path_to_utf8(path)
        d = self._get_parent_or_node(path)
        def _got_parent( (parent, childname) ):
            if childname is None:
                raise SFTPError(FX_NO_SUCH_FILE, "cannot remove an object specified by URI")

            direntry = _direntry_for(parent, childname)
            d2 = defer.succeed(False)
            if not must_be_directory:
                d2.addCallback(lambda ign: self._abandon_any_heisenfiles(userpath, direntry))

            # if we abandoned open files, the directory entry may legitimately not exist
            d2.addCallback(lambda abandoned:
                           parent.delete(childname, must_exist=not abandoned,
                                         must_be_directory=must_be_directory, must_be_file=must_be_file))
        d.addCallback(_got_parent)
    def openDirectory(self, pathstring):
        """Handle SSH_FXP_OPENDIR: return a listing of the directory at
        pathstring, with 'ls -l'-style longnames and attrs per entry."""
        request = ".openDirectory(%r)" % (pathstring,)
        self.log(request, level=OPERATIONAL)

        path = self._path_from_string(pathstring)
        d = self._get_parent_or_node(path)
        def _got_parent_or_node( (parent_or_node, childname) ):
            if noisy: self.log("_got_parent_or_node( (%r, %r) ) in openDirectory(%r)" %
                               (parent_or_node, childname, pathstring), level=NOISY)
            if childname is None:
                return parent_or_node
                return parent_or_node.get(childname)
        d.addCallback(_got_parent_or_node)
        if dirnode.is_unknown():
            raise SFTPError(FX_PERMISSION_DENIED,
                            "cannot list an unknown cap as a directory. Upgrading the gateway "
                            "to a later Tahoe-LAFS version may help")
        if not IDirectoryNode.providedBy(dirnode):
            raise SFTPError(FX_PERMISSION_DENIED,
                            "cannot list a file as if it were a directory")
        def _render(children):
            parent_readonly = dirnode.is_readonly()
            for filename, (child, metadata) in children.iteritems():
                # The file size may be cached or absent.
                metadata['no-write'] = _no_write(parent_readonly, child, metadata)
                attrs = _populate_attrs(child, metadata)
                filename_utf8 = filename.encode('utf-8')
                longname = _lsLine(filename_utf8, attrs)
                results.append( (filename_utf8, longname, attrs) )
            return StoppableList(results)
        d2.addCallback(_render)
        d.addCallback(_list)
        d.addBoth(_convert_error, request)
    def getAttrs(self, pathstring, followLinks):
        """Handle SSH_FXP_STAT/LSTAT: report the attributes of the object at
        pathstring, falling back to open heisenfiles when the directory
        entry does not exist yet."""
        request = ".getAttrs(%r, followLinks=%r)" % (pathstring, followLinks)
        self.log(request, level=OPERATIONAL)

        # When asked about a specific file, report its current size.
        # TODO: the modification time for a mutable file should be
        # reported as the update time of the best version. But that
        # information isn't currently stored in mutable shares, I think.

        path = self._path_from_string(pathstring)
        userpath = self._path_to_utf8(path)
        d = self._get_parent_or_node(path)
        def _got_parent_or_node( (parent_or_node, childname) ):
            if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)

            # Some clients will incorrectly try to get the attributes
            # of a file immediately after opening it, before it has been put
            # into the all_heisenfiles table. This is a race condition bug in
            # the client, but we handle it anyway by calling .sync() on all
            # files matching either the path or the direntry.

            direntry = _direntry_for(parent_or_node, childname)
            d2 = self._sync_heisenfiles(userpath, direntry)

            if childname is None:
                node = parent_or_node
                d2.addCallback(lambda ign: node.get_current_size())
                d2.addCallback(lambda size:
                               _populate_attrs(node, {'no-write': node.is_unknown() or node.is_readonly()}, size=size))
                parent = parent_or_node
                d2.addCallback(lambda ign: parent.get_child_and_metadata_at_path([childname]))
                def _got( (child, metadata) ):
                    if noisy: self.log("_got( (%r, %r) )" % (child, metadata), level=NOISY)
                    assert IDirectoryNode.providedBy(parent), parent
                    metadata['no-write'] = _no_write(parent.is_readonly(), child, metadata)
                    d3 = child.get_current_size()
                    d3.addCallback(lambda size: _populate_attrs(child, metadata, size=size))
                    if noisy: self.log("_nosuch(%r)" % (err,), level=NOISY)
                    err.trap(NoSuchChildError)
                    if noisy: self.log("checking open files:\nself._heisenfiles = %r\nall_heisenfiles = %r\ndirentry=%r" %
                                       (self._heisenfiles, all_heisenfiles, direntry), level=NOISY)
                    if direntry in all_heisenfiles:
                        files = all_heisenfiles[direntry]
                        if len(files) == 0: # pragma: no cover
                        # use the heisenfile that was most recently opened
                        return files[-1].getAttrs()
                d2.addCallbacks(_got, _nosuch)
        d.addCallback(_got_parent_or_node)
        d.addBoth(_convert_error, request)
1671 def setAttrs(self, pathstring, attrs):
1672 request = ".setAttrs(%r, %r)" % (pathstring, attrs)
1673 self.log(request, level=OPERATIONAL)
1676 # this would require us to download and re-upload the truncated/extended
1678 def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "setAttrs wth size attribute unsupported")
1679 return defer.execute(_unsupported)
1681 path = self._path_from_string(pathstring)
1682 userpath = self._path_to_utf8(path)
1683 d = self._get_parent_or_node(path)
1684 def _got_parent_or_node( (parent_or_node, childname) ):
1685 if noisy: self.log("_got_parent_or_node( (%r, %r) )" % (parent_or_node, childname), level=NOISY)
1687 direntry = _direntry_for(parent_or_node, childname)
1688 d2 = self._update_attrs_for_heisenfiles(userpath, direntry, attrs)
1690 def _update(updated_heisenfiles):
1691 if childname is None:
1692 if updated_heisenfiles:
1694 raise SFTPError(FX_NO_SUCH_FILE, userpath)
1696 desired_metadata = _attrs_to_metadata(attrs)
1697 if noisy: self.log("desired_metadata = %r" % (desired_metadata,), level=NOISY)
1699 d3 = parent_or_node.set_metadata_for(childname, desired_metadata)
1701 if updated_heisenfiles:
1702 err.trap(NoSuchChildError)
1705 d3.addErrback(_nosuch)
1707 d2.addCallback(_update)
1708 d2.addCallback(lambda ign: None)
1710 d.addCallback(_got_parent_or_node)
1711 d.addBoth(_convert_error, request)
1714 def readLink(self, pathstring):
1715 self.log(".readLink(%r)" % (pathstring,), level=OPERATIONAL)
1717 def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "readLink")
1718 return defer.execute(_unsupported)
1720 def makeLink(self, linkPathstring, targetPathstring):
1721 self.log(".makeLink(%r, %r)" % (linkPathstring, targetPathstring), level=OPERATIONAL)
1723 # If this is implemented, note the reversal of arguments described in point 7 of
1724 # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>.
1726 def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "makeLink")
1727 return defer.execute(_unsupported)
    def extendedRequest(self, extensionName, extensionData):
        """Dispatch OpenSSH extended requests: posix-rename@openssh.com,
        statvfs@openssh.com and fstatvfs@openssh.com; anything else fails
        with FX_OP_UNSUPPORTED."""
        self.log(".extendedRequest(%r, <data of length %r>)" % (extensionName, len(extensionData)), level=OPERATIONAL)

        # We implement the three main OpenSSH SFTP extensions; see
        # <http://www.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL?rev=1.15>

        if extensionName == 'posix-rename@openssh.com':
            def _bad(): raise SFTPError(FX_BAD_MESSAGE, "could not parse posix-rename@openssh.com request")

            # the payload is two length-prefixed (uint32, big-endian) path strings
            if 4 > len(extensionData): return defer.execute(_bad)
            (fromPathLen,) = struct.unpack('>L', extensionData[0:4])
            if 8 + fromPathLen > len(extensionData): return defer.execute(_bad)

            (toPathLen,) = struct.unpack('>L', extensionData[(4 + fromPathLen):(8 + fromPathLen)])
            if 8 + fromPathLen + toPathLen != len(extensionData): return defer.execute(_bad)

            fromPathstring = extensionData[4:(4 + fromPathLen)]
            toPathstring = extensionData[(8 + fromPathLen):]
            d = self.renameFile(fromPathstring, toPathstring, overwrite=True)

            # Twisted conch assumes that the response from an extended request is either
            # an error, or an FXP_EXTENDED_REPLY. But it happens to do the right thing
            # (respond with an FXP_STATUS message) if we return a Failure with code FX_OK.
            def _succeeded(ign):
                raise SFTPError(FX_OK, "request succeeded")
            d.addCallback(_succeeded)

        if extensionName == 'statvfs@openssh.com' or extensionName == 'fstatvfs@openssh.com':
            # f_bsize and f_frsize should be the same to avoid a bug in 'df'
            return defer.succeed(struct.pack('>11Q',
                1024,       # uint64 f_bsize     /* file system block size */
                1024,       # uint64 f_frsize    /* fundamental fs block size */
                628318530,  # uint64 f_blocks    /* number of blocks (unit f_frsize) */
                314159265,  # uint64 f_bfree     /* free blocks in file system */
                314159265,  # uint64 f_bavail    /* free blocks for non-root */
                200000000,  # uint64 f_files     /* total file inodes */
                100000000,  # uint64 f_ffree     /* free file inodes */
                100000000,  # uint64 f_favail    /* free file inodes for non-root */
                0x1AF5,     # uint64 f_fsid      /* file system id */
                2,          # uint64 f_flag      /* bit mask = ST_NOSUID; not ST_RDONLY */
                65535,      # uint64 f_namemax   /* maximum filename length */

        def _unsupported(): raise SFTPError(FX_OP_UNSUPPORTED, "unsupported %r request <data of length %r>" %
                                            (extensionName, len(extensionData)))
        return defer.execute(_unsupported)
1777 def realPath(self, pathstring):
1778 self.log(".realPath(%r)" % (pathstring,), level=OPERATIONAL)
1780 return self._path_to_utf8(self._path_from_string(pathstring))
1782 def _path_to_utf8(self, path):
1783 return (u"/" + u"/".join(path)).encode('utf-8')
1785 def _path_from_string(self, pathstring):
1786 if noisy: self.log("CONVERT %r" % (pathstring,), level=NOISY)
1788 assert isinstance(pathstring, str), pathstring
1790 # The home directory is the root directory.
1791 pathstring = pathstring.strip("/")
1792 if pathstring == "" or pathstring == ".":
1795 path_utf8 = pathstring.split("/")
1797 # <http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-6.2>
1798 # "Servers SHOULD interpret a path name component ".." as referring to
1799 # the parent directory, and "." as referring to the current directory."
1801 for p_utf8 in path_utf8:
1803 # ignore excess .. components at the root
1808 p = p_utf8.decode('utf-8', 'strict')
1809 except UnicodeError:
1810 raise SFTPError(FX_NO_SUCH_FILE, "path could not be decoded as UTF-8")
1813 if noisy: self.log(" PATH %r" % (path,), level=NOISY)
1816 def _get_root(self, path):
1817 # return Deferred (root, remaining_path)
1818 d = defer.succeed(None)
1819 if path and path[0] == u"uri":
1820 d.addCallback(lambda ign: self._client.create_node_from_uri(path[1].encode('utf-8')))
1821 d.addCallback(lambda root: (root, path[2:]))
1823 d.addCallback(lambda ign: (self._root, path))
1826 def _get_parent_or_node(self, path):
1827 # return Deferred (parent, childname) or (node, None)
1828 d = self._get_root(path)
1829 def _got_root( (root, remaining_path) ):
1830 if not remaining_path:
1833 d2 = root.get_child_at_path(remaining_path[:-1])
1834 d2.addCallback(lambda parent: (parent, remaining_path[-1]))
1836 d.addCallback(_got_root)
class FakeTransport:
    """A transport that discards everything it is given, logging each call at NOISY level.

    Used as a stand-in where a protocol has no real transport attached.
    """
    implements(ITransport)

    def write(self, data):
        """Discard *data*, logging only its length."""
        message = "FakeTransport.write(<data of length %r>)" % (len(data),)
        logmsg(message, level=NOISY)

    def writeSequence(self, data):
        """Discard the sequence of data chunks."""
        logmsg("FakeTransport.writeSequence(...)", level=NOISY)

    def loseConnection(self):
        """There is no connection to lose; just log the request."""
        logmsg("FakeTransport.loseConnection()", level=NOISY)

    # getPeer and getHost can just raise errors, since we don't know what to return
class ShellSession(PrefixingLogMixin):
    """An ISession that refuses shell access, supporting only enough to tell
    clients that this server is SFTP-only (plus a canned 'df -P -k /' reply).
    """
    implements(ISession)

    def __init__(self, userHandler):
        PrefixingLogMixin.__init__(self, facility="tahoe.sftp")
        if noisy: self.log(".__init__(%r)" % (userHandler), level=NOISY)

    def getPty(self, terminal, windowSize, attrs):
        # No PTY support; just log the request.
        self.log(".getPty(%r, %r, %r)" % (terminal, windowSize, attrs), level=OPERATIONAL)

    def openShell(self, protocol):
        """Refuse an interactive shell: print an explanation and exit with status 1."""
        self.log(".openShell(%r)" % (protocol,), level=OPERATIONAL)
        if hasattr(protocol, 'transport') and protocol.transport is None:
            protocol.transport = FakeTransport()  # work around Twisted bug

        d = defer.succeed(None)
        d.addCallback(lambda ign: protocol.write("This server supports only SFTP, not shell sessions.\n"))
        d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessTerminated(exitCode=1))))
        # BUGFIX: the Deferred was never returned.
        return d

    def execCommand(self, protocol, cmd):
        """Handle 'exec' requests: answer 'df -P -k /' with canned output, fail anything else."""
        self.log(".execCommand(%r, %r)" % (protocol, cmd), level=OPERATIONAL)
        if hasattr(protocol, 'transport') and protocol.transport is None:
            protocol.transport = FakeTransport()  # work around Twisted bug

        d = defer.succeed(None)
        if cmd == "df -P -k /":
            # Some SFTP clients probe free space this way.
            d.addCallback(lambda ign: protocol.write(
                "Filesystem 1024-blocks Used Available Capacity Mounted on\n"
                "tahoe 628318530 314159265 314159265 50% /\n"))
            d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessDone(None))))
        else:
            # BUGFIX: without this else:, the failure callback also fired for
            # the 'df' case, ending the process twice.
            d.addCallback(lambda ign: protocol.processEnded(Reason(ProcessTerminated(exitCode=1))))
        # BUGFIX: the Deferred was never returned.
        return d

    def windowChanged(self, newWindowSize):
        self.log(".windowChanged(%r)" % (newWindowSize,), level=OPERATIONAL)

    def eofReceived(self):
        self.log(".eofReceived()", level=OPERATIONAL)

    def closed(self):
        # BUGFIX: this def line was missing, leaving the log call dangling
        # inside eofReceived.
        self.log(".closed()", level=OPERATIONAL)
# If you have an SFTPUserHandler and want something that provides ISession, you get
# ShellSession(userHandler).
# We use adaptation because this must be a different object to the SFTPUserHandler.
components.registerAdapter(ShellSession, SFTPUserHandler, ISession)
1904 from allmydata.frontends.auth import AccountURLChecker, AccountFileChecker, NeedRootcapLookupScheme
1907 implements(portal.IRealm)
    def __init__(self, client):
        # Kept so requestAvatar can create root nodes and SFTPUserHandlers from it.
        self._client = client
1911 def requestAvatar(self, avatarID, mind, interface):
1912 assert interface == IConchUser, interface
1913 rootnode = self._client.create_node_from_uri(avatarID.rootcap)
1914 handler = SFTPUserHandler(self._client, rootnode, avatarID.username)
1915 return (interface, handler, handler.logout)
class SFTPServer(service.MultiService):
    """A MultiService that listens for SSH connections and serves SFTP.

    Builds a portal over the Dispatcher realm, registers account checkers
    for whichever of accountfile/accounturl is configured, and starts an
    SSH factory on sftp_portstr using the given host key pair.
    """
    def __init__(self, client, accountfile, accounturl,
                 sftp_portstr, pubkey_file, privkey_file):
        service.MultiService.__init__(self)

        r = Dispatcher(client)
        p = portal.Portal(r)

        # BUGFIX: each checker must only be registered when its config option
        # is set; the unconditional registration crashed on a None option and
        # made the NeedRootcapLookupScheme check below unreachable in practice.
        if accountfile:
            c = AccountFileChecker(self, accountfile)
            p.registerChecker(c)
        if accounturl:
            c = AccountURLChecker(self, accounturl)
            p.registerChecker(c)
        if not accountfile and not accounturl:
            # we could leave this anonymous, with just the /uri/CAP form
            raise NeedRootcapLookupScheme("must provide an account file or URL")

        pubkey = keys.Key.fromFile(pubkey_file)
        privkey = keys.Key.fromFile(privkey_file)
        class SSHFactory(factory.SSHFactory):
            publicKeys = {pubkey.sshType(): pubkey}
            privateKeys = {privkey.sshType(): privkey}
            def getPrimes(self):
                # BUGFIX: tolerate hosts without a moduli file instead of
                # raising IOError during key exchange setup.
                try:
                    # if present, this enables diffie-hellman-group-exchange
                    return primes.parseModuliFile("/etc/ssh/moduli")
                except IOError:
                    return None

        # BUGFIX: the factory instance was never created, so the 'f' passed
        # to strports.service below was an undefined name.
        f = SSHFactory()

        s = strports.service(sftp_portstr, f)
        s.setServiceParent(self)