from allmydata.util.consumer import download_to_data
from allmydata.immutable import upload
+from allmydata.mutable import publish
from allmydata.test.no_network import GridTestMixin
from allmydata.test.common import ShouldFailMixin
from allmydata.test.common_util import ReallyEqualMixin
timeout = 240
+#defer.setDebugging(True)
+#from twisted.internet import base
+#base.DelayedCall.debug = True
+
class Handler(GridTestMixin, ShouldFailMixin, ReallyEqualMixin, unittest.TestCase):
"""This is a no-network unit test of the SFTPUserHandler and the abstractions it uses."""
return d
def _set_up_tree(self):
- d = self.client.create_mutable_file("mutable file contents")
+ u = publish.MutableData("mutable file contents")
+ d = self.client.create_mutable_file(u)
d.addCallback(lambda node: self.root.set_node(u"mutable", node))
def _created_mutable(n):
self.mutable = n
return d2
d.addCallback(_read_short)
+ # check that failed downloads cause failed reads
+ d.addCallback(lambda ign: self.handler.openFile("uri/"+self.gross_uri, sftp.FXF_READ, {}))
+ def _read_broken(rf):
+ d2 = defer.succeed(None)
+ d2.addCallback(lambda ign: self.g.nuke_from_orbit())
+ d2.addCallback(lambda ign:
+ self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read broken",
+ rf.readChunk, 0, 100))
+ # close shouldn't fail
+ d2.addCallback(lambda ign: rf.close())
+ d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None))
+ return d2
+ d.addCallback(_read_broken)
+
+ d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
+ d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
+ return d
+
+    def test_openFile_read_error(self):
+        # Check that a failed download causes failed reads on a handle opened
+        # read-only, while close() on that same handle still succeeds.
+        # The check at the end of openFile_read tested this for large files, but it trashed
+        # the grid in the process, so this needs to be a separate test.
+        small = upload.Data("0123456789"*10, None)
+        d = self._set_up("openFile_read_error")
+        d.addCallback(lambda ign: self.root.add_file(u"small", small))
+        d.addCallback(lambda n: self.handler.openFile("/uri/"+n.get_uri(), sftp.FXF_READ, {}))
+        def _read_broken(rf):
+            d2 = defer.succeed(None)
+            # breaking the grid here should make the subsequent download,
+            # and therefore the readChunk, fail with FX_FAILURE
+            d2.addCallback(lambda ign: self.g.nuke_from_orbit())
+            d2.addCallback(lambda ign:
+                           self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read broken",
+                                                        rf.readChunk, 0, 100))
+            # close shouldn't fail
+            d2.addCallback(lambda ign: rf.close())
+            d2.addCallback(lambda res: self.failUnlessReallyEqual(res, None))
+            return d2
+        d.addCallback(_read_broken)
+
+        # check that no heisenfiles are left registered once the handle is closed
         d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
         d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
         return d
self.handler.openFile("small", sftp.FXF_READ | sftp.FXF_WRITE, {}))
def _read_write(rwf):
d2 = rwf.writeChunk(8, "0123")
+ # test immediate read starting after the old end-of-file
+ d2.addCallback(lambda ign: rwf.readChunk(11, 1))
+ d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "3"))
d2.addCallback(lambda ign: rwf.readChunk(0, 100))
d2.addCallback(lambda data: self.failUnlessReallyEqual(data, "012345670123"))
d2.addCallback(lambda ign: rwf.close())
self.shouldFail(NoSuchChildError, "rename new while open", "new",
self.root.get, u"new"))
+ # check that failed downloads cause failed reads and failed close, when open for writing
+ gross = u"gro\u00DF".encode("utf-8")
+ d.addCallback(lambda ign: self.handler.openFile(gross, sftp.FXF_READ | sftp.FXF_WRITE, {}))
+ def _read_write_broken(rwf):
+ d2 = rwf.writeChunk(0, "abcdefghij")
+ d2.addCallback(lambda ign: self.g.nuke_from_orbit())
+
+ # reading should fail (reliably if we read past the written chunk)
+ d2.addCallback(lambda ign:
+ self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read/write broken",
+ rwf.readChunk, 0, 100))
+ # close should fail in this case
+ d2.addCallback(lambda ign:
+ self.shouldFailWithSFTPError(sftp.FX_FAILURE, "read/write broken close",
+ rwf.close))
+ return d2
+ d.addCallback(_read_write_broken)
+
d.addCallback(lambda ign: self.failUnlessEqual(sftpd.all_heisenfiles, {}))
d.addCallback(lambda ign: self.failUnlessEqual(self.handler._heisenfiles, {}))
return d