cancel_secret = c.get_cancel_secret()
self.failUnless(base32.b2a(cancel_secret))
+ def test_nodekey_yes_storage(self):
+ # With the default BASECONFIG (storage enabled), the client must
+ # still create a node key: get_long_nodeid() returns a pubkey
+ # string carrying the "v0-" version prefix.
+ basedir = "test_client.Basic.test_nodekey_yes_storage"
+ os.mkdir(basedir)
+ fileutil.write(os.path.join(basedir, "tahoe.cfg"),
+ BASECONFIG)
+ c = client.Client(basedir)
+ self.failUnless(c.get_long_nodeid().startswith("v0-"))
+
+ def test_nodekey_no_storage(self):
+ # Same as test_nodekey_yes_storage but with [storage] disabled:
+ # node-key creation must not depend on the storage service, so
+ # get_long_nodeid() still yields a "v0-"-prefixed id.
+ basedir = "test_client.Basic.test_nodekey_no_storage"
+ os.mkdir(basedir)
+ fileutil.write(os.path.join(basedir, "tahoe.cfg"),
+ BASECONFIG + "[storage]\n" + "enabled = false\n")
+ c = client.Client(basedir)
+ self.failUnless(c.get_long_nodeid().startswith("v0-"))
+
def test_reserved_1(self):
basedir = "client.Basic.test_reserved_1"
os.mkdir(basedir)
"[storage]\n" + \
"enabled = true\n" + \
"reserved_space = bogus\n")
- c = client.Client(basedir)
- self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 0)
+ self.failUnlessRaises(ValueError, client.Client, basedir)
def _permute(self, sb, key):
return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]
self.failUnless("node.uptime" in stats)
self.failUnless(isinstance(stats["node.uptime"], float))
+ def test_helper_furl(self):
+ # Exercise parsing of the [client] helper.furl setting: absent,
+ # empty, whitespace-only, and the literal string "None" must all
+ # leave the uploader with no helper (furl is None), while a real
+ # furl string is passed through to the uploader unchanged.
+ basedir = "test_client.Basic.test_helper_furl"
+ os.mkdir(basedir)
+
+ def _check(config, expected_furl):
+ # Rewrite tahoe.cfg with BASECONFIG plus the given snippet,
+ # build a fresh Client, and compare the uploader's helper furl.
+ fileutil.write(os.path.join(basedir, "tahoe.cfg"),
+ BASECONFIG + config)
+ c = client.Client(basedir)
+ uploader = c.getServiceNamed("uploader")
+ furl, connected = uploader.get_helper_info()
+ self.failUnlessEqual(furl, expected_furl)
+
+ _check("", None)
+ _check("helper.furl =\n", None)
+ _check("helper.furl = \n", None)
+ _check("helper.furl = None", None)
+ _check("helper.furl = pb://blah\n", "pb://blah")
+
@mock.patch('allmydata.util.log.msg')
@mock.patch('allmydata.frontends.drop_upload.DropUploader')
def test_create_drop_uploader(self, mock_drop_uploader, mock_log_msg):
self.failUnless(n.is_readonly())
self.failIf(n.is_mutable())
+ # Testing #1679. There was a bug that would occur when downloader was
+ # downloading the same readcap more than once concurrently, so the
+ # filenode object was cached, and there was a failure from one of the
+ # servers in one of the download attempts. No subsequent download
+ # attempt would attempt to use that server again, which would lead to
+ # the file being undownloadable until the gateway was restarted. The
+ # current fix for this (hopefully to be superseded by a better fix
+ # eventually) is to prevent re-use of filenodes, so the NodeMaker is
+ # hereby required *not* to cache and re-use filenodes for CHKs.
+ other_n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
+ self.failIf(n is other_n, (n, other_n))
+
n = c.create_node_from_uri("URI:LIT:n5xgk")
self.failUnless(IFilesystemNode.providedBy(n))
self.failUnless(IFileNode.providedBy(n))