import os
import sys

import mock
from twisted.trial import unittest
from twisted.application import service

import allmydata
from allmydata.node import Node, OldConfigError, OldConfigOptionError, MissingConfigEntry, UnescapedHashError
from allmydata import client
from allmydata.storage_client import StorageFarmBroker
from allmydata.util import base32, fileutil
from allmydata.interfaces import IFilesystemNode, IFileNode, \
     IImmutableFileNode, IMutableFileNode, IDirectoryNode
from foolscap.api import flushEventualQueue
import allmydata.test.common_util as testutil
17 BASECONFIG = ("[client]\n"
18 "introducer.furl = \n"
21 BASECONFIG_I = ("[client]\n"
22 "introducer.furl = %s\n"
25 class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
26 def test_loadable(self):
27 basedir = "test_client.Basic.test_loadable"
29 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
31 client.Client(basedir)
33 def test_comment(self):
34 should_fail = [r"test#test", r"#testtest", r"test\\#test"]
35 should_not_fail = [r"test\#test", r"test\\\#test", r"testtest"]
37 basedir = "test_client.Basic.test_comment"
41 config = ("[client]\n"
42 "introducer.furl = %s\n" % s)
43 fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)
46 self.failUnless(Node._contains_unescaped_hash(s))
48 self.failUnlessRaises(UnescapedHashError, client.Client, basedir)
50 for s in should_not_fail:
51 self.failIf(Node._contains_unescaped_hash(s))
53 client.Client(basedir)
56 @mock.patch('twisted.python.log.msg')
57 def test_error_on_old_config_files(self, mock_log_msg):
58 basedir = "test_client.Basic.test_error_on_old_config_files"
60 fileutil.write(os.path.join(basedir, "tahoe.cfg"),
64 "reserved_space = bogus\n")
65 fileutil.write(os.path.join(basedir, "introducer.furl"), "")
66 fileutil.write(os.path.join(basedir, "no_storage"), "")
67 fileutil.write(os.path.join(basedir, "readonly_storage"), "")
68 fileutil.write(os.path.join(basedir, "debug_discard_storage"), "")
70 e = self.failUnlessRaises(OldConfigError, client.Client, basedir)
71 abs_basedir = fileutil.abspath_expanduser_unicode(unicode(basedir)).encode(sys.getfilesystemencoding())
72 self.failUnlessIn(os.path.join(abs_basedir, "introducer.furl"), e.args[0])
73 self.failUnlessIn(os.path.join(abs_basedir, "no_storage"), e.args[0])
74 self.failUnlessIn(os.path.join(abs_basedir, "readonly_storage"), e.args[0])
75 self.failUnlessIn(os.path.join(abs_basedir, "debug_discard_storage"), e.args[0])
77 for oldfile in ['introducer.furl', 'no_storage', 'readonly_storage',
78 'debug_discard_storage']:
79 logged = [ m for m in mock_log_msg.call_args_list if
80 ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m[0][0]) and oldfile in str(m[0][0])) ]
81 self.failUnless(logged, (oldfile, mock_log_msg.call_args_list))
84 'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl',
85 'disconnect_timeout', 'advertised_ip_addresses', 'helper.furl',
86 'key_generator.furl', 'stats_gatherer.furl', 'sizelimit',
88 logged = [ m for m in mock_log_msg.call_args_list if
89 ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m[0][0]) and oldfile in str(m[0][0])) ]
90 self.failIf(logged, oldfile)
92 def test_secrets(self):
93 basedir = "test_client.Basic.test_secrets"
95 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
97 c = client.Client(basedir)
98 secret_fname = os.path.join(basedir, "private", "secret")
99 self.failUnless(os.path.exists(secret_fname), secret_fname)
100 renew_secret = c.get_renewal_secret()
101 self.failUnless(base32.b2a(renew_secret))
102 cancel_secret = c.get_cancel_secret()
103 self.failUnless(base32.b2a(cancel_secret))
105 def test_nodekey_yes_storage(self):
106 basedir = "test_client.Basic.test_nodekey_yes_storage"
108 fileutil.write(os.path.join(basedir, "tahoe.cfg"),
110 c = client.Client(basedir)
111 self.failUnless(c.get_long_nodeid().startswith("v0-"))
113 def test_nodekey_no_storage(self):
114 basedir = "test_client.Basic.test_nodekey_no_storage"
116 fileutil.write(os.path.join(basedir, "tahoe.cfg"),
117 BASECONFIG + "[storage]\n" + "enabled = false\n")
118 c = client.Client(basedir)
119 self.failUnless(c.get_long_nodeid().startswith("v0-"))
121 def test_reserved_1(self):
122 basedir = "client.Basic.test_reserved_1"
124 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
127 "enabled = true\n" + \
128 "reserved_space = 1000\n")
129 c = client.Client(basedir)
130 self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)
132 def test_reserved_2(self):
133 basedir = "client.Basic.test_reserved_2"
135 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
138 "enabled = true\n" + \
139 "reserved_space = 10K\n")
140 c = client.Client(basedir)
141 self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)
143 def test_reserved_3(self):
144 basedir = "client.Basic.test_reserved_3"
146 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
149 "enabled = true\n" + \
150 "reserved_space = 5mB\n")
151 c = client.Client(basedir)
152 self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
155 def test_reserved_4(self):
156 basedir = "client.Basic.test_reserved_4"
158 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
161 "enabled = true\n" + \
162 "reserved_space = 78Gb\n")
163 c = client.Client(basedir)
164 self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
167 def test_reserved_bad(self):
168 basedir = "client.Basic.test_reserved_bad"
170 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
173 "enabled = true\n" + \
174 "reserved_space = bogus\n")
175 self.failUnlessRaises(ValueError, client.Client, basedir)
177 def _permute(self, sb, key):
178 return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]
180 def test_permute(self):
181 sb = StorageFarmBroker(None, True)
182 for k in ["%d" % i for i in range(5)]:
183 ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake",
184 "permutation-seed-base32": base32.b2a(k) }
185 sb.test_add_rref(k, "rref", ann)
187 self.failUnlessReallyEqual(self._permute(sb, "one"), ['3','1','0','4','2'])
188 self.failUnlessReallyEqual(self._permute(sb, "two"), ['0','4','2','1','3'])
190 self.failUnlessReallyEqual(self._permute(sb, "one"), [])
192 def test_versions(self):
193 basedir = "test_client.Basic.test_versions"
195 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
199 c = client.Client(basedir)
200 ss = c.getServiceNamed("storage")
201 verdict = ss.remote_get_version()
202 self.failUnlessReallyEqual(verdict["application-version"],
203 str(allmydata.__full_version__))
204 self.failIfEqual(str(allmydata.__version__), "unknown")
205 self.failUnless("." in str(allmydata.__full_version__),
206 "non-numeric version in '%s'" % allmydata.__version__)
207 all_versions = allmydata.get_package_versions_string()
208 self.failUnless(allmydata.__appname__ in all_versions)
210 stats = c.get_stats()
211 self.failUnless("node.uptime" in stats)
212 self.failUnless(isinstance(stats["node.uptime"], float))
214 def test_helper_furl(self):
215 basedir = "test_client.Basic.test_helper_furl"
218 def _check(config, expected_furl):
219 fileutil.write(os.path.join(basedir, "tahoe.cfg"),
221 c = client.Client(basedir)
222 uploader = c.getServiceNamed("uploader")
223 furl, connected = uploader.get_helper_info()
224 self.failUnlessEqual(furl, expected_furl)
227 _check("helper.furl =\n", None)
228 _check("helper.furl = \n", None)
229 _check("helper.furl = None", None)
230 _check("helper.furl = pb://blah\n", "pb://blah")
232 @mock.patch('allmydata.util.log.msg')
233 @mock.patch('allmydata.frontends.drop_upload.DropUploader')
234 def test_create_drop_uploader(self, mock_drop_uploader, mock_log_msg):
235 class MockDropUploader(service.MultiService):
238 def __init__(self, client, upload_dircap, local_dir_utf8, inotify=None):
239 service.MultiService.__init__(self)
241 self.upload_dircap = upload_dircap
242 self.local_dir_utf8 = local_dir_utf8
243 self.inotify = inotify
245 mock_drop_uploader.side_effect = MockDropUploader
247 upload_dircap = "URI:DIR2:blah"
248 local_dir_utf8 = u"loc\u0101l_dir".encode('utf-8')
249 config = (BASECONFIG +
251 "enabled = false\n" +
255 basedir1 = "test_client.Basic.test_create_drop_uploader1"
257 fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
258 config + "local.directory = " + local_dir_utf8 + "\n")
259 self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)
261 fileutil.write(os.path.join(basedir1, "tahoe.cfg"), config)
262 fileutil.write(os.path.join(basedir1, "private", "drop_upload_dircap"), "URI:DIR2:blah")
263 self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)
265 fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
266 config + "upload.dircap = " + upload_dircap + "\n")
267 self.failUnlessRaises(OldConfigOptionError, client.Client, basedir1)
269 fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
270 config + "local.directory = " + local_dir_utf8 + "\n")
271 c1 = client.Client(basedir1)
272 uploader = c1.getServiceNamed('drop-upload')
273 self.failUnless(isinstance(uploader, MockDropUploader), uploader)
274 self.failUnlessReallyEqual(uploader.client, c1)
275 self.failUnlessReallyEqual(uploader.upload_dircap, upload_dircap)
276 self.failUnlessReallyEqual(uploader.local_dir_utf8, local_dir_utf8)
277 self.failUnless(uploader.inotify is None, uploader.inotify)
278 self.failUnless(uploader.running)
280 class Boom(Exception):
282 mock_drop_uploader.side_effect = Boom()
284 basedir2 = "test_client.Basic.test_create_drop_uploader2"
286 os.mkdir(os.path.join(basedir2, "private"))
287 fileutil.write(os.path.join(basedir2, "tahoe.cfg"),
291 "local.directory = " + local_dir_utf8 + "\n")
292 fileutil.write(os.path.join(basedir2, "private", "drop_upload_dircap"), "URI:DIR2:blah")
293 c2 = client.Client(basedir2)
294 self.failUnlessRaises(KeyError, c2.getServiceNamed, 'drop-upload')
295 self.failUnless([True for arg in mock_log_msg.call_args_list if "Boom" in repr(arg)],
296 mock_log_msg.call_args_list)
299 def flush_but_dont_ignore(res):
300 d = flushEventualQueue()
306 class Run(unittest.TestCase, testutil.StallMixin):
309 self.sparent = service.MultiService()
310 self.sparent.startService()
312 d = self.sparent.stopService()
313 d.addBoth(flush_but_dont_ignore)
316 def test_loadable(self):
317 basedir = "test_client.Run.test_loadable"
319 dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
320 fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
321 fileutil.write(os.path.join(basedir, client.Client.EXIT_TRIGGER_FILE), "")
322 client.Client(basedir)
324 def test_reloadable(self):
325 basedir = "test_client.Run.test_reloadable"
327 dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
328 fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
329 c1 = client.Client(basedir)
330 c1.setServiceParent(self.sparent)
332 # delay to let the service start up completely. I'm not entirely sure
334 d = self.stall(delay=2.0)
335 d.addCallback(lambda res: c1.disownServiceParent())
336 # the cygwin buildslave seems to need more time to let the old
337 # service completely shut down. When delay=0.1, I saw this test fail,
338 # probably due to the logport trying to reclaim the old socket
339 # number. This suggests that either we're dropping a Deferred
340 # somewhere in the shutdown sequence, or that cygwin is just cranky.
341 d.addCallback(self.stall, delay=2.0)
343 # TODO: pause for slightly over one second, to let
344 # Client._check_exit_trigger poll the file once. That will exercise
345 # another few lines. Then add another test in which we don't
346 # update the file at all, and watch to see the node shutdown.
347 # (To do this, use a modified node which overrides Node.shutdown(),
348 # also change _check_exit_trigger to use it instead of a raw
349 # reactor.stop, also instrument the shutdown event in an
350 # attribute that we can check.)
351 c2 = client.Client(basedir)
352 c2.setServiceParent(self.sparent)
353 return c2.disownServiceParent()
354 d.addCallback(_restart)
357 class NodeMaker(testutil.ReallyEqualMixin, unittest.TestCase):
358 def test_maker(self):
359 basedir = "client/NodeMaker/maker"
360 fileutil.make_dirs(basedir)
361 fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
362 c = client.Client(basedir)
364 n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
365 self.failUnless(IFilesystemNode.providedBy(n))
366 self.failUnless(IFileNode.providedBy(n))
367 self.failUnless(IImmutableFileNode.providedBy(n))
368 self.failIf(IMutableFileNode.providedBy(n))
369 self.failIf(IDirectoryNode.providedBy(n))
370 self.failUnless(n.is_readonly())
371 self.failIf(n.is_mutable())
373 # Testing #1679. There was a bug that would occur when downloader was
374 # downloading the same readcap more than once concurrently, so the
375 # filenode object was cached, and there was a failure from one of the
376 # servers in one of the download attempts. No subsequent download
377 # attempt would attempt to use that server again, which would lead to
378 # the file being undownloadable until the gateway was restarted. The
379 # current fix for this (hopefully to be superceded by a better fix
380 # eventually) is to prevent re-use of filenodes, so the NodeMaker is
381 # hereby required *not* to cache and re-use filenodes for CHKs.
382 other_n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
383 self.failIf(n is other_n, (n, other_n))
385 n = c.create_node_from_uri("URI:LIT:n5xgk")
386 self.failUnless(IFilesystemNode.providedBy(n))
387 self.failUnless(IFileNode.providedBy(n))
388 self.failUnless(IImmutableFileNode.providedBy(n))
389 self.failIf(IMutableFileNode.providedBy(n))
390 self.failIf(IDirectoryNode.providedBy(n))
391 self.failUnless(n.is_readonly())
392 self.failIf(n.is_mutable())
394 n = c.create_node_from_uri("URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
395 self.failUnless(IFilesystemNode.providedBy(n))
396 self.failUnless(IFileNode.providedBy(n))
397 self.failIf(IImmutableFileNode.providedBy(n))
398 self.failUnless(IMutableFileNode.providedBy(n))
399 self.failIf(IDirectoryNode.providedBy(n))
400 self.failIf(n.is_readonly())
401 self.failUnless(n.is_mutable())
403 n = c.create_node_from_uri("URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
404 self.failUnless(IFilesystemNode.providedBy(n))
405 self.failUnless(IFileNode.providedBy(n))
406 self.failIf(IImmutableFileNode.providedBy(n))
407 self.failUnless(IMutableFileNode.providedBy(n))
408 self.failIf(IDirectoryNode.providedBy(n))
409 self.failUnless(n.is_readonly())
410 self.failUnless(n.is_mutable())
412 n = c.create_node_from_uri("URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
413 self.failUnless(IFilesystemNode.providedBy(n))
414 self.failIf(IFileNode.providedBy(n))
415 self.failIf(IImmutableFileNode.providedBy(n))
416 self.failIf(IMutableFileNode.providedBy(n))
417 self.failUnless(IDirectoryNode.providedBy(n))
418 self.failIf(n.is_readonly())
419 self.failUnless(n.is_mutable())
421 n = c.create_node_from_uri("URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
422 self.failUnless(IFilesystemNode.providedBy(n))
423 self.failIf(IFileNode.providedBy(n))
424 self.failIf(IImmutableFileNode.providedBy(n))
425 self.failIf(IMutableFileNode.providedBy(n))
426 self.failUnless(IDirectoryNode.providedBy(n))
427 self.failUnless(n.is_readonly())
428 self.failUnless(n.is_mutable())
430 unknown_rw = "lafs://from_the_future"
431 unknown_ro = "lafs://readonly_from_the_future"
432 n = c.create_node_from_uri(unknown_rw, unknown_ro)
433 self.failUnless(IFilesystemNode.providedBy(n))
434 self.failIf(IFileNode.providedBy(n))
435 self.failIf(IImmutableFileNode.providedBy(n))
436 self.failIf(IMutableFileNode.providedBy(n))
437 self.failIf(IDirectoryNode.providedBy(n))
438 self.failUnless(n.is_unknown())
439 self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
440 self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
441 self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)
443 # Note: it isn't that we *intend* to deploy non-ASCII caps in
444 # the future, it is that we want to make sure older Tahoe-LAFS
445 # versions wouldn't choke on them if we were to do so. See
446 # #1051 and wiki:NewCapDesign for details.
447 unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
448 unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
449 n = c.create_node_from_uri(unknown_rw, unknown_ro)
450 self.failUnless(IFilesystemNode.providedBy(n))
451 self.failIf(IFileNode.providedBy(n))
452 self.failIf(IImmutableFileNode.providedBy(n))
453 self.failIf(IMutableFileNode.providedBy(n))
454 self.failIf(IDirectoryNode.providedBy(n))
455 self.failUnless(n.is_unknown())
456 self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
457 self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
458 self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)