2 from twisted.trial import unittest
3 from twisted.application import service
6 from allmydata.node import Node, OldConfigError, OldConfigOptionError, MissingConfigEntry, UnescapedHashError
7 from allmydata import client
8 from allmydata.storage_client import StorageFarmBroker
9 from allmydata.util import base32, fileutil
10 from allmydata.interfaces import IFilesystemNode, IFileNode, \
11 IImmutableFileNode, IMutableFileNode, IDirectoryNode
12 from foolscap.api import flushEventualQueue
13 import allmydata.test.common_util as testutil
17 BASECONFIG = ("[client]\n"
18 "introducer.furl = \n"
21 BASECONFIG_I = ("[client]\n"
22 "introducer.furl = %s\n"
25 class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
26 def test_loadable(self):
27 basedir = "test_client.Basic.test_loadable"
29 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
31 client.Client(basedir)
33 def test_comment(self):
34 dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
36 should_fail = [r"test#test", r"#testtest", r"test\\#test"]
37 should_not_fail = [r"test\#test", r"test\\\#test", r"testtest"]
39 basedir = "test_client.Basic.test_comment"
43 config = ("[client]\n"
44 "introducer.furl = %s\n" % s)
45 fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)
48 self.failUnless(Node._contains_unescaped_hash(s))
50 self.failUnlessRaises(UnescapedHashError, client.Client, basedir)
52 for s in should_not_fail:
53 self.failIf(Node._contains_unescaped_hash(s))
55 client.Client(basedir)
58 @mock.patch('twisted.python.log.msg')
59 def test_error_on_old_config_files(self, mock_log_msg):
60 basedir = "test_client.Basic.test_error_on_old_config_files"
62 fileutil.write(os.path.join(basedir, "tahoe.cfg"),
66 "reserved_space = bogus\n")
67 fileutil.write(os.path.join(basedir, "introducer.furl"), "")
68 fileutil.write(os.path.join(basedir, "no_storage"), "")
69 fileutil.write(os.path.join(basedir, "readonly_storage"), "")
70 fileutil.write(os.path.join(basedir, "debug_discard_storage"), "")
72 e = self.failUnlessRaises(OldConfigError, client.Client, basedir)
73 self.failUnlessIn(os.path.abspath(os.path.join(basedir, "introducer.furl")), e.args[0])
74 self.failUnlessIn(os.path.abspath(os.path.join(basedir, "no_storage")), e.args[0])
75 self.failUnlessIn(os.path.abspath(os.path.join(basedir, "readonly_storage")), e.args[0])
76 self.failUnlessIn(os.path.abspath(os.path.join(basedir, "debug_discard_storage")), e.args[0])
78 for oldfile in ['introducer.furl', 'no_storage', 'readonly_storage',
79 'debug_discard_storage']:
80 logged = [ m for m in mock_log_msg.call_args_list if
81 ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m[0][0]) and oldfile in str(m[0][0])) ]
82 self.failUnless(logged, (oldfile, mock_log_msg.call_args_list))
85 'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl',
86 'disconnect_timeout', 'advertised_ip_addresses', 'helper.furl',
87 'key_generator.furl', 'stats_gatherer.furl', 'sizelimit',
89 logged = [ m for m in mock_log_msg.call_args_list if
90 ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m[0][0]) and oldfile in str(m[0][0])) ]
91 self.failIf(logged, oldfile)
93 def test_secrets(self):
94 basedir = "test_client.Basic.test_secrets"
96 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
98 c = client.Client(basedir)
99 secret_fname = os.path.join(basedir, "private", "secret")
100 self.failUnless(os.path.exists(secret_fname), secret_fname)
101 renew_secret = c.get_renewal_secret()
102 self.failUnless(base32.b2a(renew_secret))
103 cancel_secret = c.get_cancel_secret()
104 self.failUnless(base32.b2a(cancel_secret))
106 def test_nodekey_yes_storage(self):
107 basedir = "test_client.Basic.test_nodekey_yes_storage"
109 fileutil.write(os.path.join(basedir, "tahoe.cfg"),
111 c = client.Client(basedir)
112 self.failUnless(c.get_long_nodeid().startswith("v0-"))
114 def test_nodekey_no_storage(self):
115 basedir = "test_client.Basic.test_nodekey_no_storage"
117 fileutil.write(os.path.join(basedir, "tahoe.cfg"),
118 BASECONFIG + "[storage]\n" + "enabled = false\n")
119 c = client.Client(basedir)
120 self.failUnless(c.get_long_nodeid().startswith("v0-"))
122 def test_reserved_1(self):
123 basedir = "client.Basic.test_reserved_1"
125 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
128 "enabled = true\n" + \
129 "reserved_space = 1000\n")
130 c = client.Client(basedir)
131 self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)
133 def test_reserved_2(self):
134 basedir = "client.Basic.test_reserved_2"
136 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
139 "enabled = true\n" + \
140 "reserved_space = 10K\n")
141 c = client.Client(basedir)
142 self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)
144 def test_reserved_3(self):
145 basedir = "client.Basic.test_reserved_3"
147 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
150 "enabled = true\n" + \
151 "reserved_space = 5mB\n")
152 c = client.Client(basedir)
153 self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
156 def test_reserved_4(self):
157 basedir = "client.Basic.test_reserved_4"
159 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
162 "enabled = true\n" + \
163 "reserved_space = 78Gb\n")
164 c = client.Client(basedir)
165 self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
168 def test_reserved_bad(self):
169 basedir = "client.Basic.test_reserved_bad"
171 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
174 "enabled = true\n" + \
175 "reserved_space = bogus\n")
176 self.failUnlessRaises(ValueError, client.Client, basedir)
178 def _permute(self, sb, key):
179 return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]
181 def test_permute(self):
182 sb = StorageFarmBroker(None, True)
183 for k in ["%d" % i for i in range(5)]:
184 ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake",
185 "permutation-seed-base32": base32.b2a(k) }
186 sb.test_add_rref(k, "rref", ann)
188 self.failUnlessReallyEqual(self._permute(sb, "one"), ['3','1','0','4','2'])
189 self.failUnlessReallyEqual(self._permute(sb, "two"), ['0','4','2','1','3'])
191 self.failUnlessReallyEqual(self._permute(sb, "one"), [])
193 def test_versions(self):
194 basedir = "test_client.Basic.test_versions"
196 fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
200 c = client.Client(basedir)
201 ss = c.getServiceNamed("storage")
202 verdict = ss.remote_get_version()
203 self.failUnlessReallyEqual(verdict["application-version"],
204 str(allmydata.__full_version__))
205 self.failIfEqual(str(allmydata.__version__), "unknown")
206 self.failUnless("." in str(allmydata.__full_version__),
207 "non-numeric version in '%s'" % allmydata.__version__)
208 all_versions = allmydata.get_package_versions_string()
209 self.failUnless(allmydata.__appname__ in all_versions)
211 stats = c.get_stats()
212 self.failUnless("node.uptime" in stats)
213 self.failUnless(isinstance(stats["node.uptime"], float))
215 def test_helper_furl(self):
216 basedir = "test_client.Basic.test_helper_furl"
219 def _check(config, expected_furl):
220 fileutil.write(os.path.join(basedir, "tahoe.cfg"),
222 c = client.Client(basedir)
223 uploader = c.getServiceNamed("uploader")
224 furl, connected = uploader.get_helper_info()
225 self.failUnlessEqual(furl, expected_furl)
228 _check("helper.furl =\n", None)
229 _check("helper.furl = \n", None)
230 _check("helper.furl = None", None)
231 _check("helper.furl = pb://blah\n", "pb://blah")
233 @mock.patch('allmydata.util.log.msg')
234 @mock.patch('allmydata.frontends.drop_upload.DropUploader')
235 def test_create_drop_uploader(self, mock_drop_uploader, mock_log_msg):
236 class MockDropUploader(service.MultiService):
239 def __init__(self, client, upload_dircap, local_dir_utf8, inotify=None):
240 service.MultiService.__init__(self)
242 self.upload_dircap = upload_dircap
243 self.local_dir_utf8 = local_dir_utf8
244 self.inotify = inotify
246 mock_drop_uploader.side_effect = MockDropUploader
248 upload_dircap = "URI:DIR2:blah"
249 local_dir_utf8 = u"loc\u0101l_dir".encode('utf-8')
250 config = (BASECONFIG +
252 "enabled = false\n" +
256 basedir1 = "test_client.Basic.test_create_drop_uploader1"
258 fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
259 config + "local.directory = " + local_dir_utf8 + "\n")
260 self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)
262 fileutil.write(os.path.join(basedir1, "tahoe.cfg"), config)
263 fileutil.write(os.path.join(basedir1, "private", "drop_upload_dircap"), "URI:DIR2:blah")
264 self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)
266 fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
267 config + "upload.dircap = " + upload_dircap + "\n")
268 self.failUnlessRaises(OldConfigOptionError, client.Client, basedir1)
270 fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
271 config + "local.directory = " + local_dir_utf8 + "\n")
272 c1 = client.Client(basedir1)
273 uploader = c1.getServiceNamed('drop-upload')
274 self.failUnless(isinstance(uploader, MockDropUploader), uploader)
275 self.failUnlessReallyEqual(uploader.client, c1)
276 self.failUnlessReallyEqual(uploader.upload_dircap, upload_dircap)
277 self.failUnlessReallyEqual(uploader.local_dir_utf8, local_dir_utf8)
278 self.failUnless(uploader.inotify is None, uploader.inotify)
279 self.failUnless(uploader.running)
281 class Boom(Exception):
283 mock_drop_uploader.side_effect = Boom()
285 basedir2 = "test_client.Basic.test_create_drop_uploader2"
287 os.mkdir(os.path.join(basedir2, "private"))
288 fileutil.write(os.path.join(basedir2, "tahoe.cfg"),
292 "local.directory = " + local_dir_utf8 + "\n")
293 fileutil.write(os.path.join(basedir2, "private", "drop_upload_dircap"), "URI:DIR2:blah")
294 c2 = client.Client(basedir2)
295 self.failUnlessRaises(KeyError, c2.getServiceNamed, 'drop-upload')
296 self.failUnless([True for arg in mock_log_msg.call_args_list if "Boom" in repr(arg)],
297 mock_log_msg.call_args_list)
300 def flush_but_dont_ignore(res):
301 d = flushEventualQueue()
307 class Run(unittest.TestCase, testutil.StallMixin):
310 self.sparent = service.MultiService()
311 self.sparent.startService()
313 d = self.sparent.stopService()
314 d.addBoth(flush_but_dont_ignore)
317 def test_loadable(self):
318 basedir = "test_client.Run.test_loadable"
320 dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
321 fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
322 fileutil.write(os.path.join(basedir, "suicide_prevention_hotline"), "")
323 client.Client(basedir)
325 def test_reloadable(self):
326 basedir = "test_client.Run.test_reloadable"
328 dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
329 fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
330 c1 = client.Client(basedir)
331 c1.setServiceParent(self.sparent)
333 # delay to let the service start up completely. I'm not entirely sure
335 d = self.stall(delay=2.0)
336 d.addCallback(lambda res: c1.disownServiceParent())
337 # the cygwin buildslave seems to need more time to let the old
338 # service completely shut down. When delay=0.1, I saw this test fail,
339 # probably due to the logport trying to reclaim the old socket
340 # number. This suggests that either we're dropping a Deferred
341 # somewhere in the shutdown sequence, or that cygwin is just cranky.
342 d.addCallback(self.stall, delay=2.0)
344 # TODO: pause for slightly over one second, to let
345 # Client._check_hotline poll the file once. That will exercise
346 # another few lines. Then add another test in which we don't
347 # update the file at all, and watch to see the node shutdown. (to
348 # do this, use a modified node which overrides Node.shutdown(),
349 # also change _check_hotline to use it instead of a raw
350 # reactor.stop, also instrument the shutdown event in an
351 # attribute that we can check)
352 c2 = client.Client(basedir)
353 c2.setServiceParent(self.sparent)
354 return c2.disownServiceParent()
355 d.addCallback(_restart)
358 class NodeMaker(testutil.ReallyEqualMixin, unittest.TestCase):
359 def test_maker(self):
360 basedir = "client/NodeMaker/maker"
361 fileutil.make_dirs(basedir)
362 fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
363 c = client.Client(basedir)
365 n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
366 self.failUnless(IFilesystemNode.providedBy(n))
367 self.failUnless(IFileNode.providedBy(n))
368 self.failUnless(IImmutableFileNode.providedBy(n))
369 self.failIf(IMutableFileNode.providedBy(n))
370 self.failIf(IDirectoryNode.providedBy(n))
371 self.failUnless(n.is_readonly())
372 self.failIf(n.is_mutable())
374 # Testing #1679. There was a bug that would occur when downloader was
375 # downloading the same readcap more than once concurrently, so the
376 # filenode object was cached, and there was a failure from one of the
377 # servers in one of the download attempts. No subsequent download
378 # attempt would attempt to use that server again, which would lead to
379 # the file being undownloadable until the gateway was restarted. The
380 # current fix for this (hopefully to be superceded by a better fix
381 # eventually) is to prevent re-use of filenodes, so the NodeMaker is
382 # hereby required *not* to cache and re-use filenodes for CHKs.
383 other_n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
384 self.failIf(n is other_n, (n, other_n))
386 n = c.create_node_from_uri("URI:LIT:n5xgk")
387 self.failUnless(IFilesystemNode.providedBy(n))
388 self.failUnless(IFileNode.providedBy(n))
389 self.failUnless(IImmutableFileNode.providedBy(n))
390 self.failIf(IMutableFileNode.providedBy(n))
391 self.failIf(IDirectoryNode.providedBy(n))
392 self.failUnless(n.is_readonly())
393 self.failIf(n.is_mutable())
395 n = c.create_node_from_uri("URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
396 self.failUnless(IFilesystemNode.providedBy(n))
397 self.failUnless(IFileNode.providedBy(n))
398 self.failIf(IImmutableFileNode.providedBy(n))
399 self.failUnless(IMutableFileNode.providedBy(n))
400 self.failIf(IDirectoryNode.providedBy(n))
401 self.failIf(n.is_readonly())
402 self.failUnless(n.is_mutable())
404 n = c.create_node_from_uri("URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
405 self.failUnless(IFilesystemNode.providedBy(n))
406 self.failUnless(IFileNode.providedBy(n))
407 self.failIf(IImmutableFileNode.providedBy(n))
408 self.failUnless(IMutableFileNode.providedBy(n))
409 self.failIf(IDirectoryNode.providedBy(n))
410 self.failUnless(n.is_readonly())
411 self.failUnless(n.is_mutable())
413 n = c.create_node_from_uri("URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
414 self.failUnless(IFilesystemNode.providedBy(n))
415 self.failIf(IFileNode.providedBy(n))
416 self.failIf(IImmutableFileNode.providedBy(n))
417 self.failIf(IMutableFileNode.providedBy(n))
418 self.failUnless(IDirectoryNode.providedBy(n))
419 self.failIf(n.is_readonly())
420 self.failUnless(n.is_mutable())
422 n = c.create_node_from_uri("URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
423 self.failUnless(IFilesystemNode.providedBy(n))
424 self.failIf(IFileNode.providedBy(n))
425 self.failIf(IImmutableFileNode.providedBy(n))
426 self.failIf(IMutableFileNode.providedBy(n))
427 self.failUnless(IDirectoryNode.providedBy(n))
428 self.failUnless(n.is_readonly())
429 self.failUnless(n.is_mutable())
431 unknown_rw = "lafs://from_the_future"
432 unknown_ro = "lafs://readonly_from_the_future"
433 n = c.create_node_from_uri(unknown_rw, unknown_ro)
434 self.failUnless(IFilesystemNode.providedBy(n))
435 self.failIf(IFileNode.providedBy(n))
436 self.failIf(IImmutableFileNode.providedBy(n))
437 self.failIf(IMutableFileNode.providedBy(n))
438 self.failIf(IDirectoryNode.providedBy(n))
439 self.failUnless(n.is_unknown())
440 self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
441 self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
442 self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)
444 # Note: it isn't that we *intend* to deploy non-ASCII caps in
445 # the future, it is that we want to make sure older Tahoe-LAFS
446 # versions wouldn't choke on them if we were to do so. See
447 # #1051 and wiki:NewCapDesign for details.
448 unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
449 unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
450 n = c.create_node_from_uri(unknown_rw, unknown_ro)
451 self.failUnless(IFilesystemNode.providedBy(n))
452 self.failIf(IFileNode.providedBy(n))
453 self.failIf(IImmutableFileNode.providedBy(n))
454 self.failIf(IMutableFileNode.providedBy(n))
455 self.failIf(IDirectoryNode.providedBy(n))
456 self.failUnless(n.is_unknown())
457 self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
458 self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
459 self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)