# tahoe-lafs/tahoe-lafs.git -- src/allmydata/test/test_client.py
# (branch commit: "new feature: preferred storage servers")
import os, sys
import twisted
from twisted.trial import unittest
from twisted.application import service

import allmydata
import allmydata.frontends.drop_upload
import allmydata.util.log

from allmydata.node import Node, OldConfigError, OldConfigOptionError, MissingConfigEntry, UnescapedHashError
from allmydata.frontends.auth import NeedRootcapLookupScheme
from allmydata import client
from allmydata.storage_client import StorageFarmBroker
from allmydata.manhole import AuthorizedKeysManhole
from allmydata.util import base32, fileutil
from allmydata.interfaces import IFilesystemNode, IFileNode, \
     IImmutableFileNode, IMutableFileNode, IDirectoryNode
from foolscap.api import flushEventualQueue
import allmydata.test.common_util as testutil


BASECONFIG = ("[client]\n"
              "introducer.furl = \n"
              )

BASECONFIG_I = ("[client]\n"
              "introducer.furl = %s\n"
              )

class Basic(testutil.ReallyEqualMixin, unittest.TestCase):
    def test_loadable(self):
        basedir = "test_client.Basic.test_loadable"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                           BASECONFIG)
        client.Client(basedir)

    def test_comment(self):
        should_fail = [r"test#test", r"#testtest", r"test\\#test"]
        should_not_fail = [r"test\#test", r"test\\\#test", r"testtest"]

        basedir = "test_client.Basic.test_comment"
        os.mkdir(basedir)

        def write_config(s):
            config = ("[client]\n"
                      "introducer.furl = %s\n" % s)
            fileutil.write(os.path.join(basedir, "tahoe.cfg"), config)

        for s in should_fail:
            self.failUnless(Node._contains_unescaped_hash(s))
            write_config(s)
            self.failUnlessRaises(UnescapedHashError, client.Client, basedir)

        for s in should_not_fail:
            self.failIf(Node._contains_unescaped_hash(s))
            write_config(s)
            client.Client(basedir)


    def test_error_on_old_config_files(self):
        basedir = "test_client.Basic.test_error_on_old_config_files"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG +
                       "[storage]\n" +
                       "enabled = false\n" +
                       "reserved_space = bogus\n")
        fileutil.write(os.path.join(basedir, "introducer.furl"), "")
        fileutil.write(os.path.join(basedir, "no_storage"), "")
        fileutil.write(os.path.join(basedir, "readonly_storage"), "")
        fileutil.write(os.path.join(basedir, "debug_discard_storage"), "")

        logged_messages = []
        self.patch(twisted.python.log, 'msg', logged_messages.append)

        e = self.failUnlessRaises(OldConfigError, client.Client, basedir)
        abs_basedir = fileutil.abspath_expanduser_unicode(unicode(basedir)).encode(sys.getfilesystemencoding())
        self.failUnlessIn(os.path.join(abs_basedir, "introducer.furl"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "no_storage"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "readonly_storage"), e.args[0])
        self.failUnlessIn(os.path.join(abs_basedir, "debug_discard_storage"), e.args[0])

        for oldfile in ['introducer.furl', 'no_storage', 'readonly_storage',
                        'debug_discard_storage']:
            logged = [ m for m in logged_messages if
                       ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ]
            self.failUnless(logged, (oldfile, logged_messages))

        for oldfile in [
            'nickname', 'webport', 'keepalive_timeout', 'log_gatherer.furl',
            'disconnect_timeout', 'advertised_ip_addresses', 'helper.furl',
            'key_generator.furl', 'stats_gatherer.furl', 'sizelimit',
            'run_helper']:
            logged = [ m for m in logged_messages if
                       ("Found pre-Tahoe-LAFS-v1.3 configuration file" in str(m) and oldfile in str(m)) ]
            self.failIf(logged, (oldfile, logged_messages))

    def test_secrets(self):
        basedir = "test_client.Basic.test_secrets"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                           BASECONFIG)
        c = client.Client(basedir)
        secret_fname = os.path.join(basedir, "private", "secret")
        self.failUnless(os.path.exists(secret_fname), secret_fname)
        renew_secret = c.get_renewal_secret()
        self.failUnless(base32.b2a(renew_secret))
        cancel_secret = c.get_cancel_secret()
        self.failUnless(base32.b2a(cancel_secret))

    def test_nodekey_yes_storage(self):
        basedir = "test_client.Basic.test_nodekey_yes_storage"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG)
        c = client.Client(basedir)
        self.failUnless(c.get_long_nodeid().startswith("v0-"))

    def test_nodekey_no_storage(self):
        basedir = "test_client.Basic.test_nodekey_no_storage"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG + "[storage]\n" + "enabled = false\n")
        c = client.Client(basedir)
        self.failUnless(c.get_long_nodeid().startswith("v0-"))

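    # The reserved_space tests below check that the [storage] reserved_space
    # setting accepts both plain byte counts and suffixed values, with
    # K/MB/Gb-style suffixes treated case-insensitively as powers of 1000,
    # and that an unparseable value is rejected when the client starts up.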
    def test_reserved_1(self):
        basedir = "client.Basic.test_reserved_1"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                           BASECONFIG + \
                           "[storage]\n" + \
                           "enabled = true\n" + \
                           "reserved_space = 1000\n")
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 1000)

    def test_reserved_2(self):
        basedir = "client.Basic.test_reserved_2"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),  \
                           BASECONFIG + \
                           "[storage]\n" + \
                           "enabled = true\n" + \
                           "reserved_space = 10K\n")
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space, 10*1000)

    def test_reserved_3(self):
        basedir = "client.Basic.test_reserved_3"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                           BASECONFIG + \
                           "[storage]\n" + \
                           "enabled = true\n" + \
                           "reserved_space = 5mB\n")
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                             5*1000*1000)

    def test_reserved_4(self):
        basedir = "client.Basic.test_reserved_4"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                           BASECONFIG + \
                           "[storage]\n" + \
                           "enabled = true\n" + \
                           "reserved_space = 78Gb\n")
        c = client.Client(basedir)
        self.failUnlessEqual(c.getServiceNamed("storage").reserved_space,
                             78*1000*1000*1000)

    def test_reserved_bad(self):
        basedir = "client.Basic.test_reserved_bad"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                           BASECONFIG + \
                           "[storage]\n" + \
                           "enabled = true\n" + \
                           "reserved_space = bogus\n")
        self.failUnlessRaises(ValueError, client.Client, basedir)

    def test_web_staticdir(self):
        basedir = u"client.Basic.test_web_staticdir"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG +
                       "[node]\n" +
                       "web.port = tcp:0:interface=127.0.0.1\n" +
                       "web.static = relative\n")
        c = client.Client(basedir)
        w = c.getServiceNamed("webish")
        abs_basedir = fileutil.abspath_expanduser_unicode(basedir)
        expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir)
        self.failUnlessReallyEqual(w.staticdir, expected)

    def test_manhole_keyfile(self):
        basedir = u"client.Basic.test_manhole_keyfile"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       BASECONFIG +
                       "[node]\n" +
                       "ssh.port = tcp:0:interface=127.0.0.1\n" +
                       "ssh.authorized_keys_file = relative\n")
        c = client.Client(basedir)
        m = [s for s in c if isinstance(s, AuthorizedKeysManhole)][0]
        abs_basedir = fileutil.abspath_expanduser_unicode(basedir)
        expected = fileutil.abspath_expanduser_unicode(u"relative", abs_basedir)
        self.failUnlessReallyEqual(m.keyfile, expected)

    # TODO: also test config options for SFTP.
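    # A hedged sketch of what that TODO might look like, modelled on
    # test_ftp_auth_keyfile below. The [sftpd] option names used here (port,
    # host_pubkey_file, host_privkey_file, accounts.file) are assumptions
    # borrowed from the FTP-and-SFTP frontend docs and may not match this
    # branch; the method is deliberately not named test_* so trial does not
    # collect it.
    def _sftp_auth_keyfile_sketch(self):
        basedir = u"client.Basic.test_sftp_auth_keyfile"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       (BASECONFIG +
                        "[sftpd]\n"
                        "enabled = true\n"
                        "port = tcp:0:interface=127.0.0.1\n"
                        "host_pubkey_file = private/ssh_host_rsa_key.pub\n"
                        "host_privkey_file = private/ssh_host_rsa_key\n"
                        "accounts.file = private/accounts\n"))
        os.mkdir(os.path.join(basedir, "private"))
        fileutil.write(os.path.join(basedir, "private", "accounts"), "\n")
        # Instantiating the client here (client.Client(basedir)) would also
        # require real SSH host key files at the paths given above.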

    def test_ftp_auth_keyfile(self):
        basedir = u"client.Basic.test_ftp_auth_keyfile"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       (BASECONFIG +
                        "[ftpd]\n"
                        "enabled = true\n"
                        "port = tcp:0:interface=127.0.0.1\n"
                        "accounts.file = private/accounts\n"))
        os.mkdir(os.path.join(basedir, "private"))
        fileutil.write(os.path.join(basedir, "private", "accounts"), "\n")
        c = client.Client(basedir) # just make sure it can be instantiated
        del c

    def test_ftp_auth_url(self):
        basedir = u"client.Basic.test_ftp_auth_url"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       (BASECONFIG +
                        "[ftpd]\n"
                        "enabled = true\n"
                        "port = tcp:0:interface=127.0.0.1\n"
                        "accounts.url = http://0.0.0.0/\n"))
        c = client.Client(basedir) # just make sure it can be instantiated
        del c

    def test_ftp_auth_no_accountfile_or_url(self):
        basedir = u"client.Basic.test_ftp_auth_no_accountfile_or_url"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                       (BASECONFIG +
                        "[ftpd]\n"
                        "enabled = true\n"
                        "port = tcp:0:interface=127.0.0.1\n"))
        self.failUnlessRaises(NeedRootcapLookupScheme, client.Client, basedir)

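    # _permute asks the StorageFarmBroker for its servers permuted by the
    # given peer-selection index; each server's position is derived from the
    # permutation-seed-base32 value in its announcement, so the orderings
    # asserted below are fixed for these seeds.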
    def _permute(self, sb, key):
        return [ s.get_longname() for s in sb.get_servers_for_psi(key) ]

    def test_permute(self):
        sb = StorageFarmBroker(None, True)
        for k in ["%d" % i for i in range(5)]:
            ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake",
                   "permutation-seed-base32": base32.b2a(k) }
            sb.test_add_rref(k, "rref", ann)

        self.failUnlessReallyEqual(self._permute(sb, "one"), ['3','1','0','4','2'])
        self.failUnlessReallyEqual(self._permute(sb, "two"), ['0','4','2','1','3'])
        sb.servers.clear()
        self.failUnlessReallyEqual(self._permute(sb, "one"), [])

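    # test_permute_with_preferred exercises this branch's new feature: server
    # IDs passed as the StorageFarmBroker's third argument ('1' and '4' here)
    # are moved to the front of the permuted list, and the remaining servers
    # follow in their usual permuted order.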
    def test_permute_with_preferred(self):
        sb = StorageFarmBroker(None, True, ['1','4'])
        for k in ["%d" % i for i in range(5)]:
            ann = {"anonymous-storage-FURL": "pb://abcde@nowhere/fake",
                   "permutation-seed-base32": base32.b2a(k) }
            sb.test_add_rref(k, "rref", ann)

        self.failUnlessReallyEqual(self._permute(sb, "one"), ['1','4','3','0','2'])
        self.failUnlessReallyEqual(self._permute(sb, "two"), ['4','1','0','2','3'])
        sb.servers.clear()
        self.failUnlessReallyEqual(self._permute(sb, "one"), [])

    def test_versions(self):
        basedir = "test_client.Basic.test_versions"
        os.mkdir(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), \
                           BASECONFIG + \
                           "[storage]\n" + \
                           "enabled = true\n")
        c = client.Client(basedir)
        ss = c.getServiceNamed("storage")
        verdict = ss.remote_get_version()
        self.failUnlessReallyEqual(verdict["application-version"],
                                   str(allmydata.__full_version__))
        self.failIfEqual(str(allmydata.__version__), "unknown")
        self.failUnless("." in str(allmydata.__full_version__),
                        "non-numeric version in '%s'" % allmydata.__version__)
        all_versions = allmydata.get_package_versions_string()
        self.failUnless(allmydata.__appname__ in all_versions)
        # also test stats
        stats = c.get_stats()
        self.failUnless("node.uptime" in stats)
        self.failUnless(isinstance(stats["node.uptime"], float))

    def test_helper_furl(self):
        basedir = "test_client.Basic.test_helper_furl"
        os.mkdir(basedir)

        def _check(config, expected_furl):
            fileutil.write(os.path.join(basedir, "tahoe.cfg"),
                           BASECONFIG + config)
            c = client.Client(basedir)
            uploader = c.getServiceNamed("uploader")
            furl, connected = uploader.get_helper_info()
            self.failUnlessEqual(furl, expected_furl)

        _check("", None)
        _check("helper.furl =\n", None)
        _check("helper.furl = \n", None)
        _check("helper.furl = None", None)
        _check("helper.furl = pb://blah\n", "pb://blah")

    def test_create_drop_uploader(self):
        class MockDropUploader(service.MultiService):
            name = 'drop-upload'

            def __init__(self, client, upload_dircap, local_dir_utf8, inotify=None):
                service.MultiService.__init__(self)
                self.client = client
                self.upload_dircap = upload_dircap
                self.local_dir_utf8 = local_dir_utf8
                self.inotify = inotify

        self.patch(allmydata.frontends.drop_upload, 'DropUploader', MockDropUploader)

        upload_dircap = "URI:DIR2:blah"
        local_dir_utf8 = u"loc\u0101l_dir".encode('utf-8')
        config = (BASECONFIG +
                  "[storage]\n" +
                  "enabled = false\n" +
                  "[drop_upload]\n" +
                  "enabled = true\n")

        basedir1 = "test_client.Basic.test_create_drop_uploader1"
        os.mkdir(basedir1)
        fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                       config + "local.directory = " + local_dir_utf8 + "\n")
        self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)

        fileutil.write(os.path.join(basedir1, "tahoe.cfg"), config)
        fileutil.write(os.path.join(basedir1, "private", "drop_upload_dircap"), "URI:DIR2:blah")
        self.failUnlessRaises(MissingConfigEntry, client.Client, basedir1)

        fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                       config + "upload.dircap = " + upload_dircap + "\n")
        self.failUnlessRaises(OldConfigOptionError, client.Client, basedir1)

        fileutil.write(os.path.join(basedir1, "tahoe.cfg"),
                       config + "local.directory = " + local_dir_utf8 + "\n")
        c1 = client.Client(basedir1)
        uploader = c1.getServiceNamed('drop-upload')
        self.failUnless(isinstance(uploader, MockDropUploader), uploader)
        self.failUnlessReallyEqual(uploader.client, c1)
        self.failUnlessReallyEqual(uploader.upload_dircap, upload_dircap)
        self.failUnlessReallyEqual(uploader.local_dir_utf8, local_dir_utf8)
        self.failUnless(uploader.inotify is None, uploader.inotify)
        self.failUnless(uploader.running)

        class Boom(Exception):
            pass
        def BoomDropUploader(client, upload_dircap, local_dir_utf8, inotify=None):
            raise Boom()

        logged_messages = []
        def mock_log(*args, **kwargs):
            logged_messages.append("%r %r" % (args, kwargs))
        self.patch(allmydata.util.log, 'msg', mock_log)
        self.patch(allmydata.frontends.drop_upload, 'DropUploader', BoomDropUploader)

        basedir2 = "test_client.Basic.test_create_drop_uploader2"
        os.mkdir(basedir2)
        os.mkdir(os.path.join(basedir2, "private"))
        fileutil.write(os.path.join(basedir2, "tahoe.cfg"),
                       BASECONFIG +
                       "[drop_upload]\n" +
                       "enabled = true\n" +
                       "local.directory = " + local_dir_utf8 + "\n")
        fileutil.write(os.path.join(basedir2, "private", "drop_upload_dircap"), "URI:DIR2:blah")
        c2 = client.Client(basedir2)
        self.failUnlessRaises(KeyError, c2.getServiceNamed, 'drop-upload')
        self.failUnless([True for arg in logged_messages if "Boom" in arg],
                        logged_messages)


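# flushEventualQueue (from foolscap.api) returns a Deferred that fires once
# foolscap's eventual-send queue has drained; flush_but_dont_ignore chains it
# into tearDown so queued events run before the test ends, while still passing
# the original result through.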
def flush_but_dont_ignore(res):
    d = flushEventualQueue()
    def _done(ignored):
        return res
    d.addCallback(_done)
    return d

class Run(unittest.TestCase, testutil.StallMixin):

    def setUp(self):
        self.sparent = service.MultiService()
        self.sparent.startService()
    def tearDown(self):
        d = self.sparent.stopService()
        d.addBoth(flush_but_dont_ignore)
        return d

    def test_loadable(self):
        basedir = "test_client.Run.test_loadable"
        os.mkdir(basedir)
        dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
        fileutil.write(os.path.join(basedir, client.Client.EXIT_TRIGGER_FILE), "")
        client.Client(basedir)

    def test_reloadable(self):
        basedir = "test_client.Run.test_reloadable"
        os.mkdir(basedir)
        dummy = "pb://wl74cyahejagspqgy4x5ukrvfnevlknt@127.0.0.1:58889/bogus"
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG_I % dummy)
        c1 = client.Client(basedir)
        c1.setServiceParent(self.sparent)

        # delay to let the service start up completely. I'm not entirely sure
        # this is necessary.
        d = self.stall(delay=2.0)
        d.addCallback(lambda res: c1.disownServiceParent())
        # the cygwin buildslave seems to need more time to let the old
        # service completely shut down. When delay=0.1, I saw this test fail,
        # probably due to the logport trying to reclaim the old socket
        # number. This suggests that either we're dropping a Deferred
        # somewhere in the shutdown sequence, or that cygwin is just cranky.
        d.addCallback(self.stall, delay=2.0)
        def _restart(res):
            # TODO: pause for slightly over one second, to let
            # Client._check_exit_trigger poll the file once. That will exercise
            # another few lines. Then add another test in which we don't
            # update the file at all, and watch to see the node shutdown.
            # (To do this, use a modified node which overrides Node.shutdown(),
            # also change _check_exit_trigger to use it instead of a raw
            # reactor.stop, also instrument the shutdown event in an
            # attribute that we can check.)
            c2 = client.Client(basedir)
            c2.setServiceParent(self.sparent)
            return c2.disownServiceParent()
        d.addCallback(_restart)
        return d

class NodeMaker(testutil.ReallyEqualMixin, unittest.TestCase):
    def test_maker(self):
        basedir = "client/NodeMaker/maker"
        fileutil.make_dirs(basedir)
        fileutil.write(os.path.join(basedir, "tahoe.cfg"), BASECONFIG)
        c = client.Client(basedir)

        n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failUnless(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failIf(n.is_mutable())

        # Testing #1679. There was a bug that would occur when the downloader
        # was downloading the same readcap more than once concurrently (so the
        # filenode object was cached), and there was a failure from one of the
        # servers in one of the download attempts. No subsequent download
        # attempt would try to use that server again, which would lead to
        # the file being undownloadable until the gateway was restarted. The
        # current fix for this (hopefully to be superseded by a better fix
        # eventually) is to prevent re-use of filenodes, so the NodeMaker is
        # hereby required *not* to cache and re-use filenodes for CHKs.
        other_n = c.create_node_from_uri("URI:CHK:6nmrpsubgbe57udnexlkiwzmlu:bjt7j6hshrlmadjyr7otq3dc24end5meo5xcr5xe5r663po6itmq:3:10:7277")
        self.failIf(n is other_n, (n, other_n))

        n = c.create_node_from_uri("URI:LIT:n5xgk")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failUnless(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failIf(n.is_mutable())

        n = c.create_node_from_uri("URI:SSK:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failUnless(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failIf(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:SSK-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failUnless(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failUnless(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:DIR2:n6x24zd3seu725yluj75q5boaa:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failUnless(IDirectoryNode.providedBy(n))
        self.failIf(n.is_readonly())
        self.failUnless(n.is_mutable())

        n = c.create_node_from_uri("URI:DIR2-RO:b7sr5qsifnicca7cbk3rhrhbvq:mm6yoqjhl6ueh7iereldqxue4nene4wl7rqfjfybqrehdqmqskvq")
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failUnless(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_readonly())
        self.failUnless(n.is_mutable())

        unknown_rw = "lafs://from_the_future"
        unknown_ro = "lafs://readonly_from_the_future"
        n = c.create_node_from_uri(unknown_rw, unknown_ro)
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_unknown())
        self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)

        # Note: it isn't that we *intend* to deploy non-ASCII caps in
        # the future, it is that we want to make sure older Tahoe-LAFS
        # versions wouldn't choke on them if we were to do so. See
        # #1051 and wiki:NewCapDesign for details.
        unknown_rw = u"lafs://from_the_future_rw_\u263A".encode('utf-8')
        unknown_ro = u"lafs://readonly_from_the_future_ro_\u263A".encode('utf-8')
        n = c.create_node_from_uri(unknown_rw, unknown_ro)
        self.failUnless(IFilesystemNode.providedBy(n))
        self.failIf(IFileNode.providedBy(n))
        self.failIf(IImmutableFileNode.providedBy(n))
        self.failIf(IMutableFileNode.providedBy(n))
        self.failIf(IDirectoryNode.providedBy(n))
        self.failUnless(n.is_unknown())
        self.failUnlessReallyEqual(n.get_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_write_uri(), unknown_rw)
        self.failUnlessReallyEqual(n.get_readonly_uri(), "ro." + unknown_ro)