4 from twisted.trial import unittest
5 from cStringIO import StringIO
10 from allmydata.util import fileutil, hashutil
11 from allmydata import uri
13 # Test that the scripts can be imported -- although the actual tests of their functionality are
14 # done by invoking them in a subprocess.
15 from allmydata.scripts import tahoe_ls, tahoe_get, tahoe_put, tahoe_rm, tahoe_cp
16 _hush_pyflakes = [tahoe_ls, tahoe_get, tahoe_put, tahoe_rm, tahoe_cp]
18 from allmydata.scripts.common import DEFAULT_ALIAS, get_aliases
20 from allmydata.scripts import cli, debug, runner, backupdb
21 from allmydata.test.common import SystemTestMixin
22 from twisted.internet import threads # CLI tests use deferToThread
class CLI(unittest.TestCase):
    """Direct, in-process tests of CLI option parsing and the 'tahoe debug'
    helpers (dump-cap, catalog-shares).

    No client node is started here: the option classes and debug functions
    are called directly with captured StringIO stdout/stderr.
    """
    # this test case only looks at argument-processing and simple stuff.

    def test_options(self):
        """Options should be read from the node directory on disk, with
        command-line flags (--node-url, --dir-cap) taking precedence."""
        fileutil.rm_dir("cli/test_options")
        fileutil.make_dirs("cli/test_options")
        fileutil.make_dirs("cli/test_options/private")
        open("cli/test_options/node.url","w").write("http://localhost:8080/\n")
        filenode_uri = uri.WriteableSSKFileURI(writekey="\x00"*16,
                                               fingerprint="\x00"*32)
        private_uri = uri.NewDirectoryURI(filenode_uri).to_string()
        open("cli/test_options/private/root_dir.cap", "w").write(private_uri + "\n")
        # NOTE(review): the statement constructing 'o' (an Options instance)
        # appears to be missing from this excerpt -- confirm against the
        # full file.
        o.parseOptions(["--node-directory", "cli/test_options"])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], private_uri)
        self.failUnlessEqual(o.where, "")

        # an explicit --node-url overrides the node.url file on disk
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--node-url", "http://example.org:8111/"])
        self.failUnlessEqual(o['node-url'], "http://example.org:8111/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], private_uri)
        self.failUnlessEqual(o.where, "")

        # NOTE(review): the continuation line closing this parseOptions call
        # is missing from this excerpt (the assertion below expects the
        # default alias to become "root").
        o.parseOptions(["--node-directory", "cli/test_options",
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], "root")
        self.failUnlessEqual(o.where, "")

        # a --dir-cap on the command line replaces the default alias
        other_filenode_uri = uri.WriteableSSKFileURI(writekey="\x11"*16,
                                                     fingerprint="\x11"*32)
        other_uri = uri.NewDirectoryURI(other_filenode_uri).to_string()
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--dir-cap", other_uri])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], other_uri)
        self.failUnlessEqual(o.where, "")

        # a trailing positional argument becomes the 'where' path
        o.parseOptions(["--node-directory", "cli/test_options",
                        "--dir-cap", other_uri, "subdir"])
        self.failUnlessEqual(o['node-url'], "http://localhost:8080/")
        self.failUnlessEqual(o.aliases[DEFAULT_ALIAS], other_uri)
        self.failUnlessEqual(o.where, "subdir")

    def _dump_cap(self, *args):
        """Run 'tahoe debug dump-cap' with *args; assert it wrote nothing to
        stderr and capture its stdout for the caller's assertions."""
        config = debug.DumpCapOptions()
        config.stdout,config.stderr = StringIO(), StringIO()
        config.parseOptions(args)
        debug.dump_cap(config)
        self.failIf(config.stderr.getvalue())
        output = config.stdout.getvalue()
        # NOTE(review): a 'return output' line appears to be missing from
        # this excerpt; every caller below uses the returned string.

    def test_dump_cap_chk(self):
        """dump-cap should decode a CHK file cap -- plus its verify cap and a
        URL-embedded form -- into human-readable fields."""
        key = "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
        storage_index = hashutil.storage_index_hash(key)
        uri_extension_hash = hashutil.uri_extension_hash("stuff")
        # NOTE(review): the needed_shares/total_shares/size assignments and
        # the 'size=size)' continuation are missing from this excerpt
        # (assertions below expect k/N: 25/100, size: 1234).
        u = uri.CHKFileURI(key=key,
                           uri_extension_hash=uri_extension_hash,
                           needed_shares=needed_shares,
                           total_shares=total_shares,
        output = self._dump_cap(u.to_string())
        self.failUnless("CHK File:" in output, output)
        self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output)
        self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output)
        self.failUnless("size: 1234" in output, output)
        self.failUnless("k/N: 25/100" in output, output)
        self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output)

        # with --client-secret, the derived renewal secret should appear
        # NOTE(review): the continuation line of this call is missing.
        output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
        self.failUnless("client renewal secret: znxmki5zdibb5qlt46xbdvk2t55j7hibejq3i5ijyurkr6m6jkhq" in output, output)

        # the verify cap must not reveal the encryption key
        output = self._dump_cap(u.get_verify_cap().to_string())
        self.failIf("key: " in output, output)
        self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output)
        self.failUnless("size: 1234" in output, output)
        self.failUnless("k/N: 25/100" in output, output)
        self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output)

        # a cap embedded in a gateway URL should be unwrapped and decoded
        prefixed_u = "http://127.0.0.1/uri/%s" % urllib.quote(u.to_string())
        output = self._dump_cap(prefixed_u)
        self.failUnless("CHK File:" in output, output)
        self.failUnless("key: aaaqeayeaudaocajbifqydiob4" in output, output)
        self.failUnless("UEB hash: nf3nimquen7aeqm36ekgxomalstenpkvsdmf6fplj7swdatbv5oa" in output, output)
        self.failUnless("size: 1234" in output, output)
        self.failUnless("k/N: 25/100" in output, output)
        self.failUnless("storage index: hdis5iaveku6lnlaiccydyid7q" in output, output)

    def test_dump_cap_lit(self):
        """dump-cap on a LIT cap should show the embedded data directly."""
        u = uri.LiteralFileURI("this is some data")
        output = self._dump_cap(u.to_string())
        self.failUnless("Literal File URI:" in output, output)
        self.failUnless("data: this is some data" in output, output)

    def test_dump_cap_ssk(self):
        """dump-cap should decode SSK write/read/verify caps, including the
        secrets derived from --client-secret / --client-dir / --nodeid."""
        writekey = "\x01" * 16
        fingerprint = "\xfe" * 32
        u = uri.WriteableSSKFileURI(writekey, fingerprint)

        output = self._dump_cap(u.to_string())
        self.failUnless("SSK Writeable URI:" in output, output)
        self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output, output)
        self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

        # NOTE(review): the continuation line of this call is missing.
        output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)

        # the client secret can also be read from CLIENTDIR/private/secret
        fileutil.make_dirs("cli/test_dump_cap/private")
        f = open("cli/test_dump_cap/private/secret", "w")
        f.write("5s33nk3qpvnj2fw3z4mnm2y6fa\n")
        # NOTE(review): f.close() and the continuation lines of the next
        # several _dump_cap calls are missing from this excerpt.
        output = self._dump_cap("--client-dir", "cli/test_dump_cap",
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)

        # a bogus client dir yields no secret, hence no renewal secret
        output = self._dump_cap("--client-dir", "cli/test_dump_cap_BOGUS",
        self.failIf("file renewal secret:" in output, output)

        # --nodeid alone gives the write enabler but no renewal secret
        output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j",
        self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output)
        self.failIf("file renewal secret:" in output, output)

        # --nodeid plus --client-secret gives all derived secrets
        output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j",
                                "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
        self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output)
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)
        self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output)

        # NOTE(review): a 'u = u.get_readonly()' conversion appears to be
        # missing before this read-only check -- confirm against full file.
        output = self._dump_cap(u.to_string())
        self.failUnless("SSK Read-only URI:" in output, output)
        self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

        u = u.get_verify_cap()
        output = self._dump_cap(u.to_string())
        self.failUnless("SSK Verifier URI:" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

    def test_dump_cap_directory(self):
        """dump-cap should decode directory write/read/verify caps and the
        secrets derivable from them."""
        writekey = "\x01" * 16
        fingerprint = "\xfe" * 32
        u1 = uri.WriteableSSKFileURI(writekey, fingerprint)
        u = uri.NewDirectoryURI(u1)

        output = self._dump_cap(u.to_string())
        self.failUnless("Directory Writeable URI:" in output, output)
        # NOTE(review): the 'output)' continuation lines closing two of the
        # assertions below are missing from this excerpt.
        self.failUnless("writekey: aeaqcaibaeaqcaibaeaqcaibae" in output,
        self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output,
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

        # NOTE(review): the continuation lines of the following _dump_cap
        # calls are missing from this excerpt.
        output = self._dump_cap("--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)

        output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j",
        self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output)
        self.failIf("file renewal secret:" in output, output)

        output = self._dump_cap("--nodeid", "tqc35esocrvejvg4mablt6aowg6tl43j",
                                "--client-secret", "5s33nk3qpvnj2fw3z4mnm2y6fa",
        self.failUnless("write_enabler: mgcavriox2wlb5eer26unwy5cw56elh3sjweffckkmivvsxtaknq" in output, output)
        self.failUnless("file renewal secret: arpszxzc2t6kb4okkg7sp765xgkni5z7caavj7lta73vmtymjlxq" in output, output)
        self.failUnless("lease renewal secret: 7pjtaumrb7znzkkbvekkmuwpqfjyfyamznfz4bwwvmh4nw33lorq" in output, output)

        # NOTE(review): a 'u = u.get_readonly()' conversion appears to be
        # missing before this read-only check.
        output = self._dump_cap(u.to_string())
        self.failUnless("Directory Read-only URI:" in output, output)
        self.failUnless("readkey: nvgh5vj2ekzzkim5fgtb4gey5y" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

        u = u.get_verify_cap()
        output = self._dump_cap(u.to_string())
        self.failUnless("Directory Verifier URI:" in output, output)
        self.failUnless("storage index: nt4fwemuw7flestsezvo2eveke" in output, output)
        self.failUnless("fingerprint: 737p57x6737p57x6737p57x6737p57x6737p57x6737p57x6737a" in output, output)

    def _catalog_shares(self, *basedirs):
        """Run 'tahoe debug catalog-shares' over *basedirs and capture its
        stdout and stderr."""
        o = debug.CatalogSharesOptions()
        o.stdout,o.stderr = StringIO(), StringIO()
        args = list(basedirs)
        # NOTE(review): the 'o.parseOptions(args)' line and the final
        # 'return out, err' appear to be missing from this excerpt; the
        # caller below expects a (stdout, stderr) pair.
        debug.catalog_shares(o)
        out = o.stdout.getvalue()
        err = o.stderr.getvalue()

    def test_catalog_shares_error(self):
        """catalog-shares must survive corrupt share files and stray
        non-directory entries, reporting them on stderr and continuing."""
        nodedir1 = "cli/test_catalog_shares/node1"
        sharedir = os.path.join(nodedir1, "storage", "shares", "mq", "mqfblse6m5a6dh45isu2cg7oji")
        fileutil.make_dirs(sharedir)
        f = open(os.path.join(sharedir, "8"), "wb")
        open("cli/test_catalog_shares/node1/storage/shares/mq/not-a-dir", "wb").close()
        # write a bogus share that looks a little bit like CHK
        f.write("\x00\x00\x00\x01" + "\xff" * 200) # this triggers an assert
        # NOTE(review): f.close() appears to be missing from this excerpt.

        nodedir2 = "cli/test_catalog_shares/node2"
        fileutil.make_dirs(nodedir2)
        open("cli/test_catalog_shares/node1/storage/shares/not-a-dir", "wb").close()

        # now make sure that the 'catalog-shares' commands survives the error
        out, err = self._catalog_shares(nodedir1, nodedir2)
        self.failUnlessEqual(out, "", out)
        self.failUnless("Error processing " in err,
                        "didn't see 'error processing' in '%s'" % err)
        #self.failUnless(nodedir1 in err,
        #                "didn't see '%s' in '%s'" % (nodedir1, err))
        # windows mangles the path, and os.path.join isn't enough to make
        # up for it, so just look for individual strings
        self.failUnless("node1" in err,
                        "didn't see 'node1' in '%s'" % err)
        self.failUnless("mqfblse6m5a6dh45isu2cg7oji" in err,
                        "didn't see 'mqfblse6m5a6dh45isu2cg7oji' in '%s'" % err)
    def do_cli(self, verb, *args, **kwargs):
        """Run one CLI command ('tahoe <verb> ...') against client0 in a
        worker thread via deferToThread.

        kwargs['stdin'] (default "") is fed to the command's stdin; the
        result is the tuple (rc, stdout-text, stderr-text).
        """
        # NOTE(review): the 'nodeargs = [' opener and closing ']' around the
        # following line appear to be missing from this excerpt.
            "--node-directory", self.getdir("client0"),
        argv = [verb] + nodeargs + list(args)
        stdin = kwargs.get("stdin", "")
        stdout, stderr = StringIO(), StringIO()
        # run the (blocking) runner off the reactor thread
        d = threads.deferToThread(runner.runner, argv, run_by_human=False,
                                  stdin=StringIO(stdin),
                                  stdout=stdout, stderr=stderr)
        # NOTE(review): the callback wrapper that binds 'rc' is missing from
        # this excerpt.
        return rc, stdout.getvalue(), stderr.getvalue()
class CreateAlias(SystemTestMixin, CLITestMixin, unittest.TestCase):
    """System tests for 'tahoe create-alias' / 'tahoe add-alias' and the URL
    computation performed by 'tahoe webopen'."""

    def _test_webopen(self, args, expected_url):
        """Parse webopen options for client0 plus 'args' and assert webopen
        would open exactly 'expected_url' (the browser-open callable is
        replaced by urls.append so nothing is actually opened)."""
        woo = cli.WebopenOptions()
        all_args = ["--node-directory", self.getdir("client0")] + list(args)
        woo.parseOptions(all_args)
        # NOTE(review): the 'urls = []' initializer appears to be missing
        # from this excerpt.
        rc = cli.webopen(woo, urls.append)
        self.failUnlessEqual(rc, 0)
        self.failUnlessEqual(len(urls), 1)
        self.failUnlessEqual(urls[0], expected_url)

    def test_create(self):
        """create-alias should make a new directory alias and refuse
        duplicates; add-alias should accept an explicit cap; webopen should
        map each alias/path form to the right gateway URL."""
        # NOTE(review): this excerpt is missing a few lines (some callback
        # registrations and the final 'return d') -- confirm against the
        # full file.
        self.basedir = os.path.dirname(self.mktemp())
        d = self.set_up_nodes()
        d.addCallback(lambda res: self.do_cli("create-alias", "tahoe"))
        def _done((rc,stdout,stderr)):
            self.failUnless("Alias 'tahoe' created" in stdout)
            aliases = get_aliases(self.getdir("client0"))
            self.failUnless("tahoe" in aliases)
            self.failUnless(aliases["tahoe"].startswith("URI:DIR2:"))
        d.addCallback(lambda res: self.do_cli("create-alias", "two"))

        def _stash_urls(res):
            # remember gateway URLs for both aliases for the webopen checks
            aliases = get_aliases(self.getdir("client0"))
            node_url_file = os.path.join(self.getdir("client0"), "node.url")
            nodeurl = open(node_url_file, "r").read().strip()
            uribase = nodeurl + "uri/"
            self.tahoe_url = uribase + urllib.quote(aliases["tahoe"])
            self.tahoe_subdir_url = self.tahoe_url + "/subdir"
            self.two_url = uribase + urllib.quote(aliases["two"])
            self.two_uri = aliases["two"]
        d.addCallback(_stash_urls)

        d.addCallback(lambda res: self.do_cli("create-alias", "two")) # dup
        def _check_create_duplicate((rc,stdout,stderr)):
            self.failIfEqual(rc, 0)
            self.failUnless("Alias 'two' already exists!" in stderr)
            aliases = get_aliases(self.getdir("client0"))
            self.failUnlessEqual(aliases["two"], self.two_uri)
        d.addCallback(_check_create_duplicate)

        d.addCallback(lambda res: self.do_cli("add-alias", "added", self.two_uri))
        def _check_add((rc,stdout,stderr)):
            self.failUnlessEqual(rc, 0)
            self.failUnless("Alias 'added' added" in stdout)
        d.addCallback(_check_add)

        # check add-alias with a duplicate
        d.addCallback(lambda res: self.do_cli("add-alias", "two", self.two_uri))
        def _check_add_duplicate((rc,stdout,stderr)):
            self.failIfEqual(rc, 0)
            self.failUnless("Alias 'two' already exists!" in stderr)
            aliases = get_aliases(self.getdir("client0"))
            self.failUnlessEqual(aliases["two"], self.two_uri)
        d.addCallback(_check_add_duplicate)

        def _test_urls(junk):
            self._test_webopen([], self.tahoe_url)
            self._test_webopen(["/"], self.tahoe_url)
            self._test_webopen(["tahoe:"], self.tahoe_url)
            self._test_webopen(["tahoe:/"], self.tahoe_url)
            self._test_webopen(["tahoe:subdir"], self.tahoe_subdir_url)
            self._test_webopen(["tahoe:subdir/"], self.tahoe_subdir_url + '/')
            self._test_webopen(["tahoe:subdir/file"], self.tahoe_subdir_url + '/file')
            # if "file" is indeed a file, then the url produced by webopen in
            # this case is disallowed by the webui. but by design, webopen
            # passes through the mistake from the user to the resultant url
            self._test_webopen(["tahoe:subdir/file/"], self.tahoe_subdir_url + '/file/')
            self._test_webopen(["two:"], self.two_url)
        d.addCallback(_test_urls)
class Put(SystemTestMixin, CLITestMixin, unittest.TestCase):
    """System tests for 'tahoe put' / 'tahoe get' in their various forms:
    unlinked immutable, linked (aliased) immutable, and mutable files."""

    def test_unlinked_immutable_stdin(self):
        """An unlinked immutable upload from stdin should print a URI:CHK:
        readcap, and re-uploading identical data should yield the same cap."""
        # tahoe get `echo DATA | tahoe put`
        # tahoe get `echo DATA | tahoe put -`
        self.basedir = self.mktemp()
        # NOTE(review): the 'DATA = ...' definition used below, the
        # 'def _uploaded(res):' header, and the final 'return d' are missing
        # from this excerpt.
        d = self.set_up_nodes()
        d.addCallback(lambda res: self.do_cli("put", stdin=DATA))
            (rc, stdout, stderr) = res
            self.failUnless("waiting for file data on stdin.." in stderr)
            self.failUnless("200 OK" in stderr, stderr)
            self.readcap = stdout
            self.failUnless(self.readcap.startswith("URI:CHK:"))
        d.addCallback(_uploaded)
        d.addCallback(lambda res: self.do_cli("get", self.readcap))
        def _downloaded(res):
            (rc, stdout, stderr) = res
            self.failUnlessEqual(stderr, "")
            self.failUnlessEqual(stdout, DATA)
        d.addCallback(_downloaded)
        # 'put -' must behave the same as bare 'put'
        d.addCallback(lambda res: self.do_cli("put", "-", stdin=DATA))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, self.readcap))

    def test_unlinked_immutable_from_file(self):
        """Uploading the same local file by relative, ./-prefixed, and
        absolute path should all produce the same (LIT) readcap."""
        # tahoe put ./file.txt
        # tahoe put /tmp/file.txt
        # tahoe put ~/file.txt
        self.basedir = os.path.dirname(self.mktemp())
        # this will be "allmydata.test.test_cli/Put/test_put_from_file/RANDOM"
        # and the RANDOM directory will exist. Raw mktemp returns a filename.
        rel_fn = os.path.join(self.basedir, "DATAFILE")
        abs_fn = os.path.abspath(rel_fn)
        # we make the file small enough to fit in a LIT file, for speed
        f = open(rel_fn, "w")
        f.write("short file")
        # NOTE(review): f.close() and the final 'return d' appear to be
        # missing from this excerpt.
        d = self.set_up_nodes()
        d.addCallback(lambda res: self.do_cli("put", rel_fn))
        def _uploaded((rc,stdout,stderr)):
            # NOTE(review): the 'readcap = ...' assignment is missing here.
            self.failUnless(readcap.startswith("URI:LIT:"))
            self.readcap = readcap
        d.addCallback(_uploaded)
        d.addCallback(lambda res: self.do_cli("put", "./" + rel_fn))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, self.readcap))
        d.addCallback(lambda res: self.do_cli("put", abs_fn))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, self.readcap))
        # we just have to assume that ~ is handled properly

    def test_immutable_from_file(self):
        """'tahoe put FILE [TARGET]' should create/replace files under an
        alias, under subdirectories, and under an explicit DIRCAP:./path."""
        # tahoe put file.txt uploaded.txt
        # tahoe - uploaded.txt
        # tahoe put file.txt subdir/uploaded.txt
        # tahoe put file.txt tahoe:uploaded.txt
        # tahoe put file.txt tahoe:subdir/uploaded.txt
        # tahoe put file.txt DIRCAP:./uploaded.txt
        # tahoe put file.txt DIRCAP:./subdir/uploaded.txt
        self.basedir = os.path.dirname(self.mktemp())
        rel_fn = os.path.join(self.basedir, "DATAFILE")
        abs_fn = os.path.abspath(rel_fn)
        # we make the file small enough to fit in a LIT file, for speed
        # NOTE(review): the 'DATA = ...' definition, the f.write/f.close
        # lines, and a final 'return d' are missing from this excerpt.
        DATA2 = "short file two"
        f = open(rel_fn, "w")

        d = self.set_up_nodes()
        d.addCallback(lambda res: self.do_cli("create-alias", "tahoe"))

        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn, "uploaded.txt"))
        def _uploaded((rc,stdout,stderr)):
            readcap = stdout.strip()
            self.failUnless(readcap.startswith("URI:LIT:"))
            self.failUnless("201 Created" in stderr, stderr)
            self.readcap = readcap
        d.addCallback(_uploaded)
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:uploaded.txt"))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, DATA))

        # replace in place via stdin; "200 OK" indicates an overwrite
        d.addCallback(lambda res:
                      self.do_cli("put", "-", "uploaded.txt", stdin=DATA2))
        def _replaced((rc,stdout,stderr)):
            readcap = stdout.strip()
            self.failUnless(readcap.startswith("URI:LIT:"))
            self.failUnless("200 OK" in stderr, stderr)
        d.addCallback(_replaced)

        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn, "subdir/uploaded2.txt"))
        d.addCallback(lambda res: self.do_cli("get", "subdir/uploaded2.txt"))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, DATA))

        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn, "tahoe:uploaded3.txt"))
        d.addCallback(lambda res: self.do_cli("get", "tahoe:uploaded3.txt"))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, DATA))

        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn, "tahoe:subdir/uploaded4.txt"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:subdir/uploaded4.txt"))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, DATA))

        def _get_dircap(res):
            # fetch the raw dircap behind the 'tahoe' alias for DIRCAP: tests
            self.dircap = get_aliases(self.getdir("client0"))["tahoe"]
        d.addCallback(_get_dircap)

        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn,
                                  self.dircap+":./uploaded5.txt"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:uploaded5.txt"))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, DATA))

        d.addCallback(lambda res:
                      self.do_cli("put", rel_fn,
                                  self.dircap+":./subdir/uploaded6.txt"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:subdir/uploaded6.txt"))
        d.addCallback(lambda (rc,stdout,stderr):
                      self.failUnlessEqual(stdout, DATA))

    def test_mutable_unlinked(self):
        """An unlinked mutable file (URI:SSK:) should be replaceable in place
        from stdin and from a local file, keeping the same filecap."""
        # FILECAP = `echo DATA | tahoe put --mutable`
        # tahoe get FILECAP, compare against DATA
        # echo DATA2 | tahoe put - FILECAP
        # tahoe get FILECAP, compare against DATA2
        # tahoe put file.txt FILECAP
        self.basedir = os.path.dirname(self.mktemp())
        # NOTE(review): the DATA/DATA2 definitions, f.write/f.close lines,
        # the nested 'def _created/_replaced/_replaced2(res):' headers, and
        # the final 'return d' are missing from this excerpt.
        rel_fn = os.path.join(self.basedir, "DATAFILE")
        abs_fn = os.path.abspath(rel_fn)
        DATA3 = "three" * 100
        f = open(rel_fn, "w")

        d = self.set_up_nodes()

        d.addCallback(lambda res: self.do_cli("put", "--mutable", stdin=DATA))
            (rc, stdout, stderr) = res
            self.failUnless("waiting for file data on stdin.." in stderr)
            self.failUnless("200 OK" in stderr)
            self.filecap = stdout
            self.failUnless(self.filecap.startswith("URI:SSK:"))
        d.addCallback(_created)
        d.addCallback(lambda res: self.do_cli("get", self.filecap))
        d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA))

        d.addCallback(lambda res: self.do_cli("put", "-", self.filecap, stdin=DATA2))
            (rc, stdout, stderr) = res
            self.failUnless("waiting for file data on stdin.." in stderr)
            self.failUnless("200 OK" in stderr)
            # the filecap must survive an in-place replacement
            self.failUnlessEqual(self.filecap, stdout)
        d.addCallback(_replaced)
        d.addCallback(lambda res: self.do_cli("get", self.filecap))
        d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA2))

        d.addCallback(lambda res: self.do_cli("put", rel_fn, self.filecap))
            (rc, stdout, stderr) = res
            self.failUnless("200 OK" in stderr)
            self.failUnlessEqual(self.filecap, stdout)
        d.addCallback(_replaced2)
        d.addCallback(lambda res: self.do_cli("get", self.filecap))
        d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA3))

    def test_mutable(self):
        """A second non-mutable 'put' to an existing mutable file should
        modify it in place rather than replace it with an immutable file."""
        # echo DATA1 | tahoe put --mutable - uploaded.txt
        # echo DATA2 | tahoe put - uploaded.txt # should modify-in-place
        # tahoe get uploaded.txt, compare against DATA2
        self.basedir = os.path.dirname(self.mktemp())
        # NOTE(review): the DATA1/DATA2 definitions, the lines that write
        # fn1/fn2, and the final 'return d' are missing from this excerpt.
        fn1 = os.path.join(self.basedir, "DATA1")
        fn2 = os.path.join(self.basedir, "DATA2")
        d = self.set_up_nodes()
        d.addCallback(lambda res: self.do_cli("create-alias", "tahoe"))
        d.addCallback(lambda res:
                      self.do_cli("put", "--mutable", fn1, "tahoe:uploaded.txt"))
        d.addCallback(lambda res:
                      self.do_cli("put", fn2, "tahoe:uploaded.txt"))
        d.addCallback(lambda res:
                      self.do_cli("get", "tahoe:uploaded.txt"))
        d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA2))
class Cp(SystemTestMixin, CLITestMixin, unittest.TestCase):
    """System tests for 'tahoe cp'."""

    def test_unicode_filename(self):
        """Uploading a file whose name contains a non-ASCII character must
        not prevent further uploads into the same directory."""
        self.basedir = os.path.dirname(self.mktemp())

        fn1 = os.path.join(self.basedir, "Ärtonwall")
        DATA1 = "unicode file content"
        open(fn1, "wb").write(DATA1)

        fn2 = os.path.join(self.basedir, "Metallica")
        DATA2 = "non-unicode file content"
        open(fn2, "wb").write(DATA2)

        # Assure that uploading a file whose name contains unicode character doesn't
        # prevent further uploads in the same directory
        d = self.set_up_nodes()
        d.addCallback(lambda res: self.do_cli("create-alias", "tahoe"))
        d.addCallback(lambda res: self.do_cli("cp", fn1, "tahoe:"))
        d.addCallback(lambda res: self.do_cli("cp", fn2, "tahoe:"))

        d.addCallback(lambda res: self.do_cli("get", "tahoe:Ärtonwall"))
        d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA1))

        d.addCallback(lambda res: self.do_cli("get", "tahoe:Metallica"))
        d.addCallback(lambda (rc,out,err): self.failUnlessEqual(out, DATA2))
        # NOTE(review): a final 'return d' appears to be missing from this
        # excerpt.
    test_unicode_filename.todo = "This behavior is not yet supported, although it does happen to work (for reasons that are ill-understood) on many platforms. See issue ticket #534."

    def test_dangling_symlink_vs_recursion(self):
        """'tahoe cp --recursive' on a directory containing a dangling
        symlink should not assert/crash."""
        if not hasattr(os, 'symlink'):
            raise unittest.SkipTest("There is no symlink on this platform.")
        # cp -r on a directory containing a dangling symlink shouldn't assert
        self.basedir = os.path.dirname(self.mktemp())
        dn = os.path.join(self.basedir, "dir")
        fn = os.path.join(dn, "Fakebandica")
        ln = os.path.join(dn, "link")
        # NOTE(review): the os.mkdir/os.symlink setup lines and the end of
        # this method (the cp arguments and 'return d') are missing from
        # this excerpt.
        d = self.set_up_nodes()
        d.addCallback(lambda res: self.do_cli("create-alias", "tahoe"))
        d.addCallback(lambda res: self.do_cli("cp", "--recursive",
class Backup(SystemTestMixin, CLITestMixin, unittest.TestCase):
    """System tests for 'tahoe backup', including backupdb-driven reuse."""

    def writeto(self, path, data):
        """Create (or overwrite) basedir/home/<path> with 'data', making any
        intermediate directories first."""
        d = os.path.dirname(os.path.join(self.basedir, "home", path))
        fileutil.make_dirs(d)
        f = open(os.path.join(self.basedir, "home", path), "w")
        # NOTE(review): the f.write(data)/f.close() lines appear to be
        # missing from this excerpt.

    def count_output(self, out):
        """Parse 'N files uploaded (N reused), N directories created (N
        reused)' from backup output; returns [fu, fr, dc, dr]."""
        # NOTE(review): '(\d)+' captures only the final digit of a
        # multi-digit count; presumably '(\d+)' was intended -- confirm.
        mo = re.search(r"(\d)+ files uploaded \((\d+) reused\), (\d+) directories created \((\d+) reused\)", out)
        return [int(s) for s in mo.groups()]

    def count_output2(self, out):
        """Parse 'N files checked, N directories checked, N directories
        read' from verbose backup output."""
        # NOTE(review): same '(\d)+' vs '(\d+)' concern as count_output.
        mo = re.search(r"(\d)+ files checked, (\d+) directories checked, (\d+) directories read", out)
        return [int(s) for s in mo.groups()]

    def test_backup(self):
        """Full backup lifecycle: initial backup, unchanged re-backup (all
        reused when the backupdb is available), forced re-check, modified
        re-backup, and a --no-backupdb backup that re-uploads everything."""
        self.basedir = os.path.dirname(self.mktemp())

        # is the backupdb available? If so, we test that a second backup does
        # not create new directories.
        # NOTE(review): the continuation of this get_backupdb call is
        # missing from this excerpt.
        have_bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),

        # create a small local directory with a couple of files
        source = os.path.join(self.basedir, "home")
        fileutil.make_dirs(os.path.join(source, "empty"))
        self.writeto("parent/subdir/foo.txt", "foo")
        self.writeto("parent/subdir/bar.txt", "bar\n" * 1000)
        self.writeto("parent/blah.txt", "blah")

        def do_backup(use_backupdb=True, verbose=False):
            # build and run a 'tahoe backup' command line
            # NOTE(review): the 'cmd = [...]' initializer and the source
            # argument append are missing from this excerpt.
            if not have_bdb or not use_backupdb:
                cmd.append("--no-backupdb")
            # NOTE(review): an 'if verbose:' guard appears to be missing
            # above this line.
                cmd.append("--verbose")
            cmd.append("tahoe:backups")
            return self.do_cli(*cmd)

        d = self.set_up_nodes()
        d.addCallback(lambda res: self.do_cli("create-alias", "tahoe"))

        # NOTE(review): this complaint check was presumably guarded by an
        # 'if not have_bdb:' that is missing from this excerpt -- confirm.
        d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:backups"))
        def _should_complain((rc, out, err)):
            self.failUnless("I was unable to import a python sqlite library" in err, err)
        d.addCallback(_should_complain)

        d.addCallback(lambda res: do_backup())
        def _check0((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            fu, fr, dc, dr = self.count_output(out)
            # foo.txt, bar.txt, blah.txt
            self.failUnlessEqual(fu, 3)
            self.failUnlessEqual(fr, 0)
            # empty, home, home/parent, home/parent/subdir
            self.failUnlessEqual(dc, 4)
            self.failUnlessEqual(dr, 0)
        d.addCallback(_check0)

        d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups"))
        def _check1((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual(sorted(out.split()), ["Archives", "Latest"])
        d.addCallback(_check1)
        d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest"))
        def _check2((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual(sorted(out.split()), ["empty", "parent"])
        d.addCallback(_check2)
        d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Latest/empty"))
        def _check2a((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual(out.strip(), "")
        d.addCallback(_check2a)
        d.addCallback(lambda res: self.do_cli("get", "tahoe:backups/Latest/parent/subdir/foo.txt"))
        def _check3((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual(out, "foo")
        d.addCallback(_check3)
        d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
        def _check4((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.old_archives = out.split()
            self.failUnlessEqual(len(self.old_archives), 1)
        d.addCallback(_check4)

        d.addCallback(lambda res: do_backup())
        def _check4a((rc, out, err)):
            # second backup should reuse everything, if the backupdb is
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            fu, fr, dc, dr = self.count_output(out)
            # foo.txt, bar.txt, blah.txt
            self.failUnlessEqual(fu, 0)
            self.failUnlessEqual(fr, 3)
            # empty, home, home/parent, home/parent/subdir
            self.failUnlessEqual(dc, 0)
            self.failUnlessEqual(dr, 4)
        d.addCallback(_check4a)

        # sneak into the backupdb, crank back the "last checked"
        # timestamp to force a check on all files
        def _reset_last_checked(res):
            dbfile = os.path.join(self.basedir,
                                  "client0", "private", "backupdb.sqlite")
            self.failUnless(os.path.exists(dbfile), dbfile)
            bdb = backupdb.get_backupdb(dbfile)
            bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
            bdb.connection.commit()
        d.addCallback(_reset_last_checked)

        d.addCallback(lambda res: do_backup(verbose=True))
        def _check4b((rc, out, err)):
            # we should check all files, and re-use all of them. None of
            # the directories should have been changed.
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            fu, fr, dc, dr = self.count_output(out)
            fchecked, dchecked, dread = self.count_output2(out)
            self.failUnlessEqual(fchecked, 3)
            self.failUnlessEqual(fu, 0)
            self.failUnlessEqual(fr, 3)
            # TODO: backupdb doesn't do dirs yet; when it does, this will
            # change to dchecked=4, and maybe dread=0
            self.failUnlessEqual(dchecked, 0)
            self.failUnlessEqual(dread, 4)
            self.failUnlessEqual(dc, 0)
            self.failUnlessEqual(dr, 4)
        d.addCallback(_check4b)

        d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
        def _check5((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.new_archives = out.split()
            # NOTE(review): the computation of 'expected_new' is missing
            # from this excerpt.
            self.failUnlessEqual(len(self.new_archives), expected_new)
            # the original backup should still be the oldest (i.e. sorts
            # alphabetically towards the beginning)
            self.failUnlessEqual(sorted(self.new_archives)[0],
                                 self.old_archives[0])
        d.addCallback(_check5)

        # NOTE(review): the 'def _modify(res):' header (and the backup call
        # between _modify and _check5a) appear to be missing here.
            time.sleep(1) # get us to a new second
            self.writeto("parent/subdir/foo.txt", "FOOF!")
            # and turn a file into a directory
            os.unlink(os.path.join(source, "parent/blah.txt"))
            os.mkdir(os.path.join(source, "parent/blah.txt"))
            self.writeto("parent/blah.txt/surprise file", "surprise")
            self.writeto("parent/blah.txt/surprisedir/subfile", "surprise")
            # turn a directory into a file
            os.rmdir(os.path.join(source, "empty"))
            self.writeto("empty", "imagine nothing being here")
        d.addCallback(_modify)
        def _check5a((rc, out, err)):
            # second backup should reuse bar.txt (if backupdb is available),
            # and upload the rest. None of the directories can be reused.
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            fu, fr, dc, dr = self.count_output(out)
            # new foo.txt, surprise file, subfile, empty
            self.failUnlessEqual(fu, 4)
            self.failUnlessEqual(fr, 1)
            # home, parent, subdir, blah.txt, surprisedir
            self.failUnlessEqual(dc, 5)
            self.failUnlessEqual(dr, 0)
        d.addCallback(_check5a)
        d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
        def _check6((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.new_archives = out.split()
            # NOTE(review): 'expected_new' computation is missing here too.
            self.failUnlessEqual(len(self.new_archives), expected_new)
            self.failUnlessEqual(sorted(self.new_archives)[0],
                                 self.old_archives[0])
        d.addCallback(_check6)
        d.addCallback(lambda res: self.do_cli("get", "tahoe:backups/Latest/parent/subdir/foo.txt"))
        def _check7((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual(out, "FOOF!")
            # the old snapshot should not be modified
            return self.do_cli("get", "tahoe:backups/Archives/%s/parent/subdir/foo.txt" % self.old_archives[0])
        d.addCallback(_check7)
        def _check8((rc, out, err)):
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            self.failUnlessEqual(out, "foo")
        d.addCallback(_check8)

        d.addCallback(lambda res: do_backup(use_backupdb=False))
        def _check9((rc, out, err)):
            # --no-backupdb means re-upload everything. We still get to
            # re-use the directories, since nothing changed.
            self.failUnlessEqual(err, "")
            self.failUnlessEqual(rc, 0)
            fu, fr, dc, dr = self.count_output(out)
            self.failUnlessEqual(fu, 5)
            self.failUnlessEqual(fr, 0)
            self.failUnlessEqual(dc, 0)
            self.failUnlessEqual(dr, 5)
        d.addCallback(_check9)
        # NOTE(review): a final 'return d' appears to be missing from this
        # excerpt.

    # on our old dapper buildslave, this test takes a long time (usually
    # 130s), so we have to bump up the default 120s timeout. The create-alias
    # and initial backup alone take 60s, probably because of the handful of
    # dirnodes being created (RSA key generation). The backup between check4
    # and check4a takes 6s, as does the backup before check4b.
    test_backup.timeout = 300