from mock import patch
-from allmydata.util import fileutil, hashutil, base32
+from allmydata.util import fileutil, hashutil, base32, keyutil
from allmydata import uri
from allmydata.immutable import upload
from allmydata.interfaces import MDMF_VERSION, SDMF_VERSION
from allmydata.mutable.publish import MutableData
from allmydata.dirnode import normalize
+from allmydata.scripts.common_http import socket_error
+import allmydata.scripts.common_http
+from pycryptopp.publickey import ed25519
# Test that the scripts can be imported.
from allmydata.scripts import create_node, debug, keygen, startstop_node, \
self.failUnlessIn(" debug trial [options] [[file|package|module|TestCase|testmethod]...]", help)
self.failUnlessIn("The 'tahoe debug trial' command uses the correct imports", help)
+ def test_debug_flogtool(self):
+ options = debug.FlogtoolOptions()
+ help = str(options)
+ self.failUnlessIn(" debug flogtool ", help)
+ self.failUnlessIn("The 'tahoe debug flogtool' command uses the correct imports", help)
+
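+ # each flogtool subcommand should mention itself in its own help text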
+ for (option, shortcut, oClass, desc) in options.subCommands:
+ subhelp = str(oClass())
+ self.failUnlessIn(" debug flogtool %s " % (option,), subhelp)
+
class CreateAlias(GridTestMixin, CLITestMixin, unittest.TestCase):
return d
+class Admin(unittest.TestCase):
+ def do_cli(self, *args, **kwargs):
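+ # Run the CLI in a worker thread, capturing stdout and stderr; the
+ # returned Deferred fires with (stdout, stderr).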
+ argv = list(args)
+ stdin = kwargs.get("stdin", "")
+ stdout, stderr = StringIO(), StringIO()
+ d = threads.deferToThread(runner.runner, argv, run_by_human=False,
+ stdin=StringIO(stdin),
+ stdout=stdout, stderr=stderr)
+ def _done(res):
+ return stdout.getvalue(), stderr.getvalue()
+ d.addCallback(_done)
+ return d
+
+ def test_generate_keypair(self):
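+ # "tahoe admin generate-keypair" should print a fresh keypair as a
+ # "private: priv-v0-..." line followed by a "public: pub-v0-..." line.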
+ d = self.do_cli("admin", "generate-keypair")
+ def _done( (stdout, stderr) ):
+ lines = [line.strip() for line in stdout.splitlines()]
+ privkey_bits = lines[0].split()
+ pubkey_bits = lines[1].split()
+ sk_header = "private:"
+ vk_header = "public:"
+ self.failUnlessEqual(privkey_bits[0], sk_header, lines[0])
+ self.failUnlessEqual(pubkey_bits[0], vk_header, lines[1])
+ self.failUnless(privkey_bits[1].startswith("priv-v0-"), lines[0])
+ self.failUnless(pubkey_bits[1].startswith("pub-v0-"), lines[1])
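+ # decode both keys and check that the printed public key really is
+ # derived from the printed private key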
+ sk_bytes = base32.a2b(keyutil.remove_prefix(privkey_bits[1], "priv-v0-"))
+ sk = ed25519.SigningKey(sk_bytes)
+ vk_bytes = base32.a2b(keyutil.remove_prefix(pubkey_bits[1], "pub-v0-"))
+ self.failUnlessEqual(sk.get_verifying_key_bytes(), vk_bytes)
+ d.addCallback(_done)
+ return d
+
+ def test_derive_pubkey(self):
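+ # "tahoe admin derive-pubkey PRIVKEY" should echo the private key and
+ # print the matching public key.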
+ priv1, pub1 = keyutil.make_keypair()
+ d = self.do_cli("admin", "derive-pubkey", priv1)
+ def _done( (stdout, stderr) ):
+ lines = stdout.split("\n")
+ privkey_line = lines[0].strip()
+ pubkey_line = lines[1].strip()
+ sk_header = "private: priv-v0-"
+ vk_header = "public: pub-v0-"
+ self.failUnless(privkey_line.startswith(sk_header), privkey_line)
+ self.failUnless(pubkey_line.startswith(vk_header), pubkey_line)
+ pub2 = pubkey_line[len(vk_header):]
+ self.failUnlessEqual("pub-v0-"+pub2, pub1)
+ d.addCallback(_done)
+ return d
+
+
class List(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_list(self):
self.basedir = "cli/List/list"
# is the backupdb available? If so, we test that a second backup does
# not create new directories.
hush = StringIO()
- have_bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
- hush)
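+ # the backupdb is now required rather than optional, so just make sure
+ # it opens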
+ bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
+ hush)
+ self.failUnless(bdb)
# create a small local directory with a couple of files
source = os.path.join(self.basedir, "home")
d = self.do_cli("create-alias", "tahoe")
- if not have_bdb:
- d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:backups"))
- def _should_complain((rc, out, err)):
- self.failUnless("I was unable to import a python sqlite library" in err, err)
- d.addCallback(_should_complain)
- d.addCallback(self.stall, 1.1) # make sure the backups get distinct timestamps
-
d.addCallback(lambda res: do_backup())
def _check0((rc, out, err)):
self.failUnlessReallyEqual(err, "")
# available
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
- if have_bdb:
- fu, fr, fs, dc, dr, ds = self.count_output(out)
- # foo.txt, bar.txt, blah.txt
- self.failUnlessReallyEqual(fu, 0)
- self.failUnlessReallyEqual(fr, 3)
- self.failUnlessReallyEqual(fs, 0)
- # empty, home, home/parent, home/parent/subdir
- self.failUnlessReallyEqual(dc, 0)
- self.failUnlessReallyEqual(dr, 4)
- self.failUnlessReallyEqual(ds, 0)
+ fu, fr, fs, dc, dr, ds = self.count_output(out)
+ # foo.txt, bar.txt, blah.txt
+ self.failUnlessReallyEqual(fu, 0)
+ self.failUnlessReallyEqual(fr, 3)
+ self.failUnlessReallyEqual(fs, 0)
+ # empty, home, home/parent, home/parent/subdir
+ self.failUnlessReallyEqual(dc, 0)
+ self.failUnlessReallyEqual(dr, 4)
+ self.failUnlessReallyEqual(ds, 0)
d.addCallback(_check4a)
- if have_bdb:
- # sneak into the backupdb, crank back the "last checked"
- # timestamp to force a check on all files
- def _reset_last_checked(res):
- dbfile = os.path.join(self.get_clientdir(),
- "private", "backupdb.sqlite")
- self.failUnless(os.path.exists(dbfile), dbfile)
- bdb = backupdb.get_backupdb(dbfile)
- bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
- bdb.cursor.execute("UPDATE directories SET last_checked=0")
- bdb.connection.commit()
-
- d.addCallback(_reset_last_checked)
-
- d.addCallback(self.stall, 1.1)
- d.addCallback(lambda res: do_backup(verbose=True))
- def _check4b((rc, out, err)):
- # we should check all files, and re-use all of them. None of
- # the directories should have been changed, so we should
- # re-use all of them too.
- self.failUnlessReallyEqual(err, "")
- self.failUnlessReallyEqual(rc, 0)
- fu, fr, fs, dc, dr, ds = self.count_output(out)
- fchecked, dchecked = self.count_output2(out)
- self.failUnlessReallyEqual(fchecked, 3)
- self.failUnlessReallyEqual(fu, 0)
- self.failUnlessReallyEqual(fr, 3)
- self.failUnlessReallyEqual(fs, 0)
- self.failUnlessReallyEqual(dchecked, 4)
- self.failUnlessReallyEqual(dc, 0)
- self.failUnlessReallyEqual(dr, 4)
- self.failUnlessReallyEqual(ds, 0)
- d.addCallback(_check4b)
+ # sneak into the backupdb, crank back the "last checked"
+ # timestamp to force a check on all files
+ def _reset_last_checked(res):
+ dbfile = os.path.join(self.get_clientdir(),
+ "private", "backupdb.sqlite")
+ self.failUnless(os.path.exists(dbfile), dbfile)
+ bdb = backupdb.get_backupdb(dbfile)
+ bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
+ bdb.cursor.execute("UPDATE directories SET last_checked=0")
+ bdb.connection.commit()
+
+ d.addCallback(_reset_last_checked)
+
+ d.addCallback(self.stall, 1.1)
+ d.addCallback(lambda res: do_backup(verbose=True))
+ def _check4b((rc, out, err)):
+ # we should check all files, and re-use all of them. None of
+ # the directories should have been changed, so we should
+ # re-use all of them too.
+ self.failUnlessReallyEqual(err, "")
+ self.failUnlessReallyEqual(rc, 0)
+ fu, fr, fs, dc, dr, ds = self.count_output(out)
+ fchecked, dchecked = self.count_output2(out)
+ self.failUnlessReallyEqual(fchecked, 3)
+ self.failUnlessReallyEqual(fu, 0)
+ self.failUnlessReallyEqual(fr, 3)
+ self.failUnlessReallyEqual(fs, 0)
+ self.failUnlessReallyEqual(dchecked, 4)
+ self.failUnlessReallyEqual(dc, 0)
+ self.failUnlessReallyEqual(dr, 4)
+ self.failUnlessReallyEqual(ds, 0)
+ d.addCallback(_check4b)
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
def _check5((rc, out, err)):
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.new_archives = out.split()
- expected_new = 2
- if have_bdb:
- expected_new += 1
- self.failUnlessReallyEqual(len(self.new_archives), expected_new, out)
+ self.failUnlessReallyEqual(len(self.new_archives), 3, out)
# the original backup should still be the oldest (i.e. sorts
# alphabetically towards the beginning)
self.failUnlessReallyEqual(sorted(self.new_archives)[0],
# and upload the rest. None of the directories can be reused.
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
- if have_bdb:
- fu, fr, fs, dc, dr, ds = self.count_output(out)
- # new foo.txt, surprise file, subfile, empty
- self.failUnlessReallyEqual(fu, 4)
- # old bar.txt
- self.failUnlessReallyEqual(fr, 1)
- self.failUnlessReallyEqual(fs, 0)
- # home, parent, subdir, blah.txt, surprisedir
- self.failUnlessReallyEqual(dc, 5)
- self.failUnlessReallyEqual(dr, 0)
- self.failUnlessReallyEqual(ds, 0)
+ fu, fr, fs, dc, dr, ds = self.count_output(out)
+ # new foo.txt, surprise file, subfile, empty
+ self.failUnlessReallyEqual(fu, 4)
+ # old bar.txt
+ self.failUnlessReallyEqual(fr, 1)
+ self.failUnlessReallyEqual(fs, 0)
+ # home, parent, subdir, blah.txt, surprisedir
+ self.failUnlessReallyEqual(dc, 5)
+ self.failUnlessReallyEqual(dr, 0)
+ self.failUnlessReallyEqual(ds, 0)
d.addCallback(_check5a)
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
def _check6((rc, out, err)):
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.new_archives = out.split()
- expected_new = 3
- if have_bdb:
- expected_new += 1
- self.failUnlessReallyEqual(len(self.new_archives), expected_new)
+ self.failUnlessReallyEqual(len(self.new_archives), 4)
self.failUnlessReallyEqual(sorted(self.new_archives)[0],
self.old_archives[0])
d.addCallback(_check6)
DATA = "data" * 100
d = c0.upload(upload.Data(DATA, convergence=""))
def _stash_bad(ur):
- self.uri_1share = ur.uri
- self.delete_shares_numbered(ur.uri, range(1,10))
+ self.uri_1share = ur.get_uri()
+ self.delete_shares_numbered(ur.get_uri(), range(1,10))
d.addCallback(_stash_bad)
# the download is abandoned as soon as it's clear that we won't get
return d
+ def test_broken_socket(self):
+ # When the HTTP connection breaks (such as when node.url is overwritten
+ # by a confused user), a user-friendly error message should be printed.
+ self.basedir = "cli/Errors/test_broken_socket"
+ self.set_up_grid()
+
+ # Simulate a connection error
+ endheaders = allmydata.scripts.common_http.httplib.HTTPConnection.endheaders
+ # restore the real endheaders afterwards, even if the check below fails
+ def _fix_endheaders(res):
+ allmydata.scripts.common_http.httplib.HTTPConnection.endheaders = endheaders
+ return res
+ def _socket_error(*args, **kwargs):
+ raise socket_error('test error')
+ allmydata.scripts.common_http.httplib.HTTPConnection.endheaders = _socket_error
+
+ d = self.do_cli("mkdir")
+ def _check_invalid((rc, stdout, stderr)):
+ self.failIfEqual(rc, 0)
+ self.failUnlessIn("Error trying to connect to http://127.0.0.1", stderr)
+ d.addCallback(_check_invalid)
+ d.addBoth(_fix_endheaders)
+ return d
+
class Get(GridTestMixin, CLITestMixin, unittest.TestCase):
def test_get_without_alias(self):