-# the backupdb is only available if sqlite3 is available. Python-2.5.x and
-# beyond include sqlite3 in the standard library. For python-2.4, the
-# "pysqlite2" "package" (or "module") (which, despite the confusing name, uses
-# sqlite3, and which, confusingly, comes in the "pysqlite" "distribution" (or
-# "package")) must be installed. On debian, install python-pysqlite2
-
import os.path, sys, time, random, stat
+
from allmydata.util.netstring import netstring
from allmydata.util.hashutil import backupdb_dirhash
from allmydata.util import base32
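Note on the removed header comment: with the pysqlite2 fallback gone, this patch implicitly assumes Python >= 2.5, where sqlite3 ships in the standard library. If an explicit guard were ever wanted, a minimal sketch (not part of this patch) could be:

    import sys

    if sys.version_info < (2, 5):
        raise RuntimeError("Python >= 2.5 required: the backupdb needs the stdlib sqlite3 module")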
create_version=(SCHEMA_v2, 2), just_create=False):
# open or create the given backupdb file. The parent directory must
# exist.
- try:
- import sqlite3
- sqlite = sqlite3 # pyflakes whines about 'import sqlite3 as sqlite' ..
- except ImportError:
- from pysqlite2 import dbapi2
- sqlite = dbapi2 # .. when this clause does it too
- # This import should never fail, because setuptools requires that the
- # "pysqlite" distribution is present at start time (if on Python < 2.5).
+ import sqlite3
must_create = not os.path.exists(dbfile)
try:
- db = sqlite.connect(dbfile)
- except (EnvironmentError, sqlite.OperationalError), e:
+ db = sqlite3.connect(dbfile)
+ except (EnvironmentError, sqlite3.OperationalError), e:
print >>stderr, "Unable to create/open backupdb file %s: %s" % (dbfile, e)
return None
try:
c.execute("SELECT version FROM version")
version = c.fetchone()[0]
- except sqlite.DatabaseError, e:
+ except sqlite3.DatabaseError, e:
# this indicates that the file is not a compatible database format.
# Perhaps it was created with an old version, or it might be junk.
print >>stderr, "backupdb file is unusable: %s" % e
db.commit()
version = 2
if version == 2:
- return BackupDB_v2(sqlite, db)
+ return BackupDB_v2(sqlite3, db)
print >>stderr, "Unable to handle backupdb version %s" % version
return None
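After this hunk, get_backupdb's contract is simple: return a BackupDB_v2 on success, or print a diagnostic to stderr and return None. A minimal caller sketch (the path is illustrative):

    from StringIO import StringIO
    from allmydata.scripts import backupdb

    errs = StringIO()
    bdb = backupdb.get_backupdb("/path/to/backupdb.sqlite", stderr=errs)
    if bdb is None:
        print errs.getvalue()  # says whether the file was unopenable or unusable
    else:
        assert bdb.VERSION == 2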
c.execute("INSERT INTO caps (filecap) VALUES (?)", (filecap,))
except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError):
# sqlite3 on sid gives IntegrityError
- # pysqlite2 on dapper gives OperationalError
+ # pysqlite2 (which we no longer use, so this comment may no longer be relevant) on dapper gives OperationalError
pass
c.execute("SELECT fileid FROM caps WHERE filecap=?", (filecap,))
foundrow = c.fetchone()
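This hunk preserves the insert-if-absent-then-look-up-the-rowid idiom: a duplicate INSERT on the UNIQUE filecap column raises IntegrityError under sqlite3 and is deliberately swallowed before the SELECT. A standalone sketch of the same pattern (schema approximated from the patch's usage of the caps table):

    import sqlite3

    db = sqlite3.connect(":memory:")
    c = db.cursor()
    c.execute("CREATE TABLE caps (fileid INTEGER PRIMARY KEY AUTOINCREMENT,"
              " filecap VARCHAR(256) UNIQUE)")
    filecap = "URI:CHK:example"
    for _ in range(2):  # the second pass exercises the IntegrityError branch
        try:
            c.execute("INSERT INTO caps (filecap) VALUES (?)", (filecap,))
        except sqlite3.IntegrityError:
            pass  # row already exists; fall through to the lookup
        c.execute("SELECT fileid FROM caps WHERE filecap=?", (filecap,))
        fileid = c.fetchone()[0]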
from allmydata.scripts import backupdb
class BackupDB(unittest.TestCase):
- def create_or_skip(self, dbfile):
+ def create(self, dbfile):
stderr = StringIO()
bdb = backupdb.get_backupdb(dbfile, stderr=stderr)
- if not bdb:
- if "I was unable to import a python sqlite library" in stderr.getvalue():
- raise unittest.SkipTest("sqlite unavailable, skipping test")
+ self.failUnless(bdb, "unable to create backupdb from %r" % (dbfile,))
return bdb
def skip_if_cannot_represent_filename(self, u):
self.basedir = basedir = os.path.join("backupdb", "create")
fileutil.make_dirs(basedir)
dbfile = os.path.join(basedir, "dbfile")
- bdb = self.create_or_skip(dbfile)
- self.failUnless(bdb)
+ bdb = self.create(dbfile)
self.failUnlessEqual(bdb.VERSION, 2)
def test_upgrade_v1_v2(self):
created = backupdb.get_backupdb(dbfile, stderr=stderr,
create_version=(backupdb.SCHEMA_v1, 1),
just_create=True)
- if not created:
- if "I was unable to import a python sqlite library" in stderr.getvalue():
- raise unittest.SkipTest("sqlite unavailable, skipping test")
- self.fail("unable to create v1 backupdb")
+ self.failUnless(created, "unable to create v1 backupdb")
# now we should have a v1 database on disk
- bdb = self.create_or_skip(dbfile)
- self.failUnless(bdb)
+ bdb = self.create(dbfile)
self.failUnlessEqual(bdb.VERSION, 2)
def test_fail(self):
stderr_f)
self.failUnlessEqual(bdb, None)
stderr = stderr_f.getvalue()
- if "I was unable to import a python sqlite library" in stderr:
- pass
- else:
- self.failUnless("backupdb file is unusable" in stderr, stderr)
- self.failUnless("file is encrypted or is not a database" in stderr,
- stderr)
+ self.failUnlessIn("backupdb file is unusable", stderr)
+ self.failUnlessIn("file is encrypted or is not a database", stderr)
# put a directory in the way, to exercise a different error path
where = os.path.join(basedir, "roadblock-dir")
bdb = backupdb.get_backupdb(where, stderr_f)
self.failUnlessEqual(bdb, None)
stderr = stderr_f.getvalue()
- if "I was unable to import a python sqlite library" in stderr:
- pass
- else:
- self.failUnless(("Unable to create/open backupdb file %s" % where)
- in stderr, stderr)
- self.failUnless("unable to open database file" in stderr, stderr)
+ self.failUnlessIn("Unable to create/open backupdb file %s" % (where,), stderr)
+ self.failUnlessIn("unable to open database file", stderr)
def writeto(self, filename, data):
self.basedir = basedir = os.path.join("backupdb", "check")
fileutil.make_dirs(basedir)
dbfile = os.path.join(basedir, "dbfile")
- bdb = self.create_or_skip(dbfile)
- self.failUnless(bdb)
+ bdb = self.create(dbfile)
foo_fn = self.writeto("foo.txt", "foo.txt")
blah_fn = self.writeto("bar/blah.txt", "blah.txt")
fileutil.make_dirs(basedir)
where = os.path.join(basedir, "tooold.db")
- bdb = self.create_or_skip(where)
+ bdb = self.create(where)
# reach into the DB and make it old
bdb.cursor.execute("UPDATE version SET version=0")
bdb.connection.commit()
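Once the version row is forced to 0, reopening the file should take the "Unable to handle backupdb version" branch in get_backupdb and return None. The elided remainder of this test presumably asserts roughly the following (a sketch, not the verbatim test code):

    from StringIO import StringIO
    from allmydata.scripts import backupdb

    stderr_f = StringIO()
    bdb2 = backupdb.get_backupdb(where, stderr_f)  # 'where' is the tooold.db path above
    assert bdb2 is None
    assert "Unable to handle backupdb version 0" in stderr_f.getvalue()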
self.basedir = basedir = os.path.join("backupdb", "directory")
fileutil.make_dirs(basedir)
dbfile = os.path.join(basedir, "dbfile")
- bdb = self.create_or_skip(dbfile)
- self.failUnless(bdb)
+ bdb = self.create(dbfile)
contents = {u"file1": "URI:CHK:blah1",
u"file2": "URI:CHK:blah2",
self.basedir = basedir = os.path.join("backupdb", "unicode")
fileutil.make_dirs(basedir)
dbfile = os.path.join(basedir, "dbfile")
- bdb = self.create_or_skip(dbfile)
- self.failUnless(bdb)
+ bdb = self.create(dbfile)
self.writeto(u"f\u00f6\u00f6.txt", "foo.txt")
files = [fn for fn in listdir_unicode(unicode(basedir)) if fn.endswith(".txt")]
- # is the backupdb available? If so, we test that a second backup does
- # not create new directories.
+ # the backupdb is now always available; check that a second backup does
+ # not create new directories.
hush = StringIO()
- have_bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
- hush)
+ bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
+ hush)
+ self.failUnless(bdb)
# create a small local directory with a couple of files
source = os.path.join(self.basedir, "home")
d = self.do_cli("create-alias", "tahoe")
- if not have_bdb:
- d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:backups"))
- def _should_complain((rc, out, err)):
- self.failUnless("I was unable to import a python sqlite library" in err, err)
- d.addCallback(_should_complain)
- d.addCallback(self.stall, 1.1) # make sure the backups get distinct timestamps
-
d.addCallback(lambda res: do_backup())
def _check0((rc, out, err)):
self.failUnlessReallyEqual(err, "")
# available
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
- if have_bdb:
- fu, fr, fs, dc, dr, ds = self.count_output(out)
- # foo.txt, bar.txt, blah.txt
- self.failUnlessReallyEqual(fu, 0)
- self.failUnlessReallyEqual(fr, 3)
- self.failUnlessReallyEqual(fs, 0)
- # empty, home, home/parent, home/parent/subdir
- self.failUnlessReallyEqual(dc, 0)
- self.failUnlessReallyEqual(dr, 4)
- self.failUnlessReallyEqual(ds, 0)
+ fu, fr, fs, dc, dr, ds = self.count_output(out)
+ # foo.txt, bar.txt, blah.txt
+ self.failUnlessReallyEqual(fu, 0)
+ self.failUnlessReallyEqual(fr, 3)
+ self.failUnlessReallyEqual(fs, 0)
+ # empty, home, home/parent, home/parent/subdir
+ self.failUnlessReallyEqual(dc, 0)
+ self.failUnlessReallyEqual(dr, 4)
+ self.failUnlessReallyEqual(ds, 0)
d.addCallback(_check4a)
- if have_bdb:
- # sneak into the backupdb, crank back the "last checked"
- # timestamp to force a check on all files
- def _reset_last_checked(res):
- dbfile = os.path.join(self.get_clientdir(),
- "private", "backupdb.sqlite")
- self.failUnless(os.path.exists(dbfile), dbfile)
- bdb = backupdb.get_backupdb(dbfile)
- bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
- bdb.cursor.execute("UPDATE directories SET last_checked=0")
- bdb.connection.commit()
-
- d.addCallback(_reset_last_checked)
-
- d.addCallback(self.stall, 1.1)
- d.addCallback(lambda res: do_backup(verbose=True))
- def _check4b((rc, out, err)):
- # we should check all files, and re-use all of them. None of
- # the directories should have been changed, so we should
- # re-use all of them too.
- self.failUnlessReallyEqual(err, "")
- self.failUnlessReallyEqual(rc, 0)
- fu, fr, fs, dc, dr, ds = self.count_output(out)
- fchecked, dchecked = self.count_output2(out)
- self.failUnlessReallyEqual(fchecked, 3)
- self.failUnlessReallyEqual(fu, 0)
- self.failUnlessReallyEqual(fr, 3)
- self.failUnlessReallyEqual(fs, 0)
- self.failUnlessReallyEqual(dchecked, 4)
- self.failUnlessReallyEqual(dc, 0)
- self.failUnlessReallyEqual(dr, 4)
- self.failUnlessReallyEqual(ds, 0)
- d.addCallback(_check4b)
+ # sneak into the backupdb, crank back the "last checked"
+ # timestamp to force a check on all files
+ def _reset_last_checked(res):
+ dbfile = os.path.join(self.get_clientdir(),
+ "private", "backupdb.sqlite")
+ self.failUnless(os.path.exists(dbfile), dbfile)
+ bdb = backupdb.get_backupdb(dbfile)
+ bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
+ bdb.cursor.execute("UPDATE directories SET last_checked=0")
+ bdb.connection.commit()
+
+ d.addCallback(_reset_last_checked)
+
+ d.addCallback(self.stall, 1.1)
+ d.addCallback(lambda res: do_backup(verbose=True))
+ def _check4b((rc, out, err)):
+ # we should check all files, and re-use all of them. None of
+ # the directories should have been changed, so we should
+ # re-use all of them too.
+ self.failUnlessReallyEqual(err, "")
+ self.failUnlessReallyEqual(rc, 0)
+ fu, fr, fs, dc, dr, ds = self.count_output(out)
+ fchecked, dchecked = self.count_output2(out)
+ self.failUnlessReallyEqual(fchecked, 3)
+ self.failUnlessReallyEqual(fu, 0)
+ self.failUnlessReallyEqual(fr, 3)
+ self.failUnlessReallyEqual(fs, 0)
+ self.failUnlessReallyEqual(dchecked, 4)
+ self.failUnlessReallyEqual(dc, 0)
+ self.failUnlessReallyEqual(dr, 4)
+ self.failUnlessReallyEqual(ds, 0)
+ d.addCallback(_check4b)
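Zeroing last_checked works because the backupdb decides whether to re-check an entry from the age of its last check, so a zero timestamp makes every file and directory look overdue. Conceptually it behaves like the sketch below (names and constants are illustrative, not the actual backupdb.py internals):

    import time, random

    NO_CHECK_BEFORE = 30 * 24 * 60 * 60     # illustrative: fresh entries are trusted
    ALWAYS_CHECK_AFTER = 60 * 24 * 60 * 60  # illustrative: stale entries always re-checked

    def should_check(last_checked):
        age = time.time() - last_checked
        if age < NO_CHECK_BEFORE:
            return False
        if age > ALWAYS_CHECK_AFTER:
            return True
        # in between, re-check with probability growing linearly with age
        fraction = (age - NO_CHECK_BEFORE) / float(ALWAYS_CHECK_AFTER - NO_CHECK_BEFORE)
        return random.random() < fraction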
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
def _check5((rc, out, err)):
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.new_archives = out.split()
- expected_new = 2
- if have_bdb:
- expected_new += 1
- self.failUnlessReallyEqual(len(self.new_archives), expected_new, out)
+ self.failUnlessReallyEqual(len(self.new_archives), 3, out)
# the original backup should still be the oldest (i.e. sorts
# alphabetically towards the beginning)
self.failUnlessReallyEqual(sorted(self.new_archives)[0],
# and upload the rest. None of the directories can be reused.
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
- if have_bdb:
- fu, fr, fs, dc, dr, ds = self.count_output(out)
- # new foo.txt, surprise file, subfile, empty
- self.failUnlessReallyEqual(fu, 4)
- # old bar.txt
- self.failUnlessReallyEqual(fr, 1)
- self.failUnlessReallyEqual(fs, 0)
- # home, parent, subdir, blah.txt, surprisedir
- self.failUnlessReallyEqual(dc, 5)
- self.failUnlessReallyEqual(dr, 0)
- self.failUnlessReallyEqual(ds, 0)
+ fu, fr, fs, dc, dr, ds = self.count_output(out)
+ # new foo.txt, surprise file, subfile, empty
+ self.failUnlessReallyEqual(fu, 4)
+ # old bar.txt
+ self.failUnlessReallyEqual(fr, 1)
+ self.failUnlessReallyEqual(fs, 0)
+ # home, parent, subdir, blah.txt, surprisedir
+ self.failUnlessReallyEqual(dc, 5)
+ self.failUnlessReallyEqual(dr, 0)
+ self.failUnlessReallyEqual(ds, 0)
d.addCallback(_check5a)
d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
def _check6((rc, out, err)):
self.failUnlessReallyEqual(err, "")
self.failUnlessReallyEqual(rc, 0)
self.new_archives = out.split()
- expected_new = 3
- if have_bdb:
- expected_new += 1
- self.failUnlessReallyEqual(len(self.new_archives), expected_new)
+ self.failUnlessReallyEqual(len(self.new_archives), 4)
self.failUnlessReallyEqual(sorted(self.new_archives)[0],
self.old_archives[0])
d.addCallback(_check6)