git.rkrishnan.org Git - tahoe-lafs/tahoe-lafs.git/commitdiff
Simplifications resulting from requiring Python 2.5 and therefore being able to use...
author     david-sarah <david-sarah@jacaranda.org>
           Wed, 16 May 2012 02:47:25 +0000 (02:47 +0000)
committer  david-sarah <david-sarah@jacaranda.org>
           Wed, 16 May 2012 02:47:25 +0000 (02:47 +0000)
src/allmydata/__init__.py
src/allmydata/_auto_deps.py
src/allmydata/scripts/backupdb.py
src/allmydata/test/test_backupdb.py
src/allmydata/test/test_cli.py
src/allmydata/test/test_version.py

diff --git a/src/allmydata/__init__.py b/src/allmydata/__init__.py
index 6356bacda17da5b5ccfa1ae8906a15404962af43..083567b3ec061234586b2aada6e13498d4b57284 100644
@@ -195,15 +195,11 @@ def get_package_versions_and_locations():
                 trace_info = (etype, str(emsg), ([None] + traceback.extract_tb(etrace))[-1])
                 packages.append( (pkgname, (None, None, trace_info)) )
             else:
-                if 'sqlite' in pkgname:
-                    packages.append( (pkgname, (get_version(module, 'version'), package_dir(module.__file__),
-                                               'sqlite %s' % (get_version(module, 'sqlite_version'),))) )
-                else:
-                    comment = None
-                    if pkgname == 'setuptools' and hasattr(module, '_distribute'):
-                        # distribute does not report its version in any module variables
-                        comment = 'distribute'
-                    packages.append( (pkgname, (get_version(module, '__version__'), package_dir(module.__file__), comment)) )
+                comment = None
+                if pkgname == 'setuptools' and hasattr(module, '_distribute'):
+                    # distribute does not report its version in any module variables
+                    comment = 'distribute'
+                packages.append( (pkgname, (get_version(module, '__version__'), package_dir(module.__file__), comment)) )
         elif pkgname == 'python':
             packages.append( (pkgname, (platform.python_version(), sys.executable, None)) )
         elif pkgname == 'platform':
@@ -278,7 +274,7 @@ def cross_check(pkg_resources_vers_and_locs, imported_vers_and_locs_list):
     """This function returns a list of errors due to any failed cross-checks."""
 
     errors = []
-    not_pkg_resourceable = set(['sqlite3', 'python', 'platform', __appname__.lower()])
+    not_pkg_resourceable = set(['python', 'platform', __appname__.lower()])
     not_import_versionable = set(['zope.interface', 'mock', 'pyasn1'])
     ignorable = set(['argparse', 'pyutil', 'zbase32', 'distribute', 'twisted-web', 'twisted-core'])
 
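Note: with the Python 2.5 floor, sqlite3 always comes from the standard library, so the version-reporting code above no longer needs a branch that special-cases it; every imported package now goes through the same __version__/package_dir path. For reference, the extra detail the removed branch reported is still available straight from the stdlib module; a minimal sketch (Python 2, to match the surrounding code), not part of the commit:

    import sqlite3

    # The stdlib module carries two version strings, which is what the
    # removed "'sqlite %s'" comment used to surface:
    print sqlite3.version          # version of the Python sqlite3 binding
    print sqlite3.sqlite_version   # version of the underlying SQLite library
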
diff --git a/src/allmydata/_auto_deps.py b/src/allmydata/_auto_deps.py
index 5bb2c0c282a20f2445e66b6a2ff87e8b069fef9b..3b602e1a69e3cbe8a7d4bc89ece7796af1d5e2b2 100644
@@ -80,17 +80,6 @@ package_imports = [
 def require_more():
     import sys
 
-    # Sqlite comes built into Python >= 2.5, and is provided by the "pysqlite"
-    # distribution for Python 2.4.
-    try:
-        import sqlite3
-        sqlite3 # hush pyflakes
-        package_imports.append(('sqlite3', 'sqlite3'))
-    except ImportError:
-        # pysqlite v2.0.5 was shipped in Ubuntu 6.06 LTS "dapper" and Nexenta NCP 1.
-        install_requires.append("pysqlite >= 2.0.5")
-        package_imports.append(('pysqlite', 'pysqlite2.dbapi2'))
-
     # Don't try to get the version number of setuptools in frozen builds, because
     # that triggers 'site' processing that causes failures. Note that frozen
     # builds still (unfortunately) import pkg_resources in .tac files, so the
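Note: the block removed above was the Python 2.4 fallback to the separately-distributed pysqlite; with Python >= 2.5 required, sqlite3 can be imported unconditionally and no install-time requirement is needed for it. A hypothetical guard of the kind that would make that assumption explicit (not taken from the tahoe-lafs source):

    import sys

    # Hypothetical: with the pysqlite fallback gone, the only sqlite-related
    # precondition left is the interpreter version itself.
    if sys.version_info < (2, 5):
        raise ImportError("Python 2.5 or later is required (found %s)"
                          % (sys.version.split()[0],))
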
diff --git a/src/allmydata/scripts/backupdb.py b/src/allmydata/scripts/backupdb.py
index 817bd0be68433559286115212a8daaa202a9d3e9..75ee0d9ce0c670c4cc5898b09ea98858a20b629c 100644
@@ -1,11 +1,6 @@
 
-# the backupdb is only available if sqlite3 is available. Python-2.5.x and
-# beyond include sqlite3 in the standard library. For python-2.4, the
-# "pysqlite2" "package" (or "module") (which, despite the confusing name, uses
-# sqlite3, and which, confusingly, comes in the "pysqlite" "distribution" (or
-# "package")) must be installed. On debian, install python-pysqlite2
-
 import os.path, sys, time, random, stat
+
 from allmydata.util.netstring import netstring
 from allmydata.util.hashutil import backupdb_dirhash
 from allmydata.util import base32
@@ -68,19 +63,12 @@ def get_backupdb(dbfile, stderr=sys.stderr,
                  create_version=(SCHEMA_v2, 2), just_create=False):
     # open or create the given backupdb file. The parent directory must
     # exist.
-    try:
-        import sqlite3
-        sqlite = sqlite3 # pyflakes whines about 'import sqlite3 as sqlite' ..
-    except ImportError:
-        from pysqlite2 import dbapi2
-        sqlite = dbapi2 # .. when this clause does it too
-        # This import should never fail, because setuptools requires that the
-        # "pysqlite" distribution is present at start time (if on Python < 2.5).
+    import sqlite3
 
     must_create = not os.path.exists(dbfile)
     try:
-        db = sqlite.connect(dbfile)
-    except (EnvironmentError, sqlite.OperationalError), e:
+        db = sqlite3.connect(dbfile)
+    except (EnvironmentError, sqlite3.OperationalError), e:
         print >>stderr, "Unable to create/open backupdb file %s: %s" % (dbfile, e)
         return None
 
@@ -94,7 +82,7 @@ def get_backupdb(dbfile, stderr=sys.stderr,
     try:
         c.execute("SELECT version FROM version")
         version = c.fetchone()[0]
-    except sqlite.DatabaseError, e:
+    except sqlite3.DatabaseError, e:
         # this indicates that the file is not a compatible database format.
         # Perhaps it was created with an old version, or it might be junk.
         print >>stderr, "backupdb file is unusable: %s" % e
@@ -108,7 +96,7 @@ def get_backupdb(dbfile, stderr=sys.stderr,
         db.commit()
         version = 2
     if version == 2:
-        return BackupDB_v2(sqlite, db)
+        return BackupDB_v2(sqlite3, db)
     print >>stderr, "Unable to handle backupdb version %s" % version
     return None
 
@@ -263,7 +251,7 @@ class BackupDB_v2:
             c.execute("INSERT INTO caps (filecap) VALUES (?)", (filecap,))
         except (self.sqlite_module.IntegrityError, self.sqlite_module.OperationalError):
             # sqlite3 on sid gives IntegrityError
-            # pysqlite2 on dapper gives OperationalError
+            # pysqlite2 (which we don't use, so maybe no longer relevant) on dapper gives OperationalError
             pass
         c.execute("SELECT fileid FROM caps WHERE filecap=?", (filecap,))
         foundrow = c.fetchone()
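Note: after this change, get_backupdb() imports sqlite3 unconditionally and uses its exception classes directly, instead of binding either sqlite3 or pysqlite2.dbapi2 to a local alias. A condensed, self-contained sketch of that open-or-create pattern (stand-in schema and messages, not the real backupdb code):

    import os.path, sys, sqlite3

    def open_versioned_db(dbfile, stderr=sys.stderr):
        must_create = not os.path.exists(dbfile)
        try:
            db = sqlite3.connect(dbfile)
        except (EnvironmentError, sqlite3.OperationalError), e:
            print >>stderr, "Unable to create/open %s: %s" % (dbfile, e)
            return None
        c = db.cursor()
        if must_create:
            c.execute("CREATE TABLE version (version INTEGER)")
            c.execute("INSERT INTO version (version) VALUES (1)")
            db.commit()
        try:
            c.execute("SELECT version FROM version")
            version = c.fetchone()[0]
        except sqlite3.DatabaseError, e:
            # not a database we understand (e.g. junk or a foreign format)
            print >>stderr, "db file is unusable: %s" % e
            return None
        return db, version
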
diff --git a/src/allmydata/test/test_backupdb.py b/src/allmydata/test/test_backupdb.py
index 6cd4ffa2ee1d91a58086d93ceb763503fef313b8..835e2531563fe00e32d0dbce16588463a9af3eef 100644
@@ -9,12 +9,10 @@ from allmydata.util.assertutil import precondition
 from allmydata.scripts import backupdb
 
 class BackupDB(unittest.TestCase):
-    def create_or_skip(self, dbfile):
+    def create(self, dbfile):
         stderr = StringIO()
         bdb = backupdb.get_backupdb(dbfile, stderr=stderr)
-        if not bdb:
-            if "I was unable to import a python sqlite library" in stderr.getvalue():
-                raise unittest.SkipTest("sqlite unavailable, skipping test")
+        self.failUnless(bdb, "unable to create backupdb from %r" % (dbfile,))
         return bdb
 
     def skip_if_cannot_represent_filename(self, u):
@@ -31,8 +29,7 @@ class BackupDB(unittest.TestCase):
         self.basedir = basedir = os.path.join("backupdb", "create")
         fileutil.make_dirs(basedir)
         dbfile = os.path.join(basedir, "dbfile")
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)
         self.failUnlessEqual(bdb.VERSION, 2)
 
     def test_upgrade_v1_v2(self):
@@ -43,13 +40,9 @@ class BackupDB(unittest.TestCase):
         created = backupdb.get_backupdb(dbfile, stderr=stderr,
                                         create_version=(backupdb.SCHEMA_v1, 1),
                                         just_create=True)
-        if not created:
-            if "I was unable to import a python sqlite library" in stderr.getvalue():
-                raise unittest.SkipTest("sqlite unavailable, skipping test")
-            self.fail("unable to create v1 backupdb")
+        self.failUnless(created, "unable to create v1 backupdb")
         # now we should have a v1 database on disk
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)
         self.failUnlessEqual(bdb.VERSION, 2)
 
     def test_fail(self):
@@ -65,12 +58,8 @@ class BackupDB(unittest.TestCase):
                                     stderr_f)
         self.failUnlessEqual(bdb, None)
         stderr = stderr_f.getvalue()
-        if "I was unable to import a python sqlite library" in stderr:
-            pass
-        else:
-            self.failUnless("backupdb file is unusable" in stderr, stderr)
-            self.failUnless("file is encrypted or is not a database" in stderr,
-                            stderr)
+        self.failUnlessIn("backupdb file is unusable", stderr)
+        self.failUnlessIn("file is encrypted or is not a database", stderr)
 
         # put a directory in the way, to exercise a different error path
         where = os.path.join(basedir, "roadblock-dir")
@@ -79,12 +68,8 @@ class BackupDB(unittest.TestCase):
         bdb = backupdb.get_backupdb(where, stderr_f)
         self.failUnlessEqual(bdb, None)
         stderr = stderr_f.getvalue()
-        if "I was unable to import a python sqlite library" in stderr:
-            pass
-        else:
-            self.failUnless(("Unable to create/open backupdb file %s" % where)
-                            in stderr, stderr)
-            self.failUnless("unable to open database file" in stderr, stderr)
+        self.failUnlessIn("Unable to create/open backupdb file %s" % (where,), stderr)
+        self.failUnlessIn("unable to open database file", stderr)
 
 
     def writeto(self, filename, data):
@@ -98,8 +83,7 @@ class BackupDB(unittest.TestCase):
         self.basedir = basedir = os.path.join("backupdb", "check")
         fileutil.make_dirs(basedir)
         dbfile = os.path.join(basedir, "dbfile")
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)
 
         foo_fn = self.writeto("foo.txt", "foo.txt")
         blah_fn = self.writeto("bar/blah.txt", "blah.txt")
@@ -164,7 +148,7 @@ class BackupDB(unittest.TestCase):
         fileutil.make_dirs(basedir)
 
         where = os.path.join(basedir, "tooold.db")
-        bdb = self.create_or_skip(where)
+        bdb = self.create(where)
         # reach into the DB and make it old
         bdb.cursor.execute("UPDATE version SET version=0")
         bdb.connection.commit()
@@ -182,8 +166,7 @@ class BackupDB(unittest.TestCase):
         self.basedir = basedir = os.path.join("backupdb", "directory")
         fileutil.make_dirs(basedir)
         dbfile = os.path.join(basedir, "dbfile")
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)
 
         contents = {u"file1": "URI:CHK:blah1",
                     u"file2": "URI:CHK:blah2",
@@ -245,8 +228,7 @@ class BackupDB(unittest.TestCase):
         self.basedir = basedir = os.path.join("backupdb", "unicode")
         fileutil.make_dirs(basedir)
         dbfile = os.path.join(basedir, "dbfile")
-        bdb = self.create_or_skip(dbfile)
-        self.failUnless(bdb)
+        bdb = self.create(dbfile)
 
         self.writeto(u"f\u00f6\u00f6.txt", "foo.txt")
         files = [fn for fn in listdir_unicode(unicode(basedir)) if fn.endswith(".txt")]
diff --git a/src/allmydata/test/test_cli.py b/src/allmydata/test/test_cli.py
index 485414b8cb192756889e4c0a6d2daaa5fea7ab74..5aa58786fba4331cb02ac3805568b1164ef7eb53 100644
@@ -2461,8 +2461,9 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
         # is the backupdb available? If so, we test that a second backup does
         # not create new directories.
         hush = StringIO()
-        have_bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
-                                         hush)
+        bdb = backupdb.get_backupdb(os.path.join(self.basedir, "dbtest"),
+                                    hush)
+        self.failUnless(bdb)
 
         # create a small local directory with a couple of files
         source = os.path.join(self.basedir, "home")
@@ -2481,13 +2482,6 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
 
         d = self.do_cli("create-alias", "tahoe")
 
-        if not have_bdb:
-            d.addCallback(lambda res: self.do_cli("backup", source, "tahoe:backups"))
-            def _should_complain((rc, out, err)):
-                self.failUnless("I was unable to import a python sqlite library" in err, err)
-            d.addCallback(_should_complain)
-            d.addCallback(self.stall, 1.1) # make sure the backups get distinct timestamps
-
         d.addCallback(lambda res: do_backup())
         def _check0((rc, out, err)):
             self.failUnlessReallyEqual(err, "")
@@ -2548,61 +2542,56 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
             # available
             self.failUnlessReallyEqual(err, "")
             self.failUnlessReallyEqual(rc, 0)
-            if have_bdb:
-                fu, fr, fs, dc, dr, ds = self.count_output(out)
-                # foo.txt, bar.txt, blah.txt
-                self.failUnlessReallyEqual(fu, 0)
-                self.failUnlessReallyEqual(fr, 3)
-                self.failUnlessReallyEqual(fs, 0)
-                # empty, home, home/parent, home/parent/subdir
-                self.failUnlessReallyEqual(dc, 0)
-                self.failUnlessReallyEqual(dr, 4)
-                self.failUnlessReallyEqual(ds, 0)
+            fu, fr, fs, dc, dr, ds = self.count_output(out)
+            # foo.txt, bar.txt, blah.txt
+            self.failUnlessReallyEqual(fu, 0)
+            self.failUnlessReallyEqual(fr, 3)
+            self.failUnlessReallyEqual(fs, 0)
+            # empty, home, home/parent, home/parent/subdir
+            self.failUnlessReallyEqual(dc, 0)
+            self.failUnlessReallyEqual(dr, 4)
+            self.failUnlessReallyEqual(ds, 0)
         d.addCallback(_check4a)
 
-        if have_bdb:
-            # sneak into the backupdb, crank back the "last checked"
-            # timestamp to force a check on all files
-            def _reset_last_checked(res):
-                dbfile = os.path.join(self.get_clientdir(),
-                                      "private", "backupdb.sqlite")
-                self.failUnless(os.path.exists(dbfile), dbfile)
-                bdb = backupdb.get_backupdb(dbfile)
-                bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
-                bdb.cursor.execute("UPDATE directories SET last_checked=0")
-                bdb.connection.commit()
-
-            d.addCallback(_reset_last_checked)
-
-            d.addCallback(self.stall, 1.1)
-            d.addCallback(lambda res: do_backup(verbose=True))
-            def _check4b((rc, out, err)):
-                # we should check all files, and re-use all of them. None of
-                # the directories should have been changed, so we should
-                # re-use all of them too.
-                self.failUnlessReallyEqual(err, "")
-                self.failUnlessReallyEqual(rc, 0)
-                fu, fr, fs, dc, dr, ds = self.count_output(out)
-                fchecked, dchecked = self.count_output2(out)
-                self.failUnlessReallyEqual(fchecked, 3)
-                self.failUnlessReallyEqual(fu, 0)
-                self.failUnlessReallyEqual(fr, 3)
-                self.failUnlessReallyEqual(fs, 0)
-                self.failUnlessReallyEqual(dchecked, 4)
-                self.failUnlessReallyEqual(dc, 0)
-                self.failUnlessReallyEqual(dr, 4)
-                self.failUnlessReallyEqual(ds, 0)
-            d.addCallback(_check4b)
+        # sneak into the backupdb, crank back the "last checked"
+        # timestamp to force a check on all files
+        def _reset_last_checked(res):
+            dbfile = os.path.join(self.get_clientdir(),
+                                  "private", "backupdb.sqlite")
+            self.failUnless(os.path.exists(dbfile), dbfile)
+            bdb = backupdb.get_backupdb(dbfile)
+            bdb.cursor.execute("UPDATE last_upload SET last_checked=0")
+            bdb.cursor.execute("UPDATE directories SET last_checked=0")
+            bdb.connection.commit()
+
+        d.addCallback(_reset_last_checked)
+
+        d.addCallback(self.stall, 1.1)
+        d.addCallback(lambda res: do_backup(verbose=True))
+        def _check4b((rc, out, err)):
+            # we should check all files, and re-use all of them. None of
+            # the directories should have been changed, so we should
+            # re-use all of them too.
+            self.failUnlessReallyEqual(err, "")
+            self.failUnlessReallyEqual(rc, 0)
+            fu, fr, fs, dc, dr, ds = self.count_output(out)
+            fchecked, dchecked = self.count_output2(out)
+            self.failUnlessReallyEqual(fchecked, 3)
+            self.failUnlessReallyEqual(fu, 0)
+            self.failUnlessReallyEqual(fr, 3)
+            self.failUnlessReallyEqual(fs, 0)
+            self.failUnlessReallyEqual(dchecked, 4)
+            self.failUnlessReallyEqual(dc, 0)
+            self.failUnlessReallyEqual(dr, 4)
+            self.failUnlessReallyEqual(ds, 0)
+        d.addCallback(_check4b)
 
         d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
         def _check5((rc, out, err)):
             self.failUnlessReallyEqual(err, "")
             self.failUnlessReallyEqual(rc, 0)
             self.new_archives = out.split()
-            expected_new = 2
-            if have_bdb:
-                expected_new += 1
-            self.failUnlessReallyEqual(len(self.new_archives), expected_new, out)
+            self.failUnlessReallyEqual(len(self.new_archives), 3, out)
             # the original backup should still be the oldest (i.e. sorts
             # alphabetically towards the beginning)
             self.failUnlessReallyEqual(sorted(self.new_archives)[0],
@@ -2627,27 +2616,23 @@ class Backup(GridTestMixin, CLITestMixin, StallMixin, unittest.TestCase):
             # and upload the rest. None of the directories can be reused.
             self.failUnlessReallyEqual(err, "")
             self.failUnlessReallyEqual(rc, 0)
-            if have_bdb:
-                fu, fr, fs, dc, dr, ds = self.count_output(out)
-                # new foo.txt, surprise file, subfile, empty
-                self.failUnlessReallyEqual(fu, 4)
-                # old bar.txt
-                self.failUnlessReallyEqual(fr, 1)
-                self.failUnlessReallyEqual(fs, 0)
-                # home, parent, subdir, blah.txt, surprisedir
-                self.failUnlessReallyEqual(dc, 5)
-                self.failUnlessReallyEqual(dr, 0)
-                self.failUnlessReallyEqual(ds, 0)
+            fu, fr, fs, dc, dr, ds = self.count_output(out)
+            # new foo.txt, surprise file, subfile, empty
+            self.failUnlessReallyEqual(fu, 4)
+            # old bar.txt
+            self.failUnlessReallyEqual(fr, 1)
+            self.failUnlessReallyEqual(fs, 0)
+            # home, parent, subdir, blah.txt, surprisedir
+            self.failUnlessReallyEqual(dc, 5)
+            self.failUnlessReallyEqual(dr, 0)
+            self.failUnlessReallyEqual(ds, 0)
         d.addCallback(_check5a)
         d.addCallback(lambda res: self.do_cli("ls", "tahoe:backups/Archives"))
         def _check6((rc, out, err)):
             self.failUnlessReallyEqual(err, "")
             self.failUnlessReallyEqual(rc, 0)
             self.new_archives = out.split()
-            expected_new = 3
-            if have_bdb:
-                expected_new += 1
-            self.failUnlessReallyEqual(len(self.new_archives), expected_new)
+            self.failUnlessReallyEqual(len(self.new_archives), 4)
             self.failUnlessReallyEqual(sorted(self.new_archives)[0],
                                  self.old_archives[0])
         d.addCallback(_check6)
diff --git a/src/allmydata/test/test_version.py b/src/allmydata/test/test_version.py
index 836e5965e5f97ed0dc35b53f91b744cca29d1cbf..296db0624c2f34f9adc666c716bad66747f68088 100644
@@ -58,7 +58,7 @@ class CheckRequirement(unittest.TestCase):
         res = cross_check({}, [])
         self.failUnlessEqual(res, [])
 
-        res = cross_check({}, [("sqlite3", ("1.0", "", "blah"))])
+        res = cross_check({}, [("allmydata-tahoe", ("1.0", "", "blah"))])
         self.failUnlessEqual(res, [])
 
         res = cross_check({"foo": ("unparseable", "")}, [])
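Note: the version test changes because 'sqlite3' was dropped from cross_check()'s not_pkg_resourceable set (see the first hunk above); to keep exercising the "name is in the skip set, so no errors" path, the test now uses the application's own name, which remains in the set via __appname__.lower(). A simplified stand-in, not the real cross_check(), illustrating that path:

    # Simplified sketch only; the real function also handles version and
    # location mismatches, ignorable packages, setuptools/distribute, etc.
    def cross_check_sketch(pkg_resources_vers_and_locs, imported_vers_and_locs_list):
        not_pkg_resourceable = set(['python', 'platform', 'allmydata-tahoe'])
        errors = []
        for name, (imp_ver, imp_loc, _) in imported_vers_and_locs_list:
            if name in not_pkg_resourceable:
                continue
            if name not in pkg_resources_vers_and_locs:
                errors.append("%r (version %r from %r) not known to pkg_resources"
                              % (name, imp_ver, imp_loc))
        return errors

    print cross_check_sketch({}, [("allmydata-tahoe", ("1.0", "", "blah"))])  # []
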