Example #1
    def test06_Transactions(self):
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test06_Transactions..." % self.__class__.__name__

        assert d.get('new rec', txn=self.txn) == None
        d.put('new rec', 'this is a new record', self.txn)
        assert d.get('new rec', txn=self.txn) == 'this is a new record'
        self.txn.abort()
        assert d.get('new rec') == None

        self.txn = self.env.txn_begin()

        assert d.get('new rec', txn=self.txn) == None
        d.put('new rec', 'this is a new record', self.txn)
        assert d.get('new rec', txn=self.txn) == 'this is a new record'
        self.txn.commit()
        assert d.get('new rec') == 'this is a new record'

        self.txn = self.env.txn_begin()
        c = d.cursor(self.txn)
        rec = c.first()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            rec = c.next()
        assert count == self._numKeys + 1

        c.close()  # Cursors *MUST* be closed before commit!
        self.txn.commit()

        # flush pending updates
        try:
            self.env.txn_checkpoint(0, 0, 0)
        except db.DBIncompleteError:
            pass

        if db.version() >= (4, 0):
            statDict = self.env.log_stat(0)
            assert statDict.has_key('magic')
            assert statDict.has_key('version')
            assert statDict.has_key('cur_file')
            assert statDict.has_key('region_nowait')

        # must have at least one log file present:
        logs = self.env.log_archive(db.DB_ARCH_ABS | db.DB_ARCH_LOG)
        assert logs != None
        for log in logs:
            if verbose:
                print 'log file: ' + log
        if db.version() >= (4, 2):
            logs = self.env.log_archive(db.DB_ARCH_REMOVE)
            assert not logs

        self.txn = self.env.txn_begin()
Example #2
    def test06_Transactions(self):
        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test06_Transactions..." % self.__class__.__name__

        assert d.get('new rec', txn=self.txn) == None
        d.put('new rec', 'this is a new record', self.txn)
        assert d.get('new rec', txn=self.txn) == 'this is a new record'
        self.txn.abort()
        assert d.get('new rec') == None

        self.txn = self.env.txn_begin()

        assert d.get('new rec', txn=self.txn) == None
        d.put('new rec', 'this is a new record', self.txn)
        assert d.get('new rec', txn=self.txn) == 'this is a new record'
        self.txn.commit()
        assert d.get('new rec') == 'this is a new record'

        self.txn = self.env.txn_begin()
        c = d.cursor(self.txn)
        rec = c.first()
        count = 0
        while rec is not None:
            count = count + 1
            if verbose and count % 100 == 0:
                print rec
            rec = c.next()
        assert count == self._numKeys+1

        c.close()                # Cursors *MUST* be closed before commit!
        self.txn.commit()

        # flush pending updates
        try:
            self.env.txn_checkpoint (0, 0, 0)
        except db.DBIncompleteError:
            pass

        if db.version() >= (4,0):
            statDict = self.env.log_stat(0);
            assert statDict.has_key('magic')
            assert statDict.has_key('version')
            assert statDict.has_key('cur_file')
            assert statDict.has_key('region_nowait')

        # must have at least one log file present:
        logs = self.env.log_archive(db.DB_ARCH_ABS | db.DB_ARCH_LOG)
        assert logs != None
        for log in logs:
            if verbose:
                print 'log file: ' + log
        if db.version() >= (4,2):
            logs = self.env.log_archive(db.DB_ARCH_REMOVE)
            assert not logs

        self.txn = self.env.txn_begin()
Example #3
def print_versions():
    print
    print '-=' * 38
    print db.DB_VERSION_STRING
    print 'bsddb.db.version():   %s' % (db.version(),)
    if db.version() >= (5, 0):
        print 'bsddb.db.full_version(): %s' % repr(db.full_version())
    print 'bsddb.db.__version__: %s' % db.__version__
    print 'bsddb.db.cvsid:       %s' % db.cvsid
    suffix = '__'
    print 'py module:            %s' % getattr(bsddb, '__file' + suffix)
    print 'extension module:     %s' % getattr(bsddb, '__file' + suffix)
    print 'python version:       %s' % sys.version
    print 'My pid:               %s' % os.getpid()
    print '-=' * 38
Example #4
def print_versions():
    print
    print '-=' * 38
    print db.DB_VERSION_STRING
    print 'bsddb.db.version():   %s' % (db.version(), )
    if db.version() >= (5, 0):
        print 'bsddb.db.full_version(): %s' % repr(db.full_version())
    print 'bsddb.db.__version__: %s' % db.__version__
    print 'bsddb.db.cvsid:       %s' % db.cvsid
    suffix = '__'
    print 'py module:            %s' % getattr(bsddb, '__file' + suffix)
    print 'extension module:     %s' % getattr(bsddb, '__file' + suffix)
    print 'python version:       %s' % sys.version
    print 'My pid:               %s' % os.getpid()
    print '-=' * 38
Example #5
    def get_dbdir_summary(self, dirpath, name):
        """
        Returns (people_count, bsddb_version, schema_version) of
        current DB.
        Returns ("Unknown", "Unknown", "Unknown") if invalid DB or other error.
        """
        if config.get('preferences.use-bsddb3') or sys.version_info[0] >= 3:
            from bsddb3 import dbshelve, db
        else:
            from bsddb import dbshelve, db

        from gramps.gen.db import META, PERSON_TBL
        from  gramps.gen.db.dbconst import BDBVERSFN

        bdbversion_file = os.path.join(dirpath, BDBVERSFN)
        if os.path.isfile(bdbversion_file):
            vers_file = open(bdbversion_file)
            bsddb_version = vers_file.readline().strip()
        else:
            return "Unknown", "Unknown", "Unknown"
        
        current_bsddb_version = str(db.version())
        if bsddb_version != current_bsddb_version:
            return "Unknown", bsddb_version, "Unknown"
        
        env = db.DBEnv()
        flags = db.DB_CREATE | db.DB_PRIVATE |\
            db.DB_INIT_MPOOL |\
            db.DB_INIT_LOG | db.DB_INIT_TXN
        try:
            env.open(dirpath, flags)
        except Exception as msg:
            LOG.warning("Error opening db environment for '%s': %s" %
                        (name, str(msg)))
            try:
                env.close()
            except Exception as msg:
                LOG.warning("Error closing db environment for '%s': %s" %
                        (name, str(msg)))
            return "Unknown", bsddb_version, "Unknown"
        dbmap1 = dbshelve.DBShelf(env)
        fname = os.path.join(dirpath, META + ".db")
        try:
            dbmap1.open(fname, META, db.DB_HASH, db.DB_RDONLY)
        except:
            env.close()
            return "Unknown", bsddb_version, "Unknown"
        schema_version = dbmap1.get(b'version', default=None)
        dbmap1.close()
        dbmap2 = dbshelve.DBShelf(env)
        fname = os.path.join(dirpath, PERSON_TBL + ".db")
        try:
            dbmap2.open(fname, PERSON_TBL, db.DB_HASH, db.DB_RDONLY)
        except:
            env.close()
            return "Unknown", bsddb_version, schema_version
        count = len(dbmap2)
        dbmap2.close()
        env.close()
        return (count, bsddb_version, schema_version)
Example #6
File: bdb.py Project: juve/mule
	def __init__(self, path, name, duplicates=False):
		self.path = path
		self.dbpath = os.path.join(self.path, name)
		
		if not os.path.isdir(self.path):
			os.makedirs(self.path)
		
		self.env = bdb.DBEnv()
		self.env.set_tx_max(self.max_txns)
		self.env.set_lk_max_lockers(self.max_txns*2)
		self.env.set_lk_max_locks(self.max_txns*2)
		self.env.set_lk_max_objects(self.max_txns*2)
		self.env.set_lk_detect(bdb.DB_LOCK_DEFAULT)
		self.env.set_flags(bdb.DB_TXN_NOSYNC, True)
		if hasattr(self.env, "log_set_config"):
			self.env.log_set_config(bdb.DB_LOG_AUTO_REMOVE, True)
		self.env.open(self.path, bdb.DB_CREATE | bdb.DB_INIT_LOCK | 
				bdb.DB_INIT_LOG | bdb.DB_INIT_MPOOL | bdb.DB_INIT_TXN | 
				bdb.DB_RECOVER | bdb.DB_THREAD)
		
		self.db = bdb.DB(self.env)
		if duplicates:
			self.db.set_flags(bdb.DB_DUPSORT)
		if bdb.version() > (4,1):
			txn = self.env.txn_begin()
			self.db.open(self.dbpath, name, flags=bdb.DB_CREATE|bdb.DB_THREAD,
				dbtype=bdb.DB_BTREE, txn=txn)
			txn.commit()
		else:
			self.db.open(self.dbpath, name, flags=bdb.DB_CREATE|bdb.DB_THREAD,
				dbtype=bdb.DB_BTREE)
			
		self.thread = DatabaseManagerThread(self.env)
		self.thread.start()
Example #7
    def test13_associate_in_transaction(self):
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test13_associateAutoCommit..." % \
                  self.__class__.__name__

        txn = self.env.txn_begin()
        try:
            self.createDB(txn=txn)

            self.secDB = db.DB(self.env)
            self.secDB.set_flags(db.DB_DUP)
            self.secDB.set_get_returns_none(2)
            self.secDB.open(self.filename, "secondary", db.DB_BTREE,
                       db.DB_CREATE | db.DB_THREAD, txn=txn)
            if db.version() >= (4,1):
                self.getDB().associate(self.secDB, self.getGenre, txn=txn)
            else:
                self.getDB().associate(self.secDB, self.getGenre)

            self.addDataToDB(self.getDB(), txn=txn)
        except:
            txn.abort()
            raise

        self.txn_finish_test(self.secDB, txn=txn)
Example #8
    def test13_associate_in_transaction(self):
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test13_associateAutoCommit..." % \
                  self.__class__.__name__

        txn = self.env.txn_begin()
        try:
            self.createDB(txn=txn)

            self.secDB = db.DB(self.env)
            self.secDB.set_flags(db.DB_DUP)
            self.secDB.set_get_returns_none(2)
            self.secDB.open(self.filename,
                            "secondary",
                            db.DB_BTREE,
                            db.DB_CREATE | db.DB_THREAD,
                            txn=txn)
            if db.version() >= (4, 1):
                self.getDB().associate(self.secDB, self.getGenre, txn=txn)
            else:
                self.getDB().associate(self.secDB, self.getGenre)

            self.addDataToDB(self.getDB(), txn=txn)
        except:
            txn.abort()
            raise

        self.txn_finish_test(self.secDB, txn=txn)
Example #9
class BasicWithEnvTestCase(BasicTestCase):
    dbopenflags = db.DB_THREAD
    useEnv = 1
    envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK

    #----------------------------------------

    def test07_EnvRemoveAndRename(self):
        if not self.env:
            return

        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test07_EnvRemoveAndRename..." % self.__class__.__name__

        # can't rename or remove an open DB
        self.d.close()

        newname = self.filename + '.renamed'
        self.env.dbrename(self.filename, None, newname)
        self.env.dbremove(newname)

    # dbremove and dbrename are in 4.1 and later
    if db.version() < (4, 1):
        del test07_EnvRemoveAndRename
Example #10
 def test_DB_set_flags_persists(self):
     if db.version() < (4,2):
         # The get_flags API required for this to work is only available
         # in BerkeleyDB >= 4.2
         return
     try:
         db1 = db.DB()
         db1.set_flags(db.DB_DUPSORT)
         db1.open(self.filename, db.DB_HASH, db.DB_CREATE)
         db1['a'] = 'eh'
         db1['a'] = 'A'
         self.assertEqual([('a', 'A')], db1.items())
         db1.put('a', 'Aa')
         self.assertEqual([('a', 'A'), ('a', 'Aa')], db1.items())
         db1.close()
         db1 = db.DB()
         # no set_flags call, we're testing that it reads and obeys
         # the flags on open.
         db1.open(self.filename, db.DB_HASH)
         self.assertEqual([('a', 'A'), ('a', 'Aa')], db1.items())
         # if it read the flags right this will replace all values
         # for key 'a' instead of adding a new one.  (as a dict should)
         db1['a'] = 'new A'
         self.assertEqual([('a', 'new A')], db1.items())
     finally:
         db1.close()
         os.unlink(self.filename)
Example #11
def test_suite():
    res = unittest.TestSuite()

    res.addTest(unittest.makeSuite(ComparatorTests))
    if db.version() >= (3, 3, 11):
        res.addTest(unittest.makeSuite(BtreeExceptionsTestCase))
        res.addTest(unittest.makeSuite(BtreeKeyCompareTestCase))
    return res
Example #12
def test_suite ():
    res = unittest.TestSuite ()

    res.addTest (unittest.makeSuite (ComparatorTests))
    if db.version () >= (3, 3, 11):
        res.addTest (unittest.makeSuite (BtreeExceptionsTestCase))
        res.addTest (unittest.makeSuite (BtreeKeyCompareTestCase))
    return res
Example #13
 def test00_version(self):
     info = db.version()
     if verbose:
         print "\n", "-=" * 20
         print "bsddb.db.version(): %s" % (info,)
         print db.DB_VERSION_STRING
         print "-=" * 20
     assert info == (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR, db.DB_VERSION_PATCH)
Example #14
def print_versions():
    print()
    print('-=' * 38)
    print(db.DB_VERSION_STRING)
    print('bsddb.db.version():   %s' % (db.version(), ))
    if db.version() >= (5, 0) :
        print('bsddb.db.full_version(): %s' %repr(db.full_version()))
    print('bsddb.db.__version__: %s' % db.__version__)
    print('bsddb.db.cvsid:       %s' % db.cvsid)

    # Workaround for allowing generating an EGGs as a ZIP files.
    suffix="__"
    print('py module:            %s' % getattr(bsddb, "__file"+suffix))
    print('extension module:     %s' % getattr(bsddb, "__file"+suffix))

    print('python version:       %s' % sys.version)
    print('My pid:               %s' % os.getpid())
    print('-=' * 38)
Example #15
    def get_dbdir_summary(self, dirpath, name):
        """
        Returns (people_count, bsddb_version, schema_version) of
        current DB.
        Returns ("Unknown", "Unknown", "Unknown") if invalid DB or other error.
        """
        from bsddb3 import dbshelve, db

        from gramps.gen.db import META, PERSON_TBL
        from  gramps.gen.db.dbconst import BDBVERSFN

        bdbversion_file = os.path.join(dirpath, BDBVERSFN)
        if os.path.isfile(bdbversion_file):
            vers_file = open(bdbversion_file)
            bsddb_version = vers_file.readline().strip()
        else:
            return "Unknown", "Unknown", "Unknown"
        
        current_bsddb_version = str(db.version())
        if bsddb_version != current_bsddb_version:
            return "Unknown", bsddb_version, "Unknown"
        
        env = db.DBEnv()
        flags = db.DB_CREATE | db.DB_PRIVATE |\
            db.DB_INIT_MPOOL |\
            db.DB_INIT_LOG | db.DB_INIT_TXN
        try:
            env.open(dirpath, flags)
        except Exception as msg:
            LOG.warning("Error opening db environment for '%s': %s" %
                        (name, str(msg)))
            try:
                env.close()
            except Exception as msg:
                LOG.warning("Error closing db environment for '%s': %s" %
                        (name, str(msg)))
            return "Unknown", bsddb_version, "Unknown"
        dbmap1 = dbshelve.DBShelf(env)
        fname = os.path.join(dirpath, META + ".db")
        try:
            dbmap1.open(fname, META, db.DB_HASH, db.DB_RDONLY)
        except:
            env.close()
            return "Unknown", bsddb_version, "Unknown"
        schema_version = dbmap1.get(b'version', default=None)
        dbmap1.close()
        dbmap2 = dbshelve.DBShelf(env)
        fname = os.path.join(dirpath, PERSON_TBL + ".db")
        try:
            dbmap2.open(fname, PERSON_TBL, db.DB_HASH, db.DB_RDONLY)
        except:
            env.close()
            return "Unknown", bsddb_version, schema_version
        count = len(dbmap2)
        dbmap2.close()
        env.close()
        return (count, bsddb_version, schema_version)
Example #16
 def test00_version(self):
     info = db.version()
     if verbose:
         print '\n', '-=' * 20
         print 'bsddb.db.version(): %s' % (info, )
         print db.DB_VERSION_STRING
         print '-=' * 20
     assert info == (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
                     db.DB_VERSION_PATCH)
Example #17
def print_versions():
    print
    print '-=' * 38
    print db.DB_VERSION_STRING
    print 'bsddb.db.version():   %s' % (db.version(), )
    if db.version() >= (5, 0):
        print 'bsddb.db.full_version(): %s' % repr(db.full_version())
    print 'bsddb.db.__version__: %s' % db.__version__
    print 'bsddb.db.cvsid:       %s' % db.cvsid

    # Workaround for allowing generating an EGGs as a ZIP files.
    suffix = "__"
    print 'py module:            %s' % getattr(bsddb, "__file" + suffix)
    print 'extension module:     %s' % getattr(bsddb, "__file" + suffix)

    print 'python version:       %s' % sys.version
    print 'My pid:               %s' % os.getpid()
    print '-=' * 38
Example #18
 def test00_version(self):
     info = db.version()
     if verbose:
         print '\n', '-=' * 20
         print 'bsddb.db.version(): %s' % (info, )
         print db.DB_VERSION_STRING
         print '-=' * 20
     assert info == (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
                     db.DB_VERSION_PATCH)
Example #19
def print_versions():
    print
    print '-=' * 38
    print db.DB_VERSION_STRING
    print 'bsddb.db.version():   %s' % (db.version(), )
    print 'bsddb.db.__version__: %s' % db.__version__
    print 'bsddb.db.cvsid:       %s' % db.cvsid
    print 'python version:       %s' % sys.version
    print 'My pid:               %s' % os.getpid()
    print '-=' * 38
Example #20
def print_versions():
    print
    print '-=' * 38
    print db.DB_VERSION_STRING
    print 'bsddb.db.version():   %s' % (db.version(), )
    print 'bsddb.db.__version__: %s' % db.__version__
    print 'bsddb.db.cvsid:       %s' % db.cvsid
    print 'python version:       %s' % sys.version
    print 'My pid:               %s' % os.getpid()
    print '-=' * 38
Example #21
def print_versions():
    print
    print "-=" * 38
    print db.DB_VERSION_STRING
    print "bsddb.db.version():   %s" % (db.version(),)
    print "bsddb.db.__version__: %s" % db.__version__
    print "bsddb.db.cvsid:       %s" % db.cvsid
    print "python version:       %s" % sys.version
    print "My pid:               %s" % os.getpid()
    print "-=" * 38
Example #22
 def createDB(self, txn=None):
     self.cur = None
     self.secDB = None
     self.primary = db.DB(self.env)
     self.primary.set_get_returns_none(2)
     if db.version() >= (4, 1):
         self.primary.open(self.filename, "primary", self.dbtype,
                       db.DB_CREATE | db.DB_THREAD | self.dbFlags, txn=txn)
     else:
         self.primary.open(self.filename, "primary", self.dbtype,
                       db.DB_CREATE | db.DB_THREAD | self.dbFlags)
Example #23
def print_versions():
    print
    print '-=' * 38
    print db.DB_VERSION_STRING
    print 'bsddb.db.version():   %s' % (db.version(), )
    if db.version() >= (5, 0) :
        print 'bsddb.db.full_version(): %s' %repr(db.full_version())
    print 'bsddb.db.__version__: %s' % db.__version__

    # Workaround for allowing generating an EGGs as a ZIP files.
    suffix="__"
    print 'py module:            %s' % getattr(bsddb, "__file"+suffix)
    print 'extension module:     %s' % getattr(bsddb, "__file"+suffix)

    print 'Test working dir:     %s' % get_test_path_prefix()
    import platform
    print 'python version:       %s %s' % \
            (sys.version.replace("\r", "").replace("\n", ""), \
            platform.architecture()[0])
    print 'My pid:               %s' % os.getpid()
    print '-=' * 38
Example #24
def print_versions():
    print()
    print('-=' * 38)
    print(db.DB_VERSION_STRING)
    print('bsddb.db.version():   %s' % (db.version(), ))
    print('bsddb.db.__version__: %s' % db.__version__)
    print('bsddb.db.cvsid:       %s' % db.cvsid)
    print('py module:            %s' % bsddb.__file__)
    print('extension module:     %s' % bsddb._bsddb.__file__)
    print('python version:       %s' % sys.version)
    print('My pid:               %s' % os.getpid())
    print('-=' * 38)
Example #25
def print_versions():
    print()
    print('-=' * 38)
    print(db.DB_VERSION_STRING)
    print('bsddb.db.version():   %s' % (db.version(), ))
    if db.version() >= (5, 0) :
        print('bsddb.db.full_version(): %s' %repr(db.full_version()))
    print('bsddb.db.__version__: %s' % db.__version__)

    # Workaround for allowing generating an EGGs as a ZIP files.
    suffix="__"
    print('py module:            %s' % getattr(bsddb, "__file"+suffix))
    print('extension module:     %s' % getattr(bsddb, "__file"+suffix))

    print('Test working dir:     %s' % get_test_path_prefix())
    import platform
    print('python version:       %s %s' % \
            (sys.version.replace("\r", "").replace("\n", ""), \
            platform.architecture()[0]))
    print('My pid:               %s' % os.getpid())
    print('-=' * 38)
Example #26
def print_versions():
    print()
    print('-=' * 38)
    print(db.DB_VERSION_STRING)
    print('bsddb.db.version():   %s' % (db.version(), ))
    print('bsddb.db.__version__: %s' % db.__version__)
    print('bsddb.db.cvsid:       %s' % db.cvsid)
    print('py module:            %s' % bsddb.__file__)
    print('extension module:     %s' % bsddb._bsddb.__file__)
    print('python version:       %s' % sys.version)
    print('My pid:               %s' % os.getpid())
    print('-=' * 38)
Example #27
    def __init__(self):
        if db.version() < (4,1):
            return
        self.env = db.DBEnv()
        self.env.open(env_name,
                      db.DB_CREATE | db.DB_INIT_TXN | db.DB_INIT_MPOOL)
        self.the_txn = self.env.txn_begin()

        self.map = db.DB(self.env)
        self.map.open('xxx.db', "p",
                      db.DB_HASH, db.DB_CREATE, 0666, txn=self.the_txn)
        del self.env
        del self.the_txn
Example #28
def test_suite():
    suite = unittest.TestSuite()

    if db.version() >= (3, 3, 11):
        suite.addTest(unittest.makeSuite(AssociateErrorTestCase))

        suite.addTest(unittest.makeSuite(AssociateHashTestCase))
        suite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
        suite.addTest(unittest.makeSuite(AssociateRecnoTestCase))

        if db.version() >= (4, 1):
            suite.addTest(unittest.makeSuite(AssociateBTreeTxnTestCase))

        suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
        suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
        suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))

        if have_threads:
            suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
            suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
            suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))

    return suite
Example #29
 def createDB(self, txn=None):
     self.cur = None
     self.secDB = None
     self.primary = db.DB(self.env)
     self.primary.set_get_returns_none(2)
     if db.version() >= (4, 1):
         self.primary.open(self.filename,
                           "primary",
                           self.dbtype,
                           db.DB_CREATE | db.DB_THREAD | self.dbFlags,
                           txn=txn)
     else:
         self.primary.open(self.filename, "primary", self.dbtype,
                           db.DB_CREATE | db.DB_THREAD | self.dbFlags)
Example #30
def test_suite():
    suite = unittest.TestSuite()

    if db.version() >= (3, 3, 11):
        suite.addTest(unittest.makeSuite(AssociateErrorTestCase))

        suite.addTest(unittest.makeSuite(AssociateHashTestCase))
        suite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
        suite.addTest(unittest.makeSuite(AssociateRecnoTestCase))

        if db.version() >= (4, 1):
            suite.addTest(unittest.makeSuite(AssociateBTreeTxnTestCase))

        suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
        suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
        suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))

        if have_threads:
            suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
            suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
            suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))

    return suite
Example #31
    def test06_Truncate(self):
        if db.version() < (3, 3):
            # truncate is a feature of BerkeleyDB 3.3 and above
            return

        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test99_Truncate..." % self.__class__.__name__

        d.put("abcde", "ABCDE")
        num = d.truncate()
        assert num >= 1, "truncate returned <= 0 on non-empty database"
        num = d.truncate()
        assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num, )
Example #32
    def test06_Truncate(self):
        if db.version() < (3,3):
            # truncate is a feature of BerkeleyDB 3.3 and above
            return

        d = self.d
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test99_Truncate..." % self.__class__.__name__

        d.put("abcde", "ABCDE");
        num = d.truncate()
        assert num >= 1, "truncate returned <= 0 on non-empty database"
        num = d.truncate()
        assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num,)
Example #33
def print_versions():
    try:
        # For Pythons w/distutils pybsddb
        from bsddb3 import db
    except ImportError:
        # For Python 2.3
        from bsddb import db
    print
    print '-=' * 38
    print db.DB_VERSION_STRING
    print 'bsddb.db.version():   %s' % (db.version(), )
    print 'bsddb.db.__version__: %s' % db.__version__
    print 'bsddb.db.cvsid:       %s' % db.cvsid
    print 'python version:       %s' % sys.version
    print 'My pid:               %s' % os.getpid()
    print '-=' * 38
Example #34
def print_versions():
    print
    print "-=" * 38
    print db.DB_VERSION_STRING
    print "bsddb.db.version():   %s" % (db.version(),)
    print "bsddb.db.__version__: %s" % db.__version__
    print "bsddb.db.cvsid:       %s" % db.cvsid

    # Workaround for allowing generating an EGGs as a ZIP files.
    suffix = "__"
    print "py module:            %s" % getattr(bsddb, "__file" + suffix)
    print "extension module:     %s" % getattr(bsddb, "__file" + suffix)

    print "python version:       %s" % sys.version
    print "My pid:               %s" % os.getpid()
    print "-=" * 38
Example #35
def print_versions():
    try:
        # For Pythons w/distutils pybsddb
        from bsddb3 import db
    except ImportError:
        # For Python 2.3
        from bsddb import db
    print
    print '-=' * 38
    print db.DB_VERSION_STRING
    print 'bsddb.db.version():   %s' % (db.version(), )
    print 'bsddb.db.__version__: %s' % db.__version__
    print 'bsddb.db.cvsid:       %s' % db.cvsid
    print 'python version:       %s' % sys.version
    print 'My pid:               %s' % os.getpid()
    print '-=' * 38
Example #36
    def test01_simple(self):
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test01_simple..." % self.__class__.__name__

        anID = self.env.lock_id()
        if verbose:
            print "locker ID: %s" % anID
        lock = self.env.lock_get(anID, "some locked thing", db.DB_LOCK_WRITE)
        if verbose:
            print "Aquired lock: %s" % lock
        time.sleep(1)
        self.env.lock_put(lock)
        if verbose:
            print "Released lock: %s" % lock
        if db.version() >= (4,0):
            self.env.lock_id_free(anID)
Example #37
    def test07_TxnTruncate(self):
        if db.version() < (3, 3):
            # truncate is a feature of BerkeleyDB 3.3 and above
            return

        d = self.d
        if verbose:
            print "\n", "-=" * 30
            print "Running %s.test07_TxnTruncate..." % self.__class__.__name__

        d.put("abcde", "ABCDE")
        txn = self.env.txn_begin()
        num = d.truncate(txn)
        assert num >= 1, "truncate returned <= 0 on non-empty database"
        num = d.truncate(txn)
        assert num == 0, "truncate on empty DB returned nonzero (%r)" % (num,)
        txn.commit()
Example #38
    def theThread(self, sleepTime, lockType):
        name = currentThread().getName()
        if lockType ==  db.DB_LOCK_WRITE:
            lt = "write"
        else:
            lt = "read"

        anID = self.env.lock_id()
        if verbose:
            print "%s: locker ID: %s" % (name, anID)

        lock = self.env.lock_get(anID, "some locked thing", lockType)
        if verbose:
            print "%s: Aquired %s lock: %s" % (name, lt, lock)

        time.sleep(sleepTime)

        self.env.lock_put(lock)
        if verbose:
            print "%s: Released %s lock: %s" % (name, lt, lock)
        if db.version() >= (4,0):
            self.env.lock_id_free(anID)
Example #39
def test_suite():
    suite = unittest.TestSuite()
    if db.version() >= (4, 3):
        suite.addTest(unittest.makeSuite(DBSequenceTest))
    return suite
Example #40
    def test02_basicPost32(self):
        # Basic Queue tests using the new DB.consume method in DB 3.2+
        # (No cursor needed)

        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test02_basicPost32..." % self.__class__.__name__

        if db.version() < (3, 2, 0):
            if verbose:
                print "Test not run, DB not new enough..."
            return

        d = db.DB()
        d.set_re_len(40)  # Queues must be fixed length
        d.open(self.filename, db.DB_QUEUE, db.DB_CREATE)

        if verbose:
            print "before appends" + '-' * 30
            pprint(d.stat())

        for x in string.letters:
            d.append(x * 40)

        assert len(d) == 52

        d.put(100, "some more data")
        d.put(101, "and some more ")
        d.put(75, "out of order")
        d.put(1, "replacement data")

        assert len(d) == 55

        if verbose:
            print "before close" + '-' * 30
            pprint(d.stat())

        d.close()
        del d
        d = db.DB()
        d.open(self.filename)
        #d.set_get_returns_none(true)

        if verbose:
            print "after open" + '-' * 30
            pprint(d.stat())

        d.append("one more")

        if verbose:
            print "after append" + '-' * 30
            pprint(d.stat())

        rec = d.consume()
        while rec:
            if verbose:
                print rec
            rec = d.consume()

        if verbose:
            print "after consume loop" + '-' * 30
            pprint(d.stat())

        d.close()
Example #41
def test_suite():
    suite = unittest.TestSuite()
    if db.version() >= (4,3):
        suite.addTest(unittest.makeSuite(DBSequenceTest))
    return suite
Example #42
    def test02_basicPost32(self):
        # Basic Queue tests using the new DB.consume method in DB 3.2+
        # (No cursor needed)

        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test02_basicPost32..." % self.__class__.__name__

        if db.version() < (3, 2, 0):
            if verbose:
                print "Test not run, DB not new enough..."
            return

        d = db.DB()
        d.set_re_len(40)  # Queues must be fixed length
        d.open(self.filename, db.DB_QUEUE, db.DB_CREATE)

        if verbose:
            print "before appends" + '-' * 30
            pprint(d.stat())

        for x in string.letters:
            d.append(x * 40)

        assert len(d) == 52

        d.put(100, "some more data")
        d.put(101, "and some more ")
        d.put(75,  "out of order")
        d.put(1,   "replacement data")

        assert len(d) == 55

        if verbose:
            print "before close" + '-' * 30
            pprint(d.stat())

        d.close()
        del d
        d = db.DB()
        d.open(self.filename)
        #d.set_get_returns_none(true)

        if verbose:
            print "after open" + '-' * 30
            pprint(d.stat())

        d.append("one more")

        if verbose:
            print "after append" + '-' * 30
            pprint(d.stat())

        rec = d.consume()
        while rec:
            if verbose:
                print rec
            rec = d.consume()

        if verbose:
            print "after consume loop" + '-' * 30
            pprint(d.stat())

        d.close()
Example #43
    def __init__(self, **kwargs):
        # create db environment
        additional_flags = kwargs.get('flags', 0)
        recovery_mode = kwargs.get('recover', 0)
        if recovery_mode == 2:
            additional_flags |= db.DB_RECOVER_FATAL
        elif recovery_mode == 1:
            additional_flags |= db.DB_RECOVER
            if hasattr(db, 'DB_REGISTER'):
                additional_flags |= db.DB_REGISTER

        self._env = db.DBEnv()
        # ability to override settings' dir for testing purposes
        data_dir = kwargs.get('dir', self.dir)
        self._env.set_data_dir(data_dir)
        # ability to override settings' log_dir for testing purposes
        log_dir = kwargs.get('log_dir', kwargs.get('dir', self.log_dir))
        self._env.set_lg_dir(log_dir)
        self._env.set_tx_max(self.txn_max)

        if self.txn_timeout is not None:
            if self.txn_timeout > 0:
                self._env.set_timeout(self.txn_timeout, db.DB_SET_TXN_TIMEOUT)
            else:
                self._env.set_flags(db.DB_TXN_NOWAIT, 1)

        if self.cache_size is not None:
            self._env.set_cachesize(*self.cache_size)

        if os.name != 'nt' and self.shm_key:
            self._env.set_shm_key(self.shm_key)
            additional_flags |= db.DB_SYSTEM_MEM

        # replication settings
        rep_config = settings['store'].get('rep_config', None)
        init_rep = kwargs.get('init_rep', False)

        if rep_config:
            # in replicated environments use non-durable transactions
            self._env.set_flags(db.DB_TXN_NOSYNC, 1)
            additional_flags |= db.DB_INIT_REP

        self._env.open(self.env_dir,
                       db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                       db.DB_INIT_LOG | db.DB_INIT_TXN | db.DB_CREATE |
                       additional_flags)

        db_flags = db.DB_THREAD | db.DB_AUTO_COMMIT | db.DB_CREATE | \
                   db.DB_MULTIVERSION
        db_mode = 0o660

        if rep_config:
            from porcupine.db.bsddb.replication import ReplicationService

            # initialiaze replication service
            self.replication_service = \
                ReplicationService(self._env, rep_config)

            if init_rep:
                # check multiprocessing
                is_multiprocess = services['main'].is_multiprocess or \
                                  services['management'].is_multiprocess

                if is_multiprocess and int(rep_config['priority']) > 0 \
                        and db.version() < (4, 8):
                    self._env.close()
                    self.__remove_env()
                    raise exceptions.ConfigurationError(
                        'Multiprocessing master candidates ' +
                        'require BerkeleyDB 4.8 or higher')

                # start replication service
                self.replication_service.start()

                # wait for client start-up
                timeout = time.time() + 20
                while time.time() < timeout and \
                        not self.replication_service.is_master() and \
                        not self.replication_service.client_startup_done:
                    time.sleep(0.02)

                timeout = time.time() + 20
                while time.time() < timeout and \
                        not (os.path.exists(
                             os.path.join(self.dir, 'porcupine.db'))):
                    time.sleep(0.02)
        else:
            self.replication_service = None

        # open items db
        while True:
            self._itemdb = db.DB(self._env)
            self._itemdb.set_pagesize(2048)
            try:
                self._itemdb.open('porcupine.db',
                                  'items',
                                  dbtype=db.DB_BTREE,
                                  mode=db_mode,
                                  flags=db_flags)
            except db.DBLockDeadlockError:
                self._itemdb.close()
                continue
            break

        # open documents db
        while True:
            self._docdb = db.DB(self._env)
            try:
                self._docdb.open('porcupine.db',
                                 'docs',
                                 dbtype=db.DB_HASH,
                                 mode=db_mode,
                                 flags=db_flags)
            except db.DBLockDeadlockError:
                self._docdb.close()
                continue
            break

        # open indices
        self._indices = {}
        for name, unique, immutable in settings['store']['indices']:
            self._indices[name] = DbIndex(self._env, self._itemdb,
                                          name, unique, immutable, db_flags)

        self._running = True

        maintain = kwargs.get('maintain', False)
        if maintain and self._maintenance_thread is None:
            # start deadlock detector
            self._maintenance_thread = Thread(target=self.__maintain,
                                              name='DB maintenance thread')
            self._maintenance_thread.start()
            # start checkpoint thread
            self._checkpoint_thread = Thread(target=self.__checkpoint,
                                             name='DB checkpoint thread')
            self._checkpoint_thread.start()
Example #44
import os
from os import path as os_path, mkdir
import threading
from threading import Lock, current_thread, Event
import errno
from errno import EEXIST
import pickle
from pickle import dumps as pickle, loads as unpickle
try:
    import bsddb3 as bsddb3
    from bsddb3 import db as bsddb
except ImportError:
    _no_bssdb3 = True
else:
    db_major, db_minor = bsddb.version()[:2]
    _no_bssdb3 = False

if __name__ == "__main__":  # add pythomnic/lib to sys.path
    import os
    import sys
    main_module_dir = os.path.dirname(
        sys.modules["__main__"].__file__) or os.getcwd()
    sys.path.insert(
        0, os.path.normpath(os.path.join(main_module_dir, "..", "..", "lib")))

import exc_string
from exc_string import exc_string
import typecheck
from typecheck import typecheck, callable, optional
import pmnc.perf_info
Example #45
# Written by Ross Cohen
# See LICENSE.txt for license information.

from os import path

try:
    from bsddb3 import db
    version_info = db.__version__.split('.')
    if version_info < [4,1]:
        raise ImportError
    if db.version() < (4,1):
        raise ImportError
except ImportError:
    from bsddb import db
    version_info = db.__version__.split('.')
    if version_info < [4,1]:
        raise ImportError, 'bsddb 4.1 or higher is required'
    if db.version() < (4,1):
        raise ImportError, 'berkeleydb 4.1 or higher is required'

history_format_version = 1
rebuild_format_version = 4

class VersionMismatchException(Exception): pass

def check_format_version(dir):
    try:
        fd = open(path.join(dir, 'format'), 'r')
        ver = int(fd.read())
        fd.close()
    except IOError:
Example #46
    def __init__(self, **kwargs):
        # create db environment
        additional_flags = kwargs.get('flags', 0)
        recovery_mode = kwargs.get('recover', 0)
        if recovery_mode == 2:
            additional_flags |= db.DB_RECOVER_FATAL
        elif recovery_mode == 1:
            additional_flags |= db.DB_RECOVER
            if hasattr(db, 'DB_REGISTER'):
                additional_flags |= db.DB_REGISTER

        self._env = db.DBEnv()
        # ability to override settings' dir for testing purposes
        data_dir = kwargs.get('dir', self.dir)
        self._env.set_data_dir(data_dir)
        # ability to override settings' log_dir for testing purposes
        log_dir = kwargs.get('log_dir', kwargs.get('dir', self.log_dir))
        self._env.set_lg_dir(log_dir)
        self._env.set_tx_max(self.txn_max)

        if self.txn_timeout is not None:
            if self.txn_timeout > 0:
                self._env.set_timeout(self.txn_timeout, db.DB_SET_TXN_TIMEOUT)
            else:
                self._env.set_flags(db.DB_TXN_NOWAIT, 1)

        if self.cache_size is not None:
            self._env.set_cachesize(*self.cache_size)

        if os.name != 'nt' and self.shm_key:
            self._env.set_shm_key(self.shm_key)
            additional_flags |= db.DB_SYSTEM_MEM

        # replication settings
        rep_config = settings['store'].get('rep_config', None)
        init_rep = kwargs.get('init_rep', False)

        if rep_config:
            # in replicated environments use non-durable transactions
            self._env.set_flags(db.DB_TXN_NOSYNC, 1)
            additional_flags |= db.DB_INIT_REP

        self._env.open(
            self.env_dir,
            db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG
            | db.DB_INIT_TXN | db.DB_CREATE | additional_flags)

        db_flags = db.DB_THREAD | db.DB_AUTO_COMMIT | db.DB_CREATE | \
                   db.DB_MULTIVERSION
        db_mode = 0o660

        if rep_config:
            from porcupine.db.bsddb.replication import ReplicationService

            # initialiaze replication service
            self.replication_service = \
                ReplicationService(self._env, rep_config)

            if init_rep:
                # check multiprocessing
                is_multiprocess = services['main'].is_multiprocess or \
                                  services['management'].is_multiprocess

                if is_multiprocess and int(rep_config['priority']) > 0 \
                        and db.version() < (4, 8):
                    self._env.close()
                    self.__remove_env()
                    raise exceptions.ConfigurationError(
                        'Multiprocessing master candidates ' +
                        'require BerkeleyDB 4.8 or higher')

                # start replication service
                self.replication_service.start()

                # wait for client start-up
                timeout = time.time() + 20
                while time.time() < timeout and \
                        not self.replication_service.is_master() and \
                        not self.replication_service.client_startup_done:
                    time.sleep(0.02)

                timeout = time.time() + 20
                while time.time() < timeout and \
                        not (os.path.exists(
                             os.path.join(self.dir, 'porcupine.db'))):
                    time.sleep(0.02)
        else:
            self.replication_service = None

        # open items db
        while True:
            self._itemdb = db.DB(self._env)
            self._itemdb.set_pagesize(2048)
            try:
                self._itemdb.open('porcupine.db',
                                  'items',
                                  dbtype=db.DB_BTREE,
                                  mode=db_mode,
                                  flags=db_flags)
            except db.DBLockDeadlockError:
                self._itemdb.close()
                continue
            break

        # open documents db
        while True:
            self._docdb = db.DB(self._env)
            try:
                self._docdb.open('porcupine.db',
                                 'docs',
                                 dbtype=db.DB_HASH,
                                 mode=db_mode,
                                 flags=db_flags)
            except db.DBLockDeadlockError:
                self._docdb.close()
                continue
            break

        # open indices
        self._indices = {}
        for name, unique, immutable in settings['store']['indices']:
            self._indices[name] = DbIndex(self._env, self._itemdb, name,
                                          unique, immutable, db_flags)

        self._running = True

        maintain = kwargs.get('maintain', False)
        if maintain and self._maintenance_thread is None:
            # start deadlock detector
            self._maintenance_thread = Thread(target=self.__maintain,
                                              name='DB maintenance thread')
            self._maintenance_thread.start()
            # start checkpoint thread
            self._checkpoint_thread = Thread(target=self.__checkpoint,
                                             name='DB checkpoint thread')
            self._checkpoint_thread.start()
Example #47
class DB(object):
    "Berkeley DB database interface"
    # data dir
    dir = os.path.abspath(settings['store']['bdb_data_dir'])
    if dir[-1] != '/':
        dir += '/'
    # log_dir
    log_dir = os.path.abspath(settings['store'].get('bdb_log_dir', dir))
    if log_dir[-1] != '/':
        log_dir += '/'
    # environment files directory
    env_dir = os.path.abspath(settings['store'].get(
        'env_dir', os.path.abspath(settings['global']['temp_folder'])))
    if env_dir[-1] != '/':
        env_dir += '/'
    # cache size
    cache_size = settings['store'].get('cache_size', None)
    # maximum concurrent transactions
    # due to snapshot isolation this should be kept high enough
    txn_max = settings['store'].get('max_tx', 1000)
    # transaction timeout
    txn_timeout = settings['store'].get('tx_timeout', None)
    # shared memory key
    shm_key = settings['store'].get('shm_key', None)
    # maintenance (deadlock detector) thread
    _maintenance_thread = None
    # checkpoint thread
    _checkpoint_thread = None
    # trickle thread
    _trickle_thread = None

    # log berkeleyDB version
    logger.info('BerkeleyDB version is %s' %
                '.'.join(str(x) for x in db.version()))

    def __init__(self, **kwargs):
        # create db environment
        additional_flags = kwargs.get('flags', 0)
        recovery_mode = kwargs.get('recover', 0)
        if recovery_mode == 2:
            additional_flags |= db.DB_RECOVER_FATAL
        elif recovery_mode == 1:
            additional_flags |= db.DB_RECOVER
            if hasattr(db, 'DB_REGISTER'):
                additional_flags |= db.DB_REGISTER

        self._env = db.DBEnv()
        # ability to override settings' dir for testing purposes
        data_dir = kwargs.get('dir', self.dir)
        self._env.set_data_dir(data_dir)
        # ability to override settings' log_dir for testing purposes
        log_dir = kwargs.get('log_dir', kwargs.get('dir', self.log_dir))
        self._env.set_lg_dir(log_dir)
        self._env.set_tx_max(self.txn_max)

        if self.txn_timeout is not None:
            if self.txn_timeout > 0:
                self._env.set_timeout(self.txn_timeout, db.DB_SET_TXN_TIMEOUT)
            else:
                self._env.set_flags(db.DB_TXN_NOWAIT, 1)

        if self.cache_size is not None:
            self._env.set_cachesize(*self.cache_size)

        if os.name != 'nt' and self.shm_key:
            self._env.set_shm_key(self.shm_key)
            additional_flags |= db.DB_SYSTEM_MEM

        # replication settings
        rep_config = settings['store'].get('rep_config', None)
        init_rep = kwargs.get('init_rep', False)

        if rep_config:
            # in replicated environments use non-durable transactions
            self._env.set_flags(db.DB_TXN_NOSYNC, 1)
            additional_flags |= db.DB_INIT_REP

        self._env.open(
            self.env_dir,
            db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_LOG
            | db.DB_INIT_TXN | db.DB_CREATE | additional_flags)

        db_flags = db.DB_THREAD | db.DB_AUTO_COMMIT | db.DB_CREATE | \
                   db.DB_MULTIVERSION
        db_mode = 0o660

        if rep_config:
            from porcupine.db.bsddb.replication import ReplicationService

            # initialiaze replication service
            self.replication_service = \
                ReplicationService(self._env, rep_config)

            if init_rep:
                # check multiprocessing
                is_multiprocess = services['main'].is_multiprocess or \
                                  services['management'].is_multiprocess

                if is_multiprocess and int(rep_config['priority']) > 0 \
                        and db.version() < (4, 8):
                    self._env.close()
                    self.__remove_env()
                    raise exceptions.ConfigurationError(
                        'Multiprocessing master candidates ' +
                        'require BerkeleyDB 4.8 or higher')

                # start replication service
                self.replication_service.start()

                # wait for client start-up
                timeout = time.time() + 20
                while time.time() < timeout and \
                        not self.replication_service.is_master() and \
                        not self.replication_service.client_startup_done:
                    time.sleep(0.02)

                timeout = time.time() + 20
                while time.time() < timeout and \
                        not (os.path.exists(
                             os.path.join(self.dir, 'porcupine.db'))):
                    time.sleep(0.02)
        else:
            self.replication_service = None

        # open items db
        while True:
            self._itemdb = db.DB(self._env)
            self._itemdb.set_pagesize(2048)
            try:
                self._itemdb.open('porcupine.db',
                                  'items',
                                  dbtype=db.DB_BTREE,
                                  mode=db_mode,
                                  flags=db_flags)
            except db.DBLockDeadlockError:
                self._itemdb.close()
                continue
            break

        # open documents db
        while True:
            self._docdb = db.DB(self._env)
            try:
                self._docdb.open('porcupine.db',
                                 'docs',
                                 dbtype=db.DB_HASH,
                                 mode=db_mode,
                                 flags=db_flags)
            except db.DBLockDeadlockError:
                self._docdb.close()
                continue
            break

        # open indices
        self._indices = {}
        for name, unique, immutable in settings['store']['indices']:
            self._indices[name] = DbIndex(self._env, self._itemdb, name,
                                          unique, immutable, db_flags)

        self._running = True

        maintain = kwargs.get('maintain', False)
        if maintain and self._maintenance_thread is None:
            # start deadlock detector
            self._maintenance_thread = Thread(target=self.__maintain,
                                              name='DB maintenance thread')
            self._maintenance_thread.start()
            # start checkpoint thread
            self._checkpoint_thread = Thread(target=self.__checkpoint,
                                             name='DB checkpoint thread')
            self._checkpoint_thread.start()
            #if hasattr(self._env, 'memp_trickle'):
            #    # strart memp_trickle thread
            #    self._trickle_thread = Thread(target=self.__trickle,
            #                                  name='DB memp_trickle thread')
            #    self._trickle_thread.start()

    def is_open(self):
        return self._running

    # item operations
    def get_item(self, oid):
        if type(oid) != bytes:
            oid = oid.encode('utf-8')
        try:
            return self._indices['_id'].db.get(oid,
                                               txn=context._trans
                                               and context._trans.txn)
        except UnicodeEncodeError:
            return None
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            if context._trans is not None:
                context._trans.abort()
            raise exceptions.DBRetryTransaction

    def put_item(self, item):
        try:
            self._itemdb.put(
                pack_value(item._pid) + b'_' + pack_value(item._id),
                persist.dumps(item), context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            context._trans.abort()
            raise exceptions.DBRetryTransaction
        except db.DBError as e:
            if e.args[0] == _err_unsupported_index_type:
                raise db.DBError('Unsupported indexed data type')
            else:
                raise

    def delete_item(self, item):
        try:
            self._itemdb.delete(
                pack_value(item._pid) + b'_' + pack_value(item._id),
                context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            context._trans.abort()
            raise exceptions.DBRetryTransaction

    # containers
    def get_children(self, container_id):
        cursor = Cursor(self._itemdb, '_pid')
        cursor.set_scope(container_id)
        cursor.set_range(None, None)
        return cursor

    def get_child_by_name(self, container_id, name):
        try:
            return self._indices['displayName'].db.get(
                pack_value(container_id) + b'_' + pack_value(name),
                txn=context._trans and context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            if context._trans is not None:
                context._trans.abort()
            raise exceptions.DBRetryTransaction

    # external attributes
    def get_external(self, id):
        try:
            return self._docdb.get(id.encode('ascii'),
                                   txn=context._trans and context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            if context._trans is not None:
                context._trans.abort()
            raise exceptions.DBRetryTransaction

    def put_external(self, id, stream):
        try:
            self._docdb.put(id.encode('ascii'), stream, context._trans.txn)
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            context._trans.abort()
            raise exceptions.DBRetryTransaction

    def delete_external(self, id):
        try:
            self._docdb.delete(id.encode('ascii'), context._trans.txn)
        except db.DBNotFoundError:
            # virtual external due to elastic schema
            pass
        except (db.DBLockDeadlockError, db.DBLockNotGrantedError):
            context._trans.abort()
            raise exceptions.DBRetryTransaction

    # indices
    def get_cursor_list(self, conditions):
        cur_list = []
        for index, value in conditions:
            cursor = Cursor(self._indices[index].db, self._indices[index].name)
            if isinstance(value, (list, tuple)):
                reversed = (len(value) == 3 and value[2])
                cursor.set_range(value[0], value[1])
                if reversed:
                    cursor.reverse()
            else:
                cursor.set(value)
            cur_list.append(cursor)
        return cur_list

    def query(self, conditions):
        cur_list = self.get_cursor_list(conditions)
        if len(cur_list) == 1:
            return cur_list[0]
        else:
            c_join = Join(self._itemdb, cur_list)
            return c_join

    def test_conditions(self, scope, conditions):
        cur_list = self.get_cursor_list(conditions)
        if len(cur_list) == 1:
            cursor = cur_list[0]
        else:
            cursor = Join(self._itemdb, cur_list)
        cursor.set_scope(scope)
        iterator = iter(cursor)
        try:
            result = bool(next(iterator))
        except StopIteration:
            result = False
        cursor.close()
        return result

    # transactions
    def get_transaction(self, **kwargs):
        nosync = kwargs.get('nosync', False)
        snapshot = kwargs.get('snapshot', False)
        return Transaction(self._env, nosync, snapshot)

    def __remove_env(self):
        files = glob.glob(self.env_dir + '__db.*')
        for file in files:
            try:
                os.remove(file)
            except OSError:
                pass

    # administrative
    def __remove_files(self):
        # environment files
        self.__remove_env()
        # log files
        files = glob.glob(self.log_dir + 'log.*')
        for file in files:
            os.remove(file)
        # database file
        os.remove(self.dir + 'porcupine.db')
        # index file
        os.remove(self.dir + 'porcupine.idx')

    def truncate(self):
        # older versions of bsddb do not support truncate
        if hasattr(self._itemdb, 'truncate'):
            self._itemdb.truncate()
            self._docdb.truncate()
        else:
            # close database
            self.close()
            # remove old database files
            self.__remove_files()
            # open db
            self.__init__()

    def backup(self, output_file):
        # force checkpoint
        self._env.txn_checkpoint(0, 0, db.DB_FORCE)
        logs = self._env.log_archive(db.DB_ARCH_LOG)
        backfiles = [self.dir + 'porcupine.db', self.dir + 'porcupine.idx'] + \
                    [self.log_dir + log.decode() for log in logs]
        # compact backup....
        backup = BackupFile(output_file)
        backup.add_files(backfiles)

    def restore(self, bset):
        self.__remove_files()
        backup = BackupFile(bset)
        backup.extract(self.dir, self.log_dir)

    def shrink(self):
        logs = self._env.log_archive()
        for log in logs:
            os.remove(self.log_dir + log)
        return len(logs)

    def __maintain(self):
        "deadlock detection thread"
        while self._running:
            time.sleep(0.01)
            # deadlock detection
            try:
                aborted = self._env.lock_detect(db.DB_LOCK_YOUNGEST)
                if aborted:
                    logger.critical(
                        "Deadlock: Aborted %d deadlocked transaction(s)" %
                        aborted)
            except db.DBError:
                pass

    def __trickle(self):
        "memp_trickle thread"
        while self._running:
            self._env.memp_trickle(95)
            time.sleep(120)

    def __checkpoint(self):
        "checkpoint thread"
        while self._running:
            if self.replication_service is None \
                    or self.replication_service.is_master():
                # checkpoint every 512KB written
                self._env.txn_checkpoint(512, 0)
            time.sleep(16)

            #stats = self._env.txn_stat()
            #print('txns: %d' % stats['nactive'])
            #print('max txns: %d' % stats['maxnactive'])
            #print()
            #stats = self._env.lock_stat()
            #print('Lockers: %d' % stats['nlockers'])
            #print('Max Lockers: %d' % stats['maxnlockers'])
            #print('Lockers wait: %d' % stats['lockers_wait'])
            #print()
            #print('Locks: %d' % stats['nlocks'])
            #print('Max Locks: %d' % stats['maxnlocks'])
            #print('Locks wait: %d' % stats['lock_wait'])
            #print('Locks no-wait: %d' % stats['lock_nowait'])
            #print()
            #print('Lock objects: %d' % stats['nobjects'])
            #print('Max objects: %d' % stats['maxnobjects'])
            #print('Objects wait: %d' % stats['objs_wait'])
            #print()
            #print('Requested: %d' % stats['nrequests'])
            #print('Released: %d' % stats['nreleases'])
            #print('-' * 80)

    def close(self, **kwargs):
        if self._running:
            self._running = False

            # join threads
            if self._maintenance_thread is not None:
                self._maintenance_thread.join()
            if self._checkpoint_thread is not None:
                self._checkpoint_thread.join()
            if self._trickle_thread is not None:
                self._trickle_thread.join()

            self._itemdb.close()
            self._docdb.close()
            # close indexes
            [index.close() for index in self._indices.values()]
            self._env.close()
            # clean-up environment files
            if (self._maintenance_thread is not None
                    or kwargs.get('clear_env', False)):
                self.__remove_env()
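
The common thread across the examples above: db.version() returns a (major, minor, patch) tuple for the linked BerkeleyDB library, and callers compare it against a literal tuple to gate version-specific features. Below is a minimal standalone sketch of that pattern, assuming the bsddb3 package (or the legacy bsddb module) is importable; the helper names are illustrative only, not part of either API:

try:
    from bsddb3 import db      # pybsddb distribution
except ImportError:
    from bsddb import db       # legacy module bundled with Python 2

def report_version():
    # db.version() returns the (major, minor, patch) of the linked library;
    # example #13 above asserts it equals
    # (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR, db.DB_VERSION_PATCH).
    major, minor, patch = db.version()
    print('BerkeleyDB %d.%d.%d (%s)' % (major, minor, patch, db.DB_VERSION_STRING))

def txn_open_supported():
    # Several examples above only pass txn= to DB.open()/associate() when the
    # library is 4.1 or newer; tuple comparison makes the gate a one-liner.
    return db.version() >= (4, 1)

if __name__ == '__main__':
    report_version()
    if not txn_open_supported():
        print('BerkeleyDB predates 4.1; transactional open is unavailable')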