def Open(self, create_if_necessary=False):
        if create_if_necessary and not os.path.exists(self._db_path):
            self.CreateBDB()

        self._dbh = db.DB()
        self._keys_dbh = db.DB()
        self.OpenBDB()
Example #2
 def __init__(self, irc):
     self.__parent = super(Hanzi, self)
     self.__parent.__init__(irc)
     self.pydb = db.DB()
     self.wbdb = db.DB()
     pydbpath = path.join(Hanzi.BASE_PATH, 'pinyin.db')
     wbdbpath = path.join(Hanzi.BASE_PATH, 'wubi.db')
     self.pydb.open(pydbpath, None, db.DB_HASH, db.DB_RDONLY)
     self.wbdb.open(wbdbpath, None, db.DB_HASH, db.DB_RDONLY)
Example #3
    def __init__(self, db_env):
        self.__db_env = db_env
        self.__namespace = db.DB(db_env)
        self.__namespace.open('namespace.db', None, db.DB_BTREE,
                              db.DB_CREATE | db.DB_AUTO_COMMIT)

        self.__prefix = db.DB(db_env)
        self.__prefix.open("prefix.db", None, db.DB_BTREE,
                           db.DB_CREATE | db.DB_AUTO_COMMIT)
Example #4
    def test02_WithSource(self):
        """
        A Recno file that is given a "backing source file" is essentially a
        simple ASCII file.  Normally each record is delimited by \n and so is
        just a line in the file, but you can set a different record delimiter
        if needed.
        """
        source = os.path.join(os.path.dirname(sys.argv[0]),
                              'db_home/test_recno.txt')
        if not os.path.isdir('db_home'):
            os.mkdir('db_home')
        f = open(source, 'w') # create the file
        f.close()

        d = db.DB()
        # This is the default value, just checking if both int
        d.set_re_delim(0x0A)
        d.set_re_delim('\n')  # and char can be used...
        d.set_re_source(source)
        d.open(self.filename, db.DB_RECNO, db.DB_CREATE)

        data = "The quick brown fox jumped over the lazy dog".split()
        for datum in data:
            d.append(datum)
        d.sync()
        d.close()

        # get the text from the backing source
        text = open(source, 'r').read()
        text = text.strip()
        if verbose:
            print text
            print data
            print text.split('\n')

        assert text.split('\n') == data

        # open as a DB again
        d = db.DB()
        d.set_re_source(source)
        d.open(self.filename, db.DB_RECNO)

        d[3] = 'reddish-brown'
        d[8] = 'comatose'

        d.sync()
        d.close()

        text = open(source, 'r').read()
        text = text.strip()
        if verbose:
            print text
            print text.split('\n')

        assert text.split('\n') == \
             "The quick reddish-brown fox jumped over the comatose dog".split()
Example #5
def start_db():
    global db_filename

    try:
        DB = db.DB()
        DB.open(db_filename, None, db.DB_HASH, db.DB_AUTO_COMMIT)
    except:
        DB = db.DB()
        DB.open(db_filename, None, db.DB_HASH, db.DB_CREATE)
    return DB
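A hedged usage sketch for start_db() (the module-level db_filename is assumed to be set before the call; the path below is illustrative):

db_filename = '/tmp/example_hash.db'

handle = start_db()
handle.put('greeting', 'hello')
print handle.get('greeting')   # 'hello'
handle.close()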
Example #6
    def runTest(self):
        print "------------------ Test1  DB Creation and destruction ---------"
        print 'Creating a hash table using Berkeley DB'
        dbName = 'dummy.db'
        if os.path.exists(dbName):
            os.remove(dbName)

        Db = db.DB()
        Db.open(dbName, dbtype=db.DB_HASH, flags=db.DB_CREATE)

        print 'Closing and reopening the database'
        Db.close()

        Db = db.DB()
        Db.open(dbName, dbtype=db.DB_HASH)

        print 'populating with a key from MP'
        F = 128
        fmax = 5500.0
        # max frequency: plain quantization
        F_N = 14
        # for quantization
        T = 125.0
        FileIdx = 104

        Nbits = 2**16

        Tbin = floor(T / (10.0 * 60.0) * (Nbits - 1))
        # Coding over 16bits, max time offset = 10min

        B = int(FileIdx * Nbits + Tbin)

        # preparing the key/values
        beta = ceil((fmax) / (2.0**F_N - 1.0))

        K = int(floor(F) * 2**(F_N) + floor(float(F) / float(beta)))

        print B, beta, K, T, Tbin
        Bbin = struct.pack('<I4', B)
        Kbin = struct.pack('<I4', K)
        Db.put(Kbin, Bbin)

        result = Db.get(Kbin)
        Tres = struct.unpack('<I4', result)
        self.assertEqual(Tres[0], B)
        print 'retrieving the file index and time position'
        estTimeI = B % Nbits
        estFileI = B / Nbits
        estTime = (float(estTimeI) / (Nbits - 1)) * (600)

        self.assertEqual(estTimeI, Tbin)
        self.assertEqual(estFileI, FileIdx)
        self.assertTrue(abs(estTime - T) < 0.01)

        Db.close()
Example #7
    def __init__(self, repoDir, readOnly=True):
        self.readOnly = readOnly

        if readOnly:
            envFlags = bdb.DB_INIT_MPOOL
            self.dbFlags = bdb.DB_RDONLY
        else:
            envFlags = bdb.DB_CREATE | bdb.DB_INIT_TXN | bdb.DB_INIT_MPOOL | \
                bdb.DB_INIT_LOCK | bdb.DB_INIT_LOG # | bdb.DB_RECOVER_FATAL
            self.dbFlags = bdb.DB_CREATE | bdb.DB_READ_UNCOMMITTED | bdb.DB_AUTO_COMMIT

        localDbFlags = self.dbFlags

        self.env = bdb.DBEnv(0)
        # set size of the locking subsystem
        self.env.set_lk_max_objects(100000)
        self.env.set_lk_max_locks(100000)
        # set size of logging manager
        self.env.set_lg_regionmax(1 << 20)
        # open the environment
        #self.env.remove(repoDir)
        self.env.open(repoDir, envFlags)
        # Lazy syncing should improve the throughput
        #self.env.set_flags(bdb.DB_TXN_NOSYNC, 1)

        self._experimentDb = bdb.DB(self.env)
        self._experimentDb.open("experiments.db",
                                dbtype=bdb.DB_HASH,
                                flags=localDbFlags)
        self._requiredCountDb = bdb.DB(self.env)
        self._requiredCountDb.open("requiredCount.db",
                                   dbtype=bdb.DB_HASH,
                                   flags=localDbFlags)
        self._resultCountDb = bdb.DB(self.env)
        self._resultCountDb.open("resultCount.db",
                                 dbtype=bdb.DB_HASH,
                                 flags=localDbFlags)
        self._bundleIdsDb = bdb.DB(self.env)
        self._bundleIdsDb.open("bundleIds.db",
                               dbtype=bdb.DB_HASH,
                               flags=localDbFlags)

        # make the directory where results will be stored
        try:
            os.mkdir(os.path.join(repoDir, 'results'))
        except:
            # already exists? anyway, we'll get an error later
            pass

        if self.readOnly:
            self.txn = None
        else:
            self.txn = self.env.txn_begin()
Example #8
    def __init__(self, db_env, node_pickler):
        self.__db_env = db_env
        self.__dbp = db.DB(db_env)
        self.__dbp.open("IDMap_hash.db", None, db.DB_HASH,
                        db.DB_CREATE | db.DB_AUTO_COMMIT)

        self.__dbs = db.DB(db_env)
        self.__dbs.open("IDMap_recno.db", None, db.DB_RECNO,
                        db.DB_CREATE | db.DB_AUTO_COMMIT)

        # pickling and un-pickling the data
        self.__node_pickler = node_pickler

        self.__loads = self.__node_pickler.loads
        self.__dumps = self.__node_pickler.dumps
def selectByKey(db_name, content_key):
    database = db.DB()
    database.open('../db/' + db_name, dbtype=db.DB_HASH, flags=db.DB_CREATE)
    content_value = database.get(content_key)
    database.close()

    return content_value
def deleteByKey(db_name, content_key):
    database = db.DB()
    database.open('../db/' + db_name, dbtype=db.DB_HASH, flags=db.DB_CREATE)
    database.delete(content_key)

    logging.info("delete " + content_key + " success")
    database.close()
 def _CreateBDB(self):
   #if os.path.exists(self._db_root):
     #raise DBExists("Refusing to overwrite existing file: %s" % self._db_path)
   handle = bdb.DB(self._dbenv)
   handle.set_pagesize(4096)
   handle.open(self._db_path, self._db_name, BDB_ACCESS_FLAGS, bdb.DB_CREATE)
   handle.close()
Example #12
class BaseThreadedTestCase(unittest.TestCase):
    dbtype = db.DB_UNKNOWN  # must be set in derived class
    dbopenflags = 0
    dbsetflags = 0
    envflags = 0

    def setUp(self):
        if verbose:
            dbutils._deadlock_VerboseFile = sys.stdout

        homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
        self.homeDir = homeDir
        try:
            os.mkdir(homeDir)
        except OSError, e:
            if e.errno != errno.EEXIST: raise
        self.env = db.DBEnv()
        self.setEnvOpts()
        self.env.open(homeDir, self.envflags | db.DB_CREATE)

        self.filename = self.__class__.__name__ + '.db'
        self.d = db.DB(self.env)
        if self.dbsetflags:
            self.d.set_flags(self.dbsetflags)
        self.d.open(self.filename, self.dbtype,
                    self.dbopenflags | db.DB_CREATE)
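The class above is only a base; as the comment says, dbtype must be set in a derived class. A hypothetical subclass (names are illustrative, and setEnvOpts is given a no-op body here because setUp calls it):

class BTreeThreadedTestCase(BaseThreadedTestCase):
    dbtype = db.DB_BTREE
    dbopenflags = db.DB_THREAD

    def setEnvOpts(self):
        pass   # hook called by setUp; no extra environment options here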
Example #13
 def check(self): 
     """Perform a self-check."""
     try: 
         self._acquire()
         filename = self.__config.state_db_file
         if not os.path.exists(filename): 
             return
         wasopen = self.__shelf is not None
         should_salvage = False
         if wasopen: 
             self.close()
         log.info("Performing a self-check of the state database...")
         try: 
             corrupt = '%s.corrupt' % filename
             idb = db.DB()
             try: 
                 idb.verify(filename)
                 idb.close()
             except bsddb._db.DBVerifyBadError, ex: 
                 log.exception("Database verification failed.")
                 should_salvage = True
         except Exception, e:
             log.exception("That didn't work.")
             try: 
                 idb.close()
             except:
                 pass
             raise e
         log.info("Self-check complete.")
         if should_salvage:
             self.salvage()
         if wasopen:
             self.open()
     finally:
         self._release()
    def test02_get_raises_exception(self):
        d = db.DB()
        d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
        d.set_get_returns_none(0)

        for x in string.letters:
            d.put(x, x * 40)

        self.assertRaises(db.DBNotFoundError, d.get, 'bad key')
        self.assertRaises(KeyError, d.get, 'bad key')

        data = d.get('a')
        assert data == 'a' * 40

        count = 0
        exceptionHappened = 0
        c = d.cursor()
        rec = c.first()
        while rec:
            count = count + 1
            try:
                rec = c.next()
            except db.DBNotFoundError:  # end of the records
                exceptionHappened = 1
                break

        assert rec != None
        assert exceptionHappened
        assert count == 52

        c.close()
        d.close()
Example #15
 def _getExperimentDB(self, eID):
     exprDb = bdb.DB(self.env)
     exprDb.open(self._getExperimentDbFilename(eID),
                 dbtype=bdb.DB_RECNO,
                 flags=self.dbFlags)
     #txn=self.txn)
     return exprDb
    def test01_get_returns_none(self):
        d = db.DB()
        d.open(self.filename, db.DB_BTREE, db.DB_CREATE)
        d.set_get_returns_none(1)

        for x in string.letters:
            d.put(x, x * 40)

        data = d.get('bad key')
        assert data == None

        data = d.get('a')
        assert data == 'a' * 40

        count = 0
        c = d.cursor()
        rec = c.first()
        while rec:
            count = count + 1
            rec = c.next()

        assert rec == None
        assert count == 52

        c.close()
        d.close()
Example #17
    def test01_basic(self):
        d = db.DB()
        d.open(self.filename, db.DB_RECNO, db.DB_CREATE)

        for x in letters:
            recno = d.append(x * 60)
            assert type(recno) == type(0)
            assert recno >= 1
            if verbose:
                print recno,

        if verbose: print

        stat = d.stat()
        if verbose:
            pprint(stat)

        for recno in range(1, len(d)+1):
            data = d[recno]
            if verbose:
                print data

            assert type(data) == type("")
            assert data == d.get(recno)

        try:
            data = d[0]  # This should raise a KeyError!?!?!
        except db.DBInvalidArgError, val:
            assert val[0] == db.EINVAL
            if verbose: print val
Example #18
 def check(self): 
     """Perform a self-check."""
     try: 
         self._acquire()
         filename = os.path.join(self.__config.appdata_dir,"history","history.db")
         if not os.path.exists(filename): 
             return
         wasopen = self.__shelf is not None
         should_salvage = False
         if wasopen: 
             self.close()
         log.info("Performing a self-check of the history database...")
         try: 
             idb = db.DB()
             try: 
                 idb.verify(filename)
                 idb.close()
             except bsddb._db.DBVerifyBadError, ex: 
                 log.exception("Database verification failed.")
                 should_salvage = True
         except: 
             log.exception("That didn't work.")
             try: 
                 idb.close()
             except:
                 pass
         log.info("Self-check complete.")
         if should_salvage:
             self.salvage()
         if wasopen:
             self.open()
     finally:
         self._release()
Example #19
def loadChoiceGroups(owner=None, txn=None, bdb=None):
    """
  XXX: Not used anymore?
  """
    setClose = False
    if not bdb:
        db_env = getDbEnv()
        bdb = db.DB(db_env)
        bdb.open(FORMS_DB, PROJECT_DATA_ROOT, dbtype=db.DB_BTREE, txn=txn)
        setClose = True
    bdb_keys = bdb.keys()
    choice_groups = []
    for bdb_key in bdb_keys:
        d = bdb.get(bdb_key, txn=txn)
        bdb_data = cPickle.loads(d)
    # close only after every record has been read
    if setClose:
        bdb.close()
    """
    if type(bdb_data) == FormChoiceGroup:
      print "type was FormChoiceGroup!"
      if setClose:
        bdb.close()
        db_env.close()
      if bdb_data:
        choice_groups.append(bdb_data)
      else:
        # Need to return an error.
        raise ValueError ("Tried to load ChoiceGroup %d, which has no data"% int(bdb_key) )
  """
    return choice_groups
Example #20
def loadObject(key, txn=None, bdb=None):
    """
  This method loads a bfbdb object with a particular key
  For example:
  >>> print loadObject(2)
  """

    if not key:
        log.warning("loadObject called without object id")
        return None, None

    key = "%020d" % (int(key))
    setClose = False

    #Debug:
    #print "reading %s" % (key)

    # OPEN
    if not bdb:
        db_env = getDbEnv()
        bdb = db.DB(db_env)
        bdb.open(FORMS_DB, PROJECT_DATA_ROOT, dbtype=db.DB_BTREE, txn=txn)
        setClose = True

    bdb_data = bdb.get(key, txn=txn)

    # CLOSE
    if setClose:
        bdb.close()

    if (bdb_data):
        return cPickle.loads(bdb_data)
    else:
        # Need to return an error.
        raise ValueError("Tried to load form %d, which has no data" % int(key))
Example #21
    def test01_join(self):
        if verbose:
            print '\n', '-=' * 30
            print "Running %s.test01_join..." % \
                  self.__class__.__name__

        # create and populate primary index
        priDB = db.DB(self.env)
        priDB.open(self.filename, "primary", db.DB_BTREE, db.DB_CREATE)
        map(lambda t, priDB=priDB: apply(priDB.put, t), ProductIndex)

        # create and populate secondary index
        secDB = db.DB(self.env)
        secDB.set_flags(db.DB_DUP | db.DB_DUPSORT)
        secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE)
        map(lambda t, secDB=secDB: apply(secDB.put, t), ColorIndex)

        sCursor = None
        jCursor = None
        try:
            # lets look up all of the red Products
            sCursor = secDB.cursor()
            # Don't do the .set() in an assert, or you can get a bogus failure
            # when running python -O
            tmp = sCursor.set('red')
            assert tmp

            # FIXME: jCursor doesn't properly hold a reference to its
            # cursors, if they are closed before jcursor is used it
            # can cause a crash.
            jCursor = priDB.join([sCursor])

            if jCursor.get(0) != ('apple', "Convenience Store"):
                self.fail("join cursor positioned wrong")
            if jCursor.join_item() != 'chainsaw':
                self.fail("DBCursor.join_item returned wrong item")
            if jCursor.get(0)[0] != 'strawberry':
                self.fail("join cursor returned wrong thing")
            if jCursor.get(0):  # there were only three red items to return
                self.fail("join cursor returned too many items")
        finally:
            if jCursor:
                jCursor.close()
            if sCursor:
                sCursor.close()
            priDB.close()
            secDB.close()
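The comment about "python -O" above refers to the fact that assert statements are compiled out under -O, so a call with a needed side effect must not live inside an assert. A standalone illustration of the pitfall (not part of the test suite):

calls = []

def position_cursor():
    # stands in for sCursor.set('red'), which positions the cursor as a side effect
    calls.append('set')
    return True

# Risky: under "python -O" the whole assert line disappears and the call never runs.
# assert position_cursor()

# Safe: the call always executes; only the check is dropped under -O.
tmp = position_cursor()
assert tmp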
Example #22
    def salvage(self): 
        """Salvage what we can recover."""
        try: 
            self._acquire()
            self.close()
            filename = self.__config.state_db_file
            corrupt = '%s.corrupt' % filename
            recovery = '%s.recovery' % filename
            shutil.copyfile(filename, corrupt)
            idb = db.DB()
            try: 
                idb.verify(filename, outfile=recovery, flags=db.DB_SALVAGE)
            except bsddb._db.DBVerifyBadError, ex: 
                pass
            idb.close()
            os.unlink(filename)
            cdb = db.DB()
            cdb.open(corrupt)
            rdb = db.DB()
            rdb.open(filename, dbtype=db.DB_HASH, flags=db.DB_CREATE)
            keys = cdb.keys()
            goodkeys = []
            for key in keys:
                for good in ('cache\0', 'lastfeedid', 'feed#'): 
                    if key[:len(good)] == good:
                        if good == 'cache\0': 
                            break # silently ditch
                        try: 
                            value = cdb.get(key)
                            if key == 'lastfeedid' \
                                    and not isinstance(value, int):
                                log.warn("Discarding dodgy lastfeedid %s",
                                         repr(value))
                                continue
                            goodkeys.append(key)
                        except: 
                            log.exception("Can't recover key %s", repr(key))
                        break
                else: 
                    log.error("Discarding dodgy key %s", repr(key))

            for key in goodkeys:
                rdb.put(key, cdb.get(key))

            log.info("Recovered %d of %d keys.", len(goodkeys), len(keys))
            cdb.close()
            rdb.close()
        finally:
            self._release()
    def CreateBDB(self):
        if os.path.exists(self._db_path):
            raise DBExists("Refusing to overwrite existing file: %s" %
                           self._db_path)
        handle = db.DB()
        handle.set_pagesize(4096)
        handle.open(self._db_path, None, BDB_ACCESS_FLAGS, db.DB_CREATE)
        handle.close()

        if os.path.exists(self._keys_db_path):
            raise DBExists("Refusing to overwrite existing file: %s" %
                           self._keys_db_path)
        handle = db.DB()
        handle.set_pagesize(4096)
        #handle.set_cachesize(0, 20<<20) # 20 meg cache
        handle.open(self._keys_db_path, None, BDB_ACCESS_FLAGS, db.DB_CREATE)
        handle.close()
def bulk_delete(db_name, content_key_list):
    database = db.DB()
    database.open('../db/' + db_name, dbtype=db.DB_HASH, flags=db.DB_CREATE)
    for content_key in content_key_list:
        database.delete(content_key)

        logging.info("delete " + content_key + " success")
    database.close()
Example #25
    def __init_indices(self):
        indices = {}
        for index in self.__index_list:
            indices[index] = db.DB(self.__db_env)
            indices[index].open("index_%s.db" % index, None, db.DB_BTREE,
                                db.DB_CREATE | db.DB_AUTO_COMMIT)

        return indices
def selectAll(db_name):
    result_set = {}
    database = db.DB()
    database.open('../db/' + db_name, dbtype=db.DB_HASH, flags=db.DB_CREATE)
    cursor = database.cursor()
    for content_key, content_value in fetch(cursor):
        result_set[content_key] = content_value
    cursor.close()
    database.close()

    return result_set
Example #27
def umountUser():
    logindb = db.DB()
    try:
        logindb.open(rootFtpConf + "/login.db", None, db.DB_HASH,
                     db.DB_DIRTY_READ)
    except Exception, err:
        print "Erreur de connexion avec la base : "
        print str(err)
        os._exit(1)
def bulk_insert(db_name, content_dict):
    database = db.DB()
    database.open('../db/' + db_name, dbtype=db.DB_HASH, flags=db.DB_CREATE)
    for content_key in content_dict.keys():
        if database.get(content_key):
            logging.warn(content_key + " already exists")
        else:
            database.put(content_key, content_dict[content_key])
            logging.info("insert " + content_key + " success")
    database.close()
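A hypothetical round trip through the small helper functions defined above ('users.db' is an illustrative name, and the '../db/' directory is assumed to exist):

bulk_insert('users.db', {'alice': 'admin', 'bob': 'guest'})
print selectByKey('users.db', 'alice')   # 'admin'
deleteByKey('users.db', 'bob')
print selectAll('users.db')              # {'alice': 'admin'}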
Example #29
    def _openDB(self, db_path, open_type):
        print >> sys.stderr, "..Trying to open with", self.open_type_list[open_type]
        d = None
        if open_type == 1:  # 'bsddb.dbshelve'
            db_types = [db.DB_BTREE, db.DB_HASH]
            for dbtype in db_types:
                try:
                    d = dbshelve.open(db_path, filetype=dbtype)
                    break
                except:
                    d = None
#                except:
#                    print_exc()
            if d is not None:
                return d.cursor()
#                data = dict(d.items())
#                d.close()
#                return data
            else:
                return d

        elif open_type == 0:  # 'bsddb.db'
            try:
                d = db.DB()
                d.open(db_path, db.DB_UNKNOWN)
            except:
                d = None
#                print_exc()
            if d is not None:
                return d.cursor()


#                data = dict(d.items())
#                d.close()
#                return data
            else:
                return d

        elif open_type == 2:  # 'pickle'
            try:
                f = open(db_path)
                d = load(f)
                f.close()
                return d
            except:
                return None

        else:
            try:
                f = open(db_path)
                d = f.readlines()
                f.close()
                return d
            except:
                return None
Example #30
def synchronize(word_counts, cooccurrence_counts, word_counts_db_file,
                cooccurrence_counts_db_file):
    """Write structures to disk before clearing their contents from memory.
    """
    word_counts_db = db.DB()
    word_counts_db.open(word_counts_db_file, None, db.DB_HASH, db.DB_CREATE)
    for word, count in word_counts.iteritems():
        word = word.encode(DEF_LOCALE)
        old_val = word_counts_db.get(word)
        if old_val is None:
            new_val = count
        else:
            new_val = int(old_val) + count
        word_counts_db.put(word, str(new_val))
    word_counts_db.close()
    word_counts.clear()

    cooccurrence_counts_db = db.DB()
    cooccurrence_counts_db.open(cooccurrence_counts_db_file, None, db.DB_HASH,
                                db.DB_CREATE)
    for (target, word), count in cooccurrence_counts.iteritems():
        ## Try to find both (target, word) and (word, target) in the
        ## db. This matters when word is itself a target, i.e. two
        ## targets co-occur with each other, because an earlier pass
        ## could have stored the co-occurrence pair in a different
        ## order than we have now.
        target, word = target.encode(DEF_LOCALE), word.encode(DEF_LOCALE)
        key_1 = "%s,%s" % (target, word)
        old_val_1 = cooccurrence_counts_db.get(key_1)
        try:
            new_val_1 = int(old_val_1) + count
            cooccurrence_counts_db.put(key_1, str(new_val_1))
        except TypeError:
            key_2 = "%s,%s" % (word, target)
            old_val_2 = cooccurrence_counts_db.get(key_2)
            try:
                new_val_2 = int(old_val_2) + count
                cooccurrence_counts_db.put(key_2, str(new_val_2))
            except TypeError:
                cooccurrence_counts_db.put(key_1, str(count))
    cooccurrence_counts_db.close()
    cooccurrence_counts.clear()
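A hedged usage sketch for synchronize() (file names are illustrative; DEF_LOCALE is assumed to be an encoding name such as 'utf-8' defined elsewhere in the module):

word_counts = {u'cat': 3, u'dog': 1}
cooccurrence_counts = {(u'cat', u'dog'): 2}

synchronize(word_counts, cooccurrence_counts,
            'word_counts.db', 'cooccurrence_counts.db')

# Both in-memory dicts are now empty; the counts have been merged into the two
# hash databases, with (target, word) and (word, target) treated as one pair.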