def open_db(self, direct):
    # Bail out early if the directory does not contain a JumpStart database.
    if not os.path.exists(os.path.join(direct, 'Jumpstart.db')):
        wxMessageBox('This does not look like a JumpStart database directory',
                     style=wxOK | wxICON_ERROR)
        return
    self.env = db.DBEnv()
    self.env.open(direct, db.DB_JOINENV)
    self.dbFileChunks = db.DB(self.env)
    self.dbFileChunks.open('Jumpstart.db', "File-Chunks",
                           db.DB_UNKNOWN, db.DB_RDONLY)
    self.dbHashName = db.DB(self.env)
    self.dbHashName.open('Jumpstart.db', "Hash-Name",
                         db.DB_UNKNOWN, db.DB_RDONLY)
    # Walk the File-Chunks table and list each file by name and hex hash.
    curs = self.dbFileChunks.cursor()
    rec = curs.first()
    while rec is not None:
        filehash = rec[0]
        self.listFiles.Append(
            [self.hash_to_name(filehash), b2a_hex(filehash)])
        rec = curs.next()
    curs.close()
    self.dbHashName.close()
    self.dbFileChunks.close()
    self.env.close()
    self.panelSelections.Enable(1)
def __init__(self, b_file):
    # load db parameters
    self.__dbParaIteam, self.__dbParaQty = \
        ParaLoder('../parameter/BSDDBP.json').loadParameter()
    self.__logConfigFile = self.__dbParaIteam['BSDDB_PARAMETER']['BSDDB_LOG_CONFIG_FILE']
    self.__logHandleName = self.__dbParaIteam['BSDDB_PARAMETER']['BSDDB_LOG_HANDLE_NAME']
    self.bsddb_path = self.__dbParaIteam['BSDDB_PARAMETER']['BSDDB_STORAGE_PATH']
    # load log parameters
    logging.config.fileConfig(self.__logConfigFile)
    # create log handle
    self.__dbLogger = logging.getLogger(self.__logHandleName)
    # initialise
    self.__os = os.name
    self.dbenv = db.DBEnv()
    if self.__os == 'posix':
        os.makedirs(name=self.bsddb_path, mode=0o666, exist_ok=True)
        self.__dbLogger.info('this is a posix system')
    else:  # os windows
        self.__dbLogger.info('this is a windows system')
    self.bsddb_file = b_file
    self.dbenv.open(self.bsddb_path, db.DB_CREATE | db.DB_INIT_MPOOL)
    self.dbinst = db.DB(self.dbenv)
    self.dbinst.open(b_file, db.DB_BTREE, db.DB_CREATE, mode=0o666)
    print(os.path.join(self.bsddb_path, self.bsddb_file))
    if self.__os == 'posix':
        # if os is posix, restrict the db file to the owner
        os.chmod(os.path.join(self.bsddb_path, self.bsddb_file), stat.S_IRWXU)
    else:  # os windows
        pass
def __init__(self, cachedir=FREENAS_CACHEDIR):
    log.debug("FreeNAS_BaseCache.__init__: enter")

    self.cachedir = cachedir
    self.__cachefile = os.path.join(self.cachedir, ".cache.db")

    if not self.__dir_exists(self.cachedir):
        os.makedirs(self.cachedir)

    self.__dbenv = db.DBEnv()
    self.__dbenv.open(
        self.cachedir,
        db.DB_INIT_CDB | db.DB_INIT_MPOOL | db.DB_CREATE,
        0o700
    )

    self.__cache = db.DB(self.__dbenv)
    self.__cache.open(self.__cachefile, None, db.DB_HASH, db.DB_CREATE)

    log.debug("FreeNAS_BaseCache.__init__: cachedir = %s", self.cachedir)
    log.debug("FreeNAS_BaseCache.__init__: cachefile = %s", self.__cachefile)
    log.debug("FreeNAS_BaseCache.__init__: leave")
def openDB(filename, mode):
    if mode not in ('r', 'w', 'rw', 'c', 'n'):
        sys.stderr.write("mode must be one of 'r','w','rw','c','n'\n")
        sys.stderr.flush()
        return None
    # map the dbm-style mode letter onto Berkeley DB open flags
    flags = 0
    if mode == 'r':
        flags = bsd.DB_RDONLY
    elif mode == 'rw':
        flags = 0
    elif mode == 'w':
        flags = bsd.DB_CREATE
    elif mode == 'c':
        flags = bsd.DB_CREATE
    elif mode == 'n':
        flags = bsd.DB_CREATE
    flags |= bsd.DB_THREAD

    env = bsd.DBEnv()
    env.set_lk_detect(bsd.DB_LOCK_DEFAULT)
    env.open(
        '.',
        bsd.DB_PRIVATE | bsd.DB_CREATE | bsd.DB_THREAD |
        bsd.DB_INIT_LOCK | bsd.DB_INIT_MPOOL)
    db = bsd.DB(env)
    db.set_bt_compare(compare_fcn)
    db.open(filename, bsd.DB_BTREE, flags, 0o666)
    return _DBWithCursor(db)
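# A minimal usage sketch for openDB above, assuming _DBWithCursor behaves
# like the wrapper of the same name in the old stdlib bsddb package
# (dict-style access over the underlying DB). Filename and keys are
# illustrative only.
handle = openDB('example.db', 'c')   # 'c' creates the file if missing
if handle is not None:
    handle[b'key'] = b'value'
    print(handle[b'key'])
    handle.close()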
def get_dbdir_summary(self, dirpath, name):
    """
    Returns (people_count, bsddb_version, schema_version) of current DB.
    Returns ("Unknown", "Unknown", "Unknown") if invalid DB or other error.
    """
    if config.get('preferences.use-bsddb3') or sys.version_info[0] >= 3:
        from bsddb3 import dbshelve, db
    else:
        from bsddb import dbshelve, db

    from gramps.gen.db import META, PERSON_TBL
    from gramps.gen.db.dbconst import BDBVERSFN

    bdbversion_file = os.path.join(dirpath, BDBVERSFN)
    if os.path.isfile(bdbversion_file):
        vers_file = open(bdbversion_file)
        bsddb_version = vers_file.readline().strip()
        vers_file.close()
    else:
        return "Unknown", "Unknown", "Unknown"

    current_bsddb_version = str(db.version())
    if bsddb_version != current_bsddb_version:
        return "Unknown", bsddb_version, "Unknown"

    env = db.DBEnv()
    flags = db.DB_CREATE | db.DB_PRIVATE | \
            db.DB_INIT_MPOOL | \
            db.DB_INIT_LOG | db.DB_INIT_TXN
    try:
        env.open(dirpath, flags)
    except Exception as msg:
        LOG.warning("Error opening db environment for '%s': %s" %
                    (name, str(msg)))
        try:
            env.close()
        except Exception as msg:
            LOG.warning("Error closing db environment for '%s': %s" %
                        (name, str(msg)))
        return "Unknown", bsddb_version, "Unknown"

    dbmap1 = dbshelve.DBShelf(env)
    fname = os.path.join(dirpath, META + ".db")
    try:
        dbmap1.open(fname, META, db.DB_HASH, db.DB_RDONLY)
    except:
        env.close()
        return "Unknown", bsddb_version, "Unknown"
    schema_version = dbmap1.get(b'version', default=None)
    dbmap1.close()

    dbmap2 = dbshelve.DBShelf(env)
    fname = os.path.join(dirpath, PERSON_TBL + ".db")
    try:
        dbmap2.open(fname, PERSON_TBL, db.DB_HASH, db.DB_RDONLY)
    except:
        env.close()
        return "Unknown", bsddb_version, schema_version
    count = len(dbmap2)
    dbmap2.close()
    env.close()
    return (count, bsddb_version, schema_version)
def setUp(self):
    self._tmpdir = tempfile.mkdtemp()
    self.full_name = os.path.join(self._tmpdir, 'test.grdb')

    self.env = db.DBEnv()
    self.env.set_cachesize(0, 0x2000000)
    self.env.set_lk_max_locks(25000)
    self.env.set_lk_max_objects(25000)

    # clean up unused logs; the flag and method names differ across
    # Berkeley DB versions, so probe for whichever pair is available
    autoremove_flag = None
    autoremove_method = None
    for flag in ["DB_LOG_AUTO_REMOVE", "DB_LOG_AUTOREMOVE"]:
        if hasattr(db, flag):
            autoremove_flag = getattr(db, flag)
            break
    for method in ["log_set_config", "set_flags"]:
        if hasattr(self.env, method):
            autoremove_method = getattr(self.env, method)
            break
    if autoremove_method and autoremove_flag:
        autoremove_method(autoremove_flag, 1)

    # The DB_PRIVATE flag must go if we ever move to multi-user setup
    env_flags = db.DB_CREATE | db.DB_RECOVER | db.DB_PRIVATE | \
                db.DB_INIT_MPOOL | db.DB_INIT_LOCK | \
                db.DB_INIT_LOG | db.DB_INIT_TXN

    env_name = "%s/env" % (self._tmpdir, )
    if not os.path.isdir(env_name):
        os.mkdir(env_name)
    self.env.open(env_name, env_flags)
    (self.person_map, self.surnames) = self._open_tables()
    (self.place_map, self.placerefs) = self._open_treetables()
class BaseThreadedTestCase(unittest.TestCase):
    dbtype = db.DB_UNKNOWN  # must be set in derived class
    dbopenflags = 0
    dbsetflags = 0
    envflags = 0

    def setUp(self):
        if verbose:
            dbutils._deadlock_VerboseFile = sys.stdout

        homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
        self.homeDir = homeDir
        try:
            os.mkdir(homeDir)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        self.env = db.DBEnv()
        self.setEnvOpts()
        self.env.open(homeDir, self.envflags | db.DB_CREATE)

        self.filename = self.__class__.__name__ + '.db'
        self.d = db.DB(self.env)
        if self.dbsetflags:
            self.d.set_flags(self.dbsetflags)
        self.d.open(self.filename, self.dbtype, self.dbopenflags | db.DB_CREATE)
def create(dbname, primary_namespace, secondary_namespaces,
           formatname="unknown"):
    os.mkdir(dbname)
    config_filename = os.path.join(dbname, "config.dat")
    BaseDB.write_config(config_filename=config_filename,
                        index_type=INDEX_TYPE,
                        primary_namespace=primary_namespace,
                        secondary_namespaces=secondary_namespaces,
                        fileid_info={},
                        formatname=formatname)

    dbenv = db.DBEnv(0)
    envflags = db.DB_THREAD | db.DB_INIT_MPOOL
    dbenv.open(dbname, envflags | db.DB_CREATE)
    primary_table = db.DB(dbenv)
    primary_table.open("key_%s" % (primary_namespace, ), None,
                       db.DB_BTREE, db.DB_CREATE, 0o660)
    secondary_tables = {}
    for namespace in secondary_namespaces:
        x = db.DB(dbenv)
        x.open("id_%s" % (namespace, ), None, db.DB_BTREE, db.DB_CREATE, 0)
        secondary_tables[namespace] = x
    for x in secondary_tables.values():
        x.close()
    primary_table.close()
    dbenv.close()
    return open(dbname, "rw")
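# An illustrative call to create above. The directory and namespace names
# are made up, and this assumes the object returned by the module-level
# open() (see the OpenDB constructor later in this file) exposes close().
idx = create("example_index", "accession", ["name", "alias"])
idx.close()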
def setUp(self):
    n = 100000
    self.weibo_ids = self._load_items(n)

    self.db_env = db.DBEnv()
    self.db_env.set_tmp_dir(BDB_TMP_PATH)
    self.db_env.set_lg_dir(BDB_LOG_PATH)
    self.db_env.set_cachesize(0, 8 * (2 << 25), 1)
    self.db_env.open(BDB_DATA_PATH, db.DB_INIT_CDB | db.DB_INIT_MPOOL)

    weibo_hash_db = db.DB(self.db_env)
    weibo_hash_db.open('weibo_hash', None, db.DB_HASH, db.DB_CREATE)
    self.weibo_hash_db = weibo_hash_db

    weibo_hash_db_rdonly = db.DB(self.db_env)
    weibo_hash_db_rdonly.open('weibo_hash', None, db.DB_HASH, db.DB_RDONLY)
    self.weibo_hash_db_rdonly = weibo_hash_db_rdonly

    weibo_btree_db = db.DB(self.db_env)
    weibo_btree_db.open('weibo_btree', None, db.DB_BTREE, db.DB_CREATE)
    self.weibo_btree_db = weibo_btree_db

    weibo_btree_db_rdonly = db.DB(self.db_env)
    weibo_btree_db_rdonly.open('weibo_btree', None, db.DB_BTREE, db.DB_RDONLY)
    self.weibo_btree_db_rdonly = weibo_btree_db_rdonly
def setUp(self):
    self.filename = self.__class__.__name__ + '.db'
    homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
    self.homeDir = homeDir
    try:
        os.mkdir(homeDir)
    except os.error:
        pass
    self.env = db.DBEnv()
    self.env.open(homeDir,
                  db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK)
def __init__(self, directory, dbName, cache=128, pagesize=None):
    global dbOpened, dbEnv
    log.message("Opening database", dbName)
    self.dbName = dbName
    self.nextID = 10000
    self.cacheSize = cache
    try:
        os.makedirs(directory)
    except OSError:
        pass
    # create db env when needed
    if not dbEnv:
        log.debug("BSDDB - creating environment")
        dbEnv = db.DBEnv()
        dbEnv.set_lg_max(16 * 1024 * 1024)
        dbEnv.set_lg_bsize(4 * 1024 * 1024)
        dbEnv.set_cachesize(0, 16 * 1024 * 1024, 0)
        dbEnv.set_lk_max_locks(100000)
        dbEnv.set_lk_max_lockers(100000)
        dbEnv.set_lk_max_objects(100000)
        dbEnv.set_get_returns_none(0)
        envFlags = db.DB_INIT_MPOOL | db.DB_INIT_TXN
        try:
            dbEnv.open(directory, db.DB_CREATE | db.DB_RECOVER | envFlags)
        except db.DBRunRecoveryError:
            log.warning("BSDDB - creating env without DB_RECOVER")
            dbEnv.open(directory, db.DB_CREATE | envFlags)
        # this is potentially dangerous
        dbEnv.set_flags(db.DB_TXN_NOSYNC, 1)
        #dbEnv.set_flags(db.DB_TXN_WRITE_NOSYNC, 1)
    log.debug("BSDDB - opening db", dbName)
    self.db = db.DB(dbEnv)
    if not pagesize:
        self.db.set_pagesize(8192)
    else:
        self.db.set_pagesize(pagesize)
    # transaction
    self.txn = dbEnv.txn_begin()
    # open
    self.db.open(dbName, dbtype=db.DB_BTREE,
                 flags=db.DB_CREATE | db.DB_AUTO_COMMIT)
    dbOpened += 1
    # cache
    self.cache = {}
    self.cacheLinks = {
        "__first__": [None, "__last__"],
        "__last__": ["__first__", None],
    }
    # stats
    self.statCount = 0
    self.statHit = 0
    self.statSwap = 0
    self.statMiss = 0
    self.statCleanSwap = 0
def __init__(self, path, thread_safe=True):
    global db
    from bsddb3 import db
    Data.__init__(self, path)
    self.env = db.DBEnv()
    if not thread_safe:
        DB_THREAD = 0
    else:
        DB_THREAD = db.DB_THREAD
    flags = db.DB_CREATE | db.DB_INIT_MPOOL | DB_THREAD
    self.env.open(path, flags)
def do_open(self):
    self.homeDir = homeDir = os.path.join(
        os.path.dirname(sys.argv[0]), 'db_home')
    try:
        os.mkdir(homeDir)
    except os.error:
        pass
    self.env = db.DBEnv()
    self.env.open(homeDir, self.envflags | db.DB_INIT_MPOOL | db.DB_CREATE)

    self.filename = os.path.split(self.filename)[1]
    self.d = dbshelve.DBShelf(self.env)
    self.d.open(self.filename, self.dbtype, self.dbflags)
def setUp(self):
    homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
    self.homeDir = homeDir
    try:
        os.mkdir(homeDir)
    except os.error:
        pass
    self.env = db.DBEnv()
    self.env.open(homeDir,
                  db.DB_THREAD | db.DB_INIT_MPOOL |
                  db.DB_INIT_LOCK | db.DB_CREATE)
def get_db_handle(dbname, allow_duplicates):
    # open the db environment
    dbe = db.DBEnv()
    dbe.open(config.DATASTORE_FOLDER,
             db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK)
    # open the specific datastore (db file) within the db environment
    thisDB = db.DB(dbe)
    if allow_duplicates:
        thisDB.set_flags(db.DB_DUP | db.DB_DUPSORT)
    thisDB.open(config.dbfile(dbname), None, db.DB_BTREE, db.DB_CREATE)
    return (thisDB, dbe)
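# A minimal usage sketch for get_db_handle. The datastore name is
# illustrative; the caller owns both handles and should close the DB
# before the environment.
handle, env = get_db_handle('example_store', allow_duplicates=False)
handle.put(b'key', b'value')
assert handle.get(b'key') == b'value'
handle.close()
env.close()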
def read_wallet_dat(filename):
    from bsddb3 import db
    filename = os.path.realpath(filename)
    env = db.DBEnv()
    env.set_lk_detect(db.DB_LOCK_DEFAULT)
    env.open(
        os.path.dirname(filename),
        db.DB_PRIVATE | db.DB_THREAD | db.DB_INIT_LOCK |
        db.DB_INIT_MPOOL | db.DB_CREATE,
    )
    d = db.DB(env)
    d.open(filename, 'main', db.DB_BTREE, db.DB_THREAD | db.DB_RDONLY)
    return collections.OrderedDict((k, d[k]) for k in d.keys())
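# Illustrative use of read_wallet_dat: dump the raw records of a
# Bitcoin-style wallet.dat. The path is an assumption; keys and values
# are raw bytes, so they are shown hex-encoded here.
records = read_wallet_dat('/tmp/wallet.dat')
for key, value in records.items():
    print(key.hex(), value.hex())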
def setUp(self):
    self.filename = self.__class__.__name__ + '.db'
    homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
    self.homeDir = homeDir
    try:
        os.mkdir(homeDir)
    except os.error:
        # the home dir already exists; clear out any leftover files
        import glob
        files = glob.glob(os.path.join(self.homeDir, '*'))
        for file in files:
            os.remove(file)
    self.env = db.DBEnv()
    self.env.open(homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
def _init_db_environment(self, homeDir, create=True):
    if not exists(homeDir):
        if create is True:
            mkdir(homeDir)
            # TODO: implement create method and refactor this to it
            self.create(homeDir)
        else:
            return NO_STORE
    db_env = db.DBEnv()
    db_env.set_cachesize(0, CACHESIZE)  # TODO
    # db_env.set_lg_max(1024*1024)
    db_env.set_flags(ENVSETFLAGS, 1)
    db_env.open(homeDir, ENVFLAGS | db.DB_CREATE)
    return db_env
def __init__(self, dir: os_path.isdir):
    if _no_bssdb3:
        raise Exception("the state is not available")
    self._dir = dir
    # the created databases will be cached here
    self._db_cache_lock = Lock()
    self._db_cache = {}
    # the berkeley db environment opened here will be used
    # for all the multiple databases of the module
    env = bsddb.DBEnv()
    env.set_tx_max(100)
    env.set_cachesize(0, 4194304)
    env.set_lg_bsize(131072)
    env.set_lg_max(4194304)
    env.set_lk_detect(bsddb.DB_LOCK_RANDOM)
    env.set_lk_max_locks(16384)
    env.set_lk_max_lockers(8192)
    env.set_lk_max_objects(8192)
    env.set_flags(bsddb.DB_AUTO_COMMIT | bsddb.DB_TXN_NOWAIT, 1)
    # contributed by jah:
    if db_major < 4 or (db_major == 4 and db_minor < 7):
        # legacy API
        env.set_flags(bsddb.DB_LOG_AUTOREMOVE | bsddb.DB_DSYNC_LOG, 1)
        try:
            env.set_flags(bsddb.DB_DIRECT_LOG, 1)
        except bsddb.DBInvalidArgError:
            pass  # DB_DIRECT_LOG may be unsupported in virtual machines
    else:
        # new-style API
        env.log_set_config(bsddb.DB_LOG_AUTO_REMOVE | bsddb.DB_LOG_DSYNC, 1)
        try:
            env.log_set_config(bsddb.DB_LOG_DIRECT, 1)
        except bsddb.DBInvalidArgError:
            pass  # DB_LOG_DIRECT may be unsupported in virtual machines
    env.set_event_notify(self._event_notify)
    env_open_flags = bsddb.DB_CREATE | bsddb.DB_RECOVER | bsddb.DB_THREAD | \
                     bsddb.DB_INIT_LOCK | bsddb.DB_INIT_MPOOL | \
                     bsddb.DB_INIT_TXN | bsddb.DB_INIT_LOG | bsddb.DB_PRIVATE
    env.open(self._dir, env_open_flags)
    self._env = env
def __init__(self):
    self.env = db.DBEnv()
    self.env.open(env_name,
                  db.DB_CREATE | db.DB_INIT_TXN | db.DB_INIT_MPOOL)
    self.the_txn = self.env.txn_begin()
    self.map = db.DB(self.env)
    self.map.open('xxx.db', "p", db.DB_HASH, db.DB_CREATE, 0o666,
                  txn=self.the_txn)
    # note: the references are dropped here while the transaction is
    # still open
    del self.env
    del self.the_txn
def setUp(self):
    self.int_32_max = 0x100000000
    self.homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
    try:
        os.mkdir(self.homeDir)
    except os.error:
        pass
    tempfile.tempdir = self.homeDir
    self.filename = os.path.split(tempfile.mktemp())[1]
    tempfile.tempdir = None

    self.dbenv = db.DBEnv()
    self.dbenv.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL, 0o666)
    self.d = db.DB(self.dbenv)
    self.d.open(self.filename, db.DB_BTREE, db.DB_CREATE, 0o666)
def _base_test_pickle_DBError(self, pickle):
    self.env = db.DBEnv()
    self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
    self.db = db.DB(self.env)
    self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
    self.db.put('spam', 'eggs')
    assert self.db['spam'] == 'eggs'
    try:
        self.db.put('spam', 'ham', flags=db.DB_NOOVERWRITE)
    except db.DBError as egg:
        pickledEgg = pickle.dumps(egg)
        rottenEgg = pickle.loads(pickledEgg)
        if rottenEgg.args != egg.args or type(rottenEgg) != type(egg):
            raise Exception((rottenEgg, '!=', egg))
def open_dbs(self):
    self.env = db.DBEnv()
    self.env.open(self.homeDir, db.DB_JOINENV)

    self.dbFileChunks = db.DB(self.env)
    self.dbFileChunks.open('Jumpstart.db', "File-Chunks",
                           db.DB_UNKNOWN, db.DB_RDONLY)
    self.dbHashName = db.DB(self.env)
    self.dbHashName.open('Jumpstart.db', "Hash-Name",
                         db.DB_UNKNOWN, db.DB_RDONLY)
    self.dbFileUserChunk = db.DB(self.env)
    self.dbFileUserChunk.open('Jumpstart.db', "FileUser-Chunk",
                              db.DB_UNKNOWN, db.DB_RDONLY)
    self.dbFileUserChunk_Blocks = db.DB(self.env)
    self.dbFileUserChunk_Blocks.open('Jumpstart.db', "FileUserChunk-Blocks",
                                     db.DB_UNKNOWN, db.DB_RDONLY)
def __init__(self, database_directory):
    self._log = logging.getLogger("tinyarchive.database.DBManager")
    self._log.info("Opening database environment at %s" % database_directory)
    self._database_directory = os.path.abspath(database_directory)

    if not os.path.isdir(os.path.join(self._database_directory, "dbenv")):
        os.mkdir(os.path.join(self._database_directory, "dbenv"))
    if not os.path.isdir(os.path.join(self._database_directory, "data")):
        os.mkdir(os.path.join(self._database_directory, "data"))

    self._env = db.DBEnv()
    self._env.set_data_dir(os.path.join(self._database_directory, "data"))
    self._env.open(
        os.path.join(self._database_directory, "dbenv"),
        db.DB_INIT_LOCK | db.DB_INIT_LOG | db.DB_INIT_MPOOL | db.DB_CREATE)

    self._databases = {}
def add_config_to_reports(env, db):
    """Add the name of the build configuration as metadata to report
    documents stored in the BDB XML database."""
    try:
        from bsddb3 import db as bdb
        import dbxml
    except ImportError:
        return

    dbfile = os.path.join(env.path, 'db', 'bitten.dbxml')
    if not os.path.isfile(dbfile):
        return

    dbenv = bdb.DBEnv()
    dbenv.open(os.path.dirname(dbfile),
               bdb.DB_CREATE | bdb.DB_INIT_LOCK | bdb.DB_INIT_LOG |
               bdb.DB_INIT_MPOOL | bdb.DB_INIT_TXN, 0)

    mgr = dbxml.XmlManager(dbenv, 0)
    xtn = mgr.createTransaction()
    container = mgr.openContainer(dbfile, dbxml.DBXML_TRANSACTIONAL)
    uc = mgr.createUpdateContext()
    container.addIndex(xtn, '', 'config', 'node-metadata-equality-string', uc)

    qc = mgr.createQueryContext()
    for value in mgr.query(xtn, 'collection("%s")/report' % dbfile, qc):
        doc = value.asDocument()
        metaval = dbxml.XmlValue()
        if doc.getMetaData('', 'build', metaval):
            build_id = int(metaval.asNumber())
            cursor = db.cursor()
            cursor.execute("SELECT config FROM bitten_build WHERE id=%s",
                           (build_id,))
            row = cursor.fetchone()
            if row:
                doc.setMetaData('', 'config', dbxml.XmlValue(row[0]))
                container.updateDocument(xtn, doc, uc)
            else:
                # an orphaned report, for whatever reason... just remove it
                container.deleteDocument(xtn, doc, uc)
    xtn.commit()
    container.close()
    dbenv.close(0)
def open_environ(cls, path, create=True):
    """Open or create the db environment."""
    if not os.path.isdir(path):
        if os.path.exists(path):
            raise ValueError('%s is not a directory' % path)
        else:
            if create:
                os.makedirs(path)
            else:
                raise ValueError('environment does not exist.')
    env = db.DBEnv()
    if cls.flags is None:
        raise NotImplementedError("`flags` attribute must be set.")
    env.open(path, cls.flags)
    return env
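# A minimal usage sketch, assuming open_environ is a classmethod on a store
# base class whose subclasses supply `flags`; `BaseStore`, `ExampleStore`,
# the flag choice, and the path are all illustrative, not from the original.
class ExampleStore(BaseStore):
    flags = db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK

env = ExampleStore.open_environ('/tmp/example_env')
env.close()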
def setUp(self):
    n = 100000
    self.weibos = self._load_items(n)

    self.db_env = db.DBEnv()
    self.db_env.set_tmp_dir(BDB_TMP_PATH)
    self.db_env.set_lg_dir(BDB_LOG_PATH)
    self.db_env.set_cachesize(0, 8 * (2 << 25), 1)
    self.db_env.open(BDB_DATA_PATH,
                     db.DB_INIT_CDB | db.DB_INIT_MPOOL | db.DB_CREATE)

    weibo_hash_db = db.DB(self.db_env)
    weibo_hash_db.open('weibo_hash', None, db.DB_HASH, db.DB_CREATE)
    self.weibo_hash_db = weibo_hash_db

    weibo_hash_db_user = db.DB(self.db_env)
    weibo_hash_db_user.open('weibo_hash_user', None, db.DB_HASH, db.DB_CREATE)
    self.weibo_hash_db_user = weibo_hash_db_user

    weibo_hash_db_retweeted_status = db.DB(self.db_env)
    weibo_hash_db_retweeted_status.open('weibo_hash_retweeted_status', None,
                                        db.DB_HASH, db.DB_CREATE)
    self.weibo_hash_db_retweeted_status = weibo_hash_db_retweeted_status

    weibo_hash_db_text = db.DB(self.db_env)
    weibo_hash_db_text.open('weibo_hash_text', None, db.DB_HASH, db.DB_CREATE)
    self.weibo_hash_db_text = weibo_hash_db_text

    weibo_hash_db_timestamp = db.DB(self.db_env)
    weibo_hash_db_timestamp.open('weibo_hash_timestamp', None,
                                 db.DB_HASH, db.DB_CREATE)
    self.weibo_hash_db_timestamp = weibo_hash_db_timestamp

    weibo_hash_db_reposts_count = db.DB(self.db_env)
    weibo_hash_db_reposts_count.open('weibo_hash_reposts_count', None,
                                     db.DB_HASH, db.DB_CREATE)
    self.weibo_hash_db_reposts_count = weibo_hash_db_reposts_count

    weibo_hash_db_source = db.DB(self.db_env)
    weibo_hash_db_source.open('weibo_hash_source', None,
                              db.DB_HASH, db.DB_CREATE)
    self.weibo_hash_db_source = weibo_hash_db_source
def setUp(self):
    self.homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
    try:
        os.mkdir(self.homeDir)
    except os.error:
        pass
    self.env = db.DBEnv()
    self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)

    self.primary_db = db.DB(self.env)
    self.primary_db.open(self.db_name, 'primary', db.DB_BTREE, db.DB_CREATE)
    self.secondary_db = db.DB(self.env)
    self.secondary_db.set_flags(db.DB_DUP)
    self.secondary_db.open(self.db_name, 'secondary', db.DB_BTREE,
                           db.DB_CREATE)
    self.primary_db.associate(self.secondary_db, lambda key, data: data)
    self.primary_db.put('salad', 'eggs')
    self.primary_db.put('spam', 'ham')
    self.primary_db.put('omelet', 'eggs')
def __init__(self, b_path, b_file):
    self.__os = os.name
    self.dbenv = db.DBEnv()
    self.bsddb_path = b_path
    if self.__os == 'posix':
        os.makedirs(name=self.bsddb_path, mode=0o666, exist_ok=True)
    else:  # os windows
        pass
    self.bsddb_file = b_file
    self.dbenv.open(b_path, db.DB_CREATE | db.DB_INIT_MPOOL)
    self.dbinst = db.DB(self.dbenv)
    self.dbinst.open(b_file, db.DB_BTREE, db.DB_CREATE, mode=0o666)
    print(os.path.join(self.bsddb_path, self.bsddb_file))
    if self.__os == 'posix':
        # if os is posix, restrict the db file to the owner
        os.chmod(os.path.join(self.bsddb_path, self.bsddb_file), stat.S_IRWXU)
    else:  # os windows
        pass
def __init__(self, dbname, mode="r"):
    if mode not in ("r", "rw"):
        raise TypeError("Unknown mode: %r" % (mode, ))
    self.__need_flush = 0
    BaseDB.OpenDB.__init__(self, dbname, INDEX_TYPE)

    self.dbenv = None
    dbenv = db.DBEnv()
    envflags = db.DB_THREAD | db.DB_INIT_MPOOL
    dbenv.open(dbname, envflags)

    if mode == "r":
        self._dbopen_flags = db.DB_RDONLY
    else:
        self._dbopen_flags = 0

    self.primary_table = db.DB(dbenv)
    self.primary_table.open("key_%s" % (self.primary_namespace, ), None,
                            db.DB_BTREE, self._dbopen_flags, 0o660)
    self.secondary_tables = {}
    self.dbenv = dbenv