def addPathShelve(uuid, path, fh):
    config.logger.debug("register: addPathShelve(%s,%s,_fh_)", uuid, path)
    try:
        with shelve.open(uuid_cache_path) as db:
            db[uuid] = str(path)
        config.logger.debug("register: DB type: %s",
                            str(dbm.whichdb(uuid_cache_path)))
    except Exception as e:
        _, _, exc_tb = sys.exc_info()
        config.logger.error(
            "register: Exception in addPathShelve(%s,%s) line: %s shelve.open (1) %s",
            str(uuid), str(path), exc_tb.tb_lineno, e)
        try:
            # In case we couldn't open the shelve file ("shelve.open db type could not
            # be determined"), remove all files named uuid_cache_path with any extension,
            # as we do not know which extension was chosen by shelve.
            config.logger.info("register: clean uuid cache %s", str(uuid_cache_path))
            # Note that this deletes all "uuid" files, including those for the Viewer
            # and other files with that name prefix such as uuid.db.org!
            for p in Path(Path(uuid_cache_path).parent).glob("{}*".format(config.uuid_cache)):
                if str(p) != uuid_cache_path_lock:  # if not the lock file, delete
                    p.unlink()
            # Try again to access/create the shelve file.
            with shelve.open(uuid_cache_path) as db:
                db[uuid] = str(path)
            config.logger.info("register: Generated db type: %s",
                               str(dbm.whichdb(uuid_cache_path)))
        except Exception as e:
            config.logger.error(
                "register: Exception in addPathShelve(%s,%s) line: %s shelve.open (2) %s",
                str(uuid), str(path), exc_tb.tb_lineno, e)
    finally:
        fh.flush()
        os.fsync(fh.fileno())
def main(): print("Pickle is available.") db = dbm.dumb.open("dumbdb", "c") db["1"] = "1" db.close() dbstr = dbm.whichdb("dumbdb") if dbstr: print("Dumbdbm is available.") else: print("Dumbdbm is not available.") db = dbm.bsd.open("dbhash", "c") db["1"] = "1" db.close() dbstr = dbm.whichdb("dbhash") if dbstr == "dbhash": print("Dbhash is available.") else: print("Dbhash is not available.") if bsddb is None: dbstr = "" else: db = bsddb.hashopen("bsddb3", "c") db["1"] = "1" db.close() dbstr = dbm.whichdb("bsddb3") if dbstr == "dbhash": print("Bsddb[3] is available.") else: print("Bsddb[3] is not available.") print() hammie = get_pathname_option("Storage", "persistent_storage_file") use_dbm = options["Storage", "persistent_use_database"] if not use_dbm: print("Your storage %s is a: pickle" % (hammie,)) return if not os.path.exists(hammie): print("Your storage file does not exist yet.") return db_type = dbm.whichdb(hammie) if db_type == "dbhash": if hasattr(bsddb, '__version__'): try: db = bsddb.hashopen(hammie, "r") except bsddb.error: pass else: db.close() print("Your storage", hammie, "is a: bsddb[3]") return elif db_type is None: print("Your storage %s is unreadable." % (hammie,)) print("Your storage %s is a: %s" % (hammie, db_type))
def main(): """Main function""" # WRITE ####### db = dbm.dumb.open('foo_dumbdbm', 'c') db['one'] = 'un' db['two'] = 'dos' db['three'] = 'tres' db.close() # WHICH DBM ### print("dbm.whichdb:", dbm.whichdb('foo_dumbdbm')) # READ ######## db = dbm.dumb.open('foo_dumbdbm', 'r') # Iterate loop: first method (common to any dbm module) for k in db.keys(): print(k, ':', db[k]) # Iterate loop: second method (dumbdbm supports db.items()) for k, v in db.items(): print(k, ':', v) db.close()
def main(): """Main function""" # WRITE ####### db = dbm.ndbm.open('foo_dbm', 'c') db['one'] = 'un' db['two'] = 'dos' db['three'] = 'tres' db.close() # WHICH DBM ### print("whichdb:", dbm.whichdb('foo_dbm')) # READ ######## db = dbm.ndbm.open('foo_dbm', 'r') for k in db.keys(): print(k, ':', db[k]) db.close()
def loadmodel(path):
    """
    LOADMODEL - load a model using the built-in load module.

       Check that the model prototype has not changed; if so, adapt to the
       new model prototype.

       Usage:
          md = loadmodel(path)
    """
    # Check the existence of the database (independent of file extension!)
    if whichdb(path):
        # do nothing
        pass
    else:
        raise IOError("loadmodel error message: file '%s' does not exist" % path)

    try:
        # Recover the model stored in the file and name it md.
        struc = loadvars(path)
        name = [key for key in struc.keys()]
        if len(name) > 1:
            raise IOError("loadmodel error message: file '%s' contains several variables. "
                          "Only one model should be present." % path)
        md = struc[name[0]]
        return md
    except Exception as me:
        print(me)
        raise IOError("could not load model '%s'" % path)
def main(): """Main function""" # WRITE ####### db = dbm.gnu.open('foo_gdbm', 'c') db['one'] = 'un' db['two'] = 'dos' db['three'] = 'tres' db.close() # WHICH DBM ### print("whichdb:", dbm.whichdb('foo_gdbm')) # READ ######## db = dbm.gnu.open('foo_gdbm', 'r') # Iterate loop: first method (common to any dbm module) for k in db.keys(): print(k, ':', db[k]) # Iterate loop: second method (more efficient) # The following code prints every key in the database db, without having to create a list in memory that contains them all. k = db.firstkey() while k != None: print(k, ':', db[k]) k = db.nextkey(k) db.close()
def is_db_generated(self):
    '''Update database if available or sleep and try again'''
    if not self.dbinit_queue.empty():
        # A new dictionary was passed; retrieve it.
        self.newdict, self.unsearched = self.dbinit_queue.get()
        # Message TMakeSearch to stop querying the dictionary.
        self.db_update = True
        self.query_queue.put(None)
        sleep(0.11)  # TMakeSearch takes 0.1s naps. Check further.

        if whichdb(self.db) == 'dbhash':
            # For dumbdbm, update() jams the app, as does manual updating;
            # it's not dumb, it's just not worthy.
            self.fdict.update(self.newdict)
        else:
            for key in self.newdict:
                self.fdict[key] = self.newdict[key]
        print('fdict is created')
        self.db_update = False

        # save new database
        self.fdict.sync()
        print('fdict synced')

        # Open a new TMakeSearch with the updated database
        #thread.start_new_thread(TMakeSearch, (self.fdict, self.query_queue, self.result_queue))

        # Cleaning up
        self.newdict.clear()
        self.newdict = None
        self.gtime = time() - self.gtime
        # to read about {}.format
        # also, a label may be simpler
        self.entry_var.set('Database generation time- ' + str(self.gtime) +
                           's. Type to search. [F5 - Refresh Database]')

        # Pass a signal to close TMakeSearch, then reopen it
        self.query_queue.put(True)
        thread.start_new_thread(
            TMakeSearch, (self.fdict, self.query_queue, self.result_queue))
        self.entry_box.icursor(0)
        #self.loading.destroy()
        self.panel.delete(*self.panel.get_children())
        self.panel.insert(
            '', 0,
            text='Scorch Mode is faster but uses more memory',
            values=('Loads database into RAM', ))
        #self.keylist=fdict.keys()  # for scorch mode
        self.counter = 0
        #self.IS_1ST_PRESS=True
        # for testing
        #print time()-self.start
        #print self.dict_size()
    else:
        self.after(100, self.is_db_generated)
def __init__(self, filename, content='shelf'):
    self.filename = filename
    self.content = content
    self._exists = os.path.exists(filename)
    self._is_shelf = content == 'shelf' or dbm.whichdb(filename) == 'dbm.gnu'
    self.db = None
    self._modified = False
def setAccountShelve(account_id, fh):
    config.logger.debug("account: setAccountShelve(%s,_fh_)", account_id)
    try:
        with shelve.open(account_cache_path) as db:
            config.logger.debug("account: DB type: %s",
                                str(dbm.whichdb(account_cache_path)))
            if account_id in db:
                return db[account_id]
            else:
                new_nr = len(db)
                db[account_id] = new_nr
                return new_nr
    except Exception as e:
        _, _, exc_tb = sys.exc_info()
        config.logger.error(
            "account: Exception in setAccountShelve(%s) line (1): %s shelve.open %s",
            str(account_id), exc_tb.tb_lineno, e)
        try:
            # In case we couldn't open the shelve file ("shelve.open db type could not
            # be determined"), remove all files named account_cache_path with any extension,
            # as we do not know which extension was chosen by shelve.
            config.logger.info("account: clean account cache %s", str(account_cache_path))
            # Note that this deletes all "account" files, including those for the Viewer
            # and other files with that name prefix such as account.db.org!
            for p in Path(Path(account_cache_path).parent).glob("{}*".format(config.account_cache)):
                if str(p) != account_cache_lock_path:  # if not the lock file, delete
                    p.unlink()
            # Try again to access/create the shelve file.
            with shelve.open(account_cache_path) as db:
                config.logger.info("account: Generated db type: %s",
                                   str(dbm.whichdb(account_cache_path)))
                if account_id in db:
                    return db[account_id]
                else:
                    new_nr = len(db)
                    db[account_id] = new_nr
                    return new_nr
        except Exception as e:
            config.logger.error(
                "account: Exception in setAccountShelve(%s) line (2): %s shelve.open %s",
                str(account_id), exc_tb.tb_lineno, e)
            return None
    finally:
        fh.flush()
        os.fsync(fh.fileno())
def update_dictdb(lookup_word, response):
    response = json.loads(response.decode("utf-8"))
    meaning = ",".join(response["text"])
    if not dbm.whichdb(DB_FILE):
        with dbm.open(DB_FILE, "n") as dict_db:
            dict_db[lookup_word] = meaning
    else:
        with dbm.open(DB_FILE, "w") as dict_db:
            dict_db[lookup_word] = meaning
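# A minimal usage sketch for update_dictdb() above, assuming it runs in the same
# module that defines DB_FILE (so the global below is the one the function reads)
# and that the raw response has the JSON shape the function implies: an object
# with a "text" list. The file name and payload here are hypothetical.
import dbm
import json

DB_FILE = "dict_cache"          # hypothetical path for this sketch

raw = json.dumps({"text": ["casa", "hogar"]}).encode("utf-8")   # fake API body
update_dictdb("house", raw)

with dbm.open(DB_FILE, "r") as dict_db:
    print(dict_db[b"house"])    # b'casa,hogar'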
def my_whichdb(filename):
    if filename[-7:] == ".dblite":
        return "SCons.dblite"
    try:
        with open(filename + ".dblite", "rb"):
            return "SCons.dblite"
    except IOError:
        pass
    return whichdb(filename)
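# Hypothetical usage of my_whichdb() above: anything with (or next to) a ".dblite"
# signature file is reported as SCons' own format, everything else is deferred to
# the standard whichdb. The file names below are made up.
from dbm import whichdb   # the fallback used by my_whichdb

print(my_whichdb(".sconsign.dblite"))   # 'SCons.dblite' (matched by extension)
print(my_whichdb(".sconsign"))          # 'SCons.dblite' if '.sconsign.dblite' exists,
                                        # otherwise whatever whichdb() reports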
def execute(self, opt_values, pos_args):
    dep_file = opt_values["dep_file"]
    db_type = whichdb(dep_file)
    six.print_("DBM type is '%s'" % db_type)
    if db_type in ("dbm", "dbm.ndbm"):  # pragma: no cover
        raise InvalidCommand("ndbm does not support iteration of elements")
    data = dbm.open(dep_file)
    for key, value_str in dbm_iter(data):
        value_dict = json.loads(value_str.decode("utf-8"))
        value_fmt = pprint.pformat(value_dict, indent=4, width=100)
        six.print_("{key} -> {value}".format(key=key, value=value_fmt))
def execute(self, opt_values, pos_args):
    dep_file = opt_values['dep_file']
    db_type = whichdb(dep_file)
    six.print_("DBM type is '%s'" % db_type)
    if db_type in ('dbm', 'dbm.ndbm'):  # pragma: no cover
        raise InvalidCommand('ndbm does not support iteration of elements')
    data = dbm.open(dep_file)
    for key, value_str in dbm_iter(data):
        value_dict = json.loads(value_str.decode('utf-8'))
        value_fmt = pprint.pformat(value_dict, indent=4, width=100)
        six.print_("{key} -> {value}".format(key=key, value=value_fmt))
def test_whichdb(self):
    for module in dbm_iterator():
        # Check whether whichdb correctly guesses module name
        # for databases opened with "module" module.
        # Try with empty files first.
        name = module.__name__
        if name == 'dbm.dumb':
            continue   # whichdb can't support dbm.dumb
        test.support.unlink(_fname)
        f = module.open(_fname, 'c')
        f.close()
        self.assertEqual(name, dbm.whichdb(_fname))
        # Now add a key
        f = module.open(_fname, 'w')
        f[b"1"] = b"1"
        # and test that we can find it
        self.assertTrue(b"1" in f)
        # and read it
        self.assertTrue(f[b"1"] == b"1")
        f.close()
        self.assertEqual(name, dbm.whichdb(_fname))
def info(ctx, data):
    ''' Display summary information about the DB '''
    print('DB Type:', whichdb(ctx.parent.params['db']))
    print('Version:', data.get('_dbversion'))
    print('Name   :', data.get('name'))
    print('Key    :', data.get('_key'))
    print("Count  :", len(data))
    print()
    print('Location(s):')
    pprint.pprint(data.get('_serial_libraries'))
def dep_manager_fixture(request, dep_class, tmp_path_factory):
    filename = str(tmp_path_factory.mktemp('x', True) / 'testdb')
    dep_file = Dependency(dep_class, filename)
    dep_file.whichdb = whichdb(dep_file.name) if dep_class is DbmDB else 'XXX'
    dep_file.name_ext = db_ext.get(dep_file.whichdb, [''])

    def remove_depfile():
        if not dep_file._closed:
            dep_file.close()
        remove_db(dep_file.name)
    request.addfinalizer(remove_depfile)

    return dep_file
def dbm_cache_type(dbmfile):
    global dbm_types
    if dbmfile in dbm_types:
        return dbm_types[dbmfile]
    module = dbm.whichdb(dbmfile)
    if module:
        dbm_type = __import__(module)
        dbm_types[dbmfile] = dbm_type
        return dbm_type
    else:
        # this is a new file
        return anydbm
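# A small sketch of how dbm_cache_type() above might be used. dbm_types and anydbm
# come from the snippet, not from a documented API: dbm_types is assumed to be a
# module-level cache dict, and anydbm an alias for the top-level dbm package.
import dbm
import dbm as anydbm    # assumed fallback alias

dbm_types = {}          # assumed module-level cache

handler = dbm_cache_type("cachefile")     # hypothetical file name
db = handler.open("cachefile", "c")       # both the detected module and the fallback expose open()
db[b"key"] = b"value"
db.close()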
def open(db_name, mode):
    if os.path.exists(db_name) and \
       options.default("globals", "dbm_type") != \
       options["globals", "dbm_type"]:
        dbm_type = dbm.whichdb(db_name)
        if (sys.platform == "win32" and sys.version_info < (2, 3) and
                dbm_type == "dbhash"):
            dbm_type = "db3hash"
    else:
        dbm_type = options["globals", "dbm_type"].lower()
    f = open_funcs.get(dbm_type)
    if f is None:
        raise error("Unknown dbm type: %s" % dbm_type)
    return f(db_name, mode)
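# open_funcs is referenced above but not shown. This is a plausible sketch of the
# kind of dispatch table it implies, built only from stdlib openers; the real
# mapping in the original project (and names like "db3hash") may differ.
import dbm.dumb

try:
    import dbm.gnu as _gnu
except ImportError:      # gdbm is not available on every platform
    _gnu = None

open_funcs = {
    "dumbdbm": dbm.dumb.open,    # hypothetical keys matching the dbm_type strings
}
if _gnu is not None:
    open_funcs["gdbm"] = _gnu.open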
def dep_manager_fixture(request, dep_class):
    # copied from tempdir plugin
    name = request._pyfuncitem.name
    name = py.std.re.sub("[\W]", "_", name)
    my_tmpdir = request.config._tmpdirhandler.mktemp(name, numbered=True)

    dep_file = Dependency(dep_class, os.path.join(my_tmpdir.strpath, "testdb"))
    dep_file.whichdb = whichdb(dep_file.name) if dep_class is DbmDB else 'XXX'
    dep_file.name_ext = db_ext.get(dep_file.whichdb, [''])

    def remove_depfile():
        if not dep_file._closed:
            dep_file.close()
        remove_db(dep_file.name)
    request.addfinalizer(remove_depfile)

    return dep_file
def open(self):
    call_hook("daemon_db_open", [self.filename])

    mode = 'c'
    if dbm.whichdb(self.filename) == 'dbm.gnu':
        mode += 'u'
    self.shelf = shelve.open(self.filename, mode)

    self.check_control_data()

    if self.caching == CACHE_ALWAYS or\
            (self.caching == CACHE_ON_CONNS and self.has_conns):
        for key in self.shelf:
            self.cache[key] = self.shelf[key]

    self.index = list(self.shelf.keys())

    log.debug("Shelf opened: %s" % self.shelf)
def __init__(self, *args, **config):
    super(database, self).__init__(*args, **config)

    default_db = config.get("dbtype", "anydbm")
    if not default_db.startswith("."):
        default_db = '.' + default_db

    self._db_path = os.path.join(self.location,
        fs_template.gen_label(self.location, self.label) + default_db)
    self.__db = None
    mode = "w"
    if whichdb(self._db_path) in ("dbm.gnu", "gdbm"):
        # Allow multiple concurrent writers (see bug #53607).
        mode += "u"
    try:
        # dbm.open() will not work with bytes in python-3.1:
        #   TypeError: can't concat bytes to str
        self.__db = anydbm_module.open(self._db_path, mode, self._perms)
    except anydbm_module.error:
        # XXX handle this at some point
        try:
            self._ensure_dirs()
            self._ensure_dirs(self._db_path)
        except (OSError, IOError) as e:
            raise cache_errors.InitializationError(self.__class__, e)

        # try again if failed
        try:
            if self.__db is None:
                # dbm.open() will not work with bytes in python-3.1:
                #   TypeError: can't concat bytes to str
                if gdbm is None:
                    self.__db = anydbm_module.open(self._db_path, "c", self._perms)
                else:
                    # Prefer gdbm type if available, since it allows
                    # multiple concurrent writers (see bug #53607).
                    self.__db = gdbm.open(self._db_path, "cu", self._perms)
        except anydbm_module.error as e:
            raise cache_errors.InitializationError(self.__class__, e)
    self._ensure_access(self._db_path)
def __init__(self, *args, **config):
    super(database, self).__init__(*args, **config)

    default_db = config.get("dbtype", "anydbm")
    if not default_db.startswith("."):
        default_db = '.' + default_db

    self._db_path = os.path.join(self.location,
        fs_template.gen_label(self.location, self.label) + default_db)
    self.__db = None
    mode = "w"
    if dbm.whichdb(self._db_path) in ("dbm.gnu", "gdbm"):
        # Allow multiple concurrent writers (see bug #53607).
        mode += "u"
    try:
        # dbm.open() will not work with bytes in python-3.1:
        #   TypeError: can't concat bytes to str
        self.__db = dbm.open(self._db_path, mode, self._perms)
    except dbm.error:
        # XXX handle this at some point
        try:
            self._ensure_dirs()
            self._ensure_dirs(self._db_path)
        except (OSError, IOError) as e:
            raise cache_errors.InitializationError(self.__class__, e)

        # try again if failed
        try:
            if self.__db is None:
                # dbm.open() will not work with bytes in python-3.1:
                #   TypeError: can't concat bytes to str
                if gdbm is None:
                    self.__db = dbm.open(self._db_path, "c", self._perms)
                else:
                    # Prefer gdbm type if available, since it allows
                    # multiple concurrent writers (see bug #53607).
                    self.__db = gdbm.open(self._db_path, "cu", self._perms)
        except dbm.error as e:
            raise cache_errors.InitializationError(self.__class__, e)
    self._ensure_access(self._db_path)
def open_shelf(path):
    """
    Opens a python shelf file, used to store various types of metadata
    """
    shelve_compat.ensure_shelve_compat()

    # As of Exaile 4, new DBs will only be created as Berkeley DB Hash databases
    # using either bsddb3 (external) or bsddb (stdlib but sometimes removed).
    # Existing DBs created with other backends will be migrated to Berkeley DB.
    # We do this because BDB is generally considered more performant,
    # and because gdbm currently doesn't work at all in MSYS2.

    # Some DBM modules don't use the path we give them, but rather they have
    # multiple filenames. If the specified path doesn't exist, double check
    # to see if whichdb returns a result before trying to open it with bsddb
    force_migrate = False
    if not os.path.exists(path):
        from dbm import whichdb

        if whichdb(path) is not None:
            force_migrate = True

    if not force_migrate:
        try:
            db = bsddb.hashopen(path, 'c')
            return shelve.BsdDbShelf(db, protocol=PICKLE_PROTOCOL)
        except bsddb.db.DBInvalidArgError:
            logger.warning("%s was created with an old backend, migrating it", path)
        except Exception:
            raise

    # special case: zero-length file
    if not force_migrate and os.path.getsize(path) == 0:
        os.unlink(path)
    else:
        from xl.migrations.database.to_bsddb import migrate

        migrate(path)

    db = bsddb.hashopen(path, 'c')
    return shelve.BsdDbShelf(db, protocol=PICKLE_PROTOCOL)
def depfile(request):
    if hasattr(request, 'param'):
        dep_class = request.param
    else:
        dep_class = Dependency

    # copied from tempdir plugin
    name = request._pyfuncitem.name
    name = py.std.re.sub("[\W]", "_", name)
    my_tmpdir = request.config._tmpdirhandler.mktemp(name, numbered=True)
    dep_file = dep_class(os.path.join(my_tmpdir.strpath, "testdb"))
    dep_file.whichdb = whichdb(dep_file.name)
    dep_file.name_ext = db_ext.get(dep_file.whichdb, [''])

    def remove_depfile():
        if not dep_file._closed:
            dep_file.close()
        remove_db(dep_file.name)
    request.addfinalizer(remove_depfile)

    return dep_file
def whichdb(filename):
    """Guess which db package to use to open a db file.

    Return values:
    - None if the database file can't be read;
    - empty string if the file can be read but can't be recognized;
    - the name of the dbm submodule (e.g. 'ndbm' or 'gnu') if recognized.

    Importing the given module may still fail, and opening the database
    using that module may still fail.

    This is a slightly extended form of `dbm.whichdb` that also accounts
    for `sqlite3`.
    """
    tst = dbm.whichdb(filename)
    if tst or tst is None:
        return tst
    if issqlite3(filename):
        return "sqlite3"
    return tst
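# issqlite3() is referenced above (and in the bsddb3-aware variant further down) but
# not defined here. A minimal sketch, assuming it just checks for the 16-byte SQLite
# header at the start of the file.
def issqlite3(filename):
    """Return True if the file starts with the SQLite 3 magic header (assumed helper)."""
    try:
        with open(filename, "rb") as f:
            return f.read(16) == b"SQLite format 3\x00"
    except OSError:
        return False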
def sync(self):
    # Check here in case we're called after close by plugins that
    # don't know better.
    if self.shelf is None:
        log.debug("No shelf.")
        return

    for key in self.cache:
        self.shelf[key] = self.cache[key]

    if self.caching == CACHE_OFF or\
            (self.caching == CACHE_ON_CONNS and not self.has_conns):
        self.cache = {}
        log.debug("Unloaded.")

    self.shelf.sync()
    log.debug("Synced.")

    if dbm.whichdb(self.filename) == 'dbm.gnu':
        self.shelf.close()
        self._reorganize()
        self.open()
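# _reorganize() is not shown in this snippet. Since the branch above only runs for
# dbm.gnu files, a reasonable sketch is to call gdbm's reorganize() to reclaim the
# space left behind by deleted or rewritten entries; the project's real method may differ.
import dbm.gnu

def _reorganize(self):
    db = dbm.gnu.open(self.filename, 'w')
    try:
        db.reorganize()    # compacts the gdbm file on disk
    finally:
        db.close()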
env.B(target = 'subdir/f3.out', source = 'subdir/f3.in')
env.B(target = 'subdir/f4.out', source = 'subdir/f4.in')
""" % locals())

test.write('f1.in', "f1.in\n")
test.write('f2.in', "f2.in\n")
test.write(['subdir', 'f3.in'], "subdir/f3.in\n")
test.write(['subdir', 'f4.in'], "subdir/f4.in\n")

test.run()

# We don't check for explicit .db or other file, because base "dbm"
# can use different file extensions on different implementations.
test.fail_test(os.path.exists('.sconsign') and 'dbm' not in dbm.whichdb('.sconsign'),
               message=".sconsign existed and wasn't any type of dbm file")
test.must_not_exist(test.workpath('.sconsign.dblite'))

test.must_not_exist(test.workpath('subdir', '.sconsign'))
test.must_not_exist(test.workpath('subdir', '.sconsign.dblite'))

test.must_match('f1.out', "f1.in\n")
test.must_match('f2.out', "f2.in\n")
test.must_match(['subdir', 'f3.out'], "subdir/f3.in\n")
test.must_match(['subdir', 'f4.out'], "subdir/f4.in\n")

test.up_to_date(arguments='.')

test.fail_test(os.path.exists('.sconsign') and 'dbm' not in dbm.whichdb('.sconsign'),
               message=".sconsign existed and wasn't any type of dbm file")
# dbm_whichdb.py
import dbm

print(dbm.whichdb('/tmp/example.db'))
# then opens them with the appropriate module. It is used as a backend
# for shelve, which knows how to store objects using pickle.
import dbm
import os

# https://pymotw.com/2/anydbm/

# Creating a new database
db1 = dbm.open('anydbm.db', 'c')   # 'c': create a new database when necessary,
                                   # 'n': always create a new database
db1['key'] = 'value'
db1['today'] = 'Sunday'
db1['author'] = 'Doug'
db1.close()

print(dbm.whichdb("anydbm.db"))    # dbm.gnu

# Open an existing database
db2 = dbm.open('anydbm.db', 'r')
try:
    print('keys():', db2.keys())   # keys(): [b'today', b'author', b'key']

    k = db2.firstkey()
    while k is not None:
        v = db2[k]
        print('iterating:', k, v)  # iterating: b'today' b'Sunday'
        k = db2.nextkey(k)         # iterating: b'author' b'Doug'
                                   # iterating: b'key' b'value'

    print('db["author"] =', db2['author'])   # db["author"] = b'Doug'
finally:
    db2.close()
sg.theme('DarkGrey')   # DarkGrey  - grey25
                       # DarkGrey5 - grey20
                       # DarkGray2 - grey17
ip_bgcolor = 'orange3'   # blanched almond, orange3

# Define the window's contents
play_image = 'a.png'
pause_image = 'p.png'
icon_image = 'tw_icon.icns'
font = 'Andale Mono'
guiA = 0
guiB = 0

if dbm.whichdb('tw_db') is None:
    username = '******'
else:
    # create database for pure strings only
    db = dbm.open('tw_db', 'c')
    username_b = db['username']
    username = str(username_b, "utf-8")
    db.close()

if dbm.whichdb('shelve_tw_db') is None:
    listKT = ['Select...']   # Comment list for storing project comments
else:
    # create shelf database for string lists and other objects
    sh = shelve.open('shelve_tw_db', 'c')
    listKT = sh['list']
def strip_extension(db_filename):
    """Strips the underlying database extension for ndbm databases"""
    ndbm_filename = os.path.splitext(db_filename)[0]
    return ndbm_filename \
        if dbm.whichdb(ndbm_filename) == 'dbm.ndbm' \
        else db_filename
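# A brief, hypothetical illustration of strip_extension() above: ndbm commonly stores
# its data in "<name>.db", so the on-disk name maps back to the base name that the
# dbm API expects. The file names are made up, and dbm.ndbm may be absent on some builds.
import dbm.ndbm

db = dbm.ndbm.open("example_store", "c")   # on many platforms this creates example_store.db
db[b"k"] = b"v"
db.close()

print(strip_extension("example_store.db"))   # 'example_store' if that base name is an ndbm file
print(strip_extension("plain.txt"))          # 'plain.txt' (unchanged)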
def whichdb(filename):
    """Guess which db package to use to open a db file.

    Return values:
    - None if the database file can't be read;
    - empty string if the file can be read but can't be recognized;
    - the name of the dbm submodule (e.g. 'ndbm' or 'gnu') if recognized.

    Importing the given module may still fail, and opening the database
    using that module may still fail.

    This is a slightly extended form of `dbm.whichdb` that also accounts
    for `bsddb3` and `sqlite3`.
    """
    ## use the standard function
    tst = dbm.whichdb(filename)

    ## identified or non-existing DB?
    if tst or tst is None:
        return tst

    ## non-identified DB
    ## check for bsddb magic numbers (from python2)
    try:
        with io.open(filename, 'rb') as f:
            # Read the start of the file -- the magic number
            s16 = f.read(16)
    except OSError:
        return None

    s = s16[0:4]
    # Return "" if not at least 4 bytes
    if len(s) != 4:
        return ""

    # Convert to 4-byte int in native byte order -- return "" if impossible
    try:
        (magic,) = struct.unpack("=l", s)
    except struct.error:
        return ""

    # Check for GNU dbm
    if magic in (0x13579ace, 0x13579acd, 0x13579acf):
        return "dbm.gnu"

    # Check for old Berkeley db hash file format v2
    if magic in (0x00061561, 0x61150600):
        return "bsddb185"

    # Later versions of Berkeley db hash file have a 12-byte pad in
    # front of the file type
    try:
        (magic,) = struct.unpack("=l", s16[-4:])
    except struct.error:
        return ""

    # Check for BSD hash
    if magic in (0x00061561, 0x61150600):
        return "bsddb3"

    if issqlite3(filename):
        return 'sqlite3'

    # Unknown
    return ""
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2008 Doug Hellmann All rights reserved.
#
"""
"""

#end_pymotw_header
import dbm

print(dbm.whichdb('/tmp/example.db'))
import dbm

# Open database, creating it if necessary.
with dbm.open('cache', 'c') as db:

    # Record some values
    db[b'hello'] = b'there'
    db['www.python.org'] = 'Python Website'
    db['www.cnn.com'] = 'Cable News Network'

    # Note that the keys are considered bytes now.
    assert db[b'www.python.org'] == b'Python Website'
    # Notice how the value is now in bytes.
    assert db['www.cnn.com'] == b'Cable News Network'

    # Often-used methods of the dict interface work too.
    print(db.get('python.org', b'not present'))

    # Storing a non-string key or value will raise an exception (most
    # likely a TypeError). To fix use bytes(4)
    # db['www.yahoo.com'] = 4

    key = db.firstkey()
    while key is not None:
        print(key)
        key = db.nextkey(key)

# db is automatically closed when leaving the with statement.

# https://docs.python.org/3/library/dbm.html#dbm.whichdb
print("dbm.whichdb(filename)=%s" % (dbm.whichdb('cache')))
def loadvars(*args):
    """
    LOADVARS - function to load variables from a file.

       This function loads one or more variables from a file. The names of
       the variables must be supplied. If more than one variable is specified,
       it may be done with a list of names or a dictionary with names as keys.
       The output type will correspond to the input type. All the variables
       in the file may be loaded by specifying only the file name.

       Usage:
          a = loadvars('shelve.dat', 'a')
          [a, b] = loadvars('shelve.dat', ['a', 'b'])
          nvdict = loadvars('shelve.dat', {'a': None, 'b': None})
          nvdict = loadvars('shelve.dat')
    """
    filename = ''
    nvdict = {}

    if len(args) >= 1 and isinstance(args[0], str):
        filename = args[0]
        if not filename:
            filename = '/tmp/shelve.dat'
    else:
        raise TypeError("Missing file name.")

    if len(args) >= 2 and isinstance(args[1], str):   # (filename, name)
        for name in args[1:]:
            nvdict[name] = None
    elif len(args) == 2 and isinstance(args[1], list):   # (filename, [names])
        for name in args[1]:
            nvdict[name] = None
    elif len(args) == 2 and isinstance(args[1], dict):   # (filename, {names: values})
        nvdict = args[1]
    elif len(args) == 1:   # (filename)
        pass
    else:
        raise TypeError("Unrecognized input arguments.")

    if whichdb(filename):
        print("Loading variables from file '%s'." % filename)
    else:
        raise IOError("File '%s' not found." % filename)

    my_shelf = shelve.open(filename, 'r')   # 'r' for read-only
    if nvdict:
        for name in nvdict.keys():
            try:
                nvdict[name] = my_shelf[name]
                print("Variable '%s' loaded." % name)
            except KeyError:
                value = None
                print("Variable '%s' not found." % name)
    else:
        for name in my_shelf.keys():
            nvdict[name] = my_shelf[name]
            print("Variable '%s' loaded." % name)
    my_shelf.close()

    if len(args) >= 2 and isinstance(args[1], str):   # (value)
        value = [nvdict[name] for name in args[1:]]
        return value
    elif len(args) == 2 and isinstance(args[1], list):   # ([values])
        value = [nvdict[name] for name in args[1]]
        return value
    elif (len(args) == 2 and isinstance(args[1], dict)) or (len(args) == 1):   # ({names: values})
        return nvdict
import dbm

db = dbm.open('Bookmark', 'c')
print(dbm.whichdb('Bookmark'))

#db['MyBlog'] = 'jonathanlife.sinaapp.com'
try:
    print(db['MyBlog2'])
except KeyError as err:
    print(err)

# save and close
db.close()
# :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
dbm.open(file, flag='r', mode=0o666)
    # Opens and returns a database object.
    # flag - access mode:
    #   'r'  open an existing database read-only (the default)
    #   'w'  open an existing database for reading and writing
    #   'c'  open for reading and writing, creating the file if it
    #        does not exist
    #   'n'  always create a new, empty database, open for reading
    #        and writing
    # mode - the Unix permission bits used when the file is created

dbm.whichdb(filename)
    # Tries to guess which of the available simple database modules -
    # dbm.gnu, dbm.ndbm or dbm.dumb - should be used to open the given
    # file. Returns one of the following values: None if the file
    # cannot be opened because it is unreadable or does not exist;
    # the empty string ('') if the file's format cannot be guessed;
    # or a string containing the required module name, such as
    # 'dbm.ndbm' or 'dbm.gnu'.

db.close()
    # close the database (db is the object returned by dbm.open)
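# A small runnable illustration of the flags described above, using a throwaway file
# name: 'n' always starts from an empty database, 'c' creates only if missing, and
# 'r' opens read-only.
import dbm

dbname = "flags_demo"                  # throwaway name for this sketch

with dbm.open(dbname, "n") as db:      # 'n': new, empty database
    db["greeting"] = "hello"

with dbm.open(dbname, "c") as db:      # 'c': open, creating if necessary
    db["farewell"] = "bye"

with dbm.open(dbname, "r") as db:      # 'r': read-only
    print(db[b"greeting"], db[b"farewell"])

print(dbm.whichdb(dbname))             # which backend produced the file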
The whichdb module

The whichdb module can determine the format of a given database file.

Notes (Python 2 -> Python 3):

    Python 2           Python 3
    import dbm         import dbm.ndbm
    import gdbm        import dbm.gnu
    import dbhash      import dbm.bsd
    import dumbdbm     import dbm.dumb
    import anydbm      import dbm
    import whichdb     import dbm

'''
import dbm

filename = 'database'
result = dbm.whichdb(filename)
print(result)

if result:
    print('file created by', result)
    handler = __import__(result)
    db = handler.open(filename, 'r')
    print(db.keys())
else:
    if result is None:
        print("cannot read database file", filename)
    else:
        print("cannot identify database file", filename)
    db = None
'''
        nodeinfo_string = nodeinfo_raw
    elif o in ('-r', '--readable'):
        Readable = 1
    elif o in ('-s', '--size'):
        Print_Flags['size'] = 1
    elif o in ('-t', '--timestamp'):
        Print_Flags['timestamp'] = 1
    elif o in ('-v', '--verbose'):
        Verbose = 1

if Do_Call:
    for a in args:
        Do_Call(a)
else:
    for a in args:
        dbm_name = dbm.whichdb(a)
        if dbm_name:
            Map_Module = {'SCons.dblite': 'dblite'}
            dbm = my_import(dbm_name)
            Do_SConsignDB(Map_Module.get(dbm_name, dbm_name), dbm)(a)
        else:
            Do_SConsignDir(a)

sys.exit(0)

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: