def check_open_empty(self):
    name = mktemp()
    f = open(name, 'w')
    f.close()
    s = FileStorage(name)
    s.close()
    unlink(name)
def start_durus(host, port, logfilename, dbfilename):
    logfile = open(logfilename, 'a+')
    direct_output(logfile)
    logger.setLevel(9)
    storage = FileStorage(dbfilename, repair=False, readonly=False)
    log(20, 'Storage file=%s host=%s port=%s',
        storage.get_filename(), host, port)
    StorageServer(storage, host=host, port=port).serve()
def convert(zodb_file_name, durus_file_name):
    """Read a ZODB FileStorage and write a new Durus FileStorage."""
    def generate_durus_object_records():
        sio = cStringIO.StringIO()
        zodb_storage = ZODBFileStorage(zodb_file_name)
        n = 0
        for oid in zodb_storage._index.keys():
            n += 1
            if n % 10000 == 0:
                sys.stdout.write('.')
                sys.stdout.flush()
            p, serial = zodb_storage.load(oid, '')
            refs = referencesf(p)
            sio.seek(0)
            sio.write(p)
            sio.truncate()
            sio.seek(0)
            def get_class(module_class):
                module, klass = module_class
                if module not in sys.modules:
                    __import__(module)
                return getattr(sys.modules[module], klass)
            class PersistentRef:
                def __init__(self, v):
                    oid, module_class = v
                    self.oid_klass = (oid, get_class(module_class))
            unpickler = cPickle.Unpickler(sio)
            unpickler.persistent_load = lambda v: PersistentRef(v)
            class_meta = unpickler.load()
            # unwrap extra tuple from class meta data
            class_meta, extra = class_meta
            assert extra is None
            object_state = unpickler.load()
            if type(object_state) == dict and '_container' in object_state:
                assert 'data' not in object_state
                object_state['data'] = object_state['_container']
                del object_state['_container']
            sio.seek(0)
            sio.truncate()
            cPickle.dump(get_class(class_meta), sio, 2)
            pickler = cPickle.Pickler(sio, 2)
            def persistent_id(v):
                if isinstance(v, PersistentRef):
                    return v.oid_klass
                return None
            pickler.persistent_id = persistent_id
            pickler.dump(object_state)
            record = pack_record(oid, sio.getvalue(), ''.join(refs))
            yield record
        print
        print n, 'objects written'
    if os.path.exists(durus_file_name):
        os.unlink(durus_file_name)
    durus_storage = FileStorage(durus_file_name)
    durus_storage._write_transaction(
        durus_storage.fp, generate_durus_object_records())
    durus_storage.fp.close()
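# A minimal usage sketch for convert() above; both file names are
# hypothetical. The source must be an existing ZODB FileStorage, and any
# existing file at the destination path is removed before writing.
convert('Data.fs', 'converted.durus')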
def check_bad_record_size(self):
    name = mktemp()
    f = open(name, 'wb')
    g = FileStorage(name)
    f.seek(0, 2)
    write_int4_str(f, 'ok')
    g.close()
    f.close()
    raises(ShortRead, FileStorage, name)
    unlink(name)
def check_reopen(self):
    f = TempFileStorage()
    filename = f.get_filename()
    if os.name == 'nt':
        f.close() # don't try to re-open an open file on windows
        return
    g = FileStorage(filename, readonly=True)
    raises(IOError, FileStorage, filename)
    f.close()
    g.close()
def start_durus(host, port, logfilename, dbfilename): logfile = open(logfilename, 'a+') direct_output(logfile) logger.setLevel(9) storage = FileStorage(dbfilename, repair=False, readonly=False) """ In Durus 2.6, there was a fp.name member data. However it doesn't exist in Durus 2.7. However, in both 2.6 and 2.7, there is a get_filename() method that will work. #log(20, 'Storage file=%s host=%s port=%s', storage.fp.name, host, port) """ log(20, 'Storage file=%s host=%s port=%s',storage.get_filename(), host, port) StorageServer(storage, host=host, port=port).serve()
def startDurus(host, port, logfilename, dbfilename):
    """Start and initialize the Durus server component.

    Also opens a log file.
    """
    lf = logfile.open(logfilename, 50000)
    direct_output(lf)
    logger.setLevel(9)
    storage = FileStorage(dbfilename, repair=False, readonly=False)
    log(20, 'Storage file=%s host=%s port=%s',
        storage.get_filename(), host, port)
    StorageServer(storage, host=host, port=port).serve()
class TestBackend:

    def __init__(self, filename, mode):
        self.mode = mode
        if mode == "w":
            self.storage = FileStorage(filename)
            self.connection = Connection(self.storage)
            self.test_db_items = self.connection.get_root()
        elif mode == "r":
            self.storage = FileStorage(filename)
            self.connection = Connection(self.storage)
            self.test_db_items = self.connection.get_root()
            self.next_rec_num = 0  # Initialise next record counter
            self.num_records = len(self.test_db_items)

    def __setitem__(self, key, value):
        self.test_db_items[key] = value

    def __getitem__(self, key):
        return self.test_db_items[str(key)]

    def __len__(self):
        return len(self.test_db_items)

    def first(self):
        return self.test_db_items[0]

    def iteritems(self):
        while self.next_rec_num < self.num_records:
            value = self.test_db_items[self.next_rec_num]
            self.next_rec_num += 1
            yield value

    def close(self):
        self.connection.commit()
        self.storage.close()

    def getTestDBItems(self):
        return self.test_db_items.values()
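# A minimal usage sketch for the TestBackend class above; 'test.durus' is
# a hypothetical filename. Note that __getitem__ looks keys up as
# str(key), so string keys are the safe choice when writing.
backend = TestBackend('test.durus', 'w')
backend['0'] = 'first record'
assert len(backend) == 1
backend.close()  # commits and closes the underlying FileStorage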
def __init__(self, storage, cache_size=100000, root_class=None):
    """(storage:Storage|str, cache_size:int=100000,
        root_class:class|None=None)
    Make a connection to `storage`.
    Set the target number of non-ghosted persistent objects to keep in
    the cache at `cache_size`.
    If there is no root object yet, create it as an instance of the
    root_class (or PersistentDict, if root_class is None), calling the
    constructor with no arguments.
    Also, if the root_class is not None, verify that this really is the
    class of the root object.
    """
    if isinstance(storage, str):
        from durus.file_storage import FileStorage
        storage = FileStorage(storage)
    assert isinstance(storage, durus.storage.Storage)
    self.storage = storage
    self.reader = ObjectReader(self)
    self.changed = {}
    self.invalid_oids = set()
    self.new_oid = storage.new_oid  # needed by serialize
    self.cache = Cache(cache_size)
    self.root = self.get(ROOT_OID)
    if self.root is None:
        new_oid = self.new_oid()
        assert ROOT_OID == new_oid
        self.root = self.get_cache().get_instance(
            ROOT_OID, root_class or PersistentDict, self)
        self.root._p_set_status_saved()
        self.root.__class__.__init__(self.root)
        self.root._p_note_change()
        self.commit()
    assert root_class in (None, self.root.__class__)
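# A minimal sketch of the Connection API documented above. Passing a str
# instead of a Storage opens a FileStorage on that path, and the root
# defaults to a PersistentDict. 'example.durus' is a hypothetical path.
connection = Connection('example.durus', cache_size=10000)
root = connection.get_root()
root['greeting'] = 'hello'
connection.commit()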
def interactive_client(file, address, cache_size, readonly, repair,
                       startup):
    if file:
        storage = FileStorage(file, readonly=readonly, repair=repair)
        description = file
    else:
        socket_address = SocketAddress.new(address)
        wait_for_server(address=socket_address)
        storage = ClientStorage(address=socket_address)
        description = socket_address
    connection = Connection(storage, cache_size=cache_size)
    console_module = ModuleType('__console__')
    sys.modules['__console__'] = console_module
    namespace = {'connection': connection,
                 'root': connection.get_root(),
                 'get': connection.get,
                 'sys': sys,
                 'os': os,
                 'int8_to_str': int8_to_str,
                 'str_to_int8': str_to_int8,
                 'pp': pprint}
    vars(console_module).update(namespace)
    configure_readline(
        vars(console_module), os.path.expanduser("~/.durushistory"))
    console = InteractiveConsole(vars(console_module))
    if startup:
        console.runsource('execfile("%s")' % os.path.expanduser(startup))
    help = ('    connection -> the Connection\n'
            '    root -> the root instance')
    console.interact('Durus %s\n%s' % (description, help))
def __init__(self, file, new):
    self.__file = file
    if new:
        if os.path.exists(self.__file):
            os.remove(self.__file)
    self.__connection = Connection(FileStorage(self.__file))
    self.__root = self.__connection.get_root()
def run_trials():
    numTrials = 3000
    gens = 1000
    from multiprocessing.pool import ThreadPool as Pool
    pool = Pool(50)
    jids = pool.map(f, [gens] * numTrials)
    print "Done spawning trials. Retrieving results..."
    results = pool.map(cloud_result, jids)
    firstLocusFreqsHists = zeros((numTrials, gens + 1), dtype='float')
    lastLocusFreqsHists = zeros((numTrials, gens + 1), dtype='float')
    print "Done retrieving results. Press Enter to serialize..."
    raw_input()
    for i, result in enumerate(results):
        firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result
    with closing(FileStorage("soda_results.durus")) as durus:
        conn = Connection(durus)
        conn.get_root()[str(int(floor(time.time())))] = (
            firstLocusFreqsHists, lastLocusFreqsHists)
        conn.commit()
    pool.close()
    pool.join()
def render_results(timestamp=None):
    with closing(FileStorage("soda_results.durus")) as durus:
        conn = Connection(durus)
        db = conn.get_root()
        if not timestamp:
            timestamp = sorted(db.keys())[-1]
        firstLocusFreqsHists, lastLocusFreqsHists = db[timestamp]
    print "Done deserializing results. Plotting..."
    x = [(2, 'First', firstLocusFreqsHists, "effective"),
         (3, 'Last', lastLocusFreqsHists, "non-effective")]
    for i, pos, freqsHists, filename in x:
        freqsHists = freqsHists[:, :801]
        f = figure(i)
        hold(False)
        plot(transpose(freqsHists), color='grey')
        hold(True)
        maxGens = freqsHists.shape[1] - 1
        plot([0, maxGens], [.05, .05], 'k--')
        plot([0, maxGens], [.95, .95], 'k--')
        axis([0, maxGens, 0, 1])
        xlabel('Generation')
        ylabel('1-Frequency of the ' + pos + ' Locus')
        f.canvas.draw()
        f.show()
        savefig(filename + '.png', format='png', dpi=200)
def copy_to_new_format(from_file, to_file, format):
    tmp_file_name = "%s.%s.tmp" % (to_file, datetime.now())
    if format == 1:
        to_storage = FileStorage1(tmp_file_name)
    elif format == 2:
        to_storage = FileStorage2(tmp_file_name)
    from_storage = FileStorage(from_file, readonly=True)
    duplicate_file_storage(from_storage, to_storage)
    old_num_records = len(from_storage.index)
    assert len(to_storage.index) == old_num_records
    from_storage.close()
    repickle_storage(to_storage)
    assert len(to_storage.index) > max(1, old_num_records / 2)
    move_to_backup(to_file)
    rename(tmp_file_name, to_file)
def pack_storage_main():
    parser = OptionParser()
    parser.set_description("Packs a Durus storage.")
    parser.add_option(
        '--file', dest="file", default=None,
        help="If this is not given, the storage is through a Durus server.")
    parser.add_option(
        '--port', dest="port", default=DEFAULT_PORT, type="int",
        help="Port the server is on. (default=%s)" % DEFAULT_PORT)
    parser.add_option(
        '--host', dest="host", default=DEFAULT_HOST,
        help="Host of the server. (default=%s)" % DEFAULT_HOST)
    (options, args) = parser.parse_args()
    if options.file is None:
        wait_for_server(options.host, options.port)
        storage = ClientStorage(host=options.host, port=options.port)
    else:
        storage = FileStorage(options.file)
    connection = Connection(storage)
    connection.pack()
def __init__(self):
    self._conn = Connection(FileStorage(PROJECTS_DATA_PATH))
    self._data = self._conn.get_root()
    if not len(self._data.keys()):
        self._data["Default"] = PersistentDict(
            autocomplete=PersistentDict())
        self.sync()
class DurusStore(SyncStore):

    '''Class for Durus object database frontend.'''

    init = 'durus://'

    def __init__(self, engine, **kw):
        super(DurusStore, self).__init__(engine, **kw)
        self._db = FileStorage(self._engine)
        self._connection = Connection(self._db)
        self.sync = self._connection.commit
        self._store = self._connection.get_root()

    def close(self):
        '''Closes all open storage and connections.'''
        self.sync()
        self._db.close()
        super(DurusStore, self).close()
def get_config():
    connection = Connection(FileStorage("/var/tmp/test.durus"))
    root = connection.get_root()  # connection set as shown above.
    if not root.has_key("_pconfig"):
        cf = GenericConfiguration()
        root["_pconfig"] = cf
        root["_pconfig"]["default"] = SECTION()
        connection.commit()
    return root["_pconfig"]
def b(self):
    f = File(prefix='shelftest')
    name = f.get_name()
    f.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for x in range(10):
        r["a%s" % x] = Persistent()
    c.commit()
    deleted_oid = r['a9']._p_oid
    del r['a9']
    c.commit()
    c.pack()
    c.abort()
    assert len([repr(oid) for oid, record in s.gen_oid_record()]) == 10
    new_oid = s.new_oid()
    assert new_oid == deleted_oid
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(11)
def __init__(self):
    # durus file storage
    self.conndurus = Connection(FileStorage(CONFIG['durus_file']))
    root = self.conndurus.get_root()
    if not root.get('users'):
        root['users'] = PersistentDict()  # {user jid: CUser}
    if not root.get('feeds'):
        root['feeds'] = CFeeds()
    self.data = root['users']
    self.feeds = root['feeds']
    self.save()
def check_repair(self):
    name = mktemp()
    g = FileStorage(name)
    g.close()
    f = open(name, 'r+b')
    f.seek(0, 2)
    p = f.tell()
    f.write(as_bytes('b'))
    f.flush()
    raises(ShortRead, FileStorage, name, readonly=True)
    h = FileStorage(name, repair=True)
    f.seek(0, 2)
    assert p == f.tell()
    f.close()
    h.close()
    unlink(name)
def c(self):
    f = File(prefix='shelftest')
    name = f.get_name()
    f.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for x in range(10):
        r["a%s" % x] = Persistent()
    c.commit()
    deleted_oid = r['a9']._p_oid
    del r['a9']
    c.commit()
    c.pack()
    c.abort()
    r.clear()
    c.commit()
    c.pack()
    c.abort()
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(1), repr(new_oid)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(2), repr(new_oid)
def create_durus_publisher():
    global connection
    filename = os.path.join(tempfile.gettempdir(), 'quixote-demo.durus')
    print('Opening %r as a Durus database.' % filename)
    connection = Connection(FileStorage(filename))
    root = connection.get_root()
    session_manager = root.get('session_manager', None)
    if session_manager is None:
        session_manager = PersistentSessionManager()
        connection.get_root()['session_manager'] = session_manager
        connection.commit()
    return Publisher(RootDirectory(),
                     session_manager=session_manager,
                     display_exceptions='plain')
def a(self):
    f = File(prefix='shelftest')
    name = f.get_name()
    f.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for x in range(10):
        r["a%s" % x] = Persistent()
    c.commit()
    deleted_oids = [
        r['a0']._p_oid, r['a2']._p_oid, r['a7']._p_oid, r['a8']._p_oid]
    del r['a0']
    del r['a2']
    del r['a7']
    del r['a8']
    c.commit()
    c.pack()
    c.abort()
    assert c.get(deleted_oids[0])._p_is_ghost()
    assert c.get(deleted_oids[1])._p_is_ghost()
    raises(KeyError, getattr, c.get(deleted_oids[0]), 'a')
    assert len([repr(oid) for oid, record in s.gen_oid_record()]) == 7
    c.commit()
    c.pack()
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-1], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-2], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-3], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-4], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(11), repr(new_oid)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(12), repr(new_oid)
def a(self):
    f = File(prefix='shelftest')
    name = f.get_name()
    f.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for x in range(10):
        r["a%s" % x] = Persistent()
    c.commit()
    deleted_oids = [
        r['a0']._p_oid, r['a2']._p_oid, r['a7']._p_oid, r['a8']._p_oid]
    del r['a0']
    del r['a2']
    del r['a7']
    del r['a8']
    c.commit()
    c.pack()
    c.abort()
    assert c.get(deleted_oids[0])._p_is_ghost()
    assert c.get(deleted_oids[1])._p_is_ghost()
    raises(ReadConflictError, getattr, c.get(deleted_oids[0]), 'a')
    assert len([repr(oid) for oid, record in s.gen_oid_record()]) == 7
    c.commit()
    c.pack()
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-1], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-2], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-3], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == deleted_oids[-4], (new_oid, deleted_oids)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(11), repr(new_oid)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(12), repr(new_oid)
def get_storage(file, repair, readonly):
    if file:
        return FileStorage(file, repair=repair, readonly=readonly)
    else:
        return TempFileStorage()
def __init__(self, filename):
    self._connection = Connection(FileStorage(filename))
def check_file_storage(self):
    name = mktemp()
    b = FileStorage(name)
    assert b.new_oid() == int8_to_str(0)
    assert b.new_oid() == int8_to_str(1)
    assert b.new_oid() == int8_to_str(2)
    raises(KeyError, b.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    b.begin()
    b.store(int8_to_str(0), record)
    b.end()
    b.sync()
    b.begin()
    b.store(int8_to_str(1),
            pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    b.end()
    assert len(list(b.gen_oid_record(start_oid=int8_to_str(0)))) == 1
    assert len(list(b.gen_oid_record())) == 2
    b.pack()
    b.close()
    unlink(name + '.prepack')
    raises(ValueError, b.pack)  # storage closed
    unlink(name + '.pack')
    raises(ValueError, b.load, int8_to_str(0))  # storage closed
    unlink(name)
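# The same pack cycle sketched at the Connection level; 'example.durus' is
# a hypothetical path. Connection.pack() rewrites the storage file and
# drops records unreachable from the root; the test above exercises the
# low-level equivalent, including the temporary .prepack/.pack files.
connection = Connection(FileStorage('example.durus'))
connection.get_root()['key'] = 'value'
connection.commit()
connection.pack()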
def read(self):
    if os.path.isfile(self.filename):
        self.db = Connection(FileStorage(self.filename))
def __init__(self, storagefile="default.sav"):
    self.storage = Connection(FileStorage(storagefile))
    self.root = self.storage.get_root()
    self.running = False
def open_db(path_to_msf):
    connection = Connection(FileStorage(path_to_msf))
    return connection
def __init__(self):
    self._conn = Connection(FileStorage(SLACKING_DATA_PATH))
    self._data = self._conn.get_root()
def showExperimentTimeStamps():
    with closing(FileStorage("soda_results.durus")) as durus:
        conn = Connection(durus)
        return conn.get_root().keys()
import sys
import logging

logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    stream=file('log.txt', 'w'))

def construct_mud(objstorethunk):
    """Construct a MUD factory."""
    return ConnectionFactory(objstorethunk)

def run_mud(mud, port):
    """Run the MUD factory."""
    reactor.listenTCP(port, mud)
    mud.ticker.add_command(commit_gameworld)
    mud.ticker.add_command(event_flusher)
    mud.ticker.start()
    logging.info("OK, setup done, handing you over to the reactor's loop!")
    sys.stdout.write("Server is up and running.\n")
    reactor.run()

if __name__ == '__main__':
    # This needs to be wrapped in a lambda, because Durus is quite eager to
    # load stuff. If it wasn't so eager, the ConnectionFactory would just
    # pull what it needed when needed, rather than getting silly errors on
    # the next line.
    connection = lambda: Connection(FileStorage("mudlib.durus"))
    run_mud(construct_mud(connection), 6666)