def __init__(self, storage, cache_size=100000, root_class=None):
    """(storage:Storage|str, cache_size:int=100000, root_class:class|None=None)
    Open a connection on `storage`, keeping up to `cache_size` non-ghosted
    persistent instances in the cache.  When the storage has no root object
    yet, one is created as an instance of `root_class` (PersistentDict when
    None) and committed.  Otherwise, when `root_class` is given, the root's
    actual class is verified against it.
    """
    # A bare path string is shorthand for a FileStorage on that file.
    if isinstance(storage, str):
        from durus.file_storage import FileStorage
        storage = FileStorage(storage)
    assert isinstance(storage, durus.storage.Storage)
    self.storage = storage
    self.reader = ObjectReader(self)
    self.changed = {}
    self.invalid_oids = set()
    self.new_oid = storage.new_oid  # needed by serialize
    self.cache = Cache(cache_size)
    self.root = self.get(ROOT_OID)
    if self.root is None:
        # Brand-new storage: the very first allocated oid must be the root's.
        allocated = self.new_oid()
        assert ROOT_OID == allocated
        self.root = self.get_cache().get_instance(
            ROOT_OID, root_class or PersistentDict, self)
        self.root._p_set_status_saved()
        self.root.__class__.__init__(self.root)
        self.root._p_note_change()
        self.commit()
    assert root_class in (None, self.root.__class__)
def check_open_empty(self):
    """A zero-length file opens cleanly as a FileStorage."""
    name = mktemp()
    # Create an empty file for the storage to open.
    with open(name, 'w'):
        pass
    storage = FileStorage(name)
    storage.close()
    unlink(name)
def interactive_client(file, address, cache_size, readonly, repair, startup):
    """Open a Durus connection -- on a local file when `file` is given,
    otherwise through a Durus server at `address` -- and drop the user into
    an interactive console with handy names pre-bound.
    """
    if file:
        storage = FileStorage(file, readonly=readonly, repair=repair)
        description = file
    else:
        socket_address = SocketAddress.new(address)
        wait_for_server(address=socket_address)
        storage = ClientStorage(address=socket_address)
        description = socket_address
    connection = Connection(storage, cache_size=cache_size)
    console_module = ModuleType('__console__')
    sys.modules['__console__'] = console_module
    # Names made available inside the interactive session.
    namespace = {
        'connection': connection,
        'root': connection.get_root(),
        'get': connection.get,
        'sys': sys,
        'os': os,
        'int8_to_str': int8_to_str,
        'str_to_int8': str_to_int8,
        'pp': pprint,
    }
    vars(console_module).update(namespace)
    configure_readline(
        vars(console_module), os.path.expanduser("~/.durushistory"))
    console = InteractiveConsole(vars(console_module))
    if startup:
        console.runsource('execfile("%s")' % os.path.expanduser(startup))
    help_text = (' connection -> the Connection\n'
                 ' root -> the root instance')
    console.interact('Durus %s\n%s' % (description, help_text))
def check_file_storage(self):
    """Exercise oid allocation, store/load, pack, and closed-storage errors."""
    name = mktemp()
    b = FileStorage(name)
    # A fresh storage hands out sequential oids starting at 0.
    for expected in range(3):
        assert b.new_oid() == int8_to_str(expected)
    raises(KeyError, b.load, int8_to_str(0))
    record = pack_record(int8_to_str(0), as_bytes('ok'), as_bytes(''))
    b.begin()
    b.store(int8_to_str(0), record)
    b.end()
    b.sync()
    b.begin()
    b.store(
        int8_to_str(1),
        pack_record(int8_to_str(1), as_bytes('no'), as_bytes('')))
    b.end()
    assert len(list(b.gen_oid_record(start_oid=int8_to_str(0)))) == 1
    assert len(list(b.gen_oid_record())) == 2
    b.pack()
    b.close()
    unlink(name + '.prepack')
    raises(ValueError, b.pack)  # storage closed
    unlink(name + '.pack')
    raises(ValueError, b.load, int8_to_str(0))  # storage closed
    unlink(name)
def run_trials(): numTrials = 3000 gens = 1000 from multiprocessing.pool import ThreadPool as Pool pool = Pool(50) jids = pool.map(f, [gens] * numTrials) print "Done spawning trials. Retrieving results..." results = pool.map(cloud_result, jids) firstLocusFreqsHists = zeros((numTrials, gens + 1), dtype='float') lastLocusFreqsHists = zeros((numTrials, gens + 1), dtype='float') print "Done retrieving results. Press Enter to serialize..." raw_input() for i, result in enumerate(results): firstLocusFreqsHists[i, :], lastLocusFreqsHists[i, :] = result with closing(FileStorage("soda_results.durus")) as durus: conn = Connection(durus) conn.get_root()[str(int(floor(time.time())))] = (firstLocusFreqsHists, lastLocusFreqsHists) conn.commit() pool.close() pool.join()
def __init__(self, file, new):
    """Open the Durus database at `file`; when `new`, delete any existing
    file first so the database starts empty."""
    self.__file = file
    if new and os.path.exists(self.__file):
        os.remove(self.__file)
    self.__connection = Connection(FileStorage(self.__file))
    self.__root = self.__connection.get_root()
def render_results(timestamp=None): with closing(FileStorage("soda_results.durus")) as durus: conn = Connection(durus) db = conn.get_root() if not timestamp: timestamp = sorted(db.keys())[-1] firstLocusFreqsHists, lastLocusFreqsHists = db[timestamp] print "Done deserializing results. Plotting..." x = [(2, 'First', firstLocusFreqsHists, "effective"), (3, 'Last', lastLocusFreqsHists, "non-effective")] for i, pos, freqsHists, filename in x: freqsHists = freqsHists[:, :801] f = figure(i) hold(False) plot(transpose(freqsHists), color='grey') hold(True) maxGens = freqsHists.shape[1] - 1 plot([0, maxGens], [.05, .05], 'k--') plot([0, maxGens], [.95, .95], 'k--') axis([0, maxGens, 0, 1]) xlabel('Generation') ylabel('1-Frequency of the ' + pos + ' Locus') f.canvas.draw() f.show() savefig(filename + '.png', format='png', dpi=200)
def check_repair(self):
    """A stray trailing byte breaks a storage file; repair mode trims it."""
    name = mktemp()
    empty_store = FileStorage(name)
    empty_store.close()
    raw = open(name, 'r+b')
    raw.seek(0, 2)
    intact_size = raw.tell()  # length of the intact file
    raw.write(as_bytes('b'))  # corrupt: append one junk byte
    raw.flush()
    raises(ShortRead, FileStorage, name, readonly=True)
    repaired = FileStorage(name, repair=True)
    raw.seek(0, 2)
    assert intact_size == raw.tell()  # repair truncated the junk byte
    raw.close()
    repaired.close()
    unlink(name)
def pack_storage_main():
    """Command-line entry point: pack a Durus storage, either a local file
    (--file) or one served by a Durus server (--host/--port)."""
    parser = OptionParser()
    parser.set_description("Packs a Durus storage.")
    parser.add_option(
        '--file', dest="file", default=None,
        help="If this is not given, the storage is through a Durus server.")
    parser.add_option(
        '--port', dest="port", default=DEFAULT_PORT, type="int",
        help="Port the server is on. (default=%s)" % DEFAULT_PORT)
    parser.add_option(
        '--host', dest="host", default=DEFAULT_HOST,
        help="Host of the server. (default=%s)" % DEFAULT_HOST)
    options, args = parser.parse_args()
    if options.file is None:
        # No file: talk to a running server instead.
        wait_for_server(options.host, options.port)
        storage = ClientStorage(host=options.host, port=options.port)
    else:
        storage = FileStorage(options.file)
    Connection(storage).pack()
def start_durus(host, port, logfilename, dbfilename):
    """Serve the Durus FileStorage at `dbfilename` on host:port, appending
    log output to `logfilename`.  Blocks in the server loop."""
    log_file = open(logfilename, 'a+')
    direct_output(log_file)
    logger.setLevel(9)
    storage = FileStorage(dbfilename, repair=False, readonly=False)
    log(20, 'Storage file=%s host=%s port=%s',
        storage.get_filename(), host, port)
    StorageServer(storage, host=host, port=port).serve()
def __init__(self):
    """Open the projects database; when empty, seed it with a 'Default'
    project and persist the seed."""
    self._conn = Connection(FileStorage(PROJECTS_DATA_PATH))
    self._data = self._conn.get_root()
    if not len(self._data.keys()):
        self._data["Default"] = PersistentDict(
            autocomplete=PersistentDict())
        self.sync()
def get_config():
    """Return the persistent "_pconfig" configuration object, creating and
    committing it (with a "default" section) on first use.

    Returns:
        The GenericConfiguration stored under root["_pconfig"].
    """
    connection = Connection(FileStorage("/var/tmp/test.durus"))
    root = connection.get_root()  # connection set as shown above.
    # Fix: `dict.has_key` is Python-2-only (removed in Python 3); the `in`
    # operator is the equivalent spelling and works on both versions.
    if "_pconfig" not in root:
        cf = GenericConfiguration()
        root["_pconfig"] = cf
        root["_pconfig"]["default"] = SECTION()
        connection.commit()
    return root["_pconfig"]
def check_reopen(self):
    """An open storage allows a read-only reader but blocks a second
    writer."""
    writer = TempFileStorage()
    filename = writer.get_filename()
    if os.name == 'nt':
        # Don't try to re-open an open file on Windows.
        writer.close()
        return
    reader = FileStorage(filename, readonly=True)
    raises(IOError, FileStorage, filename)
    writer.close()
    reader.close()
def check_bad_record_size(self):
    """A truncated record (length header with missing data) triggers
    ShortRead on the next open."""
    name = mktemp()
    raw = open(name, 'wb')
    storage = FileStorage(name)
    raw.seek(0, 2)
    # Append a record header+payload past the storage's expected end.
    write_int4_str(raw, 'ok')
    storage.close()
    raw.close()
    raises(ShortRead, FileStorage, name)
    unlink(name)
def startDurus(host, port, logfilename, dbfilename):
    """Start and initialize the Durus server component.

    Opens a size-limited log file, redirects output into it, then blocks
    serving the FileStorage at `dbfilename` on host:port.
    """
    lf = logfile.open(logfilename, 50000)
    direct_output(lf)
    logger.setLevel(9)
    storage = FileStorage(dbfilename, repair=False, readonly=False)
    log(20, 'Storage file=%s host=%s port=%s',
        storage.get_filename(), host, port)
    StorageServer(storage, host=host, port=port).serve()
def __init__(self):
    """Open the durus file storage and make sure the 'users' and 'feeds'
    containers exist in the root before binding them."""
    # durus file storage
    self.conndurus = Connection(FileStorage(CONFIG['durus_file']))
    root = self.conndurus.get_root()
    if not root.get('users'):
        root['users'] = PersistentDict()  # {user jid: CUser}
    if not root.get('feeds'):
        root['feeds'] = CFeeds()
    self.data = root['users']
    self.feeds = root['feeds']
    self.save()
def create_durus_publisher():
    """Build the Quixote Publisher backed by a Durus-persisted session
    manager stored in a temp-directory database."""
    global connection
    filename = os.path.join(tempfile.gettempdir(), 'quixote-demo.durus')
    print('Opening %r as a Durus database.' % filename)
    connection = Connection(FileStorage(filename))
    root = connection.get_root()
    session_manager = root.get('session_manager', None)
    if session_manager is None:
        # First run: create and persist a fresh session manager.
        session_manager = PersistentSessionManager()
        connection.get_root()['session_manager'] = session_manager
        connection.commit()
    return Publisher(
        RootDirectory(),
        session_manager=session_manager,
        display_exceptions='plain')
def b(self):
    """After deleting and packing, the freed oid is reallocated first and
    allocation then continues past the previous maximum."""
    tmp = File(prefix='shelftest')
    name = tmp.get_name()
    tmp.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for i in range(10):
        r["a%s" % i] = Persistent()
    c.commit()
    deleted_oid = r['a9']._p_oid
    del r['a9']
    c.commit()
    c.pack()
    c.abort()
    assert len([repr(oid) for oid, record in s.gen_oid_record()]) == 10
    # The freed oid comes back first...
    new_oid = s.new_oid()
    assert new_oid == deleted_oid
    # ...then allocation resumes above the old maximum.
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(11)
def a(self):
    """Packed-away objects become ghosts and their oids are reallocated in
    most-recently-freed-first order."""
    tmp = File(prefix='shelftest')
    name = tmp.get_name()
    tmp.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for i in range(10):
        r["a%s" % i] = Persistent()
    c.commit()
    doomed = ['a0', 'a2', 'a7', 'a8']
    deleted_oids = [r[key]._p_oid for key in doomed]
    for key in doomed:
        del r[key]
    c.commit()
    c.pack()
    c.abort()
    assert c.get(deleted_oids[0])._p_is_ghost()
    assert c.get(deleted_oids[1])._p_is_ghost()
    raises(KeyError, getattr, c.get(deleted_oids[0]), 'a')
    assert len([repr(oid) for oid, record in s.gen_oid_record()]) == 7
    c.commit()
    c.pack()
    # Freed oids are handed back most-recently-freed first...
    for expected in reversed(deleted_oids):
        new_oid = s.new_oid()
        assert new_oid == expected, (new_oid, deleted_oids)
    # ...after which allocation continues past the previous maximum.
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(11), repr(new_oid)
    new_oid = s.new_oid()
    assert new_oid == int8_to_str(12), repr(new_oid)
def c(self):
    """After clearing the root and packing, oid allocation restarts at 1
    (oid 0 is the root itself)."""
    tmp = File(prefix='shelftest')
    name = tmp.get_name()
    tmp.close()
    s = FileStorage(name)
    c = Connection(s)
    r = c.get_root()
    for i in range(10):
        r["a%s" % i] = Persistent()
    c.commit()
    deleted_oid = r['a9']._p_oid
    del r['a9']
    c.commit()
    c.pack()
    c.abort()
    r.clear()
    c.commit()
    c.pack()
    c.abort()
    for expected in (1, 2):
        new_oid = s.new_oid()
        assert new_oid == int8_to_str(expected), repr(new_oid)
def __init__(self):
    """Open the slacking database and bind its root as the data mapping."""
    self._conn = Connection(FileStorage(SLACKING_DATA_PATH))
    self._data = self._conn.get_root()
def __init__(self, filename):
    """Open a Durus connection on the file database at `filename`."""
    self._connection = Connection(FileStorage(filename))
def __init__(self, engine, **kw):
    """Open a Durus FileStorage at the engine path and expose its root
    object as the backing store; `sync` commits pending changes."""
    super(DurusStore, self).__init__(engine, **kw)
    self._db = FileStorage(self._engine)
    self._connection = Connection(self._db)
    # Committing the connection is this store's synchronization primitive.
    self.sync = self._connection.commit
    self._store = self._connection.get_root()
def open_db(path_to_msf):
    """Return a Durus Connection onto the file database at `path_to_msf`."""
    return Connection(FileStorage(path_to_msf))
def __init__(self, storagefile="default.sav"):
    """Open (creating if needed) the save-file database and bind its root."""
    self.storage = Connection(FileStorage(storagefile))
    self.root = self.storage.get_root()
    self.running = False  # not serving yet
def __init__(self, folder, dbFileName):
    """Open the Durus database at `<folder>/<dbFileName>.durus`."""
    # NOTE(review): '/' is hard-coded; os.path.join would be more portable,
    # but the separator is kept to preserve the stored path exactly.
    self.dbFileName = folder + '/' + dbFileName + '.durus'
    self.con = Connection(FileStorage(self.dbFileName))
def get_storage(file, repair, readonly):
    """Return a FileStorage on `file`, or a fresh TempFileStorage when no
    file is given (repair/readonly apply only to the file case)."""
    if not file:
        return TempFileStorage()
    return FileStorage(file, repair=repair, readonly=readonly)
def read(self):
    """Attach self.db to the on-disk database, but only if the file
    already exists."""
    if os.path.isfile(self.filename):
        self.db = Connection(FileStorage(self.filename))
def showExperimentTimeStamps():
    """Return the timestamps (root keys) of all stored experiment runs."""
    with closing(FileStorage("soda_results.durus")) as durus:
        return Connection(durus).get_root().keys()
import sys
import logging

# Python 2 script: `file(...)` is the py2 built-in open.
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s %(message)s',
                    stream=file('log.txt', 'w'))


def construct_mud(objstorethunk):
    """Construct a MUD factory."""
    return ConnectionFactory(objstorethunk)


def run_mud(mud, port):
    """Run the MUD factory: listen on `port`, register ticker commands,
    and hand control to the Twisted reactor (blocks)."""
    reactor.listenTCP(port, mud)
    mud.ticker.add_command(commit_gameworld)
    # Fix: was `mud_ticker.add_command(event_flusher)` -- an undefined name
    # that raised NameError at startup; the ticker hangs off `mud`.
    mud.ticker.add_command(event_flusher)
    mud.ticker.start()
    logging.info("OK, setup done, handing you over to the reactor's loop!")
    sys.stdout.write("Server is up and running.")
    reactor.run()


if __name__ == '__main__':
    # This needs to be wrapped in a lambda, because Durus is quite eager to
    # load stuff.  If it wasn't so eager, the ConnectionFactory would just
    # pull what it needed when, rather than getting silly errors on the
    # next line.
    connection = lambda: Connection(FileStorage("mudlib.durus"))
    run_mud(construct_mud(connection), 6666)