def run_server(cls) -> None:
    """Run the GMB server.

    Spins up the ZEO database server and a background scheduler to
    update state at the configured interval.

    Raises:
        RuntimeError: if a server is already running, i.e. the PID
            recorded in the PID file belongs to a live process.
            (RuntimeError is a subclass of Exception, so callers
            catching the old generic Exception still work.)
    """
    # Refuse to start when the PID file points at a live process;
    # a stale PID file from a dead process is silently overwritten.
    if os.path.exists(CONFIG["pid_file"]):
        with open(CONFIG["pid_file"], "r") as fi:
            pid = int(fi.read().strip())
        if psutil.pid_exists(pid):
            raise RuntimeError("Server already running!")
    with open(CONFIG["pid_file"], "w") as pidfile:
        pidfile.write(str(os.getpid()))
    logging.info("Starting server...")
    config = load_config()
    ZEO.server(path=CONFIG["db_location"], port=config["port"])
    client = cls()
    sched = BackgroundScheduler(daemon=True)
    sched.add_job(
        client.update,
        "interval",
        seconds=config["update_interval"],
        # Fire once immediately instead of waiting a full interval.
        next_run_time=datetime.datetime.now(),
    )
    sched.start()
    logging.info("server running")
    try:
        # Park the main thread; the scheduler runs in the background.
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        sched.shutdown()
    finally:
        # Remove our PID file so the next start is not blocked by a
        # stale entry (best effort -- it may already be gone).
        try:
            os.remove(CONFIG["pid_file"])
        except OSError:
            pass
def _decide_database_address(): """When "Testing" is an environment variable, an in-memory server is set up that will be gone after closing the process. Returns the address of that server, or the address as specified in the `db_config.zeoconf` file.""" if "TESTING" in os.environ: address, stop = ZEO.server( ) # <- in-memory server that's gone after closing of process. return address else: return ('127.0.0.1', 8091 ) # <- As specified in the `db_config.zeoconf` file.
def test_client_side(self):
    """Resolve Length conflicts server-side (traditional), then
    client-side with ``client_conflict_resolution`` enabled."""
    # First, traditional: the server resolves the conflict.
    tmpdir = tempfile.mkdtemp(prefix='zeo-test-')
    self.addCleanup(shutil.rmtree, tmpdir)
    server_addr, stop_server = ZEO.server(
        os.path.join(tmpdir, 'data.fs'), threaded=False)
    db = ZEO.DB(server_addr)
    with db.transaction() as conn:
        conn.root.l = Length(0)
    other = db.open()
    other.root.l.change(1)
    with db.transaction() as conn:
        conn.root.l.change(1)
    other.transaction_manager.commit()
    self.assertEqual(other.root.l.value, 2)
    db.close()
    stop_server()

    # Now, do conflict resolution on the client.
    cr_addr, stop_server = ZEO.server(
        storage_conf='<mappingstorage>\n</mappingstorage>\n',
        zeo_conf=dict(client_conflict_resolution=True),
        threaded=False,
    )
    db = ZEO.DB(cr_addr)
    with db.transaction() as conn:
        conn.root.l = Length(0)
    other = db.open()
    other.root.l.change(1)
    with db.transaction() as conn:
        conn.root.l.change(1)
    # The concurrent increment is not merged in until the commit below.
    self.assertEqual(other.root.l.value, 1)
    other.transaction_manager.commit()
    self.assertEqual(other.root.l.value, 2)
    db.close()
    stop_server()
def test_client_side(self):
    """Exercise Length conflict resolution: first on the server, then
    on the client via ``client_conflict_resolution``."""
    # First, traditional: server-side conflict resolution.
    work_dir = tempfile.mkdtemp(prefix='zeo-test-')
    self.addCleanup(shutil.rmtree, work_dir)
    addr, stop = ZEO.server(os.path.join(work_dir, 'data.fs'),
                            threaded=False)
    db = ZEO.DB(addr)
    with db.transaction() as conn:
        conn.root.l = Length(0)
    racing = db.open()
    racing.root.l.change(1)
    with db.transaction() as conn:
        conn.root.l.change(1)
    racing.transaction_manager.commit()
    self.assertEqual(racing.root.l.value, 2)
    db.close()
    stop()

    # Now, do conflict resolution on the client.
    addr, stop = ZEO.server(
        storage_conf='<mappingstorage>\n</mappingstorage>\n',
        zeo_conf=dict(client_conflict_resolution=True),
        threaded=False,
    )
    db = ZEO.DB(addr)
    with db.transaction() as conn:
        conn.root.l = Length(0)
    racing = db.open()
    racing.root.l.change(1)
    with db.transaction() as conn:
        conn.root.l.change(1)
    # Concurrent change only becomes visible once the commit resolves it.
    self.assertEqual(racing.root.l.value, 1)
    racing.transaction_manager.commit()
    self.assertEqual(racing.root.l.value, 2)
    db.close()
    stop()
def test_client_side(self):
    """Length conflicts resolved on the server, then on the client."""
    # First, traditional: the server merges the concurrent increments.
    # NOTE: 'data.fs' is relative -- the test harness is assumed to run
    # each test in its own scratch directory.
    addr, stop = ZEO.server('data.fs', threaded=False)
    db = ZEO.DB(addr)
    with db.transaction() as conn:
        conn.root.l = Length(0)
    writer = db.open()
    writer.root.l.change(1)
    with db.transaction() as conn:
        conn.root.l.change(1)
    writer.transaction_manager.commit()
    self.assertEqual(writer.root.l.value, 2)
    db.close()
    stop()

    # Now, do conflict resolution on the client.
    addr, stop = ZEO.server(
        storage_conf='<mappingstorage>\n</mappingstorage>\n',
        zeo_conf=dict(client_conflict_resolution=True),
        threaded=False,
    )
    db = ZEO.DB(addr)
    with db.transaction() as conn:
        conn.root.l = Length(0)
    writer = db.open()
    writer.root.l.change(1)
    with db.transaction() as conn:
        conn.root.l.change(1)
    # Pre-commit, the other connection still sees only its own change.
    self.assertEqual(writer.root.l.value, 1)
    writer.transaction_manager.commit()
    self.assertEqual(writer.root.l.value, 2)
    db.close()
    stop()
def test_client_side(self):
    """Same conflict-resolution scenario twice: server-side first,
    then with ``client_conflict_resolution`` turned on."""
    # First, traditional: server-side resolution over a file storage.
    addr, shutdown = ZEO.server('data.fs', threaded=False)
    db = ZEO.DB(addr)
    with db.transaction() as conn:
        conn.root.l = Length(0)
    second = db.open()
    second.root.l.change(1)
    with db.transaction() as conn:
        conn.root.l.change(1)
    second.transaction_manager.commit()
    self.assertEqual(second.root.l.value, 2)
    db.close()
    shutdown()

    # Now, do conflict resolution on the client.
    addr, shutdown = ZEO.server(
        storage_conf='<mappingstorage>\n</mappingstorage>\n',
        zeo_conf=dict(client_conflict_resolution=True),
        threaded=False,
    )
    db = ZEO.DB(addr)
    with db.transaction() as conn:
        conn.root.l = Length(0)
    second = db.open()
    second.root.l.change(1)
    with db.transaction() as conn:
        conn.root.l.change(1)
    # Before committing, only this connection's own increment shows.
    self.assertEqual(second.root.l.value, 1)
    second.transaction_manager.commit()
    self.assertEqual(second.root.l.value, 2)
    db.close()
    shutdown()
def run(self) -> None:
    # Start a ZEO server using this instance's storage configuration,
    # then service network events until the asyncore loop exits.
    # NOTE(review): `zeo_conf` is a free name here, not `self._zeo_conf`;
    # presumably a module-level config object -- confirm it is defined.
    # NOTE(review): `asyncore` is deprecated and removed in Python 3.12.
    ZEO.server(storage_conf=self._storage_conf, zeo_conf=zeo_conf)
    asyncore.loop()
import ZODB.config
import ZEO
import transaction

print('jj')  # presumably a debug leftover -- TODO confirm / remove

# NOTE(review): elsewhere ZEO.server's `path` is a storage *file* path
# ('data.fs', 'perf.fs'); passing 'localhost' creates a file literally
# named "localhost" in the CWD -- confirm a hostname was not intended.
ZEO.server(path='localhost', port=8018)

# Open the database described by zodb.config and store one value.
db = ZODB.config.databaseFromURL('zodb.config')
connection = db.open()
root = connection.root()
print(root)
root['ab'] = 12
transaction.commit()

# Alternative connection styles kept from earlier experiments:
#client=ZEO.client(addr)
#connection = ZEO.connection(address)
#db = ZEO.DB(address)
#connection = db.open()
def shoot():
    """zodbshootout-inspired performance exercise.

    Benchmarks add/update/read (and, when not read-only or baseline,
    cached/prefetch) operations against a ZEO server, printing the
    per-operation timings in microseconds.  Work is driven either by
    one in-process worker or by one process per unit of concurrency,
    coordinated over multiprocessing queues.
    """
    options = parser.parse_args()
    concurrency = options.concurrency
    object_size = options.object_size
    transaction_size = options.transaction_size
    repetitions = options.repetitions

    # Optional SSL: use the test certificates shipped with ZEO.
    if options.ssl:
        from ZEO.tests.testssl import server_config, client_ssl
    else:
        server_config = None
        client_ssl = lambda : None

    if options.demo_storage_baseline:
        # Baseline without a ZEO server at all; nothing to stop.
        db_factory = None
        headings = ('add', 'update', 'read')
        stop = lambda : None
    else:
        if options.address:
            # Use an already-running server: "host:port" or a bare port.
            addr = options.address
            if ':' in addr:
                host, port = addr.split(':')
                addr = host, int(port)
            else:
                addr = '127.0.0.1', int(addr)
            stop = lambda : None
        else:
            # Start a private server over a fresh file storage.
            if os.path.exists('perf.fs'):
                os.remove('perf.fs')
            try:
                addr, stop = ZEO.server(
                    threaded=False, path='perf.fs', zeo_conf=server_config)
            except TypeError:
                # ZEO 4
                addr, stop = ZEO.server()
        db_factory = lambda : ZEO.DB(
            addr, ssl=client_ssl(), wait_timeout=9999,
            server_sync=options.server_sync)
        if options.read_only:
            headings = ('read', 'prefetch')
        else:
            headings = ('add', 'update', 'cached', 'read', 'prefetch')

        # Initialize database: one BTree of P objects per worker.
        db = db_factory()
        with db.transaction() as conn:
            conn.root.speedtest = speedtest = BTree()
            for ic in range(concurrency):
                speedtest[ic] = data = BTree()
                for ir in range(repetitions):
                    data[ir] = P()
        db.pack()
        db.close()

    print('Times per operation in microseconds (o=%s, t=%s, c=%s)' % (
        object_size, transaction_size, concurrency))
    print(' %12s' * 5 % ('op', 'min', 'mean', 'median', 'max'))

    # One (input, output) queue pair per worker.
    queues = [(multiprocessing.Queue(), multiprocessing.Queue())
              for ip in range(concurrency)]

    if options.save:
        save_file = open(options.save, 'a')
    else:
        save_file = None

    if concurrency > 1 or save_file:
        # Multi-process mode: one worker process per queue pair.
        processes = [
            multiprocessing.Process(
                target=run_test,
                args=(db_factory, ip, queues[ip][0], queues[ip][1],
                      object_size, transaction_size, repetitions,
                      options.read_only),
                )
            for ip in range(concurrency)
            ]
        for p in processes:
            p.daemon = True
            p.start()
        # Wait until every worker has signalled readiness.
        for iqueue, oqueue in queues:
            oqueue.get(timeout=9) # ready?
        for name in headings:
            # Release all workers for this phase, then collect each
            # worker's total and normalize to a per-repetition time.
            for iqueue, oqueue in queues:
                iqueue.put(None)
            data = [oqueue.get(timeout=999) / repetitions
                    for _, oqueue in queues]
            summarize(name, data)
            if save_file:
                save_file.write('\t'.join(
                    map(str,
                        (options.name, object_size, transaction_size,
                         concurrency * options.client_hosts, repetitions,
                         options.server_sync, options.ssl,
                         options.demo_storage_baseline, options.address,
                         name, sum(data)/len(data))
                        )
                    ) + '\n')
        for p in processes:
            p.join(1)
    else:
        # Single in-process worker, optionally under cProfile.
        [(iqueue, oqueue)] = queues
        # Pre-load one go-signal per phase; the worker consumes them.
        for name in headings:
            iqueue.put(None)
        if options.profile:
            import cProfile
            profiler = cProfile.Profile()
            profiler.enable()
        else:
            profiler = None
        run_test(db_factory, 0, iqueue, oqueue, object_size,
                 transaction_size, repetitions, options.read_only)
        oqueue.get(timeout=9) # ready?
        if profiler is not None:
            profiler.disable()
            profiler.dump_stats(options.profile)
        for name in headings:
            summarize(name, [oqueue.get(timeout=999) / repetitions])
    stop()
import ZEO

# Start a ZEO server (note: `path` names the storage file, so this
# writes to a file literally called "127.0.0.1") and show its address.
addr, stop_server = ZEO.server(path='127.0.0.1', port=8018)
print(addr)

# Open a full database connection and a bare client storage against it.
connection = ZEO.connection(addr)
client_storage = ZEO.client(addr)

# Shut the server down again.
stop_server()
import ZEO

# Start a ZEO server listening on port 8090.
# Fix: `port` is an integer port number (cf. ZEO.server(..., port=8018)
# elsewhere and its int default); it was previously passed as the
# string "8090", which relies on the resolver tolerating numeric
# service strings rather than on the documented interface.
address, stop = ZEO.server(port=8090)
def shoot():
    """zodbshootout-inspired performance exercise.

    Benchmarks add/update/read (and, when not read-only or baseline,
    cached/prefetch) operations against a ZEO server, printing the
    per-operation timings in microseconds.  Work is driven either by
    one in-process worker or by one process per unit of concurrency,
    coordinated over multiprocessing queues.
    """
    options = parser.parse_args()
    concurrency = options.concurrency
    object_size = options.object_size
    transaction_size = options.transaction_size
    repetitions = options.repetitions

    # Optional SSL: use the test certificates shipped with ZEO.
    if options.ssl:
        from ZEO.tests.testssl import server_config, client_ssl
    else:
        server_config = None
        client_ssl = lambda: None

    if options.demo_storage_baseline:
        # Baseline without a ZEO server at all; nothing to stop.
        db_factory = None
        headings = ('add', 'update', 'read')
        stop = lambda: None
    else:
        if options.address:
            # Use an already-running server: "host:port" or a bare port.
            addr = options.address
            if ':' in addr:
                host, port = addr.split(':')
                addr = host, int(port)
            else:
                addr = '127.0.0.1', int(addr)
            stop = lambda: None
        else:
            # Start a private server over a fresh file storage.
            if os.path.exists('perf.fs'):
                os.remove('perf.fs')
            try:
                addr, stop = ZEO.server(threaded=False, path='perf.fs',
                                        zeo_conf=server_config)
            except TypeError:
                # ZEO 4
                addr, stop = ZEO.server()
        db_factory = lambda: ZEO.DB(addr, ssl=client_ssl(),
                                    wait_timeout=9999,
                                    server_sync=options.server_sync)
        if options.read_only:
            headings = ('read', 'prefetch')
        else:
            headings = ('add', 'update', 'cached', 'read', 'prefetch')

        # Initialize database: one BTree of P objects per worker.
        db = db_factory()
        with db.transaction() as conn:
            conn.root.speedtest = speedtest = BTree()
            for ic in range(concurrency):
                speedtest[ic] = data = BTree()
                for ir in range(repetitions):
                    data[ir] = P()
        db.pack()
        db.close()

    print('Times per operation in microseconds (o=%s, t=%s, c=%s)' %
          (object_size, transaction_size, concurrency))
    print(' %12s' * 5 % ('op', 'min', 'mean', 'median', 'max'))

    # One (input, output) queue pair per worker.
    queues = [(multiprocessing.Queue(), multiprocessing.Queue())
              for ip in range(concurrency)]

    if options.save:
        save_file = open(options.save, 'a')
    else:
        save_file = None

    if concurrency > 1 or save_file:
        # Multi-process mode: one worker process per queue pair.
        processes = [
            multiprocessing.Process(
                target=run_test,
                args=(db_factory, ip, queues[ip][0], queues[ip][1],
                      object_size, transaction_size, repetitions,
                      options.read_only),
            )
            for ip in range(concurrency)
        ]
        for p in processes:
            p.daemon = True
            p.start()
        # Wait until every worker has signalled readiness.
        for iqueue, oqueue in queues:
            oqueue.get(timeout=9)  # ready?
        for name in headings:
            # Release all workers for this phase, then collect each
            # worker's total and normalize to a per-repetition time.
            for iqueue, oqueue in queues:
                iqueue.put(None)
            data = [
                oqueue.get(timeout=999) / repetitions
                for _, oqueue in queues
            ]
            summarize(name, data)
            if save_file:
                save_file.write('\t'.join(
                    map(str,
                        (options.name, object_size, transaction_size,
                         concurrency * options.client_hosts, repetitions,
                         options.server_sync, options.ssl,
                         options.demo_storage_baseline, options.address,
                         name, sum(data) / len(data)))) + '\n')
        for p in processes:
            p.join(1)
    else:
        # Single in-process worker, optionally under cProfile.
        [(iqueue, oqueue)] = queues
        # Pre-load one go-signal per phase; the worker consumes them.
        for name in headings:
            iqueue.put(None)
        if options.profile:
            import cProfile
            profiler = cProfile.Profile()
            profiler.enable()
        else:
            profiler = None
        run_test(db_factory, 0, iqueue, oqueue, object_size,
                 transaction_size, repetitions, options.read_only)
        oqueue.get(timeout=9)  # ready?
        if profiler is not None:
            profiler.disable()
            profiler.dump_stats(options.profile)
        for name in headings:
            summarize(name, [oqueue.get(timeout=999) / repetitions])
    stop()