def test_count_prefix(testset1):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema3()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                for user in testset1:
                    schema.users[txn, user.authid] = user

        # authids in testset1 have the form 'test-<i>', so e.g. prefix u'test-1'
        # matches 'test-1', 'test-10'..'test-19' and 'test-100'..'test-199',
        # i.e. 111 records
        n = len(testset1)
        tests = [
            (None, n),
            (u'', n),
            (u'test-', n),
            (u'test-1', 111),
            (u'test-11', 11),
            (u'test-111', 1),
        ]

        with zlmdb.Database(dbpath) as db:
            with db.begin() as txn:
                for prefix, num in tests:
                    cnt = schema.users.count(txn, prefix)
                    assert cnt == num

def test_truncate_table_with_index(testset1):
    """
    Fill a table that has indexes with records, truncate the table and check
    that all index records have been deleted as well.
    """
    with TemporaryDirectory() as dbpath:
        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

        stats = zlmdb.TransactionStats()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True, stats=stats) as txn:
                records = schema.users.truncate(txn)

                # truncate deletes one record per data row, plus one per index entry
                assert records == len(testset1) * (len(schema.users.indexes()) + 1)
                assert stats.dels == records
                assert stats.puts == 0

def test_save_load_many_2(testset1):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema2()

        oids = []
        with zlmdb.Database(dbpath) as db:
            # write records in a 1st transaction
            with db.begin(write=True) as txn:
                c = 0
                for user in testset1:
                    schema.users[txn, user.oid] = user
                    oids.append(user.oid)
                    c += 1
                assert c == len(testset1)
                print('[1] successfully stored {} records'.format(c))

                # in the same transaction, read back records
                c = 0
                for oid in oids:
                    user = schema.users[txn, oid]
                    if user:
                        c += 1
                assert c == len(testset1)
                print('[1] successfully loaded {} records'.format(c))

            # in a new transaction, read back records
            c = 0
            with db.begin() as txn:
                for oid in oids:
                    user = schema.users[txn, oid]
                    if user:
                        c += 1
                assert c == len(testset1)
                print('[2] successfully loaded {} records'.format(c))

        # in a new database environment (and hence new transaction), read back records
        with zlmdb.Database(dbpath) as db:
            with db.begin() as txn:
                count = schema.users.count(txn)
                assert count == len(testset1)
                print('total records:', count)

                c = 0
                for oid in oids:
                    user = schema.users[txn, oid]
                    if user:
                        c += 1
                assert c == len(testset1)
                print('[3] successfully loaded {} records'.format(c))

def test_rebuild_all_indexes(testset1):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                records = schema.users.rebuild_indexes(txn)
                print('\nrebuilt all indexes on "users": {} records affected'.format(records))

def main(reactor):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = MySchema()

        db = zlmdb.Database(dbpath)

        # WRITE some native pandas data frames to zlmdb
        with db.begin(write=True) as txn:
            for i in range(10):
                if i % 2:
                    key = 'key{}'.format(i)
                    value = pd.DataFrame(np.random.randn(8, 4),
                                         columns=['A', 'B', 'C', 'D'])
                    schema.samples[txn, key] = value

        # READ back native pandas data frames from zlmdb
        # (only odd-numbered keys were written above, so even-numbered
        # keys read back as None)
        with db.begin() as txn:
            for i in range(10):
                key = 'key{}'.format(i)
                value = schema.samples[txn, key]
                print('key={} : value=\n{}'.format(key, value))

    yield util.sleep(1)

def main2(reactor):
    dbpath = '/tmp/zlmdb1'
    print('Using database directory {}'.format(dbpath))

    schema = UsersSchema()

    with zlmdb.Database(dbpath) as db:
        N = 1000

        with db.begin() as txn:
            cnt_begin = schema.tab_oid_fbs.count(txn)

        stats = zlmdb.TransactionStats()

        with db.begin(write=True, stats=stats) as txn:
            for i in range(N):
                user = UserFbs.create_test_user()
                schema.tab_oid_fbs[txn, user.oid] = user

        assert stats.puts == N
        assert stats.dels == 0
        stats.reset()

        with db.begin() as txn:
            cnt_end = schema.tab_oid_fbs.count(txn)

        cnt = cnt_end - cnt_begin
        assert cnt == N

        print('{} records written, {} records total'.format(cnt, cnt_end))

    yield util.sleep(1)

def test_fill_non_unique_indexes(testset1):
    """
    Insert records into a table with a non-unique, non-nullable indexed column.
    """
    with TemporaryDirectory() as dbpath:
        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            stats = zlmdb.TransactionStats()

            with db.begin(write=True, stats=stats) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

            # check non-unique indexes
            with db.begin() as txn:
                for j in range(10):
                    user_oids = list(
                        schema.idx_users_by_realm.select(txn,
                                                         return_keys=False,
                                                         from_key=(j, 0),
                                                         to_key=(j + 1, 0)))
                    assert list(range(j * 100, (j + 1) * 100)) == user_oids

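# The range select above (and in the deletion tests below) relies on zlmdb's
# key-range select over the composite index key (realm_oid, user_oid), with
# from_key as the inclusive lower bound and to_key as the (exclusive) upper
# bound. A minimal sketch of the same pattern as a helper - the function name
# "select_realm_users" is illustrative and not part of the original test suite:

def select_realm_users(txn, schema, realm_oid):
    """Return the oids of all users in the given realm, in key order."""
    return list(
        schema.idx_users_by_realm.select(txn,
                                         return_keys=False,
                                         from_key=(realm_oid, 0),
                                         to_key=(realm_oid + 1, 0)))
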
def __init__(self, dbpath: str, config: Dict[str, Any]):
    """
    Initialize a database-backed cookiestore.

    Example configuration:

    .. code-block:: json

        {
            "type": "database",
            "path": ".cookies",
            "purge_on_startup": false,
            "maxsize": 1048576,
            "readonly": false,
            "sync": true
        }

    :param dbpath: Filesystem path to database.
    :param config: Database cookie store configuration.
    """
    self.log.info(
        '{func}: initializing database-backed cookiestore with config=\n{config}',
        func=hltype(CookieStoreDatabaseBacked.__init__),
        config=pformat(config))
    CookieStore.__init__(self, config)

    maxsize = config['store'].get('maxsize', 1024 * 2**20)
    assert type(maxsize) == int, "maxsize must be an int, was {}".format(type(maxsize))
    # allow maxsize 128kiB to 128GiB
    assert maxsize >= 128 * 1024 and maxsize <= 128 * 2**30, \
        "maxsize must be >=128kiB and <=128GiB, was {}".format(maxsize)

    readonly = config['store'].get('readonly', False)
    assert type(readonly) == bool, "readonly must be a bool, was {}".format(type(readonly))

    sync = config['store'].get('sync', True)
    assert type(sync) == bool, "sync must be a bool, was {}".format(type(sync))

    if config['store'].get('purge_on_startup', False):
        zlmdb.Database.scratch(dbpath)
        self.log.warn(
            '{func}: scratched embedded database (purge_on_startup is enabled)!',
            func=hltype(CookieStoreDatabaseBacked.__init__))

    self._db = zlmdb.Database(dbpath=dbpath, maxsize=maxsize, readonly=readonly, sync=sync)
    # self._db.__enter__()
    self._schema = cookiestore.CookieStoreSchema.attach(self._db)

    dbstats = self._db.stats(include_slots=True)
    self.log.info(
        '{func}: database-backed cookiestore opened from dbpath="{dbpath}" - dbstats=\n{dbstats}',
        func=hltype(CookieStoreDatabaseBacked.__init__),
        dbpath=hlval(dbpath),
        dbstats=pformat(dbstats))

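# A hypothetical construction sketch for the cookiestore above. Note that the
# validated options are read from config['store'], so the docstring example
# must sit under a "store" key of the config actually passed in; the exact
# embedding shown here is an assumption, not confirmed by this excerpt:
#
#   config = {
#       'store': {
#           'type': 'database',
#           'path': '.cookies',
#           'purge_on_startup': False,
#           'maxsize': 1048576,   # 1 MiB, within the allowed 128 kiB .. 128 GiB
#           'readonly': False,
#           'sync': True,
#       }
#   }
#   store = CookieStoreDatabaseBacked(dbpath=config['store']['path'], config=config)
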
def test_delete_nonunique_indexes(testset1):
    """
    Insert records into a table with a non-unique index, delete the data
    records and check that the index records have been deleted as a consequence.
    """
    with TemporaryDirectory() as dbpath:
        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            stats = zlmdb.TransactionStats()

            with db.begin(write=True, stats=stats) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

            with db.begin(write=True) as txn:
                for user in testset1:
                    del schema.users[txn, user.oid]

            with db.begin() as txn:
                for j in range(10):
                    user_oids = list(
                        schema.idx_users_by_realm.select(txn,
                                                         return_keys=False,
                                                         from_key=(j, 0),
                                                         to_key=(j + 1, 0)))
                    assert [] == user_oids

def test_delete_nonindexes2(testset1):
    """
    WARNING: quadratic run-time (in testset size)
    """
    with TemporaryDirectory() as dbpath:
        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            stats = zlmdb.TransactionStats()

            with db.begin(write=True, stats=stats) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

            with db.begin(write=True) as txn:
                for j in range(10):
                    fullset = set(range(j * 100, (j + 1) * 100))
                    for i in range(100):
                        user_oid = j * 100 + i
                        del schema.users[txn, user_oid]
                        fullset.discard(user_oid)

                        user_oids = set(
                            schema.idx_users_by_realm.select(txn,
                                                             return_keys=False,
                                                             from_key=(j, 0),
                                                             to_key=(j + 1, 0)))
                        assert fullset == user_oids

def __init__(self, config):
    ApplicationSession.__init__(self, config)

    # ZLMDB database configuration
    #
    self._dbpath = os.path.abspath(config.extra.get('dbpath', './.xbrnetwork'))
    self._db = zlmdb.Database(dbpath=self._dbpath, maxsize=2**30, readonly=False, sync=True)
    self._db.__enter__()
    self._schema = Schema.attach(self._db)

    self._pubkey_by_session = {}
    self._member_by_session = {}
    self._sessions_by_member = {}

    with self._db.begin() as txn:
        cnt_user_keys = self._schema.user_keys.count(txn)

    self.log.info('Database opened from {dbpath} (cnt_user_keys={cnt_user_keys})',
                  dbpath=self._dbpath,
                  cnt_user_keys=cnt_user_keys)

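# Design note on the pattern above (also used by the store classes below):
# calling self._db.__enter__() opens the LMDB environment for the lifetime of
# the object instead of scoping it to a with-block; the matching __exit__()
# (or close) is expected to be called on shutdown, which lies outside this
# excerpt.
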
def main(reactor):
    schema = zlmdb._database.Schema.parse(DBSCHEMA, KV_TYPE_TO_CLASS)

    print('Using database directory {} and schema {}:\n{}'.format(DBPATH, DBSCHEMA, schema))

    with zlmdb.Database(DBPATH, schema) as db:
        with db.begin(write=True) as txn:
            users = schema['users']
            users2 = schema['users2']
            print('users', users)
            print('users2', users2)

            key = 'user1'
            for table in [users, users2]:
                user = table[txn, key]
                if user:
                    print('user object already exists in {} for key {}: {}'.format(table, key, user))
                else:
                    print('user does not exist in {}, storing new object ..'.format(table))
                    user = User.create_test_user()
                    table[txn, key] = user
                    print('user object created for key {}: {}'.format(key, user))

    yield util.sleep(1)

def test_truncate_table():
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema1()

        stats = zlmdb.TransactionStats()

        tabs = [
            schema.tab_oid_json,
            schema.tab_str_json,
            schema.tab_uuid_json,
            schema.tab_oid_cbor,
            schema.tab_str_cbor,
            schema.tab_uuid_cbor,
            schema.tab_oid_pickle,
            schema.tab_str_pickle,
            schema.tab_uuid_pickle,
        ]

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True, stats=stats) as txn:
                for tab in tabs:
                    tab.truncate(txn)

        print(stats.puts)
        print(stats.dels)

def test_fill_check(testset1):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema3()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                for user in testset1:
                    schema.users[txn, user.authid] = user

        with zlmdb.Database(dbpath) as db:
            with db.begin() as txn:
                for user in testset1:
                    _user = schema.users[txn, user.authid]
                    assert _user
                    assert _user == user

def test_pmap_flatbuffers_count():
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = UsersSchema()

        # max DB size is 100 MB
        with zlmdb.Database(dbpath, maxsize=100 * (2**20)) as db:
            oids = set()
            oid_to_referred_by = {}

            stats = zlmdb.TransactionStats()

            # number of transactions
            M = 5
            # number of insert rows per transaction
            N = 10000
            for j in range(M):
                with db.begin(write=True, stats=stats) as txn:
                    for i in range(N):
                        user = User.create_test_user()
                        schema.tab_oid_fbs[txn, user.oid] = user
                        oids.add(user.oid)
                        oid_to_referred_by[user.oid] = user.referred_by

                assert stats.puts == N
                assert stats.dels == 0

                duration_ns = stats.duration
                duration_ms = int(duration_ns / 1000000.)
                rows_per_sec = int(round(float(stats.puts + stats.dels) * 1000. / float(duration_ms)))
                print('Transaction ended: puts={} / dels={} rows in {} ms, {} rows/sec'.format(
                    stats.puts, stats.dels, duration_ms, rows_per_sec))

                stats.reset()

            # count all rows
            with db.begin() as txn:
                cnt = schema.tab_oid_fbs.count(txn)
                assert cnt == N * M

            # retrieve
            with db.begin() as txn:
                for j in range(5):
                    started = zlmdb.walltime()

                    # M is rebound here: 100 batches of N random reads each
                    M = 100
                    for i in range(M):
                        # random.sample() requires a sequence - sampling
                        # directly from a set was removed in Python 3.11
                        for oid in random.sample(list(oids), N):
                            user = schema.tab_oid_fbs[txn, oid]
                            assert user
                            assert user.referred_by == oid_to_referred_by.get(oid, None)

                    duration_ns = zlmdb.walltime() - started
                    duration_ms = int(duration_ns / 1000000.)
                    rows_per_sec = int(round(float(M * N) * 1000. / float(duration_ms)))
                    print('Transaction ended: {} rows read in {} ms, {} rows/sec'.format(
                        M * N, duration_ms, rows_per_sec))

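# Worked example of the benchmark arithmetic used above (illustrative numbers):
# stats.duration and zlmdb.walltime() are in nanoseconds as used here, so
# 10,000 puts taking 250,000,000 ns give duration_ms = 250 and
# rows_per_sec = round(10000 * 1000 / 250) = 40,000.
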
def test_transaction():
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        with zlmdb.Database(dbpath) as db:
            with db.begin() as txn:
                print('transaction open', txn.id())
            print('transaction committed')
        print('database closed')

def test_fill_indexes_nullable(testset1):
    """
    Test filling a table with multiple indexes, some of which are on NULLable
    columns, using records whose values in those columns are actually NULL.
    """
    with TemporaryDirectory() as dbpath:
        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            stats = zlmdb.TransactionStats()

            with db.begin(write=True, stats=stats) as txn:
                for user in testset1:
                    _user = deepcopy(user)

                    # "user.email" is an indexed column that is nullable
                    _user.email = None

                    # "user.mrealm" is an indexed (composite) column that is nullable
                    _user.mrealm = None

                    schema.users[txn, _user.oid] = _user

            # check the indexes have been written to (in addition to the table itself)
            num_indexes = len(schema.users.indexes())

            # because we have set 2 indexed columns to NULL, we need to subtract those 2
            # from the total number of indexes
            assert stats.puts == len(testset1) * (1 + num_indexes - 2)

            # check saved objects
            with db.begin() as txn:
                for user in testset1:
                    _user = deepcopy(user)
                    _user.email = None
                    _user.mrealm = None

                    obj = schema.users[txn, _user.oid]
                    assert _user == obj

            # check unique indexes
            with db.begin() as txn:
                for user in testset1:
                    # check one of the indexes that was indeed filled
                    user_oid = schema.idx_users_by_authid[txn, user.authid]
                    assert user.oid == user_oid

                    # check indexes that have NOT been filled
                    user_oid = schema.idx_users_by_email[txn, user.email]
                    assert user_oid is None

                    user_oid = schema.idx_users_by_mrealm_authid[txn, (user.mrealm, user.authid)]
                    assert user_oid is None

def __init__(self, personality, factory, config):
    """
    :param personality:
    :param factory:
    :param config: Realm store configuration item.
    """
    from twisted.internet import reactor

    self._reactor = reactor
    self._personality = personality
    self._factory = factory

    dbpath = config.get('path', None)
    assert type(dbpath) == str

    maxsize = config.get('maxsize', 128 * 2**20)
    assert type(maxsize) == int
    # allow maxsize 128kiB to 128GiB
    assert maxsize >= 128 * 1024 and maxsize <= 128 * 2**30

    readonly = config.get('readonly', False)
    assert type(readonly) == bool

    sync = config.get('sync', True)
    assert type(sync) == bool

    self._config = config
    self._type = self._config.get('type', None)
    assert self._type == self.STORE_TYPE

    self._db = zlmdb.Database(dbpath=dbpath, maxsize=maxsize, readonly=readonly, sync=sync)
    self._db.__enter__()
    self._schema = RealmStore.attach(self._db)

    self._running = False
    self._process_buffers_thread = None

    self._max_buffer = config.get('max-buffer', 10000)
    self._buffer_flush = config.get('buffer-flush', 200)
    self._buffer = []
    self._log_counter = 0

    # map: registration.id -> deque( (session, call, registration, authorization) )
    self._queued_calls = {}

    self.log.info(
        '{func} realm store initialized (type="{stype}", dbpath="{dbpath}", maxsize={maxsize}, '
        'readonly={readonly}, sync={sync})',
        func=hltype(self.__init__),
        stype=hlval(self._type),
        dbpath=dbpath,
        maxsize=maxsize,
        readonly=readonly,
        sync=sync)

def test_truncate_table_with_index(testset1):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

        stats = zlmdb.TransactionStats()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True, stats=stats) as txn:
                records = schema.users.truncate(txn)
                print('table truncated:', records)

        print(stats.puts)
        print(stats.dels)

def test_select(testset1):
    testset1_keys = set([user.authid for user in testset1])

    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema3()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                for user in testset1:
                    schema.users[txn, user.authid] = user

        with zlmdb.Database(dbpath) as db:
            with db.begin() as txn:
                i = 0
                for authid, user in schema.users.select(txn):
                    i += 1
                    assert user
                    assert authid == user.authid
                    assert authid in testset1_keys

def test_count_all(testset1):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema3()

        with zlmdb.Database(dbpath) as db:
            # count on empty table
            with db.begin() as txn:
                cnt = schema.users.count(txn)
                assert cnt == 0

            # fill (and count on each insert)
            with db.begin(write=True) as txn:
                i = 0
                for user in testset1:
                    schema.users[txn, user.authid] = user
                    i += 1

                    # table count within filling transaction
                    cnt = schema.users.count(txn)
                    assert cnt == i

                # table count at the end of the filling transaction
                cnt = schema.users.count(txn)
                assert cnt == len(testset1)

            # table count in new transaction
            with db.begin() as txn:
                cnt = schema.users.count(txn)
                assert cnt == len(testset1)

        # table count in new connection
        with zlmdb.Database(dbpath) as db:
            with db.begin() as txn:
                cnt = schema.users.count(txn)
                assert cnt == len(testset1)

def __init__(self, config=None, reactor=None, personality=None):
    # WorkerController derives from NativeProcess, which will set self._reactor
    WorkerController.__init__(self, config=config, reactor=reactor, personality=personality)

    worker_options_extra = dict(config.extra.extra)
    self._database_config = worker_options_extra['database']
    self._blockchain_config = worker_options_extra['blockchain']
    self._ipfs_files_directory = worker_options_extra.get('ipfs_files_directory', './.ipfs_files')

    # xbrmm worker status
    self._status = None

    # map of market makers by ID
    self._makers = {}
    self._maker_adr2id = {}

    # open xbrmm worker database, containing a replica of xbr on-chain data (other than
    # channels, which are market specific and stored in the market maker database of the
    # maker of that market)
    self._dbpath = os.path.abspath(
        self._database_config.get('dbpath', './.xbrmm-{}-db'.format(config.extra.worker)))
    self._db = zlmdb.Database(dbpath=self._dbpath,
                              maxsize=self._database_config.get('maxsize', 2**30),
                              readonly=False,
                              sync=True)
    self._db.__enter__()

    # generic database object metadata
    self._meta = cfxdb.meta.Schema.attach(self._db)

    # xbr database schema
    self._xbr = cfxdb.xbr.Schema.attach(self._db)

    # xbr market maker schema
    self._xbrmm = cfxdb.xbrmm.Schema.attach(self._db)

    # event object to coordinate exit of the blockchain monitor background check
    self._run_monitor = None

    # blockchain gateway configuration
    self._bc_gw_config = self._blockchain_config['gateway']
    self.log.info('Initializing Web3 from blockchain gateway configuration\n\n{gateway}\n',
                  gateway=pformat(self._bc_gw_config))
    self._w3 = make_w3(self._bc_gw_config)
    xbr.setProvider(self._w3)

    self._chain_id = self._blockchain_config.get('chain_id', 1)
    self.log.info('Using chain ID {chain_id}', chain_id=hlid(self._chain_id))

    # to be initialized once the cbdir variable becomes available
    self._ipfs_files_dir = os.path.join(config.extra.cbdir, self._ipfs_files_directory)

def test_save_load_many_1(testset1):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema2()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

                cnt = schema.users.count(txn)
                print('users saved:', cnt)
                assert cnt == len(testset1)

            with db.begin() as txn:
                cnt = schema.users.count(txn)
                assert cnt == len(testset1)

        with zlmdb.Database(dbpath) as db:
            with db.begin() as txn:
                cnt = schema.users.count(txn)
                assert cnt == len(testset1)

def test_delete_indexes(testset1):
    """
    Insert records into a table with indexes, delete the data records and check
    that the index records have been deleted as a consequence.
    """
    with TemporaryDirectory() as dbpath:
        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            stats = zlmdb.TransactionStats()

            # insert data records
            with db.begin(write=True, stats=stats) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

            # delete data records and check that all index records have been deleted as well
            with db.begin(write=True) as txn:
                for user in testset1:
                    del schema.users[txn, user.oid]

                    user_oid = schema.idx_users_by_authid[txn, user.authid]
                    assert user_oid is None

                    user_oid = schema.idx_users_by_email[txn, user.email]
                    assert user_oid is None

                    user_oid = schema.idx_users_by_realm[txn, (user.realm_oid, user.oid)]
                    assert user_oid is None

                    user_oid = schema.idx_users_by_icecream[txn, (user.icecream, user.oid)]
                    assert user_oid is None

                    user_oid = schema.idx_users_by_mrealm_authid[txn, (user.mrealm, user.authid)]
                    assert user_oid is None

                    user_oid = schema.idx_users_by_mrealm_notnull_authid[txn, (user.mrealm_notnull, user.authid)]
                    assert user_oid is None

def test_fill_with_indexes(testset1):
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            stats = zlmdb.TransactionStats()

            with db.begin(write=True, stats=stats) as txn:
                for user in testset1:
                    schema.users[txn, user.oid] = user

            # check the indexes have been written to (in addition to the table itself)
            num_indexes = len(schema.users.indexes())
            assert stats.puts == len(testset1) * (1 + num_indexes)

def test_set_notnull_indexes_nullable(testset1):
    """
    Fill a table that has a unique-nullable indexed column with records whose
    indexed column values are NULL, then (in a 2nd transaction) set the indexed
    column to NON-NULL values and check that the index records are created.
    """
    with TemporaryDirectory() as dbpath:
        schema = Schema4()

        with zlmdb.Database(dbpath) as db:
            stats = zlmdb.TransactionStats()

            # fill table with NULLs in the indexed column
            with db.begin(write=True) as txn:
                for user in testset1:
                    _user = deepcopy(user)
                    _user.email = None
                    schema.users[txn, _user.oid] = _user

            # now update the table with NON-NULLs in the indexed column
            with db.begin(write=True, stats=stats) as txn:
                for user in testset1:
                    _user = schema.users[txn, user.oid]
                    _user.email = user.email
                    schema.users[txn, _user.oid] = _user

            # check that the table records have their indexed
            # column values updated to NON-NULLs
            with db.begin() as txn:
                for user in testset1:
                    obj = schema.users[txn, user.oid]
                    assert user == obj

            # check that the index records that previously did not exist
            # have been created (as the indexed column values have been
            # set to NON-NULLs)
            with db.begin() as txn:
                for user in testset1:
                    user_oid = schema.idx_users_by_authid[txn, user.authid]
                    assert user.oid == user_oid

                    user_oid = schema.idx_users_by_email[txn, user.email]
                    assert user.oid == user_oid

def test_pmap_value_types():
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema1()

        n = 100
        stats = zlmdb.TransactionStats()

        tabs = [
            (schema.tab_oid_json, schema.tab_str_json, schema.tab_uuid_json),
            (schema.tab_oid_cbor, schema.tab_str_cbor, schema.tab_uuid_cbor),
            (schema.tab_oid_pickle, schema.tab_str_pickle, schema.tab_uuid_pickle),
        ]

        with zlmdb.Database(dbpath) as db:
            for tab_oid, tab_str, tab_uuid in tabs:
                with db.begin(write=True, stats=stats) as txn:
                    for i in range(n):
                        user = User.create_test_user(i)
                        tab_oid[txn, user.oid] = user
                        tab_str[txn, user.authid] = user
                        tab_uuid[txn, user.uuid] = user
                print('transaction committed')

                assert stats.puts == n * 3
                assert stats.dels == 0
                stats.reset()

                with db.begin() as txn:
                    cnt = tab_oid.count(txn)
                    assert cnt == n

                    cnt = tab_str.count(txn)
                    assert cnt == n

                    cnt = tab_uuid.count(txn)
                    assert cnt == n

        print('database closed')

def __init__(self, dbpath):
    """
    :param dbpath: Database file to open.
    """
    self._dbpath = os.path.abspath(dbpath)
    self._db = zlmdb.Database(dbpath=self._dbpath, maxsize=2**30, readonly=False)
    self._db.__enter__()

    self._meta = cfxdb.meta.Schema.attach(self._db)
    self._globalschema = cfxdb.globalschema.GlobalSchema.attach(self._db)
    self._mrealmschema = cfxdb.mrealmschema.MrealmSchema.attach(self._db)
    self._xbr = cfxdb.xbr.Schema.attach(self._db)
    self._xbrmm = cfxdb.xbrmm.Schema.attach(self._db)
    self._xbrnetwork = cfxdb.xbrnetwork.Schema.attach(self._db)

    self._schemata = {
        'meta': self._meta,
        'globalschema': self._globalschema,
        'mrealmschema': self._mrealmschema,
        'xbr': self._xbr,
        'xbrmm': self._xbrmm,
        'xbrnetwork': self._xbrnetwork,
    }

    self._schema_tables = {}
    for schema_name, schema in self._schemata.items():
        tables = {}
        for k, v in schema.__annotations__.items():
            # use the first non-empty line of the table class docstring as a
            # summary (reset per table, so a table without a docstring line
            # does not inherit the previous table's summary)
            first = None
            for line in v.__doc__.splitlines():
                line = line.strip()
                if line != "":
                    first = line[:80]
                    break
            tables[k] = first
        self._schema_tables[schema_name] = tables

def test_save_load():
    with TemporaryDirectory() as dbpath:
        print('Using temporary directory {} for database'.format(dbpath))

        schema = Schema2()

        user = User.create_test_user()

        with zlmdb.Database(dbpath) as db:
            with db.begin(write=True) as txn:
                schema.users[txn, user.oid] = user
                print('user saved')

                _user = schema.users[txn, user.oid]
                assert _user
                assert user == _user
                print('user loaded')

            print('transaction committed')

        print('database closed')

def __init__(self, personality, factory, config):
    """
    :param config: Realm store configuration item.
    :type config: Mapping
    """
    dbpath = config.get('path', None)
    assert type(dbpath) == str

    maxsize = config.get('maxsize', 128 * 2**20)
    assert type(maxsize) == int
    # allow maxsize 128kiB to 128GiB
    assert maxsize >= 128 * 1024 and maxsize <= 128 * 2**30

    readonly = config.get('readonly', False)
    assert type(readonly) == bool

    sync = config.get('sync', True)
    assert type(sync) == bool

    self.log = personality.log

    self._db = zlmdb.Database(dbpath=dbpath, maxsize=maxsize, readonly=readonly, sync=sync)
    self._db.__enter__()
    self._schema = cfxdb.schema.Schema.attach(self._db)

    from twisted.internet import reactor

    self.event_store = CfxDbEventStore(reactor, self._db, self._schema, config)
    self.call_store = CfxDbCallQueue(reactor, self._db, self._schema, config)

    self.log.info(
        'Realm store initialized (type=zlmdb, dbpath="{dbpath}", maxsize={maxsize}, '
        'readonly={readonly}, sync={sync})',
        dbpath=dbpath,
        maxsize=maxsize,
        readonly=readonly,
        sync=sync)