async def create_backend(path: Path,
                         low_size: int,
                         high_size: int,
                         enable_archive: bool,
                         disable_journal: bool
                         ) -> 'Backend':
    """Create backend

    Opens the database at `path`, reads its id bounds, and wires up a
    running `Backend` instance with its background loop started.
    """
    db_conn = await database.create_database(path, disable_journal)

    try:
        initial_id = await db_conn.get_first_id()
        final_id = await db_conn.get_last_id()
    except BaseException:
        # ensure the freshly opened database is closed even if this
        # coroutine is cancelled while reading the id bounds
        await aio.uncancellable(db_conn.async_close())
        raise

    instance = Backend()

    # configuration
    instance._path = path
    instance._low_size = low_size
    instance._high_size = high_size
    instance._enable_archive = enable_archive
    instance._disable_journal = disable_journal

    # database state
    instance._db = db_conn
    instance._first_id = initial_id
    instance._last_id = final_id

    # runtime resources
    instance._async_group = aio.Group()
    instance._change_cbs = util.CallbackRegistry()
    instance._msg_queue = aio.Queue(register_queue_size)
    instance._executor = aio.create_executor()

    # register close hook before starting the loop so the database is
    # always closed when the group shuts down
    instance._async_group.spawn(aio.call_on_cancel, db_conn.async_close)
    instance._async_group.spawn(instance._loop)

    mlog.debug('created backend with database %s', path)
    return instance
async def create_view_manager(conf: json.Data
                              ) -> 'ViewManager':
    """Create view manager

    Indexes the configured views by their 'name' key and attaches the
    async group and executor used for view loading.
    """
    manager = ViewManager()

    confs_by_name = {}
    for view_conf in conf['views']:
        confs_by_name[view_conf['name']] = view_conf
    manager._view_confs = confs_by_name

    manager._async_group = aio.Group()
    manager._executor = aio.create_executor()

    return manager
async def create_database(path: Path,
                          disable_journal: bool
                          ) -> 'Database':
    """Create database

    Connects on a dedicated single-worker executor and registers a
    close hook that runs when the async group is cancelled.
    """
    run_blocking = aio.create_executor(1)
    conn = await run_blocking(_ext_connect, path, disable_journal)

    async def on_close():
        # close the connection on the same executor thread it was
        # created on
        await run_blocking(_ext_close, conn)
        mlog.debug('database %s closed', path)

    group = aio.Group()
    group.spawn(aio.call_on_cancel, on_close)

    db = Database()
    db._path = path
    db._conn = conn
    db._async_group = group
    db._executor = run_blocking

    mlog.debug('opened database %s', path)
    return db
async def create(conf: json.Data) -> 'LmdbBackend':
    """Create LMDB backend.

    Builds the LMDB environment and its sub-databases (system, latest,
    ordered) sequentially on a single-worker executor, then starts the
    write loop.
    """
    backend = LmdbBackend()
    backend._sync_period = conf['sync_period']
    # single-worker executor: all LMDB access is serialized on one thread
    backend._executor = aio.create_executor(1)
    backend._conditions = Conditions(conf['conditions'])
    # presumably 2 + 2*len(ordered) is the number of named sub-databases
    # the environment must accommodate — TODO confirm against _ext_create_env
    backend._env = await backend._executor(
        _ext_create_env, Path(conf['db_path']), conf['max_db_size'],
        2 + 2 * len(conf['ordered']))
    backend._sys_db = await hat.event.server.backends.lmdb.systemdb.create(
        backend._executor, backend._env, 'system', conf['server_id'])
    subscription = common.Subscription(
        tuple(i) for i in conf['latest']['subscriptions'])
    backend._latest_db = await hat.event.server.backends.lmdb.latestdb.create(
        backend._executor, backend._env, 'latest', subscription,
        backend._conditions)
    # one ordered database per configured 'ordered' entry, named by index
    backend._ordered_dbs = collections.deque()
    for i, i_conf in enumerate(conf['ordered']):
        order_by = common.OrderBy[i_conf['order_by']]
        subscription = common.Subscription(
            tuple(et) for et in i_conf['subscriptions'])
        # limit is optional per entry
        limit = i_conf.get('limit')
        name = f'ordered_{i}'
        ordered_dbs = await hat.event.server.backends.lmdb.ordereddb.create(
            backend._executor, backend._env, name, subscription,
            backend._conditions, order_by, limit)
        backend._ordered_dbs.append(ordered_dbs)
    backend._async_group = aio.Group()
    backend._async_group.spawn(backend._write_loop)
    return backend
async def executor():
    """Provide a single-worker executor."""
    single_worker = aio.create_executor(1)
    return single_worker
async def _create_connection(db_path):
    """Open a `_Connection` backed by its own single-worker executor."""
    conn = _Connection()
    run_blocking = aio.create_executor(1)
    conn._executor = run_blocking
    # connect on the executor thread so blocking I/O stays off the loop
    conn._db = await run_blocking(_ext_connect, db_path)
    return conn
async def test_create_executor_example():
    """Each created executor runs its work on a distinct thread."""
    first = aio.create_executor()
    second = aio.create_executor()

    thread_ids = {await first(threading.get_ident),
                  await second(threading.get_ident)}

    # two different executors -> two different worker thread ids
    assert len(thread_ids) == 2
async def test_create_executor():
    """Executor work runs outside the calling thread."""
    run = aio.create_executor()

    def current_thread_name():
        return threading.current_thread().name

    worker_name = await run(current_thread_name)

    # the worker thread must not be the thread running this coroutine
    assert worker_name != threading.current_thread().name