async def do_run(self, event_bus: EndpointAPI) -> None:
    trinity_config = self._boot_info.trinity_config
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()
    base_db = DBClient.connect(trinity_config.database_ipc_path)

    if self._boot_info.args.enable_metrics:
        metrics_service = metrics_service_from_args(self._boot_info.args, AsyncioMetricsService)
    else:
        # Use a NoopMetricsService so that no code branches need to be taken if metrics
        # are disabled
        metrics_service = NOOP_METRICS_SERVICE

    with base_db:
        beam_chain = make_pausing_beam_chain(
            chain_config.vm_configuration,
            chain_config.chain_id,
            chain_config.consensus_context_class,
            base_db,
            event_bus,
            metrics_service.registry,
            loop=asyncio.get_event_loop(),
        )

        import_server = BlockImportServer(event_bus, beam_chain)
        async with background_asyncio_service(metrics_service):
            async with background_asyncio_service(import_server) as manager:
                await manager.wait_finished()

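# A minimal, self-contained sketch of the nested background_asyncio_service
# pattern used in do_run above, assuming the `async-service` library's Service
# base class and background_asyncio_service helper. OneShotService is a
# hypothetical stand-in for the metrics service and import server.
import asyncio

from async_service import Service, background_asyncio_service


class OneShotService(Service):
    # Hypothetical service: pretends to do some work, then exits, which
    # lets the manager's wait_finished() return.
    async def run(self) -> None:
        await asyncio.sleep(0.1)


async def main() -> None:
    outer = OneShotService()
    inner = OneShotService()
    # The outer service runs in the background for as long as the block is
    # open; the caller blocks until the inner service finishes.
    async with background_asyncio_service(outer):
        async with background_asyncio_service(inner) as manager:
            await manager.wait_finished()


asyncio.run(main())
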
def do_start(self) -> None:
    trinity_config = self.boot_info.trinity_config
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()

    self._beam_chain = make_pausing_beam_chain(
        chain_config.vm_configuration,
        chain_config.chain_id,
        DBClient.connect(trinity_config.database_ipc_path),
        self.event_bus,
        self._loop,
    )

    import_server = BlockImportServer(self.event_bus, self._beam_chain)
    asyncio.ensure_future(exit_with_services(import_server, self._event_bus_service))
    asyncio.ensure_future(import_server.run())

def do_start(self) -> None:
    trinity_config = self.boot_info.trinity_config
    chain_config = trinity_config.get_chain_config()
    db_manager = create_db_consumer_manager(trinity_config.database_ipc_path)

    self._beam_chain = make_pausing_beam_chain(
        chain_config.vm_configuration,
        chain_config.chain_id,
        db_manager.get_db(),  # type: ignore
        self.event_bus,
    )

    asyncio.ensure_future(clean_up_endpoint(self.event_bus))

    import_server = BlockImportServer(self.event_bus, self._beam_chain)
    asyncio.ensure_future(import_server.run())

def do_start(self) -> None:
    trinity_config = self.boot_info.trinity_config
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()
    db_manager = create_db_consumer_manager(trinity_config.database_ipc_path)

    self._beam_chain = make_pausing_beam_chain(
        chain_config.vm_configuration,
        chain_config.chain_id,
        db_manager.get_db(),  # type: ignore
        self.event_bus,
        self._loop,
    )

    import_server = BlockImportServer(self.event_bus, self._beam_chain)
    asyncio.ensure_future(exit_with_services(import_server, self._event_bus_service))
    asyncio.ensure_future(import_server.run())

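# The do_start variants above schedule their services with
# asyncio.ensure_future, i.e. fire-and-forget: the coroutine is wrapped in a
# Task on the running loop and do_start returns immediately. A minimal
# stand-alone illustration of that scheduling behavior (tick is hypothetical):
import asyncio


async def tick() -> None:
    await asyncio.sleep(0.1)
    print("tick")


async def main() -> None:
    asyncio.ensure_future(tick())  # scheduled, not awaited
    await asyncio.sleep(0.2)       # keep the loop alive long enough to see it run


asyncio.run(main())
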
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    trinity_config = boot_info.trinity_config
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()
    base_db = DBClient.connect(trinity_config.database_ipc_path)

    with base_db:
        beam_chain = make_pausing_beam_chain(
            chain_config.vm_configuration,
            chain_config.chain_id,
            base_db,
            event_bus,
            loop=asyncio.get_event_loop(),
        )

        import_server = BlockImportServer(event_bus, beam_chain)
        async with run_service(import_server):
            await import_server.cancellation()

async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    trinity_config = boot_info.trinity_config
    app_config = trinity_config.get_app_config(Eth1AppConfig)
    chain_config = app_config.get_chain_config()
    base_db = DBClient.connect(trinity_config.database_ipc_path)

    with base_db:
        beam_chain = make_pausing_beam_chain(
            chain_config.vm_configuration,
            chain_config.chain_id,
            chain_config.consensus_context_class,
            base_db,
            event_bus,
            loop=asyncio.get_event_loop(),
        )

        import_server = BlockImportServer(event_bus, beam_chain)
        async with background_asyncio_service(import_server) as manager:
            await manager.wait_finished()

async def test_beam_syncer(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block,
        checkpoint=None):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        # Need a name that will be unique per xdist-process, otherwise
        # lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(f"PausingEndpoint-{unique_process_name}")
        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), run_request_server(
            event_bus, AsyncChainDB(chaindb_churner.db)
        ), AsyncioEndpoint.serve(
            pausing_config
        ) as pausing_endpoint, AsyncioEndpoint.serve(
            gatherer_config
        ) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, PetersburgVM), ),
                chain_id=999,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
            )

            client_peer.logger.info("%s is serving churner blocks", client_peer)
            server_peer.logger.info("%s is syncing up churner blocks", server_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
                token=client.cancel_token,
            )
            asyncio.ensure_future(import_server.run())

            await pausing_endpoint.connect_to_endpoints(gatherer_config)
            asyncio.ensure_future(client.run())

            # We can sync at least 10 blocks in 1s at current speeds (or reach the current one).
            # Trying to keep the tests short-ish. A fuller test could always set the target header
            # to the chaindb_churner canonical head, and increase the timeout significantly
            target_block_number = min(beam_to_block + 10, 129)
            target_head = chaindb_churner.get_canonical_block_header_by_number(target_block_number)
            await wait_for_head(chaindb_fresh, target_head, sync_timeout=10)
            assert target_head.state_root in chaindb_fresh.db

            # first stop the import server, so it doesn't hang waiting for state data
            await import_server.cancel()
            await client.cancel()

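# A sketch of the bare lahja endpoint wiring the tests above depend on, using
# only the calls that appear in them: ConnectionConfig.from_name plus
# AsyncioEndpoint.serve for each endpoint, then connect_to_endpoints so the
# pausing endpoint can reach the gatherer. Endpoint names are arbitrary; the
# uuid suffix mirrors the tests' per-process uniqueness trick.
import asyncio
import uuid

from lahja import AsyncioEndpoint, ConnectionConfig


async def main() -> None:
    pausing_config = ConnectionConfig.from_name(f"PausingEndpoint-{uuid.uuid4()}")
    gatherer_config = ConnectionConfig.from_name(f"GathererEndpoint-{uuid.uuid4()}")

    async with AsyncioEndpoint.serve(pausing_config) as pausing_endpoint:
        async with AsyncioEndpoint.serve(gatherer_config):
            # One-way connection: the pausing endpoint can now broadcast
            # requests that the gatherer serves.
            await pausing_endpoint.connect_to_endpoints(gatherer_config)


asyncio.run(main())
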
async def test_beam_syncer(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block):
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_churner.db),
    )

    # manually add endpoint for beam vm to make requests
    pausing_config = ConnectionConfig.from_name("PausingEndpoint")
    # manually add endpoint for trie data gatherer to serve requests
    gatherer_config = ConnectionConfig.from_name("GathererEndpoint")

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

    async with run_peer_pool_event_server(
        event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_request_server(
        event_bus, FakeAsyncChainDB(chaindb_churner.db)
    ), AsyncioEndpoint.serve(
        pausing_config
    ) as pausing_endpoint, AsyncioEndpoint.serve(
        gatherer_config
    ) as gatherer_endpoint:

        BeamPetersburgVM = pausing_vm_decorator(PetersburgVM, pausing_endpoint)

        class BeamPetersburgTestChain(FakeAsyncChain):
            vm_configuration = ((0, BeamPetersburgVM), )
            network_id = 999

        client_chain = BeamPetersburgTestChain(chaindb_fresh.db)
        client = BeamSyncer(
            client_chain,
            chaindb_fresh.db,
            client_chain.chaindb,
            client_peer_pool,
            gatherer_endpoint,
            beam_to_block,
        )

        client_peer.logger.info("%s is serving churner blocks", client_peer)
        server_peer.logger.info("%s is syncing up churner blocks", server_peer)

        import_server = BlockImportServer(pausing_endpoint, client_chain, token=client.cancel_token)
        asyncio.ensure_future(import_server.run())

        await pausing_endpoint.connect_to_endpoints(gatherer_config)
        asyncio.ensure_future(client.run())

        # We can sync at least 10 blocks in 1s at current speeds (or reach the current one).
        # Trying to keep the tests short-ish. A fuller test could always set the target header
        # to the chaindb_churner canonical head, and increase the timeout significantly
        target_block_number = min(beam_to_block + 10, 129)
        target_head = chaindb_churner.get_canonical_block_header_by_number(target_block_number)
        await wait_for_head(chaindb_fresh, target_head, sync_timeout=4)
        assert target_head.state_root in chaindb_fresh.db

        # first stop the import server, so it doesn't hang waiting for state data
        await import_server.cancel()
        await client.cancel()

async def _beam_syncing(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block,
        checkpoint=None,
        VM_at_0=PetersburgVM,
        enable_state_backfill=False,
):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    backfiller = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer), backfiller as (client2_peer, backfill_peer):
        # Need a name that will be unique per xdist-process, otherwise
        # lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(f"PausingEndpoint-{unique_process_name}")
        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers(
            [client_peer, backfill_peer],
            event_bus=event_bus,
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        backfill_peer_pool = MockPeerPoolWithConnectedPeers([client2_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), run_peer_pool_event_server(
            event_bus, backfill_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
            ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_churner.db))
        ), AsyncioEndpoint.serve(
            pausing_config
        ) as pausing_endpoint, AsyncioEndpoint.serve(
            gatherer_config
        ) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, VM_at_0), ),
                chain_id=999,
                consensus_context_class=ConsensusContext,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                metrics_registry=NoopMetricsRegistry(),
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                NoopMetricsRegistry(),
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
                enable_state_backfill=enable_state_backfill,
                enable_backfill=False,
            )

            client_peer.logger.info("%s is serving churner blocks", client_peer)
            backfill_peer.logger.info("%s is serving backfill state", backfill_peer)
            server_peer.logger.info("%s is syncing up churner blocks", server_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
            )
            async with background_asyncio_service(import_server):
                await pausing_endpoint.connect_to_endpoints(gatherer_config)
                async with background_asyncio_service(client):
                    yield client

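# _beam_syncing above is an async generator that yields the running client.
# A hedged sketch of one way a test might consume it, assuming it is wrapped
# with contextlib.asynccontextmanager (the wrapping, the test body, and the
# beam_to_block value are assumptions, not taken from the source):
from contextlib import asynccontextmanager

beam_syncing = asynccontextmanager(_beam_syncing)


async def test_sync_example(request, event_loop, event_bus,
                            chaindb_fresh, chaindb_churner):
    async with beam_syncing(
        request, event_loop, event_bus,
        chaindb_fresh, chaindb_churner,
        beam_to_block=66,
    ) as client:
        # Inside the block the BlockImportServer and BeamSyncer are running;
        # assertions against chaindb_fresh would go here.
        pass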