async def test_beam_syncer(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block):
    """Beam-sync a fresh chain from a peer that holds the churner chain.

    Links a client/server peer pair directly, runs a ``BeamSyncer`` whose VM
    pauses on missing state (served over a lahja endpoint by the gatherer),
    then asserts the client reaches a nearby target header and holds its
    state root.

    :param beam_to_block: block number at which beam sync should pivot in
    """
    client_peer, server_peer = await get_directly_linked_peers(
        request,
        event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_churner.db))

    # manually add endpoint for beam vm to make requests
    pausing_config = ConnectionConfig.from_name("PausingEndpoint")
    # manually add endpoint for trie data gatherer to serve requests
    gatherer_config = ConnectionConfig.from_name("GathererEndpoint")

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

    async with run_peer_pool_event_server(
        event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_request_server(
        event_bus, FakeAsyncChainDB(chaindb_churner.db)
    ), AsyncioEndpoint.serve(
        pausing_config
    ) as pausing_endpoint, AsyncioEndpoint.serve(
        gatherer_config
    ) as gatherer_endpoint:

        BeamPetersburgVM = pausing_vm_decorator(PetersburgVM, pausing_endpoint)

        class BeamPetersburgTestChain(FakeAsyncChain):
            vm_configuration = ((0, BeamPetersburgVM),)
            network_id = 999

        client_chain = BeamPetersburgTestChain(chaindb_fresh.db)
        client = BeamSyncer(
            client_chain,
            chaindb_fresh.db,
            client_chain.chaindb,
            client_peer_pool,
            gatherer_endpoint,
            beam_to_block,
        )

        # Fix: the *server* side holds chaindb_churner and serves its blocks;
        # the *client* is the one syncing them up. (The messages were
        # previously attached to the opposite peers.)
        server_peer.logger.info("%s is serving churner blocks", server_peer)
        client_peer.logger.info("%s is syncing up churner blocks", client_peer)

        import_server = BlockImportServer(pausing_endpoint, client_chain, token=client.cancel_token)
        asyncio.ensure_future(import_server.run())

        await pausing_endpoint.connect_to_endpoints(gatherer_config)
        asyncio.ensure_future(client.run())

        # We can sync at least 10 blocks in 1s at current speeds, (or reach the current one)
        # Trying to keep the tests short-ish. A fuller test could always set the target header
        # to the chaindb_churner canonical head, and increase the timeout significantly
        target_block_number = min(beam_to_block + 10, 129)
        target_head = chaindb_churner.get_canonical_block_header_by_number(target_block_number)
        await wait_for_head(chaindb_fresh, target_head, sync_timeout=4)
        assert target_head.state_root in chaindb_fresh.db

        # first stop the import server, so it doesn't hang waiting for state data
        await import_server.cancel()
        await client.cancel()
async def test_beam_syncer(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block,
        checkpoint=None):
    """Beam-sync a fresh chain from a peer that holds the churner chain.

    Builds a client/server ETH peer pair from factories, runs a
    ``BeamSyncer`` against a pausing Petersburg beam chain (missing state is
    gathered over a lahja endpoint), and asserts the client reaches a nearby
    target header and holds its state root.

    :param beam_to_block: block number to force the beam-sync pivot to
    :param checkpoint: optional checkpoint to start syncing from
    """
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        # Need a name that will be unique per xdist-process, otherwise
        # lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(
            f"PausingEndpoint-{unique_process_name}")
        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(
            f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), run_request_server(
            event_bus, AsyncChainDB(chaindb_churner.db)
        ), AsyncioEndpoint.serve(
            pausing_config
        ) as pausing_endpoint, AsyncioEndpoint.serve(
            gatherer_config
        ) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, PetersburgVM),),
                chain_id=999,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
            )

            # Fix: the *server* side holds chaindb_churner and serves its
            # blocks; the *client* is the one syncing them up. (The messages
            # were previously attached to the opposite peers.)
            server_peer.logger.info("%s is serving churner blocks", server_peer)
            client_peer.logger.info("%s is syncing up churner blocks", client_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
                token=client.cancel_token,
            )
            asyncio.ensure_future(import_server.run())

            await pausing_endpoint.connect_to_endpoints(gatherer_config)
            asyncio.ensure_future(client.run())

            # We can sync at least 10 blocks in 1s at current speeds, (or reach the current one)
            # Trying to keep the tests short-ish. A fuller test could always set the target header
            # to the chaindb_churner canonical head, and increase the timeout significantly
            target_block_number = min(beam_to_block + 10, 129)
            target_head = chaindb_churner.get_canonical_block_header_by_number(
                target_block_number)
            await wait_for_head(chaindb_fresh, target_head, sync_timeout=10)
            assert target_head.state_root in chaindb_fresh.db

            # first stop the import server, so it doesn't hang waiting for state data
            await import_server.cancel()
            await client.cancel()
async def _beam_syncing(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block,
        checkpoint=None,
        VM_at_0=PetersburgVM,
        enable_state_backfill=False,
):
    """Async-generator helper: run a beam-syncing client and yield it.

    Sets up two peer pairs — one for regular churner-block sync, one for
    state backfill — wires request/import servers over the event bus, starts
    the ``BeamSyncer`` as a background service, and yields the running
    client to the caller. Everything is torn down when the caller's
    ``async for`` / context exits.

    :param beam_to_block: block number to force the beam-sync pivot to
    :param checkpoint: optional checkpoint to start syncing from
    :param VM_at_0: VM class active from genesis (defaults to Petersburg)
    :param enable_state_backfill: whether the syncer backfills state
    """
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    backfiller = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer), backfiller as (client2_peer, backfill_peer):

        # Need a name that will be unique per xdist-process, otherwise
        # lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(
            f"PausingEndpoint-{unique_process_name}")
        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(
            f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers(
            [client_peer, backfill_peer],
            event_bus=event_bus,
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        backfill_peer_pool = MockPeerPoolWithConnectedPeers([client2_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), run_peer_pool_event_server(
            event_bus, backfill_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
            ETHRequestServer(
                event_bus,
                TO_NETWORKING_BROADCAST_CONFIG,
                AsyncChainDB(chaindb_churner.db),
            )
        ), AsyncioEndpoint.serve(
            pausing_config
        ) as pausing_endpoint, AsyncioEndpoint.serve(
            gatherer_config
        ) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, VM_at_0),),
                chain_id=999,
                consensus_context_class=ConsensusContext,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                metrics_registry=NoopMetricsRegistry(),
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                NoopMetricsRegistry(),
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
                enable_state_backfill=enable_state_backfill,
                enable_backfill=False,
            )

            # Fix: the *server* side holds chaindb_churner and serves its
            # blocks; the *client* is the one syncing them up. (The messages
            # were previously attached to the opposite peers.) The backfill
            # peer sits in the client's pool and serves backfill state.
            server_peer.logger.info("%s is serving churner blocks", server_peer)
            backfill_peer.logger.info("%s is serving backfill state", backfill_peer)
            client_peer.logger.info("%s is syncing up churner blocks", client_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
            )
            async with background_asyncio_service(import_server):
                await pausing_endpoint.connect_to_endpoints(gatherer_config)
                async with background_asyncio_service(client):
                    yield client