async def test_header_syncer(request, event_loop, event_bus, chaindb_fresh, chaindb_20):
    # The client starts from an empty database and syncs headers from a
    # server that holds a 20-block chain.
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus),
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        # Run the server-side request handling so the client's header
        # requests actually get answered.
        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus,
            TO_NETWORKING_BROADCAST_CONFIG,
            AsyncChainDB(chaindb_20.db),
        )):
            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            async with background_asyncio_service(client):
                await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
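# The test above leans on a wait_for_head() helper to block until the client's
# canonical head reaches the target header. A minimal sketch of what such a
# helper can look like, assuming a simple polling loop with a timeout; this is
# illustrative, not Trinity's actual implementation, and the signature is an
# assumption:
import asyncio


async def wait_for_head(chaindb, expected_header, timeout=10):
    # Poll the canonical head until it matches, or fail loudly on timeout.
    async def _poll():
        while chaindb.get_canonical_head() != expected_header:
            await asyncio.sleep(0.05)

    await asyncio.wait_for(_poll(), timeout=timeout)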
async def sync(
        self,
        args: Namespace,
        logger: logging.Logger,
        chain: AsyncChainAPI,
        base_db: AtomicDatabaseAPI,
        peer_pool: BasePeerPool,
        event_bus: EndpointAPI) -> None:
    syncer = HeaderChainSyncer(
        chain,
        AsyncChainDB(base_db),
        cast(ETHPeerPool, peer_pool),
        args.sync_from_checkpoint,
    )
    async with background_asyncio_service(syncer) as manager:
        await manager.wait_finished()
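# sync() above uses the async-service pattern: a Service subclass implements
# run(), and background_asyncio_service() drives it in the background,
# yielding a manager whose wait_finished() returns once the service exits.
# A self-contained sketch of that lifecycle; ToyService is hypothetical,
# standing in for HeaderChainSyncer:
import asyncio

from async_service import Service, background_asyncio_service


class ToyService(Service):
    async def run(self) -> None:
        # A real syncer would loop here until cancelled; this one just finishes.
        await asyncio.sleep(0.1)


async def main() -> None:
    async with background_asyncio_service(ToyService()) as manager:
        await manager.wait_finished()  # unblocks when run() completes


asyncio.run(main())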
async def test_header_syncer(request, event_loop, event_bus, chaindb_fresh, chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_to_server, server_to_client):
        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_to_server], event_bus=event_bus),
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_to_client], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus,
            TO_NETWORKING_BROADCAST_CONFIG,
            AsyncChainDB(chaindb_1000.db),
        )):
            server_to_client.logger.info("%s is serving 1000 blocks", server_to_client)
            client_to_server.logger.info("%s is syncing up 1000", client_to_server)

            # Artificially split header sync into two parts, to verify that
            # cycling to the next sync works properly. Split by erasing the canonical
            # lookups in a middle chunk. We have to erase a range of them because of
            # how the skeleton syncer asks for every ~192 headers. The skeleton request
            # would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1,
                )
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                complete_chain_tip = chaindb_1000.get_canonical_head()

                # Not entirely certain that sending new block hashes is necessary, but...
                # it shouldn't hurt anything. Trying to fix this flaky test:
                # https://app.circleci.com/pipelines/github/ethereum/trinity/6855/workflows/131f9b03-8c99-4419-8e88-d2ef216e3dbb/jobs/259263/steps  # noqa: E501
                server_to_client.eth_api.send_new_block_hashes(
                    NewBlockHash(complete_chain_tip.hash, complete_chain_tip.block_number),
                )

                await wait_for_head(chaindb_fresh, complete_chain_tip)
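# Why the test erases a 200-header range rather than a single header: the
# skeleton sync requests headers at a fixed stride (~192 apart), so a lone
# missing lookup sits between two checkpoints and is never requested directly.
# Illustrative arithmetic only; it assumes checkpoints land on multiples of
# the stride, which simplifies the syncer's real anchoring:
STRIDE = 192

erased = range(500, 700)  # 200 consecutive numbers, i.e. more than one stride
assert [n for n in erased if n % STRIDE == 0] == [576]  # a checkpoint falls in the gap

lone_gap = range(600, 601)  # a single missing header
assert [n for n in lone_gap if n % STRIDE == 0] == []  # skipped right over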
async def test_header_syncer(request, event_loop, event_bus, chaindb_fresh, chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus),
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus,
            TO_NETWORKING_BROADCAST_CONFIG,
            AsyncChainDB(chaindb_1000.db),
        )):
            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # Artificially split header sync into two parts, to verify that
            # cycling to the next sync works properly. Split by erasing the canonical
            # lookups in a middle chunk. We have to erase a range of them because of
            # how the skeleton syncer asks for every ~192 headers. The skeleton request
            # would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1,
                )
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                # Do we have to do anything here to have the server notify the client
                # that it's capable of serving more headers now? ... Apparently not.

                await wait_for_head(chaindb_fresh, chaindb_1000.get_canonical_head())
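# Both variants of the test perform the same erase-and-restore dance on the
# block-number -> hash lookups. A minimal sketch of that mechanic against a
# plain dict standing in for the database; make_key() is a stand-in for
# SchemaV1.make_block_number_to_hash_lookup_key, and the key format shown is
# an assumption for illustration:
def make_key(block_number: int) -> bytes:
    return b"block-number-to-hash:%d" % block_number


db = {make_key(n): bytes(32) for n in range(1000)}  # fake 32-byte hashes

# Erase the middle chunk, remembering what was there...
erased = [(make_key(n), db.pop(make_key(n))) for n in range(500, 700)]
assert make_key(600) not in db  # the server can no longer look up this header

# ...then put it back, which lets the second half of the sync proceed.
for key, value in erased:
    db[key] = value
assert make_key(600) in db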