async def test_skeleton_syncer(request, event_loop, event_bus, chaindb_fresh, chaindb_1000):
    """Fast-sync a fresh chain from a peer that holds 1000 blocks.

    The client runs to completion (FastChainSyncer stops once caught up) and the
    fresh chaindb must end up with the same canonical head as the server's.
    """
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
        client = FastChainSyncer(LatestTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), run_request_server(
            event_bus, AsyncChainDB(chaindb_1000.db)
        ):
            # Fixed swapped log roles: the server serves the blocks, the client syncs.
            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000 blocks", client_peer)

            await asyncio.wait_for(client.run(), timeout=20)

            head = chaindb_fresh.get_canonical_head()
            assert head == chaindb_1000.get_canonical_head()
async def test_regular_syncer_fallback(request, event_loop, event_bus, chaindb_fresh, chaindb_20):
    """
    Test the scenario where a header comes in that's not in memory (but is in the DB)
    """
    alice_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    bob_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    pair_factory = LatestETHPeerPairFactory(
        alice_peer_context=alice_context,
        bob_peer_context=bob_context,
        event_bus=event_bus,
    )
    async with pair_factory as (client_peer, server_peer):
        client_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        client = FallbackTesting_RegularChainSyncer(
            ByzantiumTestChain(chaindb_fresh.db),
            chaindb_fresh,
            client_pool,
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        request_server = ETHRequestServer(
            event_bus,
            TO_NETWORKING_BROADCAST_CONFIG,
            AsyncChainDB(chaindb_20.db),
        )

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(request_server):
            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            async with background_asyncio_service(client):
                # Wait until the client has imported the server's canonical head,
                # then check the resulting state is present.
                await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
                head = chaindb_fresh.get_canonical_head()
                assert head.state_root in chaindb_fresh.db
async def test_header_gapfill_syncer(request, event_loop, event_bus, chaindb_with_gaps,
                                     chaindb_1000):
    """Fill the header gaps of a client chain from a peer holding the full 1000-block chain."""
    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client = HeaderChainGapSyncer(
            LatestTestChain(chaindb_with_gaps.db),
            chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_1000.db),
        )):
            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(client):
                await wait_for_head(
                    # We check for 249 because 250 exists from the very beginning (the checkpoint)
                    chaindb_with_gaps, chaindb_1000.get_canonical_block_header_by_number(249))
async def test_fast_syncer(request, event_loop, event_bus, chaindb_fresh, chaindb_1000):
    """Fast-sync 1000 blocks; the syncer service finishes by itself once caught up."""
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        client = FastChainSyncer(LatestTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
            ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                             AsyncChainDB(chaindb_1000.db))):
            # Fixed swapped log roles: the server serves the blocks, the client syncs.
            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000 blocks", client_peer)

            async with background_asyncio_service(client) as manager:
                # FastChainSyncer's service exits once it has caught up with the peer.
                await asyncio.wait_for(manager.wait_finished(), timeout=20)

            head = chaindb_fresh.get_canonical_head()
            assert head == chaindb_1000.get_canonical_head()
async def test_proxy_peer_requests(request, event_bus, other_event_bus, event_loop, chaindb_20,
                                   client_and_server):
    """Exercise every eth_api request kind end-to-end through a proxy peer.

    The server side runs a real ETHRequestServer over chaindb_20, so each
    request is answered with genesis-block data that we can assert against.
    """
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(
                client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(
            run_peer_pool_event_server(
                server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(
            background_asyncio_service(
                ETHRequestServer(
                    server_event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_20.db))))

        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        # Header 0 is the genesis header; all later requests are keyed off it.
        headers = await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)
        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        receipts = await proxy_peer.eth_api.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        block_bundles = await proxy_peer.eth_api.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        node_data = await proxy_peer.eth_api.get_node_data((block_header.state_root, ))
        assert node_data[0][0] == block_header.state_root
async def test_regular_syncer(request, event_loop, event_bus, chaindb_fresh, chaindb_20):
    """Regular-sync 20 blocks from a directly linked serving peer (legacy peer setup)."""
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client = RegularChainSyncer(
        ByzantiumTestChain(chaindb_fresh.db),
        chaindb_fresh,
        MockPeerPoolWithConnectedPeers([client_peer]))
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

    async with run_peer_pool_event_server(
        event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_request_server(
        event_bus, FakeAsyncChainDB(chaindb_20.db)
    ):
        server_peer.logger.info("%s is serving 20 blocks", server_peer)
        client_peer.logger.info("%s is syncing up 20", client_peer)

        def finalizer():
            # Tear the syncer down after the test; run_until_complete because the
            # finalizer itself executes outside the async context.
            event_loop.run_until_complete(client.cancel())
            # Yield control so that client/server.run() returns, otherwise asyncio will complain.
            event_loop.run_until_complete(asyncio.sleep(0.1))
        request.addfinalizer(finalizer)

        # The syncer runs in the background; the test only waits for the head to arrive.
        asyncio.ensure_future(client.run())

        await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
        head = chaindb_fresh.get_canonical_head()
        assert head.state_root in chaindb_fresh.db
async def test_light_syncer(request, event_loop, event_bus, chaindb_fresh, chaindb_20):
    """A light (LES) client syncs 20 headers from a serving peer."""
    sync_side_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    serve_side_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    pair_factory = LESV2PeerPairFactory(
        alice_peer_context=sync_side_context,
        bob_peer_context=serve_side_context,
        event_bus=event_bus,
    )
    async with pair_factory as (client_peer, server_peer):
        syncing_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        client = LightChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            syncing_pool,
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        request_server = LightRequestServer(
            event_bus,
            TO_NETWORKING_BROADCAST_CONFIG,
            AsyncChainDB(chaindb_20.db),
        )

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=LESPeerPoolEventServer
        ), background_asyncio_service(request_server):
            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            async with background_asyncio_service(client):
                await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
async def test_regular_syncer_fallback(request, event_loop, chaindb_fresh, chaindb_20):
    """
    Test the scenario where a header comes in that's not in memory (but is in the DB)
    """
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client = FallbackTesting_RegularChainSyncer(
        ByzantiumTestChain(chaindb_fresh.db),
        chaindb_fresh,
        MockPeerPoolWithConnectedPeers([client_peer]))
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])
    # Legacy request-server API: constructed directly from a chaindb and a peer pool
    # and run as a plain coroutine rather than via an event bus.
    server_request_handler = ETHRequestServer(FakeAsyncChainDB(chaindb_20.db), server_peer_pool)
    asyncio.ensure_future(server_request_handler.run())
    server_peer.logger.info("%s is serving 20 blocks", server_peer)
    client_peer.logger.info("%s is syncing up 20", client_peer)

    def finalizer():
        event_loop.run_until_complete(client.cancel())
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))
    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
    head = chaindb_fresh.get_canonical_head()
    assert head.state_root in chaindb_fresh.db
async def test_light_syncer(request, event_loop, chaindb_fresh, chaindb_20):
    """Light-sync 20 headers from a directly linked LES peer (legacy peer setup)."""
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_peer_class=LESPeer,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))
    client = LightChainSyncer(
        ByzantiumTestChain(chaindb_fresh.db),
        chaindb_fresh,
        MockPeerPoolWithConnectedPeers([client_peer]))
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])
    # Legacy request-server API: headerdb + peer pool, run as a plain coroutine.
    server_request_handler = LightRequestServer(
        FakeAsyncHeaderDB(chaindb_20.db),
        server_peer_pool)
    asyncio.ensure_future(server_request_handler.run())
    server_peer.logger.info("%s is serving 20 blocks", server_peer)
    client_peer.logger.info("%s is syncing up 20", client_peer)

    def finalizer():
        event_loop.run_until_complete(client.cancel())
        # Yield control so that client/server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))
    request.addfinalizer(finalizer)

    asyncio.ensure_future(client.run())

    await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
async def test_skeleton_syncer(request, event_loop, chaindb_fresh, chaindb_1000):
    """Fast-sync 1000 blocks from a directly linked peer, then download the head state."""
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_1000.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    client = FastChainSyncer(ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])
    server_request_handler = ETHRequestServer(
        FakeAsyncChainDB(chaindb_1000.db), server_peer_pool)
    asyncio.ensure_future(server_request_handler.run())
    # Fixed swapped log roles: the server serves the blocks, the client syncs.
    server_peer.logger.info("%s is serving 1000 blocks", server_peer)
    client_peer.logger.info("%s is syncing up 1000 blocks", client_peer)

    # FastChainSyncer.run() returns once it has caught up with the peer.
    await asyncio.wait_for(client.run(), timeout=20)

    head = chaindb_fresh.get_canonical_head()
    assert head == chaindb_1000.get_canonical_head()

    # Now download the state for the chain's head.
    state_downloader = StateDownloader(
        chaindb_fresh, chaindb_fresh.db, head.state_root, client_peer_pool)
    await asyncio.wait_for(state_downloader.run(), timeout=20)
    assert head.state_root in chaindb_fresh.db
async def test_proxy_peer_requests_with_timeouts(request, event_bus, other_event_bus, event_loop,
                                                 client_and_server):
    """All proxy-peer requests raise asyncio.TimeoutError when nothing answers them."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(
                client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(
            run_peer_pool_event_server(
                server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer))

        # We just want an ETHRequestServer that doesn't answer us but we still have to run
        # *something* to at least subscribe to the events. Otherwise Lahja's safety check will yell
        # at us for sending requests into the void.
        for event_type in ETHRequestServer(None, None, None)._subscribed_events:
            server_event_bus.subscribe(event_type, lambda _: None)

        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(client_proxy_peer_pool))

        server_proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(server_proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        # Tiny timeouts: with no server answering, every request kind must time out.
        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_block_headers(0, 1, 0, False, timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_receipts((), timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_block_bodies((), timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.eth_api.get_node_data((), timeout=0.01)
async def test_requests_when_peer_in_client_vanishs(request, event_bus, other_event_bus,
                                                    event_loop, chaindb_20, client_and_server):
    """Requests against a peer that vanished from the client pool raise PeerConnectionLost."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(
                client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(
            run_peer_pool_event_server(
                server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer))
        # Consistency fix: every other ETHRequestServer in this module is constructed as
        # (event_bus, broadcast_config, chaindb); dropped the stray
        # MainnetChain.vm_configuration argument that did not match that signature.
        await stack.enter_async_context(
            background_asyncio_service(
                ETHRequestServer(
                    server_event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_20.db))))

        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(client_proxy_peer_pool))

        server_proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(server_proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        # We remove the peer from the client and assume to see PeerConnectionLost exceptions raised
        client_peer_pool.connected_nodes.pop(client_peer.session)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_receipts(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_block_bodies(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.eth_api.get_node_data(())
async def two_connected_tx_pools(event_bus, other_event_bus, event_loop,
                                 funded_address_private_key, chain_with_block_validation,
                                 tx_validator, client_and_server):
    """Yield two fully wired tx pools — (alice, her bus, her pool) and the same for bob."""
    alice_event_bus = event_bus
    bob_event_bus = other_event_bus
    bob, alice = client_and_server

    bob_peer_pool = MockPeerPoolWithConnectedPeers([bob], event_bus=bob_event_bus)
    alice_peer_pool = MockPeerPoolWithConnectedPeers([alice], event_bus=alice_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        # Bob's event server first, then Alice's — same order as the services below rely on.
        for bus, pool in ((bob_event_bus, bob_peer_pool), (alice_event_bus, alice_peer_pool)):
            await stack.enter_async_context(
                run_peer_pool_event_server(bus, pool, handler_type=ETHPeerPoolEventServer))

        bob_proxy_peer_pool = ETHProxyPeerPool(bob_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(bob_proxy_peer_pool))

        alice_proxy_peer_pool = ETHProxyPeerPool(alice_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(alice_proxy_peer_pool))

        alice_tx_pool = TxPool(alice_event_bus, alice_proxy_peer_pool, tx_validator)
        await stack.enter_async_context(background_asyncio_service(alice_tx_pool))

        bob_tx_pool = TxPool(bob_event_bus, bob_proxy_peer_pool, tx_validator)
        await stack.enter_async_context(background_asyncio_service(bob_tx_pool))

        yield (
            (alice, alice_event_bus, alice_tx_pool),
            (bob, bob_event_bus, bob_tx_pool),
        )
async def test_header_gap_fill_detects_invalid_attempt(caplog, event_loop, event_bus,
                                                       chaindb_with_gaps, chaindb_1000,
                                                       chaindb_uncle):
    """Gap-filling rejects headers from an attacker chain, then succeeds once it serves
    the canonical chain.

    The serving peer initially answers from an uncle chain; the syncer must log an
    invalid-header warning instead of filling the gap. After swapping the server's db
    to the canonical 1000-block chain, the gap fill completes.
    """
    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_uncle.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client = SequentialHeaderChainGapSyncer(
            LatestTestChain(chaindb_with_gaps.db),
            chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        uncle_chaindb = AsyncChainDB(chaindb_uncle.db)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, uncle_chaindb,
        )):
            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # We check for 499 because 500 exists from the very beginning (the checkpoint)
            expected_block_number = 499
            async with background_asyncio_service(client):
                try:
                    await wait_for_head(
                        chaindb_with_gaps,
                        chaindb_1000.get_canonical_block_header_by_number(expected_block_number),
                        sync_timeout=5,
                    )
                except asyncio.TimeoutError:
                    # Timing out is the expected outcome while the attacker chain is served.
                    assert "Attempted to fill gap with invalid header" in caplog.text
                    # Monkey patch the uncle chaindb to effectively make the attacker peer
                    # switch to the correct chain.
                    uncle_chaindb.db = chaindb_1000.db
                    await wait_for_head(
                        chaindb_with_gaps,
                        chaindb_1000.get_canonical_block_header_by_number(expected_block_number)
                    )
                else:
                    raise AssertionError("Succeeded when it was expected to fail")
async def test_proxy_peer_requests(request, event_bus, other_event_bus, event_loop,
                                   chaindb_fresh, chaindb_20):
    """Legacy variant of the proxy-peer request test using directly linked peers
    and the older `proxy_peer.requests` API keyed on `peer.remote`."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    # NOTE(review): a ChainDB is passed where the parameter name says headerdb
    # (other tests pass FakeAsyncHeaderDB here) — confirm this is intentional.
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
    )
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with run_peer_pool_event_server(
        client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_peer_pool_event_server(
        server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_request_server(
        server_event_bus, FakeAsyncChainDB(chaindb_20.db)
    ), run_proxy_peer_pool(
        client_event_bus
    ) as client_proxy_peer_pool, run_proxy_peer_pool(
        server_event_bus
    ):
        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)

        # Header 0 is the genesis header; the later requests are keyed off it.
        headers = await proxy_peer.requests.get_block_headers(0, 1, 0, False)
        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        receipts = await proxy_peer.requests.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        block_bundles = await proxy_peer.requests.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        node_data = await proxy_peer.requests.get_node_data((block_header.state_root, ))
        assert node_data[0][0] == block_header.state_root
async def test_get_pooled_transactions_request(request, event_bus, other_event_bus, event_loop,
                                               chaindb_20, client_and_server):
    """GetPooledTransactions is answered by the tx pool (empty response for unknown hashes)."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    # GetPooledTransactions only exists from eth/65 onwards.
    if get_highest_eth_protocol_version(client_peer) < ETHProtocolV65.version:
        pytest.skip("Test not applicable below eth/65")

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(
                client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer))
        await stack.enter_async_context(
            run_peer_pool_event_server(
                server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer))

        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        # The reason we run this test separately from the other request tests is because
        # GetPooledTransactions requests should be answered from the tx pool which the previous
        # test does not depend on.
        await stack.enter_async_context(
            background_asyncio_service(
                TxPool(server_event_bus, proxy_peer_pool, lambda _: True)))

        # The tx pool always answers these with an empty response
        txs = await proxy_peer.eth_api.get_pooled_transactions(
            (decode_hex('0x9ea39df6210064648ecbc465cd628fe52f69af53792e1c2f27840133435159d4'), ))
        assert len(txs) == 0
async def test_proxy_peer_requests_with_timeouts(request, event_bus, other_event_bus, event_loop,
                                                 client_and_server):
    """With no request server running, every eth_api request hits its (tiny) timeout."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with AsyncExitStack() as stack:
        for bus, pool in (
            (client_event_bus, client_peer_pool),
            (server_event_bus, server_peer_pool),
        ):
            await stack.enter_async_context(
                run_peer_pool_event_server(bus, pool, handler_type=ETHPeerPoolEventServer))

        client_proxy_peer_pool = ETHProxyPeerPool(client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(run_service(client_proxy_peer_pool))

        server_proxy_peer_pool = ETHProxyPeerPool(server_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(run_service(server_proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)
        eth_api = proxy_peer.eth_api

        with pytest.raises(asyncio.TimeoutError):
            await eth_api.get_block_headers(0, 1, 0, False, timeout=0.01)
        with pytest.raises(asyncio.TimeoutError):
            await eth_api.get_receipts((), timeout=0.01)
        with pytest.raises(asyncio.TimeoutError):
            await eth_api.get_block_bodies((), timeout=0.01)
        with pytest.raises(asyncio.TimeoutError):
            await eth_api.get_node_data((), timeout=0.01)
async def test_requests_when_peer_in_client_vanishs(request, event_bus, other_event_bus,
                                                    event_loop, chaindb_fresh, chaindb_20):
    """Legacy variant: requests against a vanished peer raise PeerConnectionLost
    (uses `proxy_peer.requests` and `peer.remote` keys)."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    # NOTE(review): a ChainDB is passed where the parameter name says headerdb
    # (other tests pass FakeAsyncHeaderDB here) — confirm this is intentional.
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
    )
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with run_peer_pool_event_server(
        client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_peer_pool_event_server(
        server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_request_server(
        server_event_bus, FakeAsyncChainDB(chaindb_20.db)
    ), run_proxy_peer_pool(
        client_event_bus
    ) as client_proxy_peer_pool, run_proxy_peer_pool(
        server_event_bus
    ):
        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)

        # We remove the peer from the client and assume to see PeerConnectionLost exceptions raised
        client_peer_pool.connected_nodes.pop(client_peer.remote)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_block_headers(0, 1, 0, False)

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_receipts(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_block_bodies(())

        with pytest.raises(PeerConnectionLost):
            await proxy_peer.requests.get_node_data(())
async def test_peer_pool_iter(event_loop):
    """Async iteration over the pool yields connected peers and skips disconnecting ones."""
    factory_a = ETHPeerPairFactory()
    factory_b = ETHPeerPairFactory()
    factory_c = ETHPeerPairFactory()
    async with factory_a as (peer1, _), factory_b as (peer2, _), factory_c as (peer3, _):
        pool = MockPeerPoolWithConnectedPeers([peer1, peer2, peer3])

        # All three peers are connected, so all three are yielded.
        peers = [peer async for peer in pool]
        assert len(peers) == 3
        for connected in (peer1, peer2, peer3):
            assert connected in peers

        # Schedule a disconnect for peer2; iterating again must no longer yield it.
        peers = []
        asyncio.ensure_future(peer2.disconnect(DisconnectReason.DISCONNECT_REQUESTED))
        async for peer in pool:
            peers.append(peer)

        assert len(peers) == 2
        assert peer1 in peers
        assert peer2 not in peers
        assert peer3 in peers
async def test_tx_sending(request, event_loop, chain_with_block_validation, tx_validator):
    """Transactions sent by one peer reach the other peer's subscriber via the tx pool."""
    # This test covers the communication end to end whereas the previous
    # focusses on the rules of the transaction pool on when to send tx to whom
    peer1, peer2 = await get_directly_linked_peers(
        request,
        event_loop,
    )

    peer2_subscriber = SamplePeerSubscriber()
    peer2.add_subscriber(peer2_subscriber)

    pool = TxPool(MockPeerPoolWithConnectedPeers([peer1, peer2]), tx_validator)
    asyncio.ensure_future(pool.run())

    def finalizer():
        # Tear the pool down after the test body completes.
        event_loop.run_until_complete(pool.cancel())
    request.addfinalizer(finalizer)

    txs = [create_random_tx(chain_with_block_validation)]
    peer1.sub_proto.send_transactions(txs)

    # Ensure that peer2 gets the transactions
    peer, cmd, msg = await asyncio.wait_for(
        peer2_subscriber.msg_queue.get(),
        timeout=0.1,
    )
    assert peer == peer2
    assert isinstance(cmd, Transactions)
    assert msg[0].hash == txs[0].hash
async def test_proxy_peer_requests_with_timeouts(request, event_bus, other_event_bus, event_loop,
                                                 chaindb_fresh, chaindb_20):
    """Legacy variant: with no request server, every `requests` call must time out."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncChainDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncChainDB(chaindb_20.db),
    )
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with run_peer_pool_event_server(
        client_event_bus, client_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_peer_pool_event_server(
        server_event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
    ), run_proxy_peer_pool(
        client_event_bus
    ) as client_proxy_peer_pool, run_proxy_peer_pool(
        server_event_bus
    ):
        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.remote)

        # NOTE(review): this variant expects the builtin TimeoutError while the other
        # variants expect asyncio.TimeoutError — these are distinct classes before
        # Python 3.11; confirm the `requests` API really raises the builtin here.
        with pytest.raises(TimeoutError):
            await proxy_peer.requests.get_block_headers(0, 1, 0, False, timeout=0.01)

        with pytest.raises(TimeoutError):
            await proxy_peer.requests.get_receipts((), timeout=0.01)

        with pytest.raises(TimeoutError):
            await proxy_peer.requests.get_block_bodies((), timeout=0.01)

        with pytest.raises(TimeoutError):
            await proxy_peer.requests.get_node_data((), timeout=0.01)
async def test_regular_syncer_fallback(request, event_loop, event_bus, chaindb_fresh, chaindb_20):
    """
    Test the scenario where a header comes in that's not in memory (but is in the DB)
    """
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client = FallbackTesting_RegularChainSyncer(
            ByzantiumTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
            ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                             AsyncChainDB(chaindb_20.db))):
            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            def finalizer():
                # Cancel the syncer after the test body completes.
                event_loop.run_until_complete(client.cancel())
                # Yield control so that client/server.run() returns, otherwise
                # asyncio will complain.
                event_loop.run_until_complete(asyncio.sleep(0.1))
            request.addfinalizer(finalizer)

            # The syncer runs in the background while the test waits for the head.
            asyncio.ensure_future(client.run())

            await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
            head = chaindb_fresh.get_canonical_head()
            assert head.state_root in chaindb_fresh.db
async def test_queening_queue_recovers_from_penalty_with_one_peer(
        event_bus, chaindb_fresh, chaindb_20, has_parallel_peasant_call):
    """A penalized sole queen peer becomes available again after its penalty delay.

    With only one connected peer, penalizing the queen must block get_queen_peer()
    for the penalty duration and then return the same peer — whether or not a
    pop_fastest_peasant() call is waiting concurrently.
    """
    local_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    remote_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=local_context,
        bob_peer_context=remote_context,
        event_bus=event_bus,
    )
    async with peer_pair as (connection_to_local, connection_to_remote):
        local_peer_pool = MockPeerPoolWithConnectedPeers(
            [connection_to_remote],
            event_bus=event_bus,
        )

        async with run_peer_pool_event_server(
            event_bus, local_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
            ETHRequestServer(
                event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_20.db),
            )):
            queue = QueeningQueue(local_peer_pool)

            async with background_asyncio_service(queue):
                queen = await asyncio.wait_for(queue.get_queen_peer(), timeout=0.01)
                assert queen == connection_to_remote

                queue.penalize_queen(connection_to_remote, delay=0.1)
                assert queue.queen is None
                with pytest.raises(asyncio.TimeoutError):
                    # The queen should be penalized for this entire period, and
                    # there are no alternative peers, so this call should hang:
                    await asyncio.wait_for(queue.get_queen_peer(), timeout=0.05)

                if has_parallel_peasant_call:
                    waiting_on_peasant = asyncio.ensure_future(queue.pop_fastest_peasant())

                # But after waiting long enough, even with just one peer, the blocking
                # call should return. Whether or not there is also a waiting call looking for
                # a peasant.
                final_queen = await asyncio.wait_for(queue.get_queen_peer(), timeout=0.075)
                assert final_queen == connection_to_remote

                if has_parallel_peasant_call:
                    waiting_on_peasant.cancel()
                    with pytest.raises(asyncio.CancelledError):
                        await waiting_on_peasant
async def test_fast_syncer(request, event_bus, event_loop, chaindb_fresh, chaindb_20):
    """Fast-sync a fresh chain up to a 20-block server, then fetch the head state."""
    pair_factory = ETHPeerPairFactory(
        alice_peer_context=ChainContextFactory(headerdb__db=chaindb_fresh.db),
        bob_peer_context=ChainContextFactory(headerdb__db=chaindb_20.db),
        event_bus=event_bus,
    )
    async with pair_factory as (client_peer, server_peer):
        syncing_pool = MockPeerPoolWithConnectedPeers([client_peer])
        syncer = FastChainSyncer(LatestTestChain(chaindb_fresh.db), chaindb_fresh, syncing_pool)
        serving_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        event_server = run_peer_pool_event_server(
            event_bus, serving_pool, handler_type=ETHPeerPoolEventServer,
        )
        request_server = run_request_server(event_bus, AsyncChainDB(chaindb_20.db))
        async with event_server, request_server:
            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            # FastChainSyncer.run() returns as soon as it has caught up with the peer.
            await asyncio.wait_for(syncer.run(), timeout=5)
            head = chaindb_fresh.get_canonical_head()
            assert head == chaindb_20.get_canonical_head()

            # With headers synced, download the state trie under the new head.
            state_downloader = StateDownloader(
                chaindb_fresh, chaindb_fresh.db, head.state_root, syncing_pool)
            await asyncio.wait_for(state_downloader.run(), timeout=5)
            assert head.state_root in chaindb_fresh.db
async def test_proxy_peer_requests_with_timeouts(request,
                                                 event_bus,
                                                 other_event_bus,
                                                 event_loop,
                                                 client_and_server):
    """Every proxy-peer request API raises asyncio.TimeoutError under a tiny timeout."""
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=client_event_bus)
    server_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=server_event_bus)

    async with run_peer_pool_event_server(
        client_event_bus, client_pool, handler_type=ETHPeerPoolEventServer
    ), run_peer_pool_event_server(
        server_event_bus, server_pool, handler_type=ETHPeerPoolEventServer
    ), run_proxy_peer_pool(
        client_event_bus
    ) as client_proxy_peer_pool, run_proxy_peer_pool(server_event_bus):
        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(client_peer.session)

        # No answer can arrive within 0.01s, so each request type must time out.
        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.requests.get_block_headers(0, 1, 0, False, timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.requests.get_receipts((), timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.requests.get_block_bodies((), timeout=0.01)

        with pytest.raises(asyncio.TimeoutError):
            await proxy_peer.requests.get_node_data((), timeout=0.01)
async def test_skeleton_syncer(request, event_loop, chaindb_fresh, chaindb_1000):
    """
    Fast-sync a fresh chain 1000 blocks from a directly-linked serving peer,
    then download the state under the new head.
    """
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_1000.db))
    client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
    client = FastChainSyncer(ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh, client_peer_pool)
    server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer])
    server = RegularChainSyncer(
        ByzantiumTestChain(chaindb_1000.db),
        chaindb_1000,
        server_peer_pool,
    )
    asyncio.ensure_future(server.run())
    server_request_handler = ETHRequestServer(FakeAsyncChainDB(chaindb_1000.db), server_peer_pool)
    asyncio.ensure_future(server_request_handler.run())
    client_peer.logger.info("%s is serving 1000 blocks", client_peer)
    server_peer.logger.info("%s is syncing up 1000 blocks", server_peer)

    def finalizer():
        # Bug fix: cancel BOTH background tasks.  Previously only ``server``
        # was cancelled, leaving the ETHRequestServer task pending at event
        # loop teardown ("Task was destroyed but it is pending!").
        event_loop.run_until_complete(server.cancel())
        event_loop.run_until_complete(server_request_handler.cancel())
        # Yield control so that server.run() returns, otherwise asyncio will complain.
        event_loop.run_until_complete(asyncio.sleep(0.1))
    request.addfinalizer(finalizer)

    # FastChainSyncer.run() will return as soon as it's caught up with the peer.
    await asyncio.wait_for(client.run(), timeout=20)

    head = chaindb_fresh.get_canonical_head()
    assert head == chaindb_1000.get_canonical_head()

    # Now download the state for the chain's head.
    state_downloader = StateDownloader(
        chaindb_fresh, chaindb_fresh.db, head.state_root, client_peer_pool)
    await asyncio.wait_for(state_downloader.run(), timeout=20)
    assert head.state_root in chaindb_fresh.db
async def test_sequential_header_gapfill_syncer(request,
                                                event_loop,
                                                event_bus,
                                                chaindb_with_gaps,
                                                chaindb_1000):
    """The gap-fill syncer backfills missing headers only — never block bodies."""
    pair_factory = LatestETHPeerPairFactory(
        alice_peer_context=ChainContextFactory(headerdb__db=chaindb_with_gaps.db),
        bob_peer_context=ChainContextFactory(headerdb__db=chaindb_1000.db),
        event_bus=event_bus,
    )
    async with pair_factory as (client_peer, server_peer):
        chain_with_gaps = LatestTestChain(chaindb_with_gaps.db)
        syncer = SequentialHeaderChainGapSyncer(
            chain_with_gaps,
            chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus),
        )
        serving_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        async with run_peer_pool_event_server(
            event_bus, serving_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_1000.db),
        )):
            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(syncer):
                # We check for 499 because 500 is there from the very beginning (the checkpoint)
                target_header = chaindb_1000.get_canonical_block_header_by_number(499)
                await wait_for_head(chaindb_with_gaps, target_header)

                # This test is supposed to only fill in headers, so the following should fail.
                # If this ever succeeds it probably means the fixture was re-created with trivial
                # blocks and the test will fail and remind us what kind of fixture we want here.
                with pytest.raises(BlockNotFound):
                    chain_with_gaps.get_canonical_block_by_number(499)
async def test_light_syncer(request, event_loop, event_bus, chaindb_fresh, chaindb_20):
    """Light-sync a fresh chain up to the head of a 20-block LES server."""
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LESV2PeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):
        client = LightChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=LESPeerPoolEventServer
        ), background_asyncio_service(
            LightRequestServer(
                event_bus,
                TO_NETWORKING_BROADCAST_CONFIG,
                AsyncChainDB(chaindb_20.db),
            )):
            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            # Run the syncer as a managed background service instead of the
            # legacy asyncio.ensure_future() + pytest finalizer pattern: the
            # request server above already uses background_asyncio_service, and
            # the context manager deterministically cancels and awaits the
            # syncer task on exit (no pending-task warnings at loop teardown).
            async with background_asyncio_service(client):
                await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
async def test_admin_removePeer_for_not_connected_peers(
        jsonrpc_ipc_pipe_path, event_loop, event_bus, ipc_server):
    """``admin_removePeer`` answers ``False`` for an enode that is not connected."""
    async with LatestETHPeerPairFactory() as (alice, bob):
        pool = MockPeerPoolWithConnectedPeers([alice, bob], event_bus=event_bus)
        async with run_peer_pool_event_server(event_bus, pool):
            # This enode is deliberately absent from the pool above.
            unknown_enode = f'enode://{GOOD_KEY}@10.0.0.1:30303'
            request = build_request('admin_removePeer', [unknown_enode])
            result = await get_ipc_response(
                jsonrpc_ipc_pipe_path, request, event_loop, event_bus)
            assert result == {'id': 3, 'jsonrpc': '2.0', 'result': False}
async def test_fast_syncer(request, event_bus, event_loop, chaindb_fresh, chaindb_20):
    """Fast-sync 20 blocks from a directly-linked peer, then download the head state."""
    client_peer, server_peer = await get_directly_linked_peers(
        request, event_loop,
        alice_headerdb=FakeAsyncHeaderDB(chaindb_fresh.db),
        bob_headerdb=FakeAsyncHeaderDB(chaindb_20.db))

    syncing_pool = MockPeerPoolWithConnectedPeers([client_peer])
    syncer = FastChainSyncer(ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh, syncing_pool)
    serving_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

    async with run_peer_pool_event_server(
        event_bus, serving_pool, handler_type=ETHPeerPoolEventServer,
    ), run_request_server(
        event_bus, FakeAsyncChainDB(chaindb_20.db),
    ):
        server_peer.logger.info("%s is serving 20 blocks", server_peer)
        client_peer.logger.info("%s is syncing up 20", client_peer)

        # FastChainSyncer.run() returns as soon as it has caught up with the peer.
        await asyncio.wait_for(syncer.run(), timeout=2)

        head = chaindb_fresh.get_canonical_head()
        assert head == chaindb_20.get_canonical_head()

        # With headers synced, pull down the state trie under the new head.
        state_downloader = StateDownloader(
            chaindb_fresh, chaindb_fresh.db, head.state_root, syncing_pool)
        await asyncio.wait_for(state_downloader.run(), timeout=2)
        assert head.state_root in chaindb_fresh.db