Example #1
async def client_and_server(chaindb_fresh, chaindb_20):
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=ChainContextFactory(headerdb__db=chaindb_fresh.db),
        bob_peer_context=ChainContextFactory(headerdb__db=chaindb_20.db),
    )
    async with peer_pair as (client_peer, server_peer):
        yield client_peer, server_peer
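The fixture above yields a connected (client, server) ETH peer pair backed by two in-memory header databases. The listing omits the fixture's registration decorator; in a pytest-asyncio setup it would typically carry @pytest.fixture, and a test would consume it roughly as in this sketch (the decorators and the test body are illustrative assumptions, not part of the original source):

import pytest

@pytest.mark.asyncio
async def test_peer_pair_is_connected(client_and_server):
    # The fixture yields a (client_peer, server_peer) tuple of connected peers.
    client_peer, server_peer = client_and_server
    # Trivial sanity check that the two ends of the pair are distinct objects.
    assert client_peer != server_peer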
Example #2
async def client_and_server(chaindb_fresh, chaindb_20, request):
    peer_pair = request.param(
        alice_peer_context=ChainContextFactory(headerdb__db=chaindb_fresh.db),
        bob_peer_context=ChainContextFactory(headerdb__db=chaindb_20.db),
    )
    async with peer_pair as (client_peer, server_peer):
        yield client_peer, server_peer
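Example #2 is the same fixture, except that it reads the peer-pair factory class from request.param, so a single fixture definition can be exercised against several factories. One way to drive it is pytest's indirect parametrization, sketched below (the choice of factory classes and the test body are assumptions for illustration; only the indirect-parametrization mechanism itself is standard pytest):

import pytest

@pytest.mark.parametrize(
    'client_and_server',
    (ETHPeerPairFactory, LatestETHPeerPairFactory),  # each value becomes request.param
    indirect=True,
)
@pytest.mark.asyncio
async def test_with_each_factory(client_and_server):
    client_peer, server_peer = client_and_server
    assert client_peer != server_peer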
Example #3
async def test_fast_syncer(request, event_loop, event_bus, chaindb_fresh,
                           chaindb_1000):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer],
                                                          event_bus=event_bus)
        client = FastChainSyncer(LatestTestChain(chaindb_fresh.db),
                                 chaindb_fresh, client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_1000.db))):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000 blocks",
                                    client_peer)

            async with background_asyncio_service(client) as manager:
                await asyncio.wait_for(manager.wait_finished(), timeout=20)

            head = chaindb_fresh.get_canonical_head()
            assert head == chaindb_1000.get_canonical_head()
Example #4
async def client_and_server():
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=ChainContextFactory(),
        bob_peer_context=ChainContextFactory(),
    )
    async with peer_pair as (client_peer, server_peer):
        yield client_peer, server_peer
Example #5
async def test_skeleton_syncer(request, event_loop, event_bus, chaindb_fresh,
                               chaindb_1000):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer],
                                                          event_bus=event_bus)
        client = FastChainSyncer(LatestTestChain(chaindb_fresh.db),
                                 chaindb_fresh, client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer), run_request_server(
                    event_bus, AsyncChainDB(chaindb_1000.db)):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000 blocks",
                                    client_peer)

            await asyncio.wait_for(client.run(), timeout=20)

            head = chaindb_fresh.get_canonical_head()
            assert head == chaindb_1000.get_canonical_head()
Example #6
async def test_header_gapfill_syncer(request,
                                     event_loop,
                                     event_bus,
                                     chaindb_with_gaps,
                                     chaindb_1000):

    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client = HeaderChainGapSyncer(
            LatestTestChain(chaindb_with_gaps.db),
            chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_1000.db),
        )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(client):
                await wait_for_head(
                    # We check for 249 because 250 exists from the very beginning (the checkpoint)
                    chaindb_with_gaps, chaindb_1000.get_canonical_block_header_by_number(249))
Example #7
async def test_light_syncer(request, event_loop, event_bus, chaindb_fresh,
                            chaindb_20):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LESV2PeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client = LightChainSyncer(
            LatestTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=LESPeerPoolEventServer
        ), background_asyncio_service(
                LightRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_20.db),
                )):

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            async with background_asyncio_service(client):
                await wait_for_head(chaindb_fresh,
                                    chaindb_20.get_canonical_head())
Example #8
async def test_regular_syncer_fallback(request, event_loop, event_bus, chaindb_fresh, chaindb_20):
    """
    Test the scenario where a header comes in that's not in memory (but is in the DB)
    """
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )

    async with peer_pair as (client_peer, server_peer):

        client = FallbackTesting_RegularChainSyncer(
            ByzantiumTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_20.db)
        )):

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            async with background_asyncio_service(client):
                await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
                head = chaindb_fresh.get_canonical_head()
                assert head.state_root in chaindb_fresh.db
Example #9
async def alice_and_bob(alice_chain, bob_chain):
    pair_factory = ETHPeerPairFactory(
        alice_client_version='alice',
        alice_peer_context=ChainContextFactory(headerdb=AsyncHeaderDB(alice_chain.headerdb.db)),
        bob_client_version='bob',
        bob_peer_context=ChainContextFactory(headerdb=AsyncHeaderDB(bob_chain.headerdb.db)),
    )
    async with pair_factory as (alice, bob):
        yield alice, bob
Example #10
async def test_queening_queue_recovers_from_penalty_with_one_peer(
        event_bus, chaindb_fresh, chaindb_20, has_parallel_peasant_call):

    local_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    remote_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=local_context,
        bob_peer_context=remote_context,
        event_bus=event_bus,
    )
    async with peer_pair as (connection_to_local, connection_to_remote):

        local_peer_pool = MockPeerPoolWithConnectedPeers(
            [connection_to_remote],
            event_bus=event_bus,
        )

        async with run_peer_pool_event_server(
                event_bus, local_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_20.db),
                )):
            queue = QueeningQueue(local_peer_pool)

            async with background_asyncio_service(queue):
                queen = await asyncio.wait_for(queue.get_queen_peer(),
                                               timeout=0.01)
                assert queen == connection_to_remote

                queue.penalize_queen(connection_to_remote, delay=0.1)
                assert queue.queen is None
                with pytest.raises(asyncio.TimeoutError):
                    # The queen should be penalized for this entire period, and
                    #   there are no alternative peers, so this call should hang:
                    await asyncio.wait_for(queue.get_queen_peer(),
                                           timeout=0.05)

                if has_parallel_peasant_call:
                    waiting_on_peasant = asyncio.ensure_future(
                        queue.pop_fastest_peasant())

                # But after waiting long enough, even with just one peer, the blocking
                #   call should return. Whether or not there is also a waiting call looking for
                #   a peasant.
                final_queen = await asyncio.wait_for(queue.get_queen_peer(),
                                                     timeout=0.075)
                assert final_queen == connection_to_remote

                if has_parallel_peasant_call:
                    waiting_on_peasant.cancel()
                    with pytest.raises(asyncio.CancelledError):
                        await waiting_on_peasant
Example #11
async def alice_and_bob(common_base_chain, request):
    pair_factory = request.param(
        alice_client_version='alice',
        alice_peer_context=ChainContextFactory(
            headerdb=AsyncHeaderDB(common_base_chain.headerdb.db)),
        bob_client_version='bob',
        bob_peer_context=ChainContextFactory(
            headerdb=AsyncHeaderDB(common_base_chain.headerdb.db)),
    )
    async with pair_factory as (alice, bob):
        yield alice, bob
Example #12
async def test_header_gap_fill_detects_invalid_attempt(caplog,
                                                       event_loop,
                                                       event_bus,
                                                       chaindb_with_gaps,
                                                       chaindb_1000,
                                                       chaindb_uncle):

    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_uncle.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client = SequentialHeaderChainGapSyncer(
            LatestTestChain(chaindb_with_gaps.db),
            chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        uncle_chaindb = AsyncChainDB(chaindb_uncle.db)
        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, uncle_chaindb,
        )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # We check for 499 because 500 exists from the very beginning (the checkpoint)
            expected_block_number = 499
            async with background_asyncio_service(client):
                try:
                    await wait_for_head(
                        chaindb_with_gaps,
                        chaindb_1000.get_canonical_block_header_by_number(expected_block_number),
                        sync_timeout=5,
                    )
                except asyncio.TimeoutError:
                    assert "Attempted to fill gap with invalid header" in caplog.text
                    # Monkey patch the uncle chaindb to effectively make the attacker peer
                    # switch to the correct chain.
                    uncle_chaindb.db = chaindb_1000.db
                    await wait_for_head(
                        chaindb_with_gaps,
                        chaindb_1000.get_canonical_block_header_by_number(expected_block_number)
                    )
                else:
                    raise AssertionError("Succeeded when it was expected to fail")
Example #13
async def test_regular_syncer_fallback(request, event_loop, event_bus,
                                       chaindb_fresh, chaindb_20):
    """
    Test the scenario where a header comes in that's not in memory (but is in the DB)
    """
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )

    async with peer_pair as (client_peer, server_peer):

        client = FallbackTesting_RegularChainSyncer(
            ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_20.db))):

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            def finalizer():
                event_loop.run_until_complete(client.cancel())
                # Yield control so that client/server.run() returns, otherwise
                # asyncio will complain.
                event_loop.run_until_complete(asyncio.sleep(0.1))

            request.addfinalizer(finalizer)

            asyncio.ensure_future(client.run())

            await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
            head = chaindb_fresh.get_canonical_head()
            assert head.state_root in chaindb_fresh.db
Example #14
async def test_handshake_with_incompatible_fork_id(alice_chain, bob_chain):

    alice_chain = build(alice_chain, mine_block())

    pair_factory = ETHPeerPairFactory(
        alice_peer_context=ChainContextFactory(
            headerdb=AsyncHeaderDB(alice_chain.headerdb.db),
            vm_configuration=((1, PetersburgVM), (2, MuirGlacierVM)),
        ),
    )
    with pytest.raises(WrongForkIDFailure):
        async with pair_factory as (alice, bob):
            pass
Example #15
async def test_fast_syncer(request, event_bus, event_loop, chaindb_fresh,
                           chaindb_20):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
        client = FastChainSyncer(LatestTestChain(chaindb_fresh.db),
                                 chaindb_fresh, client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus,
                server_peer_pool,
                handler_type=ETHPeerPoolEventServer,
        ), run_request_server(
                event_bus,
                AsyncChainDB(chaindb_20.db),
        ):

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            # FastChainSyncer.run() will return as soon as it's caught up with the peer.
            await asyncio.wait_for(client.run(), timeout=5)

            head = chaindb_fresh.get_canonical_head()
            assert head == chaindb_20.get_canonical_head()

            # Now download the state for the chain's head.
            state_downloader = StateDownloader(chaindb_fresh, chaindb_fresh.db,
                                               head.state_root,
                                               client_peer_pool)
            await asyncio.wait_for(state_downloader.run(), timeout=5)

            assert head.state_root in chaindb_fresh.db
Example #16
async def test_light_syncer(request, event_loop, event_bus, chaindb_fresh,
                            chaindb_20):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LESV2PeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client = LightChainSyncer(
            LatestTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=LESPeerPoolEventServer
        ), background_asyncio_service(
                LightRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_20.db),
                )):

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            def finalizer():
                event_loop.run_until_complete(client.cancel())
                # Yield control so that client/server.run() returns, otherwise
                # asyncio will complain.
                event_loop.run_until_complete(asyncio.sleep(0.1))

            request.addfinalizer(finalizer)

            asyncio.ensure_future(client.run())

            await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
Example #17
async def test_sequential_header_gapfill_syncer(request,
                                                event_loop,
                                                event_bus,
                                                chaindb_with_gaps,
                                                chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        chain_with_gaps = LatestTestChain(chaindb_with_gaps.db)
        client = SequentialHeaderChainGapSyncer(
            chain_with_gaps,
            chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_1000.db),
        )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(client):
                await wait_for_head(
                    # We check for 499 because 500 is there from the very beginning (the checkpoint)
                    chaindb_with_gaps, chaindb_1000.get_canonical_block_header_by_number(499)
                )
                # This test is supposed to only fill in headers, so the following should fail.
                # If this ever succeeds it probably means the fixture was re-created with trivial
                # blocks and the test will fail and remind us what kind of fixture we want here.
                with pytest.raises(BlockNotFound):
                    chain_with_gaps.get_canonical_block_by_number(499)
Example #18
async def test_no_duplicate_node_data(request, event_loop, event_bus,
                                      chaindb_fresh, chaindb_20):
    """
    Test that when a peer calls GetNodeData to ETHRequestServer, with duplicate node hashes,
    that ETHRequestServer only responds with unique nodes.

    Note: A nice extension to the test would be to check that a warning is
        raised about sending the duplicate hashes in the first place.
    """
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )

    async with peer_pair as (client_to_server, server_to_client):

        server_peer_pool = MockPeerPoolWithConnectedPeers([server_to_client],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    MainnetChain.vm_configuration,
                    AsyncChainDB(chaindb_20.db),
                )):
            root_hash = chaindb_20.get_canonical_head().state_root
            state_root = chaindb_20.db[root_hash]

            returned_nodes = await client_to_server.eth_api.get_node_data(
                (root_hash, root_hash))
            assert returned_nodes == (
                # Server must not send back duplicates, just the single root node
                (root_hash, state_root), )
Example #19
async def test_regular_syncer(request, event_loop, event_bus, chaindb_fresh,
                              chaindb_20):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_20.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )

    async with peer_pair as (client_peer, server_peer):

        client = RegularChainSyncer(
            ByzantiumTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer]))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer), run_request_server(
                    event_bus, AsyncChainDB(chaindb_20.db)):

            server_peer.logger.info("%s is serving 20 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 20", client_peer)

            def finalizer():
                event_loop.run_until_complete(client.cancel())
                # Yield control so that client/server.run() returns, otherwise
                # asyncio will complain.
                event_loop.run_until_complete(asyncio.sleep(0.1))

            request.addfinalizer(finalizer)

            asyncio.ensure_future(client.run())

            await wait_for_head(chaindb_fresh, chaindb_20.get_canonical_head())
            head = chaindb_fresh.get_canonical_head()
            assert head.state_root in chaindb_fresh.db
Example #20
async def test_no_wit_api_property_when_witness_not_supported():
    class ETHPeerFactoryWithoutWitness(ETHPeerFactory):
        def _get_wit_handshakers(self):
            return tuple()

    peer_context = ChainContextFactory()
    peer_pair_factory = PeerPairFactory(
        alice_peer_context=peer_context,
        alice_peer_factory_class=ETHPeerFactory,
        bob_peer_context=peer_context,
        bob_peer_factory_class=ETHPeerFactoryWithoutWitness,
    )
    async with peer_pair_factory as (alice, bob):
        assert not hasattr(bob, 'wit_api')
        assert not hasattr(alice, 'wit_api')
Example #21
async def test_block_gapfill_syncer(request, event_loop, event_bus,
                                    chaindb_with_block_gaps, chaindb_1000):
    client_context = ChainContextFactory(
        headerdb__db=chaindb_with_block_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        syncer = BodyChainGapSyncer(
            LatestTestChain(chaindb_with_block_gaps.db),
            chaindb_with_block_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus),
        )
        # In production, this would be the block time but we want our test to pause/resume swiftly
        syncer._idle_time = 0.01
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(syncer):
                chain_with_gaps = LatestTestChain(chaindb_with_block_gaps.db)
                fat_chain = LatestTestChain(chaindb_1000.db)

                # Ensure we can pause/resume immediately and not just after syncing has started
                syncer.pause()
                syncer.resume()

                # Sync the first 100 blocks, then check that pausing/resume works
                await wait_for_block(
                    chain_with_gaps,
                    fat_chain.get_canonical_block_by_number(100))

                # Pause the syncer and take note how far we have synced at this point
                syncer.pause()
                # We need to give async code a moment to settle before we save the progress to
                # ensure it has stabilized before we save it.
                await asyncio.sleep(0.25)
                paused_chain_gaps = chain_with_gaps.chaindb.get_chain_gaps()

                # Consider it victory if after 0.5s no new blocks were written to the database
                await asyncio.sleep(0.5)
                assert paused_chain_gaps == chain_with_gaps.chaindb.get_chain_gaps()

                # Resume syncing
                syncer.resume()

                await wait_for_block(
                    chain_with_gaps,
                    fat_chain.get_canonical_block_by_number(1000),
                    sync_timeout=20)

                for block_num in range(1, 1001):
                    assert (
                        chain_with_gaps.get_canonical_block_by_number(block_num) ==
                        fat_chain.get_canonical_block_by_number(block_num)
                    )

                # We need to give the async calls a moment to settle before we can read the updated
                # chain gaps.
                await asyncio.sleep(0.25)
                assert chain_with_gaps.chaindb.get_chain_gaps() == ((), 1001)
Example #22
async def test_beam_syncer(request,
                           event_loop,
                           event_bus,
                           chaindb_fresh,
                           chaindb_churner,
                           beam_to_block,
                           checkpoint=None):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        # Need a name that will be unique per xdist-process, otherwise
        #   lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(
            f"PausingEndpoint-{unique_process_name}")

        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(
            f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer])
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer), run_request_server(
                    event_bus, AsyncChainDB(chaindb_churner.db)
                ), AsyncioEndpoint.serve(
                    pausing_config) as pausing_endpoint, AsyncioEndpoint.serve(
                        gatherer_config) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, PetersburgVM), ),
                chain_id=999,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
            )

            server_peer.logger.info("%s is serving churner blocks",
                                    server_peer)
            client_peer.logger.info("%s is syncing up churner blocks",
                                    client_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
                token=client.cancel_token,
            )
            asyncio.ensure_future(import_server.run())

            await pausing_endpoint.connect_to_endpoints(gatherer_config)
            asyncio.ensure_future(client.run())

            # We can sync at least 10 blocks in 1s at current speeds, (or reach the current one)
            # Trying to keep the tests short-ish. A fuller test could always set the target header
            #   to the chaindb_churner canonical head, and increase the timeout significantly
            target_block_number = min(beam_to_block + 10, 129)
            target_head = chaindb_churner.get_canonical_block_header_by_number(
                target_block_number)
            await wait_for_head(chaindb_fresh, target_head, sync_timeout=10)
            assert target_head.state_root in chaindb_fresh.db

            # first stop the import server, so it doesn't hang waiting for state data
            await import_server.cancel()
            await client.cancel()
Example #23
async def _beam_syncing(
    request,
    event_loop,
    event_bus,
    chaindb_fresh,
    chaindb_churner,
    beam_to_block,
    checkpoint=None,
    VM_at_0=PetersburgVM,
    enable_state_backfill=False,
):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    backfiller = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer,
                             server_peer), backfiller as (client2_peer,
                                                          backfill_peer):

        # Need a name that will be unique per xdist-process, otherwise
        #   lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(
            f"PausingEndpoint-{unique_process_name}")

        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(
            f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers(
            [client_peer, backfill_peer],
            event_bus=event_bus,
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)
        backfill_peer_pool = MockPeerPoolWithConnectedPeers(
            [client2_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), run_peer_pool_event_server(
                event_bus, backfill_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_churner.db))
        ), AsyncioEndpoint.serve(
                pausing_config) as pausing_endpoint, AsyncioEndpoint.serve(
                    gatherer_config) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, VM_at_0), ),
                chain_id=999,
                consensus_context_class=ConsensusContext,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                metrics_registry=NoopMetricsRegistry(),
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                NoopMetricsRegistry(),
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
                enable_state_backfill=enable_state_backfill,
                enable_backfill=False,
            )

            server_peer.logger.info("%s is serving churner blocks",
                                    server_peer)
            backfill_peer.logger.info("%s is serving backfill state",
                                      backfill_peer)
            client_peer.logger.info("%s is syncing up churner blocks",
                                    client_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
            )
            async with background_asyncio_service(import_server):
                await pausing_endpoint.connect_to_endpoints(gatherer_config)
                async with background_asyncio_service(client):
                    yield client
Example #24
async def test_header_syncer(request, event_loop, event_bus, chaindb_fresh,
                             chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_to_server, server_to_client):

        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_to_server],
                                           event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_to_client],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_to_client.logger.info("%s is serving 1000 blocks",
                                         server_to_client)
            client_to_server.logger.info("%s is syncing up 1000",
                                         client_to_server)

            # Artificially split header sync into two parts, to verify that
            #   cycling to the next sync works properly. Split by erasing the canonical
            #   lookups in a middle chunk. We have to erase a range of them because of
            #   how the skeleton syncer asks for every ~192 headers. The skeleton request
            #   would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1)
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                complete_chain_tip = chaindb_1000.get_canonical_head()

                # Not entirely certain that sending new block hashes is necessary, but...
                #   it shouldn't hurt anything. Trying to fix this flaky test:
                # https://app.circleci.com/pipelines/github/ethereum/trinity/6855/workflows/131f9b03-8c99-4419-8e88-d2ef216e3dbb/jobs/259263/steps  # noqa: E501

                server_to_client.eth_api.send_new_block_hashes(
                    NewBlockHash(complete_chain_tip.hash,
                                 complete_chain_tip.block_number), )

                await wait_for_head(chaindb_fresh, complete_chain_tip)
Example #25
async def test_header_syncer(request,
                             event_loop,
                             event_bus,
                             chaindb_fresh,
                             chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_1000.db),
        )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # Artificially split header sync into two parts, to verify that
            #   cycling to the next sync works properly. Split by erasing the canonical
            #   lookups in a middle chunk. We have to erase a range of them because of
            #   how the skeleton syncer asks for every ~192 headers. The skeleton request
            #   would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1
                )
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                # Do we have to do anything here to have the server notify the client
                #   that it's capable of serving more headers now? ... Apparently not.

                await wait_for_head(chaindb_fresh, chaindb_1000.get_canonical_head())
Example #26
async def test_sequential_header_gapfill_syncer(request, event_loop, event_bus,
                                                chaindb_with_gaps,
                                                chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        chain_with_gaps = LatestTestChain(chaindb_with_gaps.db)
        client = SequentialHeaderChainGapSyncer(
            chain_with_gaps, chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        # Ensure we use small chunks to be able to test pause/resume properly
        client._max_backfill_header_at_once = 100
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(client):
                # We intentionally only sync up to a block *below* the first gap to have a more
                # difficult scenario for pause/resume. We want to make sure we not only can pause
                # at the times where we synced up to an actual gap. Instead we want to be sure
                # we can pause after we synced up to the `_max_backfill_header_at_once` limit which
                # may be shorter than the actual gap in the chain.
                await wait_for_head(
                    chaindb_with_gaps,
                    chaindb_1000.get_canonical_block_header_by_number(100))

                # Pause the syncer for a moment and check if it continued syncing (it should not!)
                client.pause()
                # Verify that we stopped the chain fast enough, before the gap was fully filled
                # This is a potential source of test flakiness
                with pytest.raises(HeaderNotFound):
                    chaindb_with_gaps.get_canonical_block_header_by_number(249)
                await asyncio.sleep(1)
                # Make sure that the gap filling doesn't complete for a while. We could
                # theoretically get false positives if it's not paused but very slow to fill headers
                with pytest.raises(HeaderNotFound):
                    chaindb_with_gaps.get_canonical_block_header_by_number(249)
                # Now resume syncing
                client.resume()

                await wait_for_head(
                    # We check for 499 because 500 is there from the very beginning (the checkpoint)
                    chaindb_with_gaps,
                    chaindb_1000.get_canonical_block_header_by_number(499))
                # This test is supposed to only fill in headers, so the following should fail.
                # If this ever succeeds it probably means the fixture was re-created with trivial
                # blocks and the test will fail and remind us what kind of fixture we want here.
                with pytest.raises(BlockNotFound):
                    chain_with_gaps.get_canonical_block_by_number(499)
Example #27
async def test_header_gap_fill_detects_invalid_attempt(caplog, event_loop,
                                                       event_bus,
                                                       chaindb_with_gaps,
                                                       chaindb_1000,
                                                       chaindb_uncle):

    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_uncle.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer],
                                                          event_bus=event_bus)
        client = SequentialHeaderChainGapSyncer(
            LatestTestChain(chaindb_with_gaps.db), chaindb_with_gaps,
            client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)
        uncle_chaindb = AsyncChainDB(chaindb_uncle.db)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    uncle_chaindb,
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # We check for 499 because 500 exists from the very beginning (the checkpoint)
            expected_block_number = 499
            final_header = chaindb_1000.get_canonical_block_header_by_number(
                expected_block_number)
            async with background_asyncio_service(client):
                try:
                    await wait_for_head(
                        chaindb_with_gaps,
                        final_header,
                        sync_timeout=5,
                    )
                except asyncio.TimeoutError:
                    assert "Attempted to fill gap with invalid header" in caplog.text
                    # Monkey patch the uncle chaindb to effectively make the attacker peer
                    # switch to the correct chain.
                    uncle_chaindb.db = chaindb_1000.db
                    # The hack goes on: Now that our attacker peer turned friendly we may be stuck
                    # waiting for a new skeleton peer forever. This isn't a real life scenario
                    # because: a.) an attacker probably won't turn friendly and b.) new blocks and
                    # peers will constantly yield new skeleton peers.
                    # This ugly hack will tick the chain tip monitor as we simulate a joining peer.
                    for subscriber in client_peer_pool._subscribers:
                        subscriber.register_peer(client_peer)

                    await wait_for_head(
                        chaindb_with_gaps,
                        final_header,
                        sync_timeout=20,
                    )
                else:
                    raise AssertionError(
                        "Succeeded when it was expected to fail")