Example #1
async def _beam_syncing(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block,
        checkpoint=None,
        VM_at_0=PetersburgVM,
):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    backfiller = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer), backfiller as (client2_peer, backfill_peer):

        # Need a name that will be unique per xdist-process, otherwise
        #   lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(f"PausingEndpoint-{unique_process_name}")

        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers(
            [client_peer, backfill_peer],
            event_bus=event_bus,
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)
        backfill_peer_pool = MockPeerPoolWithConnectedPeers([client2_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), run_peer_pool_event_server(
            event_bus, backfill_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_churner.db)
        )), AsyncioEndpoint.serve(
            pausing_config
        ) as pausing_endpoint, AsyncioEndpoint.serve(gatherer_config) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, VM_at_0), ),
                chain_id=999,
                consensus_context_class=ConsensusContext,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
                enable_backfill=False,
            )

            client_peer.logger.info("%s is serving churner blocks", client_peer)
            backfill_peer.logger.info("%s is serving backfill state", backfill_peer)
            server_peer.logger.info("%s is syncing up churner blocks", server_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
            )
            async with background_asyncio_service(import_server):
                await pausing_endpoint.connect_to_endpoints(gatherer_config)
                async with background_asyncio_service(client):
                    yield
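
Example #1 is not itself a test but an async-generator helper: it wires up the client, server, and backfill peer pools, the pausing beam chain, and the BeamSyncer, then yields while everything is running. A hedged usage sketch follows; the wrapper, the test name, and the block numbers are illustrative assumptions rather than code from the project, and wait_for_head is the same helper the later examples use.

from contextlib import asynccontextmanager

# Hypothetical consumer of the _beam_syncing generator above.
beam_syncing = asynccontextmanager(_beam_syncing)

async def test_beam_sync_sketch(request, event_loop, event_bus,
                                chaindb_fresh, chaindb_churner):
    async with beam_syncing(request, event_loop, event_bus,
                            chaindb_fresh, chaindb_churner, beam_to_block=66):
        # Wait until the client has beam-synced a little past the pivot block.
        target_head = chaindb_churner.get_canonical_block_header_by_number(76)
        await wait_for_head(chaindb_fresh, target_head, sync_timeout=10)
        assert target_head.state_root in chaindb_fresh.db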
Example #2
async def test_header_syncer(request,
                             event_loop,
                             event_bus,
                             chaindb_fresh,
                             chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_1000.db),
        )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # Artificially split header sync into two parts, to verify that
            #   cycling to the next sync works properly. Split by erasing the canonical
            #   lookups in a middle chunk. We have to erase a whole range because the
            #   skeleton syncer only requests roughly every 192nd header, so a skeleton
            #   request would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1
                )
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                # Do we have to do anything here to have the server notify the client
                #   that it's capable of serving more headers now? ... Apparently not.

                await wait_for_head(chaindb_fresh, chaindb_1000.get_canonical_head())
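
The assertions above hinge on wait_for_head, a test helper that is imported rather than shown in these examples. Below is a minimal polling sketch of what such a helper could look like, assuming the header DB exposes get_canonical_head(); the project's actual implementation may differ.

import asyncio

async def wait_for_head_sketch(headerdb, target_header, sync_timeout=10):
    # Poll the canonical head until it matches the target header.
    async def _poll():
        while headerdb.get_canonical_head() != target_header:
            await asyncio.sleep(0.1)

    # asyncio.wait_for raises asyncio.TimeoutError if the head never arrives,
    # which is the behaviour Example #4 relies on when it expects a timeout.
    await asyncio.wait_for(_poll(), timeout=sync_timeout)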
Example #3
async def test_beam_syncer(request,
                           event_loop,
                           event_bus,
                           chaindb_fresh,
                           chaindb_churner,
                           beam_to_block,
                           checkpoint=None):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        # Need a name that will be unique per xdist-process, otherwise
        #   lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(
            f"PausingEndpoint-{unique_process_name}")

        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(
            f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer],
                                                          event_bus=event_bus)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), run_request_server(
            event_bus, AsyncChainDB(chaindb_churner.db)
        ), AsyncioEndpoint.serve(
            pausing_config
        ) as pausing_endpoint, AsyncioEndpoint.serve(gatherer_config) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, PetersburgVM), ),
                chain_id=999,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
            )

            client_peer.logger.info("%s is serving churner blocks",
                                    client_peer)
            server_peer.logger.info("%s is syncing up churner blocks",
                                    server_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
                token=client.cancel_token,
            )
            asyncio.ensure_future(import_server.run())

            await pausing_endpoint.connect_to_endpoints(gatherer_config)
            asyncio.ensure_future(client.run())

            # We can sync at least 10 blocks in 1s at current speeds (or reach the chain tip).
            # Trying to keep the tests short-ish. A fuller test could always set the target header
            #   to the chaindb_churner canonical head, and increase the timeout significantly.
            target_block_number = min(beam_to_block + 10, 129)
            target_head = chaindb_churner.get_canonical_block_header_by_number(
                target_block_number)
            await wait_for_head(chaindb_fresh, target_head, sync_timeout=10)
            assert target_head.state_root in chaindb_fresh.db

            # first stop the import server, so it doesn't hang waiting for state data
            await import_server.cancel()
            await client.cancel()
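
This variant of the beam sync test takes beam_to_block as an argument, which suggests it is meant to be exercised for several pivot points. A hedged sketch of one way to parametrize it; the decorator, marker, and pivot values are assumptions for illustration.

import pytest

# Illustrative parametrization; the min(beam_to_block + 10, 129) cap above
# suggests chaindb_churner holds at least 129 blocks.
@pytest.mark.asyncio
@pytest.mark.parametrize('beam_to_block', (1, 66, 128))
async def test_beam_syncer_pivots(request, event_loop, event_bus,
                                  chaindb_fresh, chaindb_churner, beam_to_block):
    await test_beam_syncer(request, event_loop, event_bus,
                           chaindb_fresh, chaindb_churner, beam_to_block)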
Example #4
async def test_header_gap_fill_detects_invalid_attempt(caplog, event_loop,
                                                       event_bus,
                                                       chaindb_with_gaps,
                                                       chaindb_1000,
                                                       chaindb_uncle):

    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_uncle.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer],
                                                          event_bus=event_bus)
        client = SequentialHeaderChainGapSyncer(
            LatestTestChain(chaindb_with_gaps.db), chaindb_with_gaps,
            client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)
        uncle_chaindb = AsyncChainDB(chaindb_uncle.db)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    uncle_chaindb,
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # We check for 499 because 500 exists from the very beginning (the checkpoint)
            expected_block_number = 499
            final_header = chaindb_1000.get_canonical_block_header_by_number(
                expected_block_number)
            async with background_asyncio_service(client):
                try:
                    await wait_for_head(
                        chaindb_with_gaps,
                        final_header,
                        sync_timeout=5,
                    )
                except asyncio.TimeoutError:
                    assert "Attempted to fill gap with invalid header" in caplog.text
                    # Monkey patch the uncle chaindb to effectively make the attacker peer
                    # switch to the correct chain.
                    uncle_chaindb.db = chaindb_1000.db
                    # The hack goes on: Now that our attacker peer turned friendly we may be stuck
                    # waiting for a new skeleton peer forever. This isn't a real life scenario
                    # because: a.) an attacker probably won't turn friendly and b.) new blocks and
                    # peers will constantly yield new skeleton peers.
                    # This ugly hack will tick the chain tip monitor as we simulate a joining peer.
                    for subscriber in client_peer_pool._subscribers:
                        subscriber.register_peer(client_peer)

                    await wait_for_head(
                        chaindb_with_gaps,
                        final_header,
                        sync_timeout=20,
                    )
                else:
                    raise AssertionError(
                        "Succeeded when it was expected to fail")
Example #5
async def test_sequential_header_gapfill_syncer(request, event_loop, event_bus,
                                                chaindb_with_gaps,
                                                chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        chain_with_gaps = LatestTestChain(chaindb_with_gaps.db)
        client = SequentialHeaderChainGapSyncer(
            chain_with_gaps, chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        # Ensure we use small chunks to be able to test pause/resume properly
        client._max_backfill_header_at_once = 100
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(client):
                # We intentionally only sync up to a block *below* the first gap to create a more
                # difficult scenario for pause/resume. We want to be sure we can pause not only at
                # the points where we have synced up to an actual gap, but also after we have
                # synced up to the `_max_backfill_header_at_once` limit, which may be shorter
                # than the actual gap in the chain.
                await wait_for_head(
                    chaindb_with_gaps,
                    chaindb_1000.get_canonical_block_header_by_number(100))

                # Pause the syncer for a moment and check if it continued syncing (it should not!)
                client.pause()
                # Verify that we stopped the chain fast enough, before the gap was fully filled
                # This is a potential source of test flakiness
                with pytest.raises(HeaderNotFound):
                    chaindb_with_gaps.get_canonical_block_header_by_number(249)
                await asyncio.sleep(1)
                # Make sure that the gap filling doesn't complete for a while. We could
                # theoretically get false positives if it's not paused but very slow to fill headers
                with pytest.raises(HeaderNotFound):
                    chaindb_with_gaps.get_canonical_block_header_by_number(249)
                # Now resume syncing
                client.resume()

                # We check for 499 because 500 is there from the very beginning (the checkpoint)
                await wait_for_head(
                    chaindb_with_gaps,
                    chaindb_1000.get_canonical_block_header_by_number(499))
                # This test is supposed to only fill in headers, so the following should fail.
                # If this ever succeeds it probably means the fixture was re-created with trivial
                # blocks and the test will fail and remind us what kind of fixture we want here.
                with pytest.raises(BlockNotFound):
                    chain_with_gaps.get_canonical_block_by_number(499)
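
The pause check above follows a small pattern: pause, confirm that the next expected header is still missing, sleep, and confirm again so a slow-but-unpaused syncer cannot slip through. A hedged helper capturing that pattern; the helper name and signature are illustrative, while HeaderNotFound is assumed to be the py-evm exception the test already uses.

import asyncio

import pytest
from eth.exceptions import HeaderNotFound

async def assert_no_progress_while_paused(syncer, chaindb, missing_block_number,
                                           quiet_time=1.0):
    # Pause, then verify the header at `missing_block_number` stays missing.
    syncer.pause()
    with pytest.raises(HeaderNotFound):
        chaindb.get_canonical_block_header_by_number(missing_block_number)
    await asyncio.sleep(quiet_time)
    with pytest.raises(HeaderNotFound):
        chaindb.get_canonical_block_header_by_number(missing_block_number)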
Example #6
async def test_block_gapfill_syncer(request, event_loop, event_bus,
                                    chaindb_with_block_gaps, chaindb_1000):
    client_context = ChainContextFactory(
        headerdb__db=chaindb_with_block_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        syncer = BodyChainGapSyncer(
            LatestTestChain(chaindb_with_block_gaps.db),
            chaindb_with_block_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus),
        )
        # In production, this would be the block time but we want our test to pause/resume swiftly
        syncer._idle_time = 0.01
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(syncer):
                chain_with_gaps = LatestTestChain(chaindb_with_block_gaps.db)
                fat_chain = LatestTestChain(chaindb_1000.db)

                # Ensure we can pause/resume immediately and not just after syncing has started
                syncer.pause()
                syncer.resume()

                # Sync the first 100 blocks, then check that pausing/resume works
                await wait_for_block(
                    chain_with_gaps,
                    fat_chain.get_canonical_block_by_number(100))

                # Pause the syncer and take note how far we have synced at this point
                syncer.pause()
                # We need to give the async code a moment to settle, so the progress has
                # stabilized before we record it.
                await asyncio.sleep(0.25)
                paused_chain_gaps = chain_with_gaps.chaindb.get_chain_gaps()

                # Consider it victory if after 0.5s no new blocks were written to the database
                await asyncio.sleep(0.5)
                assert paused_chain_gaps == chain_with_gaps.chaindb.get_chain_gaps()

                # Resume syncing
                syncer.resume()

                await wait_for_block(
                    chain_with_gaps,
                    fat_chain.get_canonical_block_by_number(1000),
                    sync_timeout=20)

                for block_num in range(1, 1001):
                    assert (
                        chain_with_gaps.get_canonical_block_by_number(block_num)
                        == fat_chain.get_canonical_block_by_number(block_num)
                    )

                # We need to give the async calls a moment to settle before we can read the updated
                # chain gaps.
                await asyncio.sleep(0.25)
                assert chain_with_gaps.chaindb.get_chain_gaps() == ((), 1001)
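
This test waits on whole blocks rather than headers, via wait_for_block. As with wait_for_head, the helper itself is not shown in these examples; below is a minimal polling sketch under the assumption that a missing block raises BlockNotFound. The project's actual helper may differ.

import asyncio

from eth.exceptions import BlockNotFound

async def wait_for_block_sketch(chain, target_block, sync_timeout=10):
    # Poll until the block at the target height is present and matches.
    async def _poll():
        while True:
            try:
                block = chain.get_canonical_block_by_number(target_block.number)
            except BlockNotFound:
                pass
            else:
                if block == target_block:
                    return
            await asyncio.sleep(0.1)

    await asyncio.wait_for(_poll(), timeout=sync_timeout)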
Example #7
async def test_header_syncer(request, event_loop, event_bus, chaindb_fresh,
                             chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_to_server, server_to_client):

        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_to_server],
                                           event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_to_client],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_to_client.logger.info("%s is serving 1000 blocks",
                                         server_to_client)
            client_to_server.logger.info("%s is syncing up 1000",
                                         client_to_server)

            # Artificially split header sync into two parts, to verify that
            #   cycling to the next sync works properly. Split by erasing the canonical
            #   lookups in a middle chunk. We have to erase a whole range because the
            #   skeleton syncer only requests roughly every 192nd header, so a skeleton
            #   request would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1)
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                complete_chain_tip = chaindb_1000.get_canonical_head()

                # Not entirely certain that sending new block hashes is necessary, but...
                #   it shouldn't hurt anything. Trying to fix this flaky test:
                # https://app.circleci.com/pipelines/github/ethereum/trinity/6855/workflows/131f9b03-8c99-4419-8e88-d2ef216e3dbb/jobs/259263/steps  # noqa: E501

                server_to_client.eth_api.send_new_block_hashes(
                    NewBlockHash(complete_chain_tip.hash, complete_chain_tip.block_number),
                )

                await wait_for_head(chaindb_fresh, complete_chain_tip)
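
Both header-syncer variants (Examples #2 and #7) inline the same erase-and-restore dance around the canonical block-number lookups. Purely as an illustration of that pattern, it could be factored into a context manager; the helper name is an assumption, and note the tests above restore the keys mid-test rather than on exit.

from contextlib import contextmanager

@contextmanager
def erased_canonical_lookups(chaindb, block_numbers):
    # Temporarily delete the block-number -> hash lookups so the skeleton
    # syncer cannot walk straight through this range of headers.
    erased = []
    for blocknum in block_numbers:
        dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
        erased.append((dbkey, chaindb.db[dbkey]))
        del chaindb.db[dbkey]
    try:
        yield erased
    finally:
        # Put the canonical lookups back so syncing can resume.
        for dbkey, canonical_hash in erased:
            chaindb.db[dbkey] = canonical_hash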