Example #1
async def do_run(self, event_bus: EndpointAPI) -> None:
    async with background_asyncio_service(IdleService(event_bus)):
        raise ComponentException(
            "This is a component that crashes after starting a service")
Example #2
async def test_proxy_peer_requests(request, event_bus, other_event_bus,
                                   event_loop, chaindb_20, client_and_server):
    server_event_bus = event_bus
    client_event_bus = other_event_bus
    client_peer, server_peer = client_and_server

    client_peer_pool = MockPeerPoolWithConnectedPeers(
        [client_peer], event_bus=client_event_bus)
    server_peer_pool = MockPeerPoolWithConnectedPeers(
        [server_peer], event_bus=server_event_bus)

    async with contextlib.AsyncExitStack() as stack:
        await stack.enter_async_context(
            run_peer_pool_event_server(client_event_bus,
                                       client_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        await stack.enter_async_context(
            run_peer_pool_event_server(server_event_bus,
                                       server_peer_pool,
                                       handler_type=ETHPeerPoolEventServer))

        base_db = chaindb_20.db
        await stack.enter_async_context(
            background_asyncio_service(
                ETHRequestServer(
                    server_event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    MainnetChain.vm_configuration,
                    AsyncChainDB(base_db),
                )))
        await stack.enter_async_context(
            background_asyncio_service(
                WitRequestServer(
                    server_event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    base_db,
                )))

        client_proxy_peer_pool = ETHProxyPeerPool(
            client_event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(client_proxy_peer_pool))

        proxy_peer_pool = ETHProxyPeerPool(server_event_bus,
                                           TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(
            background_asyncio_service(proxy_peer_pool))

        proxy_peer = await client_proxy_peer_pool.ensure_proxy_peer(
            client_peer.session)

        headers = await proxy_peer.eth_api.get_block_headers(0, 1, 0, False)

        assert len(headers) == 1
        block_header = headers[0]
        assert block_header.block_number == 0

        receipts = await proxy_peer.eth_api.get_receipts(headers)
        assert len(receipts) == 1
        receipt = receipts[0]
        assert receipt[1][0] == block_header.receipt_root

        block_bundles = await proxy_peer.eth_api.get_block_bodies(headers)
        assert len(block_bundles) == 1
        first_bundle = block_bundles[0]
        assert first_bundle[1][0] == block_header.transaction_root

        node_data = await proxy_peer.eth_api.get_node_data(
            (block_header.state_root, ))
        assert node_data[0][0] == block_header.state_root

        block_hash = block_header.hash
        node_hashes = tuple(Hash32Factory.create_batch(5))
        # Populate the server's witness DB so that it can reply to our request.
        wit_db = AsyncWitnessDB(base_db)
        wit_db.persist_witness_hashes(block_hash, node_hashes)
        response = await proxy_peer.wit_api.get_block_witness_hashes(block_hash)
        assert set(response) == set(node_hashes)
Example #3
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    service = PeerCountReporter(event_bus)
    async with background_asyncio_service(service) as manager:
        await manager.wait_finished()
Example #4
async def test_asyncio_service_external_api_works_while_running():
    service = ExternalAPIService()

    async with background_asyncio_service(service):
        assert await service.get_7() == 7
Example #5
    async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
        port = boot_info.trinity_config.port
        upnp_service = UPnPService(port, event_bus)

        async with background_asyncio_service(upnp_service) as manager:
            await manager.wait_finished()
Example #6
async def apply(self, connection: ConnectionAPI) -> AsyncIterator[None]:
    service = PingAndDisconnectIfIdle(connection, self.idle_timeout)
    async with background_asyncio_service(service):
        yield
Example #7
async def test_beam_syncer(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block,
        checkpoint=None):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = ETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        # Need a name that will be unique per xdist-process, otherwise
        #   lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(f"PausingEndpoint-{unique_process_name}")

        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_churner.db)
        )), AsyncioEndpoint.serve(
            pausing_config
        ) as pausing_endpoint, AsyncioEndpoint.serve(gatherer_config) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, PetersburgVM), ),
                chain_id=999,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
            )

            client_peer.logger.info("%s is serving churner blocks", client_peer)
            server_peer.logger.info("%s is syncing up churner blocks", server_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
                token=client.cancel_token,
            )
            asyncio.ensure_future(import_server.run())

            await pausing_endpoint.connect_to_endpoints(gatherer_config)
            asyncio.ensure_future(client.run())

            # We can sync at least 10 blocks in 1s at current speeds (or reach the current tip).
            # Trying to keep the tests short-ish. A fuller test could always set the target header
            #   to the chaindb_churner canonical head, and increase the timeout significantly
            target_block_number = min(beam_to_block + 10, 129)
            target_head = chaindb_churner.get_canonical_block_header_by_number(target_block_number)
            await wait_for_head(chaindb_fresh, target_head, sync_timeout=10)
            assert target_head.state_root in chaindb_fresh.db

            # first stop the import server, so it doesn't hang waiting for state data
            await import_server.cancel()
            await client.cancel()
Example #8
async def test_header_gap_fill_detects_invalid_attempt(caplog, event_loop,
                                                       event_bus,
                                                       chaindb_with_gaps,
                                                       chaindb_1000,
                                                       chaindb_uncle):

    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_uncle.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client_peer_pool = MockPeerPoolWithConnectedPeers([client_peer],
                                                          event_bus=event_bus)
        client = SequentialHeaderChainGapSyncer(
            LatestTestChain(chaindb_with_gaps.db), chaindb_with_gaps,
            client_peer_pool)
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)
        uncle_chaindb = AsyncChainDB(chaindb_uncle.db)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    uncle_chaindb,
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # We check for 499 because 500 exists from the very beginning (the checkpoint)
            expected_block_number = 499
            final_header = chaindb_1000.get_canonical_block_header_by_number(
                expected_block_number)
            async with background_asyncio_service(client):
                try:
                    await wait_for_head(
                        chaindb_with_gaps,
                        final_header,
                        sync_timeout=5,
                    )
                except asyncio.TimeoutError:
                    assert "Attempted to fill gap with invalid header" in caplog.text
                    # Monkey patch the uncle chaindb to effectively make the attacker peer
                    # switch to the correct chain.
                    uncle_chaindb.db = chaindb_1000.db
                    # The hack goes on: Now that our attacker peer turned friendly we may be stuck
                    # waiting for a new skeleton peer forever. This isn't a real life scenario
                    # because: a.) an attacker probably won't turn friendly and b.) new blocks and
                    # peers will constantly yield new skeleton peers.
                    # This ugly hack will tick the chain tip monitor as we simulate a joining peer.
                    for subscriber in client_peer_pool._subscribers:
                        subscriber.register_peer(client_peer)

                    await wait_for_head(
                        chaindb_with_gaps,
                        final_header,
                        sync_timeout=20,
                    )
                else:
                    raise AssertionError(
                        "Succeeded when it was expected to fail")
Example #9
async def test_tx_propagation(event_bus,
                              funded_address_private_key,
                              chain_with_block_validation,
                              tx_validator):

    initial_two_peers = TEST_NODES[:2]
    node_one = initial_two_peers[0]
    node_two = initial_two_peers[1]

    async with AsyncExitStack() as stack:
        await stack.enter_async_context(mock_request_response(
            GetConnectedPeersRequest,
            GetConnectedPeersResponseFactory.from_sessions(initial_two_peers),
            event_bus,
        ))

        peer_pool = ETHProxyPeerPool(event_bus, TO_NETWORKING_BROADCAST_CONFIG)
        await stack.enter_async_context(run_service(peer_pool))

        tx_pool = TxPool(event_bus, peer_pool, tx_validator)
        await stack.enter_async_context(background_asyncio_service(tx_pool))

        await asyncio.sleep(0.01)

        txs_broadcasted_by_peer1 = [
            create_random_tx(chain_with_block_validation, funded_address_private_key)
        ]

        # this needs to go here to ensure that the subscription is *after*
        # the one installed by the transaction pool so that the got_txns
        # event will get set after the other handlers have been called.
        outgoing_tx, got_txns = observe_outgoing_transactions(event_bus)

        # Peer1 sends some txs
        await event_bus.broadcast(
            TransactionsEvent(session=node_one, command=Transactions(txs_broadcasted_by_peer1))
        )

        await asyncio.wait_for(got_txns.wait(), timeout=0.1)

        assert outgoing_tx == [
            (node_two, tuple(txs_broadcasted_by_peer1)),
        ]
        # Clear the recording, we asserted all we want and would like to have a fresh start
        outgoing_tx.clear()

        # Peer1 sends same txs again
        await event_bus.broadcast(
            TransactionsEvent(session=node_one, command=Transactions(txs_broadcasted_by_peer1))
        )
        await asyncio.wait_for(got_txns.wait(), timeout=0.1)
        # Check that Peer2 doesn't receive them again
        assert len(outgoing_tx) == 0

        # Peer2 sends exact same txs back
        await event_bus.broadcast(
            TransactionsEvent(session=node_two, command=Transactions(txs_broadcasted_by_peer1))
        )
        await asyncio.wait_for(got_txns.wait(), timeout=0.1)

        # Check that Peer1 won't get them as that is where they originally came from
        assert len(outgoing_tx) == 0

        txs_broadcasted_by_peer2 = [
            create_random_tx(chain_with_block_validation, funded_address_private_key),
            txs_broadcasted_by_peer1[0]
        ]

        # Peer2 sends old + new tx
        await event_bus.broadcast(
            TransactionsEvent(session=node_two, command=Transactions(txs_broadcasted_by_peer2))
        )
        await asyncio.wait_for(got_txns.wait(), timeout=0.1)
        # Not sure why this sleep is needed....
        await asyncio.sleep(0.01)

        # Check that Peer1 receives only the one tx that it didn't know about
        assert outgoing_tx == [
            (node_one, (txs_broadcasted_by_peer2[0],)),
        ]
Example #10
async def test_block_gapfill_syncer(request, event_loop, event_bus,
                                    chaindb_with_block_gaps, chaindb_1000):
    client_context = ChainContextFactory(
        headerdb__db=chaindb_with_block_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        syncer = BodyChainGapSyncer(
            LatestTestChain(chaindb_with_block_gaps.db),
            chaindb_with_block_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus),
        )
        # In production, this would be the block time but we want our test to pause/resume swiftly
        syncer._idle_time = 0.01
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(syncer):
                chain_with_gaps = LatestTestChain(chaindb_with_block_gaps.db)
                fat_chain = LatestTestChain(chaindb_1000.db)

                # Ensure we can pause/resume immediately and not just after syncing has started
                syncer.pause()
                syncer.resume()

                # Sync the first 100 blocks, then check that pausing/resume works
                await wait_for_block(
                    chain_with_gaps,
                    fat_chain.get_canonical_block_by_number(100))

                # Pause the syncer and take note how far we have synced at this point
                syncer.pause()
                # Give the async code a moment to settle so that the sync progress
                # has stabilized before we record it.
                await asyncio.sleep(0.25)
                paused_chain_gaps = chain_with_gaps.chaindb.get_chain_gaps()

                # Consider it victory if after 0.5s no new blocks were written to the database
                await asyncio.sleep(0.5)
                assert paused_chain_gaps == chain_with_gaps.chaindb.get_chain_gaps()

                # Resume syncing
                syncer.resume()

                await wait_for_block(
                    chain_with_gaps,
                    fat_chain.get_canonical_block_by_number(1000),
                    sync_timeout=20)

                for block_num in range(1, 1001):
                    assert chain_with_gaps.get_canonical_block_by_number(
                        block_num) == fat_chain.get_canonical_block_by_number(
                            block_num)

                # We need to give the async calls a moment to settle before we can read the updated
                # chain gaps.
                await asyncio.sleep(0.25)
                assert chain_with_gaps.chaindb.get_chain_gaps() == ((), 1001)
Example #11
async def test_sequential_header_gapfill_syncer(request, event_loop, event_bus,
                                                chaindb_with_gaps,
                                                chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_with_gaps.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        chain_with_gaps = LatestTestChain(chaindb_with_gaps.db)
        client = SequentialHeaderChainGapSyncer(
            chain_with_gaps, chaindb_with_gaps,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus))
        # Ensure we use small chunks to be able to test pause/resume properly
        client._max_backfill_header_at_once = 100
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            async with background_asyncio_service(client):
                # We intentionally only sync up to a block *below* the first gap to create a more
                # difficult scenario for pause/resume. We want to make sure we can pause not only
                # at points where we have synced up to an actual gap, but also after syncing up to
                # the `_max_backfill_header_at_once` limit, which may be smaller than the actual
                # gap in the chain.
                await wait_for_head(
                    chaindb_with_gaps,
                    chaindb_1000.get_canonical_block_header_by_number(100))

                # Pause the syncer for a moment and check if it continued syncing (it should not!)
                client.pause()
                # Verify that we stopped the chain fast enough, before the gap was fully filled
                # This is a potential source of test flakiness
                with pytest.raises(HeaderNotFound):
                    chaindb_with_gaps.get_canonical_block_header_by_number(249)
                await asyncio.sleep(1)
                # Make sure that the gap filling doesn't complete for a while. We could
                # theoretically get false positives if it's not paused but very slow to fill headers
                with pytest.raises(HeaderNotFound):
                    chaindb_with_gaps.get_canonical_block_header_by_number(249)
                # Now resume syncing
                client.resume()

                await wait_for_head(
                    # We check for 499 because 500 is there from the very beginning (the checkpoint)
                    chaindb_with_gaps,
                    chaindb_1000.get_canonical_block_header_by_number(499))
                # This test is supposed to only fill in headers, so the following should fail.
                # If this ever succeeds it probably means the fixture was re-created with trivial
                # blocks and the test will fail and remind us what kind of fixture we want here.
                with pytest.raises(BlockNotFound):
                    chain_with_gaps.get_canonical_block_by_number(499)
Example #12
async def test_header_syncer(request, event_loop, event_bus, chaindb_fresh,
                             chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_to_server, server_to_client):

        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db), chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_to_server],
                                           event_bus=event_bus))
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_to_client],
                                                          event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(
                    event_bus,
                    TO_NETWORKING_BROADCAST_CONFIG,
                    AsyncChainDB(chaindb_1000.db),
                )):

            server_to_client.logger.info("%s is serving 1000 blocks",
                                         server_to_client)
            client_to_server.logger.info("%s is syncing up 1000",
                                         client_to_server)

            # Artificially split header sync into two parts, to verify that
            #   cycling to the next sync works properly. Split by erasing the canonical
            #   lookups in a middle chunk. We have to erase a range of them because of
            #   how the skeleton syncer asks for every ~192 headers. The skeleton request
            #   would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1)
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                complete_chain_tip = chaindb_1000.get_canonical_head()

                # Not entirely certain that sending new block hashes is necessary, but...
                #   it shouldn't hurt anything. Trying to fix this flaky test:
                # https://app.circleci.com/pipelines/github/ethereum/trinity/6855/workflows/131f9b03-8c99-4419-8e88-d2ef216e3dbb/jobs/259263/steps  # noqa: E501

                server_to_client.eth_api.send_new_block_hashes(
                    NewBlockHash(complete_chain_tip.hash,
                                 complete_chain_tip.block_number), )

                await wait_for_head(chaindb_fresh, complete_chain_tip)
Example #13
async def test_header_syncer(request,
                             event_loop,
                             event_bus,
                             chaindb_fresh,
                             chaindb_1000):
    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_1000.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer):

        client = HeaderChainSyncer(
            LatestTestChain(chaindb_fresh.db),
            chaindb_fresh,
            MockPeerPoolWithConnectedPeers([client_peer], event_bus=event_bus)
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
            event_bus, server_peer_pool, handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(ETHRequestServer(
            event_bus, TO_NETWORKING_BROADCAST_CONFIG, AsyncChainDB(chaindb_1000.db),
        )):

            server_peer.logger.info("%s is serving 1000 blocks", server_peer)
            client_peer.logger.info("%s is syncing up 1000", client_peer)

            # Artificially split header sync into two parts, to verify that
            #   cycling to the next sync works properly. Split by erasing the canonical
            #   lookups in a middle chunk. We have to erase a range of them because of
            #   how the skeleton syncer asks for every ~192 headers. The skeleton request
            #   would skip right over a single missing header.
            erase_block_numbers = range(500, 700)
            erased_canonicals = []
            for blocknum in erase_block_numbers:
                dbkey = SchemaV1.make_block_number_to_hash_lookup_key(blocknum)
                canonical_hash = chaindb_1000.db[dbkey]
                erased_canonicals.append((dbkey, canonical_hash))
                del chaindb_1000.db[dbkey]

            async with background_asyncio_service(client):
                target_head = chaindb_1000.get_canonical_block_header_by_number(
                    erase_block_numbers[0] - 1
                )
                await wait_for_head(chaindb_fresh, target_head)

                # gut check that we didn't skip past the erased range of blocks
                head = chaindb_fresh.get_canonical_head()
                assert head.block_number < erase_block_numbers[0]

                # TODO validate that the skeleton syncer has cycled??

                # Replace the missing headers so that syncing can resume
                for dbkey, canonical_hash in erased_canonicals:
                    chaindb_1000.db[dbkey] = canonical_hash

                # Do we have to do anything here to have the server notify the client
                #   that it's capable of serving more headers now? ... Apparently not.

                await wait_for_head(chaindb_fresh, chaindb_1000.get_canonical_head())
Example #14
    async def _do_run(self) -> None:
        with child_process_logging(self._boot_info):
            endpoint_name = self.get_endpoint_name()
            event_bus_service = AsyncioEventBusService(
                self._boot_info.trinity_config,
                endpoint_name,
            )
            async with background_asyncio_service(
                    event_bus_service) as eventbus_manager:
                event_bus = await event_bus_service.get_event_bus()
                loop_monitoring_task = create_task(
                    self._loop_monitoring_task(event_bus),
                    f'AsyncioIsolatedComponent/{self.name}/loop_monitoring_task'
                )

                do_run_task = create_task(
                    self.do_run(event_bus),
                    f'AsyncioIsolatedComponent/{self.name}/do_run')
                eventbus_task = create_task(
                    eventbus_manager.wait_finished(),
                    f'AsyncioIsolatedComponent/{self.name}/eventbus/wait_finished'
                )
                try:
                    max_wait_after_cancellation = 2
                    tasks = [do_run_task, eventbus_task, loop_monitoring_task]
                    if self._boot_info.profile:
                        with profiler(f'profile_{self.get_endpoint_name()}'):
                            try:
                                await wait_first(
                                    tasks,
                                    max_wait_after_cancellation,
                                )
                            except asyncio.TimeoutError:
                                self.logger.warning(
                                    "Timed out waiting for tasks to "
                                    "terminate after cancellation: %s", tasks)

                    else:
                        # XXX: When open_in_process() injects a KeyboardInterrupt into us (via
                        # coro.throw()), we hang forever here, until open_in_process() times
                        # out and sends us a SIGTERM, at which point we exit without executing
                        # either the except or the finally blocks below.
                        # See https://github.com/ethereum/trinity/issues/1711 for more.
                        try:
                            await wait_first(
                                tasks,
                                max_wait_after_cancellation,
                            )
                        except asyncio.TimeoutError:
                            self.logger.warning(
                                "Timed out waiting for tasks to terminate after cancellation: %s",
                                tasks)

                except KeyboardInterrupt:
                    self.logger.debug("%s: KeyboardInterrupt", self)
                    # Currently we never reach this code path, but when we fix the issue above
                    # it will be needed.
                    return
                finally:
                    # Once we start seeing this in the logs after a Ctrl-C, we'll likely have
                    # figured out the issue above.
                    self.logger.debug("%s: do_run() finished", self)
Example #15
    async def run(self) -> None:

        try:
            await self._launch_strategy.fulfill_prerequisites()
        except asyncio.TimeoutError as exc:
            self.logger.exception(
                "Timed out while trying to fulfill prerequisites of "
                f"sync launch strategy: {exc} from {self._launch_strategy}")
            self.manager.cancel()
            return

        self.manager.run_daemon_child_service(self._block_importer)
        self.manager.run_daemon_child_service(self._header_syncer)

        # Kick off the body syncer early (it hangs on the launchpoint header syncer anyway)
        # It needs to start early because we want to "re-run" the header at the tip,
        # which it gets grumpy about. (it doesn't want to receive the canonical header tip
        # as a header to process)
        self.manager.run_daemon_child_service(self._body_syncer)

        # Launch the state syncer endpoint early
        self.manager.run_daemon_child_service(self._data_hunter)

        # Only persist headers at start
        async with background_asyncio_service(
            self._header_persister) as manager:
            await manager.wait_finished()
        # When header store exits, we have caught up

        # We want to trigger beam sync on the last block received,
        # not wait for the next one to be broadcast
        final_headers = self._header_persister.get_final_headers()

        # First, download block bodies for previous 6 blocks, for validation
        await self._download_blocks(final_headers[0])

        # Now, tell the MissingDataEventHandler about the minimum acceptable block number for
        # data requests. This helps during pivots to quickly reject requests from old block imports
        self._data_hunter.minimum_beam_block_number = min(
            header.block_number for header in final_headers)

        # Now let the beam sync importer kick in
        self._launchpoint_header_syncer.set_launchpoint_headers(final_headers)

        # We wait until beam sync has launched before starting backfill, because
        #   they both request block bodies, but beam sync needs them urgently.
        if self._enable_backfill:
            # There's no chance to introduce new gaps after this point. Therefore we can run this
            # until it has filled all gaps and let it finish.
            self.manager.run_child_service(self._header_backfill)

            # In contrast, block gap fill needs to run indefinitely because of beam sync pivoting.
            self.manager.run_daemon_child_service(self._block_backfill)

            # Now we can check the lag (presumably ~0) and start backfill
            self.manager.run_daemon_task(self._monitor_historical_backfill)

        # Will start the state background service or the basic queen queue
        self.manager.run_child_service(self._queen_queue)

        # TODO wait until first header with a body comes in?...
        # Start state downloader service
        self.manager.run_daemon_child_service(self._state_downloader)

        # run sync until cancelled
        await self.manager.wait_finished()
Example #16
async def apply(self, connection: ConnectionAPI) -> AsyncIterator[asyncio.Task[Any]]:
    service = PingAndDisconnectIfIdle(connection, self.idle_timeout)
    async with background_asyncio_service(service) as manager:
        task_name = f'PingAndDisconnectIfIdleService/{connection.remote}'
        yield create_task(manager.wait_finished(), name=task_name)
Example #17
async def _beam_syncing(
    request,
    event_loop,
    event_bus,
    chaindb_fresh,
    chaindb_churner,
    beam_to_block,
    checkpoint=None,
    VM_at_0=PetersburgVM,
    enable_state_backfill=False,
):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    backfiller = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer,
                             server_peer), backfiller as (client2_peer,
                                                          backfill_peer):

        # Need a name that will be unique per xdist-process, otherwise
        #   lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(
            f"PausingEndpoint-{unique_process_name}")

        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(
            f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers(
            [client_peer, backfill_peer],
            event_bus=event_bus,
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)
        backfill_peer_pool = MockPeerPoolWithConnectedPeers(
            [client2_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), run_peer_pool_event_server(
                event_bus, backfill_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_churner.db))
        ), AsyncioEndpoint.serve(
                pausing_config) as pausing_endpoint, AsyncioEndpoint.serve(
                    gatherer_config) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, VM_at_0), ),
                chain_id=999,
                consensus_context_class=ConsensusContext,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                metrics_registry=NoopMetricsRegistry(),
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                NoopMetricsRegistry(),
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
                enable_state_backfill=enable_state_backfill,
                enable_backfill=False,
            )

            client_peer.logger.info("%s is serving churner blocks",
                                    client_peer)
            backfill_peer.logger.info("%s is serving backfill state",
                                      backfill_peer)
            server_peer.logger.info("%s is syncing up churner blocks",
                                    server_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
            )
            async with background_asyncio_service(import_server):
                await pausing_endpoint.connect_to_endpoints(gatherer_config)
                async with background_asyncio_service(client):
                    yield client
Example #18
async def apply(self, connection: ConnectionAPI) -> AsyncIterator[asyncio.Future[None]]:
    service = PingAndDisconnectIfIdle(connection, self.idle_timeout)
    async with background_asyncio_service(service) as manager:
        yield asyncio.create_task(manager.wait_finished())
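All of the examples above follow the same pattern from the async-service library: wrap a Service subclass in background_asyncio_service(...), which starts the service when the async with block is entered and cancels it and waits for it to finish when the block exits. The sketch below distills that pattern into a minimal, self-contained form; TickerService is a hypothetical toy service invented only for illustration, and the cancel-on-exit behaviour is assumed from how the examples above use the returned manager.

import asyncio

from async_service import Service, background_asyncio_service


class TickerService(Service):
    # Hypothetical toy service used only for illustration.
    ticks = 0

    async def run(self) -> None:
        # Loops until the manager cancels the service (e.g. when the
        # surrounding `async with` block exits).
        while True:
            self.ticks += 1
            await asyncio.sleep(0.1)


async def main() -> None:
    service = TickerService()
    # Entering the context starts the service in the background; exiting it
    # cancels the service and waits for it to shut down.
    async with background_asyncio_service(service) as manager:
        await asyncio.sleep(0.35)
        assert service.ticks >= 3
        # Explicit cancellation, as in some of the examples above, also works:
        manager.cancel()
        await manager.wait_finished()


asyncio.run(main())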