Example #1
    async def _pivot_loop(self) -> None:
        # Fall back to a no-op registry when metrics reporting is not configured.
        if self.sync_metrics_registry:
            metrics_registry = self.sync_metrics_registry.metrics_service.registry
        else:
            metrics_registry = NoopMetricsRegistry()
        # Keep launching a fresh BeamSyncer until no further pivot is requested.
        while self.manager.is_running:
            beam_syncer = BeamSyncer(
                self.chain,
                self.base_db,
                self.chaindb,
                self.peer_pool,
                self.event_bus,
                metrics_registry,
                self.checkpoint,
                self.force_beam_block_number,
                self.enable_header_backfill,
            )
            self.manager.run_child_service(beam_syncer)
            do_pivot = await self._monitor_for_pivot(beam_syncer)
            if do_pivot:
                self.logger.info("Pivoting Beam Sync to a newer header...")
                if self.sync_metrics_registry:
                    latest_block = beam_syncer._body_syncer._latest_block_number
                    await self.sync_metrics_registry.record_pivot(latest_block)
            else:
                self.logger.info("No pivot requested. Leaving Beam Syncer closed...")
                break
Example #2
    async def do_run(self, event_bus: EndpointAPI) -> None:
        trinity_config = self._boot_info.trinity_config
        app_config = trinity_config.get_app_config(Eth1AppConfig)
        chain_config = app_config.get_chain_config()

        base_db = DBClient.connect(trinity_config.database_ipc_path)

        with base_db:
            loop = asyncio.get_event_loop()

            beam_chain = make_pausing_beam_chain(
                chain_config.vm_configuration,
                chain_config.chain_id,
                chain_config.consensus_context_class,
                base_db,
                event_bus,
                # We only want to collect metrics about blocks being imported, so here we use the
                # NoopMetricsRegistry.
                NoopMetricsRegistry(),
                # these preview executions are lower priority than the primary block import
                loop=loop,
                urgent=False,
            )

            preview_server = BlockPreviewServer(event_bus, beam_chain,
                                                self.shard_num)

            async with background_asyncio_service(preview_server) as manager:
                await manager.wait_finished()
Example #3
    def __init__(self,
                 influx_server: str = '',
                 influx_user: str = '',
                 influx_password: str = '',
                 influx_database: str = '',
                 host: str = '',
                 reporting_frequency: int = 10):

        self._registry = NoopMetricsRegistry()
Example #4
async def _beam_syncing(
    request,
    event_loop,
    event_bus,
    chaindb_fresh,
    chaindb_churner,
    beam_to_block,
    checkpoint=None,
    VM_at_0=PetersburgVM,
    enable_state_backfill=False,
):

    client_context = ChainContextFactory(headerdb__db=chaindb_fresh.db)
    server_context = ChainContextFactory(headerdb__db=chaindb_churner.db)
    peer_pair = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    backfiller = LatestETHPeerPairFactory(
        alice_peer_context=client_context,
        bob_peer_context=server_context,
        event_bus=event_bus,
    )
    async with peer_pair as (client_peer, server_peer), \
            backfiller as (client2_peer, backfill_peer):

        # Need a name that will be unique per xdist-process, otherwise
        #   lahja IPC endpoints in each process will clobber each other
        unique_process_name = uuid.uuid4()

        # manually add endpoint for beam vm to make requests
        pausing_config = ConnectionConfig.from_name(
            f"PausingEndpoint-{unique_process_name}")

        # manually add endpoint for trie data gatherer to serve requests
        gatherer_config = ConnectionConfig.from_name(
            f"GathererEndpoint-{unique_process_name}")

        client_peer_pool = MockPeerPoolWithConnectedPeers(
            [client_peer, backfill_peer],
            event_bus=event_bus,
        )
        server_peer_pool = MockPeerPoolWithConnectedPeers([server_peer],
                                                          event_bus=event_bus)
        backfill_peer_pool = MockPeerPoolWithConnectedPeers(
            [client2_peer], event_bus=event_bus)

        async with run_peer_pool_event_server(
                event_bus, server_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), run_peer_pool_event_server(
                event_bus, backfill_peer_pool,
                handler_type=ETHPeerPoolEventServer
        ), background_asyncio_service(
                ETHRequestServer(event_bus, TO_NETWORKING_BROADCAST_CONFIG,
                                 AsyncChainDB(chaindb_churner.db))
        ), AsyncioEndpoint.serve(
                pausing_config) as pausing_endpoint, AsyncioEndpoint.serve(
                    gatherer_config) as gatherer_endpoint:

            client_chain = make_pausing_beam_chain(
                ((0, VM_at_0), ),
                chain_id=999,
                consensus_context_class=ConsensusContext,
                db=chaindb_fresh.db,
                event_bus=pausing_endpoint,
                metrics_registry=NoopMetricsRegistry(),
                loop=event_loop,
            )

            client = BeamSyncer(
                client_chain,
                chaindb_fresh.db,
                AsyncChainDB(chaindb_fresh.db),
                client_peer_pool,
                gatherer_endpoint,
                NoopMetricsRegistry(),
                force_beam_block_number=beam_to_block,
                checkpoint=checkpoint,
                enable_state_backfill=enable_state_backfill,
                enable_backfill=False,
            )

            client_peer.logger.info("%s is serving churner blocks", client_peer)
            backfill_peer.logger.info("%s is serving backfill state", backfill_peer)
            server_peer.logger.info("%s is syncing up churner blocks", server_peer)

            import_server = BlockImportServer(
                pausing_endpoint,
                client_chain,
            )
            async with background_asyncio_service(import_server):
                await pausing_endpoint.connect_to_endpoints(gatherer_config)
                async with background_asyncio_service(client):
                    yield client
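
A hypothetical consumer of the fixture above, shown only as a sketch: the test name, beam_to_block=66, and the pytest wiring are invented, and it assumes _beam_syncing is wrapped with contextlib.asynccontextmanager (the decorator is not shown in the snippet).

import pytest


@pytest.mark.asyncio
async def test_beam_sync_smoke(request, event_loop, event_bus,
                               chaindb_fresh, chaindb_churner):
    # Invented test: by the time the fixture yields, the BeamSyncer is already
    # running in the background (background_asyncio_service(client) above).
    async with _beam_syncing(
        request,
        event_loop,
        event_bus,
        chaindb_fresh,
        chaindb_churner,
        beam_to_block=66,
    ) as client:
        assert client is not None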
Example #5
    def __init__(self,
                 influx_server: str = '',
                 influx_user: str = '',
                 influx_password: str = '',
                 influx_database: str = '',
                 host: str = '',
                 reporting_frequency: int = 10):

        self._registry = NoopMetricsRegistry()

    @property
    def registry(self) -> NoopMetricsRegistry:
        """
        Return the :class:`trinity.components.builtin.metrics.registry.NoopMetricsRegistry` at which
        metrics instruments can be registered and retrieved.
        """
        return self._registry

    async def run(self) -> None:
        self.logger.info("Running NoopMetricsService")
        await self.manager.wait_finished()

    async def continuously_report(self) -> None:
        pass

    async def send_annotation(self, annotation_data: str) -> None:
        pass


NOOP_METRICS_SERVICE = NoopMetricsService()
NOOP_METRICS_REGISTRY = NoopMetricsRegistry()
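
The two module-level singletons above give callers an importable default when metrics reporting is disabled. A minimal sketch of that use; only NOOP_METRICS_SERVICE and send_annotation() come from the snippet, while run_node itself is an invented name.

async def run_node(metrics_service=NOOP_METRICS_SERVICE) -> None:
    # With the no-op service this call silently does nothing, so the caller
    # never has to check whether metrics reporting is actually enabled.
    await metrics_service.send_annotation("node started")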
Example #6
    def metrics_registry(self) -> MetricsRegistry:
        if self.sync_metrics_registry:
            return self.sync_metrics_registry.metrics_service.registry
        else:
            return NoopMetricsRegistry()
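
Whichever branch this takes, callers always receive a registry object and never have to branch on whether metrics are enabled. A minimal caller sketch; the metric name is invented, and it assumes the registry follows a pyformance-style interface (counter() returning an object with inc()), which the snippets above do not show.

def record_imported_block(registry) -> None:
    # "sync/imported_blocks" is a made-up metric name. With a real registry the
    # counter is eventually reported; with NoopMetricsRegistry it is simply
    # never reported anywhere (NoopMetricsService.continuously_report is a pass).
    registry.counter("sync/imported_blocks").inc()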