def on_ready(self, manager_eventbus: EndpointAPI) -> None:
    args = self.boot_info.args
    if not args.ethstats:
        return

    if not (args.ethstats_server_url or self.get_default_server_url()):
        self.logger.error(
            'You must provide an ethstats server URL using the `--ethstats-server-url` flag'
        )
        manager_eventbus.broadcast_nowait(
            ShutdownRequest("Missing EthStats Server URL"))
        return

    if not args.ethstats_server_secret:
        self.logger.error(
            'You must provide an ethstats server secret using the `--ethstats-server-secret` flag'
        )
        manager_eventbus.broadcast_nowait(
            ShutdownRequest("Missing EthStats Server Secret"))
        return

    if args.ethstats_server_url:
        self.server_url = args.ethstats_server_url
    else:
        self.server_url = self.get_default_server_url()

    self.server_secret = args.ethstats_server_secret
    self.node_id = args.ethstats_node_id
    self.node_contact = args.ethstats_node_contact
    self.stats_interval = args.ethstats_interval

    self.start()
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    trinity_config = boot_info.trinity_config
    beacon_app_config = trinity_config.get_app_config(BeaconAppConfig)
    chain_config = beacon_app_config.get_chain_config()
    base_db = DBClient.connect(trinity_config.database_ipc_path)

    # TODO: For now we use a fake eth1 monitor.
    # if boot_info.args.eth1client_rpc:
    #     w3: Web3 = Web3.HTTPProvider(boot_info.args.eth1client_rpc)
    # else:
    #     w3: Web3 = None

    # TODO: For now we use a fake eth1 monitor, so we load validator data from
    # the interop setting and hardcode the deposit data into the fake eth1
    # data provider.
    chain = chain_config.beacon_chain_class(base_db, chain_config.genesis_config)
    config = chain.get_state_machine().config
    key_set = load_yaml_at(
        Path("eth2/beacon/scripts/quickstart_state/keygen_16_validators.yaml")
    )
    pubkeys, privkeys, withdrawal_credentials = create_keypair_and_mock_withdraw_credentials(
        config, key_set  # type: ignore
    )
    initial_deposits = (
        create_mock_deposit_data(
            config=config,
            pubkey=pubkey,
            privkey=privkey,
            withdrawal_credentials=withdrawal_credential,
        )
        for pubkey, privkey, withdrawal_credential in zip(
            pubkeys, privkeys, withdrawal_credentials)
    )

    # Set the timestamp of the start block early enough that the eth1 monitor
    # can query up to 2 * `ETH1_FOLLOW_DISTANCE` blocks back at startup.
    start_block_timestamp = (
        chain_config.genesis_data.genesis_time
        - 3 * ETH1_FOLLOW_DISTANCE * AVERAGE_BLOCK_TIME
    )
    with base_db:
        fake_eth1_data_provider = FakeEth1DataProvider(
            start_block_number=START_BLOCK_NUMBER,
            start_block_timestamp=Timestamp(start_block_timestamp),
            num_deposits_per_block=NUM_DEPOSITS_PER_BLOCK,
            initial_deposits=tuple(initial_deposits),
        )
        eth1_monitor_service: Service = Eth1Monitor(
            eth1_data_provider=fake_eth1_data_provider,
            num_blocks_confirmed=NUM_BLOCKS_CONFIRMED,
            polling_period=POLLING_PERIOD,
            start_block_number=BlockNumber(START_BLOCK_NUMBER - 1),
            event_bus=event_bus,
            base_db=base_db,
        )
        try:
            await TrioManager.run_service(eth1_monitor_service)
        except Exception:
            await event_bus.broadcast(
                ShutdownRequest("Eth1 Monitor ended unexpectedly"))
            raise
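# Illustrative arithmetic for the `start_block_timestamp` calculation above.
# This is a standalone sketch, not Trinity code; the constant values here are
# assumptions chosen only to show the shape of the calculation. With a follow
# distance of 1024 blocks and an average block time of 15 seconds, the
# provider's first block is stamped 3 * 1024 * 15 = 46080 seconds before
# genesis, which leaves more than the 2 * ETH1_FOLLOW_DISTANCE blocks of
# history the monitor queries at startup.
ETH1_FOLLOW_DISTANCE = 1024  # assumed value, blocks
AVERAGE_BLOCK_TIME = 15      # assumed value, seconds
genesis_time = 1_600_000_000

start_block_timestamp = genesis_time - 3 * ETH1_FOLLOW_DISTANCE * AVERAGE_BLOCK_TIME
# The available look-back window covers the required 2 * ETH1_FOLLOW_DISTANCE blocks.
assert genesis_time - start_block_timestamp >= 2 * ETH1_FOLLOW_DISTANCE * AVERAGE_BLOCK_TIME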
def request_shutdown(self, reason: str) -> None:
    """
    Perform a graceful shutdown of Trinity. Can be called from any process.
    """
    self.broadcast_nowait(
        ShutdownRequest(reason),
        BroadcastConfig(filter_endpoint=MAIN_EVENTBUS_ENDPOINT),
    )
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    config = boot_info.trinity_config
    db = DBClient.connect(config.database_ipc_path)
    if boot_info.args.disable_discovery:
        discovery_service: async_service.Service = StaticDiscoveryService(
            event_bus,
            config.preferred_nodes,
        )
    else:
        vm_config = config.get_app_config(Eth1AppConfig).get_chain_config().vm_configuration
        headerdb = TrioHeaderDB(db)
        eth_cap_provider = functools.partial(generate_eth_cap_enr_field, vm_config, headerdb)
        socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
        await socket.bind(("0.0.0.0", config.port))
        base_db = LevelDB(config.node_db_dir)
        node_db = NodeDB(default_identity_scheme_registry, base_db)
        discovery_service = PreferredNodeDiscoveryService(
            config.nodekey,
            config.port,
            config.port,
            config.bootstrap_nodes,
            config.preferred_nodes,
            event_bus,
            socket,
            node_db,
            (eth_cap_provider,),
        )
    try:
        with db:
            await async_service.run_trio_service(discovery_service)
    except Exception:
        event_bus.broadcast_nowait(ShutdownRequest("Discovery ended unexpectedly"))
        raise
async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    config = boot_info.trinity_config
    external_ip = "0.0.0.0"
    address = Address(external_ip, config.port, config.port)
    if boot_info.args.disable_discovery:
        discovery_service: Service = StaticDiscoveryService(
            event_bus,
            config.preferred_nodes,
        )
    else:
        socket = trio.socket.socket(family=trio.socket.AF_INET, type=trio.socket.SOCK_DGRAM)
        await socket.bind((external_ip, config.port))
        discovery_service = PreferredNodeDiscoveryService(
            boot_info.trinity_config.nodekey,
            address,
            config.bootstrap_nodes,
            config.preferred_nodes,
            event_bus,
            socket,
        )
    try:
        await TrioManager.run_service(discovery_service)
    except Exception:
        await event_bus.broadcast(ShutdownRequest("Discovery ended unexpectedly"))
        raise
async def _run(self) -> None:
    external_ip = "0.0.0.0"
    address = Address(external_ip, self.trinity_config.port, self.trinity_config.port)
    discovery_protocol = PreferredNodeDiscoveryProtocol(
        self.trinity_config.nodekey,
        address,
        self.trinity_config.bootstrap_nodes,
        self.trinity_config.preferred_nodes,
        self.cancel_token,
    )
    if self.is_discovery_disabled:
        discovery_service: BaseService = StaticDiscoveryService(
            self.event_bus,
            self.trinity_config.preferred_nodes,
            self.cancel_token,
        )
    else:
        discovery_service = DiscoveryService(
            discovery_protocol,
            self.trinity_config.port,
            self.event_bus,
            self.cancel_token,
        )
    try:
        await discovery_service.run()
    except Exception:
        await self.event_bus.broadcast(
            ShutdownRequest("Discovery ended unexpectedly"))
def shutdown_host(self, reason: str) -> None:
    """
    Shut down ``Trinity`` by broadcasting a
    :class:`~trinity.events.ShutdownRequest` on the
    :class:`~lahja.eventbus.EventBus`. The actual shutdown routine is executed
    and coordinated by the main application process, which listens for this
    event.
    """
    self.event_bus.broadcast(
        ShutdownRequest(reason),
        BroadcastConfig(filter_endpoint=MAIN_EVENTBUS_ENDPOINT),
    )
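# A minimal sketch of the receiving side of this protocol (hypothetical, not
# Trinity's actual main-process code): the main process waits for a
# ShutdownRequest and coordinates teardown when one arrives. The in-memory
# asyncio.Queue named `bus` stands in for the lahja EventBus, and the
# dataclass below shadows Trinity's real event class for self-containment.
import asyncio
from dataclasses import dataclass


@dataclass
class ShutdownRequest:
    reason: str


async def main_process(bus: "asyncio.Queue[ShutdownRequest]") -> None:
    # Block until any component requests a shutdown, then coordinate teardown.
    request = await bus.get()
    print(f"Shutting down: {request.reason}")
    # ... stop services, close databases, exit ...


async def demo() -> None:
    bus: "asyncio.Queue[ShutdownRequest]" = asyncio.Queue()
    bus.put_nowait(ShutdownRequest("Sync ended unexpectedly"))
    await main_process(bus)


asyncio.run(demo())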
async def run(self) -> None:
    with self._base_db:
        self.manager.run_daemon_task(self.handle_network_id_requests)
        self.manager.run_daemon_child_service(self.get_p2p_server())
        self.manager.run_daemon_child_service(self.get_event_server())
        self.manager.run_daemon_child_service(self.metrics_service)
        try:
            await self.manager.wait_finished()
        finally:
            self.event_bus.broadcast_nowait(
                ShutdownRequest("Node exiting. Triggering shutdown"))
async def launch_sync(self, node: Node[BasePeer]) -> None:
    await node.events.started.wait()
    await self.active_strategy.sync(
        self.boot_info.args,
        self.logger,
        node.get_chain(),
        node.base_db,
        node.get_peer_pool(),
        self.event_bus,
        node.cancel_token,
    )

    if self.active_strategy.shutdown_node_on_halt:
        self.logger.error("Sync ended unexpectedly. Shutting down trinity")
        await self.event_bus.broadcast(
            ShutdownRequest("Sync ended unexpectedly"))
async def launch_sync(cls,
                      node: Node[BasePeer],
                      strategy: BaseSyncStrategy,
                      boot_info: BootInfo,
                      event_bus: EndpointAPI) -> None:
    await node.get_manager().wait_started()
    await strategy.sync(
        boot_info.args,
        cls.logger,
        node.get_chain(),
        node.base_db,
        node.get_peer_pool(),
        event_bus,
        node.master_cancel_token,
    )

    if strategy.shutdown_node_on_halt:
        cls.logger.error("Sync ended unexpectedly. Shutting down trinity")
        event_bus.broadcast_nowait(
            ShutdownRequest("Sync ended unexpectedly"))
def on_ready(self, manager_eventbus: EndpointAPI) -> None:
    light_mode = self.boot_info.args.sync_mode == SYNC_LIGHT
    is_enabled = self.boot_info.args.tx_pool and not light_mode
    unsupported = self.boot_info.args.tx_pool and light_mode

    if is_enabled and not unsupported:
        self.start()
    elif unsupported:
        unsupported_msg = "Transaction pool not available in light mode"
        self.logger.error(unsupported_msg)
        manager_eventbus.broadcast_nowait(
            ShutdownRequest(unsupported_msg))
def on_ready(self, manager_eventbus: EndpointAPI) -> None:
    if self.boot_info.args.disable_networkdb_component:
        self.logger.warning("Network Database disabled via CLI flag")
        # Allow this component to be disabled for extreme cases such as the
        # user swapping in an equivalent experimental version.
        return
    else:
        try:
            get_tracking_database(get_networkdb_path(self.boot_info.trinity_config))
        except BadDatabaseError as err:
            manager_eventbus.broadcast_nowait(ShutdownRequest(
                "Error loading network database. Try removing the database "
                f"with the `remove-network-db` command:\n{err}"
            ))
        else:
            self.start()
async def _do_run(self) -> None:
    with child_process_logging(self._boot_info):
        endpoint_name = self.get_endpoint_name()
        event_bus_service = AsyncioEventBusService(
            self._boot_info.trinity_config,
            endpoint_name,
        )
        async with background_asyncio_service(event_bus_service):
            event_bus = await event_bus_service.get_event_bus()

            try:
                if self._boot_info.profile:
                    with profiler(f'profile_{self.get_endpoint_name()}'):
                        await self.do_run(event_bus)
                else:
                    # XXX: When open_in_process() injects a KeyboardInterrupt into us
                    # (via coro.throw()), we hang forever here, until open_in_process()
                    # times out and sends us a SIGTERM, at which point we exit without
                    # executing either the except or the finally blocks below.
                    # See https://github.com/ethereum/trinity/issues/1711 for more.
                    await self.do_run(event_bus)
            except KeyboardInterrupt:
                # Currently we never reach this code path, but when we fix the issue
                # above it will be needed.
                return
            except BaseException:
                # Leaving trinity running after a component crashes can lead to
                # unexpected behavior that'd be hard to debug/reproduce, so for now we
                # shut it down if any component crashes unexpectedly.
                event_bus.broadcast_nowait(ShutdownRequest(f"Unexpected error in {self}"))
                # Because of an issue in the ComponentManager (see comment in
                # _cleanup_component_task), when a component crashes and requests
                # trinity to shutdown, there's still a chance its exception could be
                # lost, so we log it here as well.
                self.logger.exception(
                    "Unexpected error in component %s, shutting down trinity", self)
                raise
            finally:
                # Once we start seeing this in the logs after a Ctrl-C, we'll likely
                # have figured out the issue above.
                self.logger.debug("%s: do_run() finished", self)
async def run_process(self, event_bus: EndpointAPI) -> None:
    try:
        if self._boot_info.profile:
            with profiler(f'profile_{self.get_endpoint_name()}'):
                await self.do_run(event_bus)
        else:
            await self.do_run(event_bus)
    except (trio.Cancelled, trio.MultiError):
        # These are expected when trinity is terminating because of a Ctrl-C.
        raise
    except BaseException:
        # Leaving trinity running after a component crashes can lead to unexpected
        # behavior that'd be hard to debug/reproduce, so for now we shut it down if
        # any component crashes unexpectedly.
        event_bus.broadcast_nowait(
            ShutdownRequest(f"Unexpected error in {self}"))
        # Because of an issue in the ComponentManager (see comment in
        # _cleanup_component_task), when a component crashes and requests trinity to
        # shutdown, there's still a chance its exception could be lost, so we log it
        # here as well.
        self.logger.exception(
            "Unexpected error in component %s, shutting down trinity", self)
        raise
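# A generic form of the crash-to-shutdown pattern above (a hypothetical
# sketch, not Trinity code): run a component coroutine, and if it dies with
# anything unexpected, emit a shutdown request before re-raising so the
# original traceback is preserved. The `notify_shutdown` callback stands in
# for the event-bus broadcast.
import asyncio
import logging
from typing import Awaitable, Callable

logger = logging.getLogger("components")


async def run_guarded(name: str,
                      component: Callable[[], Awaitable[None]],
                      notify_shutdown: Callable[[str], None]) -> None:
    try:
        await component()
    except asyncio.CancelledError:
        # Cancellation is the normal termination path; let it propagate.
        raise
    except BaseException:
        # Ask the host to shut down, log locally in case the exception is
        # lost upstream, then re-raise to preserve the traceback.
        notify_shutdown(f"Unexpected error in {name}")
        logger.exception("Unexpected error in component %s", name)
        raise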
def shutdown_host(self, reason: str) -> None:
    self.event_bus.broadcast(
        ShutdownRequest(reason),
        BroadcastConfig(filter_endpoint=MAIN_EVENTBUS_ENDPOINT),
    )
async def _cleanup(self) -> None:
    await self.event_bus.broadcast(
        ShutdownRequest("Node finished unexpectedly"))