async def do_run(cls, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Build the appropriate discovery service and run it until it exits.

    Picks a static service when discovery is disabled via CLI, otherwise a
    full UDP-based preferred-node discovery service.  If the service dies
    with an error, a shutdown of trinity is requested before re-raising.
    """
    trinity_config = boot_info.trinity_config
    chain_db = DBClient.connect(trinity_config.database_ipc_path)

    if boot_info.args.disable_discovery:
        # Discovery turned off: only ever talk to the preferred nodes.
        discovery_service: async_service.Service = StaticDiscoveryService(
            event_bus,
            trinity_config.preferred_nodes,
        )
    else:
        app_config = trinity_config.get_app_config(Eth1AppConfig)
        vm_config = app_config.get_chain_config().vm_configuration
        header_db = TrioHeaderDB(chain_db)
        # ENR field provider advertising our eth capabilities.
        eth_cap_provider = functools.partial(
            generate_eth_cap_enr_field,
            vm_config,
            header_db,
        )
        udp_socket = trio.socket.socket(
            family=trio.socket.AF_INET,
            type=trio.socket.SOCK_DGRAM,
        )
        await udp_socket.bind(("0.0.0.0", trinity_config.port))
        node_db = NodeDB(
            default_identity_scheme_registry,
            LevelDB(trinity_config.node_db_dir),
        )
        discovery_service = PreferredNodeDiscoveryService(
            trinity_config.nodekey,
            trinity_config.port,
            trinity_config.port,
            trinity_config.bootstrap_nodes,
            trinity_config.preferred_nodes,
            event_bus,
            udp_socket,
            node_db,
            (eth_cap_provider,),
        )

    try:
        with chain_db:
            await async_service.run_trio_service(discovery_service)
    except Exception:
        # Any crash of discovery takes the whole node down.
        event_bus.broadcast_nowait(ShutdownRequest("Discovery ended unexpectedly"))
        raise
async def launch_sync(cls, node: Node[BasePeer], strategy: BaseSyncStrategy, boot_info: BootInfo, event_bus: EndpointAPI) -> None:
    """Wait for the node to come up, then drive the sync strategy to completion.

    If the strategy is configured to halt the node when it finishes, a
    trinity shutdown is requested after the sync returns.
    """
    await node.get_manager().wait_started()
    await strategy.sync(
        boot_info.args,
        cls.logger,
        node.get_chain(),
        node.base_db,
        node.get_peer_pool(),
        event_bus,
        node.master_cancel_token,
    )

    if not strategy.shutdown_node_on_halt:
        return

    # For this strategy, sync returning at all is unexpected — take trinity down.
    cls.logger.error("Sync ended unexpectedly. Shutting down trinity")
    event_bus.broadcast_nowait(ShutdownRequest("Sync ended unexpectedly"))
def on_ready(self, manager_eventbus: EndpointAPI) -> None:
    """Start the transaction pool if enabled, or abort trinity when it was
    requested together with light sync (which cannot support it)."""
    wants_tx_pool = self.boot_info.args.tx_pool
    light_mode = self.boot_info.args.sync_mode == SYNC_LIGHT

    if wants_tx_pool and light_mode:
        # The pool needs full block data, which light mode doesn't have.
        msg = "Transaction pool not available in light mode"
        self.logger.error(msg)
        manager_eventbus.broadcast_nowait(ShutdownRequest(msg))
    elif wants_tx_pool:
        self.start()
def _broadcast_import_complete(
        event_bus: EndpointAPI,
        block: BlockAPI,
        broadcast_config: BroadcastConfig,
        future: 'asyncio.Future[BlockImportResult]') -> None:
    """Publish the outcome of a finished block-import future on the event bus.

    Intended as an ``add_done_callback``-style hook: inspects the completed
    ``future`` and broadcasts a ``StatelessBlockImportDone`` event that
    carries either the import result or the exception that ended it.

    :param event_bus: endpoint used to broadcast the completion event
    :param block: the block whose import finished
    :param broadcast_config: routing config for the broadcast
    :param future: the finished (done) import future to inspect
    """
    completed = not future.cancelled()
    # BUGFIX: ``future.result()`` re-raises the future's exception, so it must
    # only be called when the future finished cleanly.  The previous code
    # called it whenever the future was not cancelled, which made a failed
    # import raise here in the callback instead of being reported via the
    # event.  Check ``exception()`` first and only then fetch the result.
    exception = future.exception() if completed else None
    result = future.result() if completed and exception is None else None
    event_bus.broadcast_nowait(
        StatelessBlockImportDone(
            block,
            completed,
            result,
            exception,
        ),
        broadcast_config,
    )
def on_ready(self, manager_eventbus: EndpointAPI) -> None:
    """Check that the network tracking database is loadable, then start.

    Honors the CLI escape hatch that disables the component entirely; if
    the database is corrupt, requests a trinity shutdown with a hint on
    how to recover instead of starting.
    """
    if self.boot_info.args.disable_networkdb_component:
        # Allow this component to be disabled for extreme cases such as the
        # user swapping in an equivalent experimental version.
        self.logger.warning("Network Database disabled via CLI flag")
        return
    try:
        get_tracking_database(get_networkdb_path(self.boot_info.trinity_config))
    except BadDatabaseError as err:
        # BUGFIX: corrected the grammar of the user-facing recovery hint
        # ("Trying removing" -> "Try removing").
        manager_eventbus.broadcast_nowait(ShutdownRequest(
            "Error loading network database. Try removing database "
            f"with `remove-network-db` command:\n{err}"
        ))
    else:
        self.start()
async def run_process(self, event_bus: EndpointAPI) -> None:
    """Execute the component's ``do_run`` coroutine, optionally under a profiler.

    Cancellation is propagated untouched; any other crash requests a full
    trinity shutdown (and is logged) before being re-raised.
    """
    try:
        if not self._boot_info.profile:
            await self.do_run(event_bus)
        else:
            with profiler(f'profile_{self.get_endpoint_name()}'):
                await self.do_run(event_bus)
    except (trio.Cancelled, trio.MultiError):
        # Expected while trinity is terminating because of a Ctrl-C.
        raise
    except BaseException:
        # A crashed component leaves trinity in a hard-to-debug/reproduce
        # state, so for now any unexpected crash takes the whole node down.
        event_bus.broadcast_nowait(
            ShutdownRequest(f"Unexpected error in {self}"))
        # The ComponentManager may lose this exception during shutdown (see
        # the comment in _cleanup_component_task), so log it here as well.
        self.logger.exception(
            "Unexpected error in component %s, shutting down trinity", self)
        raise