Example #1
    def on_ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:
        args = self.context.args

        if not args.ethstats:
            return

        if not (args.ethstats_server_url or self.get_default_server_url()):
            self.logger.error(
                'You must provide the ethstats server URL using the `--ethstats-server-url` flag'
            )
            manager_eventbus.request_shutdown("Missing EthStats Server URL")
            return

        if not args.ethstats_server_secret:
            self.logger.error(
                'You must provide the ethstats server secret using the `--ethstats-server-secret` flag'
            )
            manager_eventbus.request_shutdown("Missing EthStats Server Secret")
            return

        if args.ethstats_server_url:
            self.server_url = args.ethstats_server_url
        else:
            self.server_url = self.get_default_server_url()

        self.server_secret = args.ethstats_server_secret

        self.node_id = args.ethstats_node_id
        self.node_contact = args.ethstats_node_contact
        self.stats_interval = args.ethstats_interval

        self.start()
Example #2
async def launch_node_coro(args: Namespace,
                           trinity_config: TrinityConfig) -> None:
    endpoint = TrinityEventBusEndpoint()
    NodeClass = trinity_config.get_app_config(Eth1AppConfig).node_class
    node = NodeClass(endpoint, trinity_config)

    networking_connection_config = ConnectionConfig.from_name(
        NETWORKING_EVENTBUS_ENDPOINT, trinity_config.ipc_dir)

    await endpoint.start_serving(networking_connection_config)
    endpoint.auto_connect_new_announced_endpoints()
    await endpoint.connect_to_endpoints(
        ConnectionConfig.from_name(MAIN_EVENTBUS_ENDPOINT,
                                   trinity_config.ipc_dir),
        # Plugins that run within the networking process broadcast and receive on
        # the same endpoint
        networking_connection_config,
    )
    await endpoint.announce_endpoint()

    # This is a second PluginManager instance governing plugins in a shared process.
    plugin_manager = PluginManager(SharedProcessScope(endpoint),
                                   get_all_plugins())
    plugin_manager.prepare(args, trinity_config)

    asyncio.ensure_future(
        handle_networking_exit(node, plugin_manager, endpoint))
    asyncio.ensure_future(node.run())
Example #3
async def clean_up_endpoint(endpoint: TrinityEventBusEndpoint) -> None:
    """
    Used when the event bus is the only thing to exit. This should probably
    be changed when lahja is more sync-friendly.
    """
    loop = asyncio.get_event_loop()
    async with exit_signal(loop):
        endpoint.stop()
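A hedged usage sketch: the coroutine above is presumably scheduled on the running loop alongside the endpoint it guards, so the endpoint gets stopped once an exit signal arrives. The surrounding setup is illustrative, not taken from the Trinity source.

endpoint = TrinityEventBusEndpoint()
# Stop the endpoint on SIGINT/SIGTERM; nothing else needs to be shut down here.
asyncio.ensure_future(clean_up_endpoint(endpoint))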
Example #4
async def handle_networking_exit(service: BaseService,
                                 plugin_manager: PluginManager,
                                 endpoint: TrinityEventBusEndpoint) -> None:

    async with exit_signal_with_service(service):
        await plugin_manager.shutdown()
        endpoint.stop()
        # Retrieve and shutdown the global executor that was created at startup
        ensure_global_asyncio_executor().shutdown(wait=True)
Example #5
async def event_bus(event_loop):
    endpoint = TrinityEventBusEndpoint()
    # Tests run concurrently, therefore we need unique IPC paths
    ipc_path = Path(f"networking-{uuid.uuid4()}.ipc")
    networking_connection_config = ConnectionConfig(
        name=NETWORKING_EVENTBUS_ENDPOINT, path=ipc_path)
    await endpoint.start_serving(networking_connection_config, event_loop)
    await endpoint.connect_to_endpoints(networking_connection_config)
    try:
        yield endpoint
    finally:
        endpoint.stop()
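The function above is an async generator fixture; in the original test suite it is presumably registered with a @pytest.fixture (or pytest_asyncio.fixture) decorator that is not shown here. A minimal, hypothetical consumer, assuming pytest-asyncio is installed:

import pytest

@pytest.mark.asyncio
async def test_event_bus_fixture(event_bus):
    # `event_bus` is the connected TrinityEventBusEndpoint yielded by the fixture above.
    # A real test would broadcast or subscribe to lahja events; the assertion is a placeholder.
    assert event_bus is not None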
Example #6
    def on_ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:

        light_mode = self.context.args.sync_mode == SYNC_LIGHT
        self.is_enabled = self.context.args.tx_pool and not light_mode

        unsupported = self.context.args.tx_pool and light_mode

        if unsupported:
            unsupported_msg = "Transaction pool not available in light mode"
            self.logger.error(unsupported_msg)
            manager_eventbus.request_shutdown(unsupported_msg)

        self.event_bus.subscribe(ResourceAvailableEvent, self.handle_event)
Example #7
    def on_ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:

        light_mode = self.boot_info.args.sync_mode == SYNC_LIGHT
        is_enabled = self.boot_info.args.tx_pool and not light_mode

        unsupported = self.boot_info.args.tx_pool and light_mode

        if is_enabled and not unsupported:
            self.start()
        elif unsupported:
            unsupported_msg = "Transaction pool not available in light mode"
            self.logger.error(unsupported_msg)
            manager_eventbus.request_shutdown(unsupported_msg)
Example #8
def _broadcast_import_complete(
        event_bus: TrinityEventBusEndpoint, block: BaseBlock,
        broadcast_config: BroadcastConfig,
        future: 'asyncio.Future[ImportBlockType]') -> None:
    completed = not future.cancelled()
    event_bus.broadcast_nowait(
        StatelessBlockImportDone(
            block,
            completed,
            future.result() if completed else None,
            future.exception() if completed else None,
        ),
        broadcast_config,
    )
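Example #15 below calls this helper directly once the import future has resolved. Because the future is the last positional parameter, it can also be attached as a done-callback; the wrapper below is a hypothetical sketch (its name and arguments are assumptions, not part of the Trinity API).

from functools import partial


def schedule_import_broadcast(event_bus, block, broadcast_config, import_future):
    # When the executor future finishes, forward its outcome over the event bus
    # using _broadcast_import_complete from the example above.
    import_future.add_done_callback(
        partial(_broadcast_import_complete, event_bus, block, broadcast_config))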
Example #9
    def on_ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:
        if self.boot_info.args.disable_networkdb_plugin:
            self.logger.warning("Network Database disabled via CLI flag")
            # Allow this plugin to be disabled for extreme cases such as the
            # user swapping in an equivalent experimental version.
            return
        else:
            try:
                get_tracking_database(
                    get_networkdb_path(self.boot_info.trinity_config))
            except BadDatabaseError as err:
                manager_eventbus.request_shutdown(
                    "Error loading network database. Try removing the database "
                    f"with the `remove-network-db` command:\n{err}")
            else:
                self.start()
Example #10
async def monitoring(normalized_name: str,
                     trinity_config: TrinityConfig) -> None:
    event_bus = TrinityEventBusEndpoint("monitoring_ui")
    connection_config = ConnectionConfig.from_name(
        normalized_name,
        trinity_config.ipc_dir,
    )
    await event_bus.start()
    await event_bus.start_server(connection_config.path)
    await event_bus.connect_to_endpoints(
        ConnectionConfig.from_name(MAIN_EVENTBUS_ENDPOINT,
                                   trinity_config.ipc_dir))
    await event_bus.announce_endpoint()
    await event_bus.broadcast(PluginStartedEvent(type(MonitoringPlugin)))

    asyncio.ensure_future(event_bus.auto_connect_new_announced_endpoints())
    event_bus.subscribe(NewBlockHashesEvent,
                        lambda event: logging.info(event.msg))
Example #11
def run_service(ready_to_kill_event):
    loop = asyncio.get_event_loop()

    endpoint = TrinityEventBusEndpoint()
    service = SimpleService(ready_to_kill_event, loop=loop)

    asyncio.ensure_future(exit_with_endpoint_and_services(endpoint, service))
    asyncio.ensure_future(service.run())

    loop.run_forever()
    loop.close()

    assert service.is_cancelled
    assert endpoint._running is False
Example #12
def launch_node(args: Namespace, trinity_config: TrinityConfig) -> None:
    with trinity_config.process_id_file('networking'):

        endpoint = TrinityEventBusEndpoint()
        NodeClass = trinity_config.get_app_config(Eth1AppConfig).node_class
        node = NodeClass(endpoint, trinity_config)
        # The `networking` process creates a process pool executor to offload CPU-intensive
        # tasks. We should revisit that when we move the sync into its own process.
        ensure_global_asyncio_executor()
        loop = node.get_event_loop()

        networking_connection_config = ConnectionConfig.from_name(
            NETWORKING_EVENTBUS_ENDPOINT, trinity_config.ipc_dir)
        endpoint.start_serving_nowait(
            networking_connection_config,
            loop,
        )
        endpoint.auto_connect_new_announced_endpoints()
        endpoint.connect_to_endpoints_blocking(
            ConnectionConfig.from_name(MAIN_EVENTBUS_ENDPOINT,
                                       trinity_config.ipc_dir),
            # Plugins that run within the networking process broadcast and receive on
            # the same endpoint
            networking_connection_config,
        )
        endpoint.announce_endpoint()
        # This is a second PluginManager instance governing plugins in a shared process.
        plugin_manager = setup_plugins(SharedProcessScope(endpoint),
                                       get_all_plugins())
        plugin_manager.prepare(args, trinity_config)

        asyncio.ensure_future(handle_networking_exit(node, plugin_manager,
                                                     endpoint),
                              loop=loop)
        asyncio.ensure_future(node.run(), loop=loop)
        loop.run_forever()
        loop.close()
Example #13
    def create_plugin_context(self, plugin: BasePlugin,
                              boot_info: TrinityBootInfo) -> None:
        """
        Create a :class:`~trinity.extensibility.plugin.PluginContext` with a new
        :class:`~lahja.endpoint.Endpoint` dedicated to the isolated plugin that runs in its own
        process. The :class:`~lahja.endpoint.Endpoint` enables application-wide event-driven
        communication even across process boundaries.
        """

        if isinstance(plugin, BaseIsolatedPlugin):
            # Isolated plugins use their own Endpoint that lives in the new process. It is only
            # created here for API symmetry. Endpoints are pickleable *before* they are connected,
            # which means this Endpoint will be pickled and transferred into the new process
            # together with the rest of the `PluginContext`.
            plugin.set_context(
                PluginContext(
                    TrinityEventBusEndpoint(),
                    boot_info,
                ))
Example #14
async def launch_node_coro(args: Namespace,
                           trinity_config: TrinityConfig) -> None:
    networking_connection_config = ConnectionConfig.from_name(
        NETWORKING_EVENTBUS_ENDPOINT, trinity_config.ipc_dir)
    async with TrinityEventBusEndpoint.serve(
            networking_connection_config) as endpoint:
        NodeClass = trinity_config.get_app_config(Eth1AppConfig).node_class
        node = NodeClass(endpoint, trinity_config)

        asyncio.ensure_future(endpoint.auto_connect_new_announced_endpoints())
        await endpoint.connect_to_endpoints(
            ConnectionConfig.from_name(MAIN_EVENTBUS_ENDPOINT,
                                       trinity_config.ipc_dir), )
        await endpoint.announce_endpoint()

        # This is a second PluginManager instance governing plugins in a shared process.
        plugin_manager = PluginManager(SharedProcessScope(endpoint),
                                       get_plugins_for_eth1_client())
        plugin_manager.prepare(args, trinity_config)

        asyncio.ensure_future(
            handle_networking_exit(node, plugin_manager, endpoint))
        await node.run()
Example #15
    async def serve(self, event_bus: TrinityEventBusEndpoint,
                    beam_chain: BaseAsyncChain) -> None:
        """
        Listen to DoStatelessBlockImport events, and import block when received.
        Reply with StatelessBlockImportDone when import is complete.
        """

        async for event in self.wait_iter(
                event_bus.stream(DoStatelessBlockImport)):
            # launch in new thread, so we don't block the event loop!
            import_completion = asyncio.get_event_loop().run_in_executor(
                # Maybe build the pausing chain inside the new process?
                None,
                partial(
                    beam_chain.import_block,
                    event.block,
                    perform_validation=True,
                ),
            )

            # Intentionally don't use .wait() below, because we want to hang the service from
            #   shutting down until block import is complete.
            # In the tests, for example, we await cancel() on this service, so that we know
            #   that the in-progress block is complete. Then below, we do not send back
            #   the import completion (so the import server won't get triggered again).
            await import_completion

            if self.is_running:
                _broadcast_import_complete(  # type: ignore
                    event_bus,
                    event.block,
                    event.broadcast_config(),
                    import_completion,
                )
            else:
                break
Example #16
async def exit_with_endpoint_and_services(
        endpoint: TrinityEventBusEndpoint,
        *services_to_exit: BaseService) -> None:
    async with exit_signal_with_services(*services_to_exit):
        endpoint.stop()
Example #17
    def event_bus(self) -> TrinityEventBusEndpoint:
        if self._event_bus is None:
            self._event_bus = TrinityEventBusEndpoint(self.normalized_name)
        return self._event_bus
Example #18
    def event_bus(self) -> TrinityEventBusEndpoint:
        if self._event_bus is None:
            self._event_bus = TrinityEventBusEndpoint()
        return self._event_bus
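Examples #17 and #18 show the same lazy-initialization accessor, with and without a normalized name; in the original classes these are presumably decorated with @property (the decorator is not shown here). A self-contained sketch of the pattern, with an illustrative class name:

from typing import Optional


class EventBusHolder:
    # TrinityEventBusEndpoint is assumed to be importable from Trinity, as in the examples above.

    def __init__(self, normalized_name: str) -> None:
        self.normalized_name = normalized_name
        self._event_bus: Optional[TrinityEventBusEndpoint] = None

    @property
    def event_bus(self) -> TrinityEventBusEndpoint:
        # Create the endpoint on first access and reuse the same instance afterwards.
        if self._event_bus is None:
            self._event_bus = TrinityEventBusEndpoint(self.normalized_name)
        return self._event_bus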
Example #19
async def exit_with_service_and_endpoint(service_to_exit: BaseService,
                                         endpoint: TrinityEventBusEndpoint) -> None:
    async with exit_signal_with_service(service_to_exit):
        endpoint.stop()