Example #1
async def test_asyncio_isolated_component(boot_info, log_listener):
    # Test the lifecycle management for isolated process components to be sure
    # they start and stop as expected
    component_manager = ComponentManager(boot_info,
                                         (AsyncioComponentForTest, ),
                                         lambda reason: None)

    async with background_asyncio_service(component_manager):
        event_bus = await component_manager.get_event_bus()

        got_started = asyncio.Future()

        event_bus.subscribe(IsStarted,
                            lambda ev: got_started.set_result(ev.path))

        touch_path = await asyncio.wait_for(got_started, timeout=10)
        assert not touch_path.exists()
        component_manager.shutdown('exiting component manager')

    for _ in range(10000):
        if not touch_path.exists():
            await asyncio.sleep(0.001)
        else:
            break
    else:
        assert touch_path.exists()
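The final loop uses Python's for/else idiom: the else branch runs only when the loop exhausts all attempts without hitting break, so the assertion fires only if the touch file never appeared. A minimal standalone sketch of the same polling idiom (the wait_for_file helper is hypothetical, not part of the test suite):

import asyncio
from pathlib import Path

async def wait_for_file(path: Path, attempts: int = 10_000,
                        interval: float = 0.001) -> None:
    # for/else: the else branch only runs when every attempt is
    # exhausted without a break, i.e. the file never appeared.
    for _ in range(attempts):
        if path.exists():
            break
        await asyncio.sleep(interval)
    else:
        raise AssertionError(f"{path} was never created")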
Example #2
async def test_isolated_component(boot_info, log_listener, component, request,
                                  monkeypatch):
    # On overloaded CI machines it can sometimes take a while for a component's process to start,
    # so we need a high timeout here.
    component_timeout = 10
    monkeypatch.setenv('ASYNCIO_RUN_IN_PROCESS_STARTUP_TIMEOUT',
                       str(component_timeout))
    # Test the lifecycle management for isolated process components to be sure
    # they start and stop as expected
    component_manager = ComponentManager(boot_info, (component, ))

    async with background_asyncio_service(component_manager) as cm_manager:
        event_bus = await component_manager.get_event_bus()

        got_started = asyncio.Future()

        event_bus.subscribe(IsStarted,
                            lambda ev: got_started.set_result(ev.path))

        touch_path = await asyncio.wait_for(got_started,
                                            timeout=component_timeout)

        def delete_touch_path():
            if touch_path.exists():
                touch_path.unlink()

        request.addfinalizer(delete_touch_path)
        assert not touch_path.exists()
        component_manager.shutdown('exiting component manager')
        await cm_manager.wait_finished()

    assert touch_path.exists()
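The monkeypatch.setenv call raises the startup timeout that asyncio-run-in-process reads from the environment; pytest reverts the change automatically when the test finishes. A minimal sketch of the pattern (the test name is hypothetical):

def test_with_high_startup_timeout(monkeypatch):
    # setenv applies only for the duration of this test and is undone
    # automatically; processes spawned here inherit the patched value.
    monkeypatch.setenv('ASYNCIO_RUN_IN_PROCESS_STARTUP_TIMEOUT', '10')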
Example #3
async def test_isolated_component(boot_info, log_listener, component, request):
    # Test the lifecycle management for isolated process components to be sure
    # they start and stop as expected
    component_manager = ComponentManager(boot_info, (component, ))

    async with background_asyncio_service(component_manager) as cm_manager:
        event_bus = await component_manager.get_event_bus()

        got_started = asyncio.Future()

        event_bus.subscribe(IsStarted,
                            lambda ev: got_started.set_result(ev.path))

        touch_path = await asyncio.wait_for(got_started, timeout=10)

        def delete_touch_path():
            if touch_path.exists():
                touch_path.unlink()

        request.addfinalizer(delete_touch_path)
        assert not touch_path.exists()
        component_manager.shutdown('exiting component manager')
        await cm_manager.wait_finished()

    assert touch_path.exists()
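request.addfinalizer registers cleanup that runs even when the test body fails, which keeps the touch file from leaking between tests. A minimal sketch of the same pattern as a reusable fixture (the fixture name and file name are hypothetical):

import pytest

@pytest.fixture
def touch_path(tmp_path, request):
    path = tmp_path / "component-started"

    def cleanup():
        # Finalizers run regardless of the test outcome, so the file
        # cannot survive into subsequent tests.
        if path.exists():
            path.unlink()

    request.addfinalizer(cleanup)
    return path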
Example #4
class ComponentManagerService(BaseService):
    _endpoint: EndpointAPI

    def __init__(self,
                 trinity_boot_info: TrinityBootInfo,
                 components: Sequence[Type[BaseComponent]],
                 kill_trinity_fn: Callable[[str], Any],
                 cancel_token: CancelToken = None,
                 loop: asyncio.AbstractEventLoop = None) -> None:
        self._boot_info = trinity_boot_info
        self._components = components
        self._kill_trinity_fn = kill_trinity_fn
        super().__init__(cancel_token, loop)

    async def _run(self) -> None:
        self._connection_config = ConnectionConfig.from_name(
            MAIN_EVENTBUS_ENDPOINT, self._boot_info.trinity_config.ipc_dir)
        async with AsyncioEndpoint.serve(self._connection_config) as endpoint:
            self._endpoint = endpoint

            # start the background process that tracks and propagates available
            # endpoints to the other connected endpoints
            self.run_daemon_task(
                self._track_and_propagate_available_endpoints())
            self.run_daemon_task(self._handle_shutdown_request())

            # start the component manager
            self.component_manager = ComponentManager(endpoint,
                                                      self._components)
            self.component_manager.prepare(self._boot_info)
            await self.cancellation()

    async def _handle_shutdown_request(self) -> None:
        req = await self.wait(self._endpoint.wait_for(ShutdownRequest))
        self._kill_trinity_fn(req.reason)
        self.cancel_nowait()

    async def _cleanup(self) -> None:
        self.component_manager.shutdown_blocking()

    _available_endpoints: Tuple[ConnectionConfig, ...] = ()

    async def _track_and_propagate_available_endpoints(self) -> None:
        """
        Track new announced endpoints and propagate them across all other existing endpoints.
        """
        async for ev in self.wait_iter(
                self._endpoint.stream(EventBusConnected)):
            self._available_endpoints = self._available_endpoints + (
                ev.connection_config, )
            self.logger.debug("New EventBus Endpoint connected %s",
                              ev.connection_config.name)
            # Broadcast available endpoints to all connected endpoints, giving them
            # a chance to cross connect
            await self._endpoint.broadcast(
                AvailableEndpointsUpdated(self._available_endpoints))
            self.logger.debug("Connected EventBus Endpoints %s",
                              self._available_endpoints)
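_track_and_propagate_available_endpoints accumulates every announced endpoint and rebroadcasts the full set so that newly connected endpoints can cross-connect. A stripped-down sketch of that accumulate-and-rebroadcast loop, with a plain asyncio.Queue and a hypothetical broadcast callback standing in for the lahja endpoint:

import asyncio
from typing import Awaitable, Callable, Tuple

async def track_and_propagate(
        announcements: "asyncio.Queue[str]",
        broadcast: Callable[[Tuple[str, ...]], Awaitable[None]]) -> None:
    known: Tuple[str, ...] = ()
    while True:
        name = await announcements.get()
        known = known + (name,)
        # Rebroadcast the complete set so every peer can cross-connect.
        await broadcast(known)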
Example #5
    async def _run(self) -> None:
        self._connection_config = ConnectionConfig.from_name(
            MAIN_EVENTBUS_ENDPOINT, self._boot_info.trinity_config.ipc_dir)
        async with AsyncioEndpoint.serve(self._connection_config) as endpoint:
            self._endpoint = endpoint

            # start the background process that tracks and propagates available
            # endpoints to the other connected endpoints
            self.run_daemon_task(
                self._track_and_propagate_available_endpoints())
            self.run_daemon_task(self._handle_shutdown_request())

            # start the component manager
            self.component_manager = ComponentManager(endpoint,
                                                      self._components)
            self.component_manager.prepare(self._boot_info)
            await self.cancellation()
Example #6
def run(component_types: Tuple[Type[BaseComponentAPI], ...],
        boot_info: BootInfo,
        get_base_db_fn: Callable[[BootInfo], LevelDB]) -> None:
    runtime_component_types = tuple(
        cast(Type[BaseIsolatedComponent], component_cls)
        for component_cls in component_types
        if issubclass(component_cls, ComponentAPI))

    trinity_config = boot_info.trinity_config

    component_manager_service = ComponentManager(
        boot_info,
        runtime_component_types,
    )
    component_manager_manager = AsyncioManager(component_manager_service)

    loop = asyncio.get_event_loop()
    loop.add_signal_handler(
        signal.SIGTERM,
        component_manager_manager.cancel,
        'SIGTERM',
    )
    loop.add_signal_handler(
        signal.SIGINT,
        component_manager_service.shutdown,
        'CTRL+C',
    )

    logger = logging.getLogger()
    try:
        loop.run_until_complete(
            _run(boot_info, get_base_db_fn, component_manager_manager))
    except BaseException:
        logger.exception("Error during trinity run")
        raise
    finally:
        reason = component_manager_service.reason
        hint = f" ({reason})" if reason else f""
        logger.info('Shutting down Trinity%s', hint)
        remove_dangling_ipc_files(logger, trinity_config.ipc_dir)
        argparse.ArgumentParser().exit(
            message=f"Trinity shutdown complete{hint}\n")
        if trinity_config.trinity_tmp_root_dir:
            shutil.rmtree(trinity_config.trinity_root_dir)
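The two loop.add_signal_handler calls route SIGTERM and SIGINT into the service's cancel/shutdown methods. Unlike signal.signal, add_signal_handler (Unix only) invokes the callback from inside the event loop, so touching asyncio objects from it is safe. A self-contained sketch of the mechanism:

import asyncio
import signal

async def main() -> None:
    loop = asyncio.get_running_loop()
    stop = asyncio.Event()
    # The callbacks run inside the event loop, not in a signal context.
    for sig in (signal.SIGTERM, signal.SIGINT):
        loop.add_signal_handler(sig, stop.set)
    await stop.wait()

asyncio.run(main())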
Example #7
async def test_isolated_component_crash(boot_info, log_listener, component):
    component_manager = ComponentManager(boot_info, (component, ))
    async with background_asyncio_service(component_manager):
        event_bus = await component_manager.get_event_bus()
        component_started = asyncio.Event()
        event_bus.subscribe(IsStarted, lambda ev: component_started.set())
        await asyncio.wait_for(component_started.wait(), timeout=10)
        try:
            await asyncio.wait_for(
                component_manager._trigger_component_exit.wait(), timeout=1)
        except asyncio.TimeoutError:
            # XXX: For some reason, when this test fails, this AssertionError gets
            # somewhat obfuscated in the RemoteTraceback raised by
            # asyncio-run-in-process, but the traceback itself points to this line
            # as the cause of the failure.
            raise AssertionError(
                "ComponentManager did not get ShutdownRequest")

        # Sleep a bit to give the component a chance to terminate, otherwise when shutting down
        # the ComponentManager we end up double-killing it, which leads to asyncio warnings.
        await asyncio.sleep(0.5)
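The try/except around asyncio.wait_for converts a bare TimeoutError into an assertion with a readable message. The same translation as a hypothetical reusable helper:

import asyncio

async def assert_event_within(event: asyncio.Event, timeout: float,
                              message: str) -> None:
    try:
        await asyncio.wait_for(event.wait(), timeout=timeout)
    except asyncio.TimeoutError:
        # Surface a descriptive failure instead of a bare TimeoutError.
        raise AssertionError(message) from None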
Example #8
def _run_asyncio_components(component_types: Tuple[Type[BaseComponentAPI], ...],
                            boot_info: BootInfo,
                            processes: Tuple[multiprocessing.Process, ...]) -> None:
    runtime_component_types = tuple(
        component_cls
        for component_cls in component_types
        if issubclass(component_cls, ComponentAPI)
    )

    trinity_config = boot_info.trinity_config

    component_manager_service = ComponentManager(
        boot_info,
        runtime_component_types,
    )
    manager = AsyncioManager(component_manager_service)

    loop = asyncio.get_event_loop()
    loop.add_signal_handler(
        signal.SIGTERM,
        manager.cancel,
        'SIGTERM',
    )
    loop.add_signal_handler(
        signal.SIGINT,
        component_manager_service.shutdown,
        'CTRL+C',
    )

    try:
        loop.run_until_complete(manager.run())
    except BaseException as err:
        logger = logging.getLogger()
        logger.error("Error during trinity run: %r", err)
        raise
    finally:
        kill_trinity_with_reason(trinity_config, processes, component_manager_service.reason)
        if trinity_config.trinity_tmp_root_dir:
            shutil.rmtree(trinity_config.trinity_root_dir)
Example #9
async def test_isolated_component_crash(boot_info, log_listener, component,
                                        monkeypatch):
    # On overloaded CI machines it can sometimes take a while for a component's process to start,
    # so we need a high timeout here.
    component_timeout = 10
    monkeypatch.setenv('ASYNCIO_RUN_IN_PROCESS_STARTUP_TIMEOUT',
                       str(component_timeout))
    component_manager = ComponentManager(boot_info, (component, ))
    with pytest.raises(ComponentException):
        async with background_asyncio_service(component_manager):
            event_bus = await component_manager.get_event_bus()
            component_started = asyncio.Event()
            event_bus.subscribe(IsStarted, lambda ev: component_started.set())
            await asyncio.wait_for(component_started.wait(),
                                   timeout=component_timeout)
            try:
                await asyncio.wait_for(
                    component_manager.manager.wait_finished(), timeout=1)
            except asyncio.TimeoutError:
                # XXX: For some reason, when this test fails, this AssertionError
                # gets somewhat obfuscated in the RemoteTraceback raised by
                # asyncio-run-in-process, but the traceback itself points to this
                # line as the cause of the failure.
                raise AssertionError("ComponentManager did not stop")
Example #10
def main_entry(trinity_boot: BootFn,
               app_identifier: str,
               component_types: Tuple[Type[BaseComponentAPI], ...],
               sub_configs: Sequence[Type[BaseAppConfig]]) -> None:
    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    for component_cls in component_types:
        component_cls.configure_parser(parser, subparser)

    argcomplete.autocomplete(parser)

    args = parser.parse_args()

    if not args.genesis and args.network_id not in PRECONFIGURED_NETWORKS:
        parser.error(
            f"Unsupported network id: {args.network_id}. To use a network besides "
            "mainnet or ropsten, you must supply a genesis file with a flag, like "
            "`--genesis path/to/genesis.json`, also you must specify a data "
            "directory with `--data-dir path/to/data/directory`"
        )

    # The `common_log_level` is derived from `--log-level <Level>` / `-l <Level>` without
    # specifying any module. If present, it is used for both `stderr` and `file` logging.
    common_log_level = args.log_levels and args.log_levels.get(None)
    has_ambigous_logging_config = ((
        common_log_level is not None and
        args.stderr_log_level is not None
    ) or (
        common_log_level is not None and
        args.file_log_level is not None
    ))

    if has_ambigous_logging_config:
        parser.error(
            f"""\n
            Ambiguous logging configuration: The `--log-level (-l)` flag sets the
            log level for both file and stderr logging.
            To configure different log level for file and stderr logging,
            remove the `--log-level` flag and use `--stderr-log-level` and/or
            `--file-log-level` separately.
            Alternatively, remove the `--stderr-log-level` and/or `--file-log-level`
            flags to share one single log level across both handlers.
            """
        )

    try:
        trinity_config = TrinityConfig.from_parser_args(args, app_identifier, sub_configs)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path"
            )

    # +---------------+
    # | LOGGING SETUP |
    # +---------------+

    # Setup logging to stderr
    stderr_logger_level = (
        args.stderr_log_level
        if args.stderr_log_level is not None
        else (common_log_level if common_log_level is not None else logging.INFO)
    )
    handler_stderr = setup_stderr_logging(stderr_logger_level)

    # Setup file based logging
    file_logger_level = (
        args.file_log_level
        if args.file_log_level is not None
        else (common_log_level if common_log_level is not None else logging.DEBUG)
    )
    handler_file = setup_file_logging(trinity_config.logfile_path, file_logger_level)

    # Set the individual logger levels that have been specified.
    logger_levels = {} if args.log_levels is None else args.log_levels
    set_logger_levels(logger_levels)

    # get the root logger and set it to the level of the stderr logger.
    logger = logging.getLogger()
    logger.setLevel(stderr_logger_level)

    # This prints out the ASCII "trinity" header in the terminal
    display_launch_logs(trinity_config)

    # Setup the log listener which child processes relay their logs through
    log_listener = IPCListener(handler_stderr, handler_file)

    # Determine what logging level child processes should use.
    child_process_log_level = min(
        stderr_logger_level,
        file_logger_level,
        *logger_levels.values(),
    )

    boot_info = BootInfo(
        args=args,
        trinity_config=trinity_config,
        child_process_log_level=child_process_log_level,
        logger_levels=logger_levels,
        profile=bool(args.profile),
    )

    # Let the components do runtime validation
    for component_cls in component_types:
        component_cls.validate_cli(boot_info)

    # Components can provide a subcommand with a `func` which then controls
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
        return

    if hasattr(args, 'munge_func'):
        args.munge_func(args, trinity_config)

    runtime_component_types = tuple(
        component_cls
        for component_cls in component_types
        if issubclass(component_cls, ComponentAPI)
    )

    with log_listener.run(trinity_config.logging_ipc_path):

        processes = trinity_boot(boot_info)

        loop = asyncio.get_event_loop()

        def kill_trinity_with_reason(reason: str) -> None:
            kill_trinity_gracefully(
                trinity_config,
                logger,
                processes,
                reason=reason
            )

        component_manager_service = ComponentManager(
            boot_info,
            runtime_component_types,
            kill_trinity_with_reason,
        )
        manager = AsyncioManager(component_manager_service)

        loop.add_signal_handler(
            signal.SIGTERM,
            manager.cancel,
            'SIGTERM',
        )
        loop.add_signal_handler(
            signal.SIGINT,
            component_manager_service.shutdown,
            'CTRL+C',
        )

        try:
            loop.run_until_complete(manager.run())
        except BaseException as err:
            logger.error("Error during trinity run: %r", err)
            raise
        finally:
            kill_trinity_with_reason(component_manager_service.reason)
            if trinity_config.trinity_tmp_root_dir:
                shutil.rmtree(trinity_config.trinity_root_dir)
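The child_process_log_level computation works because stdlib log levels are plain integers (DEBUG=10, INFO=20, WARNING=30), so min() selects the most verbose level requested anywhere:

import logging

logger_levels = {'p2p': logging.WARNING}  # hypothetical per-module levels
child_level = min(logging.INFO, logging.DEBUG, *logger_levels.values())
assert child_level == logging.DEBUG  # the most verbose level wins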