def test_cli_log_level_error_for_multiple_globals():
    with pytest.raises(SystemExit):
        parser.parse_args([
            '--log-level', 'DEBUG',
            '--log-level', 'module=DEBUG',
            '--log-level', 'ERROR',
        ])
def test_cli_log_level_error_for_repeated_name():
    with pytest.raises(SystemExit):
        parser.parse_args([
            '--log-level', 'DEBUG',
            '--log-level', 'module_a=DEBUG',
            '--log-level', 'module_b=DEBUG',
            '--log-level', 'module_a=DEBUG',
        ])
def test_cli_log_level_error_for_multiple_globals(capsys):
    with pytest.raises(SystemExit):
        parser.parse_args([
            '--log-level', 'DEBUG',
            '--log-level', 'module=DEBUG',
            '--log-level', 'ERROR',
        ])
    # consume the captured output so the error message that argparse prints
    # does not leak out of the test run.
    capsys.readouterr()
def test_cli_log_level_error_for_repeated_name(capsys):
    with pytest.raises(SystemExit):
        parser.parse_args([
            '--log-level', 'DEBUG',
            '--log-level', 'module_a=DEBUG',
            '--log-level', 'module_b=DEBUG',
            '--log-level', 'module_a=DEBUG',
        ])
    # consume the captured output so the error message that argparse prints
    # does not leak out of the test run.
    capsys.readouterr()
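
The tests above exercise a `--log-level` flag that accepts either a bare level (applied globally and stored under the key `None`) or a `NAME=LEVEL` pair, and that rejects a repeated global level or a repeated name by exiting through `parser.error`. The sketch below is a minimal, hypothetical argparse `Action` with that behavior; `LogLevelAction` and `sketch_parser` are illustrative names, not Trinity's actual implementation.

import argparse
import logging


class LogLevelAction(argparse.Action):
    """Hypothetical action: collect `--log-level [NAME=]LEVEL` into a dict."""

    def __call__(self, parser, namespace, value, option_string=None):
        levels = getattr(namespace, self.dest, None) or {}
        name, _, raw_level = value.rpartition('=')
        key = name or None   # bare levels are stored under None (the global level)
        level = getattr(logging, raw_level.upper(), None)
        if not isinstance(level, int):
            parser.error(f"Invalid log level: {raw_level}")
        if key in levels:
            parser.error(f"Duplicate log level for {key or 'the global logger'}")
        levels[key] = level
        setattr(namespace, self.dest, levels)


sketch_parser = argparse.ArgumentParser()
sketch_parser.add_argument('--log-level', dest='log_levels', action=LogLevelAction, default=None)

# `--log-level DEBUG --log-level p2p=INFO` -> {None: 10, 'p2p': 20}
print(sketch_parser.parse_args(['--log-level', 'DEBUG', '--log-level', 'p2p=INFO']).log_levels)
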
Example #5
def main() -> None:
    plugin_manager = setup_plugins()
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    # if cleanup command, try to shutdown dangling processes and exit
    if args.subcommand == 'fix-unclean-shutdown':
        fix_unclean_shutdown(chain_config, logger)
        sys.exit(0)

    display_launch_logs(chain_config)

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then takes control
    # of the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        trinity_boot(args, chain_config, extra_kwargs, listener, logger)
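
The `hasattr(args, 'func')` check at the end relies on a common argparse idiom: each plugin-provided subcommand attaches its handler via `set_defaults(func=...)` on its own subparser, so parsing that subcommand puts the callable on the namespace. A self-contained sketch of the wiring, using a hypothetical `export-db` subcommand rather than any real Trinity plugin:

import argparse

parser = argparse.ArgumentParser()
subparser = parser.add_subparsers(dest='subcommand')


def run_export_db(args, chain_config):
    # hypothetical handler: a plugin-provided subcommand takes over from main()
    print("exporting database ...")


export_cmd = subparser.add_parser('export-db')
export_cmd.set_defaults(func=run_export_db)

args = parser.parse_args(['export-db'])
if hasattr(args, 'func'):
    args.func(args, chain_config=None)
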
Example #6
def _setup_standalone_component(
    component_type: Union[Type['TrioIsolatedComponent'],
                          Type['AsyncioIsolatedComponent']],
    app_identifier: str,
) -> Tuple[Union['TrioIsolatedComponent', 'AsyncioIsolatedComponent'], Tuple[
        str, ...]]:
    if app_identifier == APP_IDENTIFIER_ETH1:
        app_cfg: Type[BaseAppConfig] = Eth1AppConfig
    elif app_identifier == APP_IDENTIFIER_BEACON:
        app_cfg = BeaconAppConfig
    else:
        raise ValueError(f"Unknown app identifier: {app_identifier}")

    # Require a root dir to be specified as we don't want to mess with the default one.
    for action in parser._actions:
        if action.dest == 'trinity_root_dir':
            action.required = True
            break

    component_type.configure_parser(parser, subparser)
    parser.add_argument(
        '--connect-to-endpoints',
        help="A list of event bus IPC files for components we should connect to",
        nargs='+',
        default=tuple(),
    )
    args = parser.parse_args()
    # FIXME: Figure out a way to avoid having to set this.
    args.sync_mode = SYNC_FULL
    args.enable_metrics = False

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    if args.log_levels is not None:
        for name, level in args.log_levels.items():
            if name is None:
                name = ''
            get_logger(name).setLevel(level)

    trinity_config = TrinityConfig.from_parser_args(args, app_identifier,
                                                    (app_cfg, ))
    trinity_config.trinity_root_dir.mkdir(exist_ok=True)
    if not is_data_dir_initialized(trinity_config):
        initialize_data_dir(trinity_config)
    boot_info = BootInfo(
        args=args,
        trinity_config=trinity_config,
        min_log_level=None,
        logger_levels=None,
        profile=False,
    )
    return component_type(boot_info), args.connect_to_endpoints
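
The loop over `parser._actions` above turns the already-registered `--trinity-root-dir` option into a required one without re-declaring it. Below is a standalone demonstration of the same trick; note that it leans on argparse's private `_actions` list, so it may break across argparse versions.

import argparse

p = argparse.ArgumentParser()
p.add_argument('--trinity-root-dir', dest='trinity_root_dir', default=None)

# Flip the existing optional argument to required after the fact.
for action in p._actions:
    if action.dest == 'trinity_root_dir':
        action.required = True
        break

p.parse_args(['--trinity-root-dir', '/tmp/trinity'])   # parses fine
# p.parse_args([])  # would now exit: "the following arguments are required: --trinity-root-dir"
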
Example #7
def parse_and_validate_cli() -> argparse.Namespace:
    argcomplete.autocomplete(parser)

    args = parser.parse_args()

    if not args.genesis and args.network_id not in PRECONFIGURED_NETWORKS:
        parser.error(
            f"Unsupported network id: {args.network_id}. To use a network besides "
            "mainnet, ropsten or goerli, you must supply a genesis file with a flag, like "
            "`--genesis path/to/genesis.json`, and you must also specify a data "
            "directory with `--data-dir path/to/data/directory`")

    return args
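
`parser.error(...)` is argparse's standard failure path: it prints the usage line plus the message to stderr and exits with status 2, which is exactly the `SystemExit` that the log-level tests earlier in this listing catch with `pytest.raises`. A quick self-contained check:

import argparse

p = argparse.ArgumentParser(prog='trinity')
try:
    p.error("Unsupported network id: 42")
except SystemExit as exc:
    # argparse wrote usage plus the message to stderr before exiting
    assert exc.code == 2
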
Example #8
def _run() -> None:
    from eth.db.backends.level import LevelDB
    from eth.db.chain import ChainDB
    from trinity.cli_parser import parser
    from trinity.config import Eth1AppConfig, TrinityConfig
    from trinity.constants import APP_IDENTIFIER_ETH1
    from trinity.initialization import (
        initialize_data_dir,
        is_data_dir_initialized,
        is_database_initialized,
        initialize_database,
        ensure_eth1_dirs,
    )

    # Require a root dir to be specified as we don't want to mess with the default one.
    for action in parser._actions:
        if action.dest == 'trinity_root_dir':
            action.required = True
            break

    args = parser.parse_args()
    # FIXME: Figure out a way to avoid having to set this.
    args.sync_mode = "full"
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    for name, level in args.log_levels.items():
        logging.getLogger(name).setLevel(level)
    trinity_config = TrinityConfig.from_parser_args(args, APP_IDENTIFIER_ETH1,
                                                    (Eth1AppConfig, ))
    trinity_config.trinity_root_dir.mkdir(exist_ok=True)
    if not is_data_dir_initialized(trinity_config):
        initialize_data_dir(trinity_config)

    with trinity_config.process_id_file('database'):
        app_config = trinity_config.get_app_config(Eth1AppConfig)
        ensure_eth1_dirs(app_config)

        base_db = LevelDB(db_path=app_config.database_dir)
        chaindb = ChainDB(base_db)

        if not is_database_initialized(chaindb):
            chain_config = app_config.get_chain_config()
            initialize_database(chain_config, chaindb, base_db)

        manager = DBManager(base_db)
        with manager.run(trinity_config.database_ipc_path):
            try:
                manager.wait_stopped()
            except KeyboardInterrupt:
                pass
def test_cli_log_level_global_values(level, expected):
    ns = parser.parse_args(['--log-level', level])
    assert ns.log_levels == {None: expected}
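
The `@pytest.mark.parametrize` decorator that supplies `level` and `expected` for the test above was evidently not captured in the snippet. Here is a self-contained sketch of the pattern, using a stand-in parser instead of Trinity's real CLI parser; the exact parameter set in the original suite may differ.

import argparse
import logging

import pytest


@pytest.fixture
def level_parser():
    # Stand-in parser: map a bare level name to {None: <numeric level>}.
    p = argparse.ArgumentParser()
    p.add_argument(
        '--log-level',
        dest='log_levels',
        type=lambda raw: {None: getattr(logging, raw.upper())},
        default=None,
    )
    return p


@pytest.mark.parametrize('level,expected', (
    ('DEBUG', logging.DEBUG),
    ('INFO', logging.INFO),
    ('ERROR', logging.ERROR),
))
def test_global_log_level_sketch(level_parser, level, expected):
    ns = level_parser.parse_args(['--log-level', level])
    assert ns.log_levels == {None: expected}
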
Example #10
def main_entry(trinity_boot: BootFn, app_identifier: str,
               components: Tuple[Type[BaseComponent], ...],
               sub_configs: Sequence[Type[BaseAppConfig]]) -> None:
    for component_type in components:
        component_type.configure_parser(parser, subparser)

    argcomplete.autocomplete(parser)

    args = parser.parse_args()

    if not args.genesis and args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            f"Unsupported network id: {args.network_id}. To use a network besides "
            "mainnet or ropsten, you must supply a genesis file with a flag, like "
            "`--genesis path/to/genesis.json`, and you must also specify a data "
            "directory with `--data-dir path/to/data/directory`")

    # The `common_log_level` is derived from `--log-level <Level>` / `-l <Level>` without
    # specifying any module. If present, it is used for both `stderr` and `file` logging.
    common_log_level = args.log_levels and args.log_levels.get(None)
    has_ambigous_logging_config = (
        (common_log_level is not None and args.stderr_log_level is not None)
        or (common_log_level is not None and args.file_log_level is not None))

    if has_ambigous_logging_config:
        parser.error(f"""\n
            Ambiguous logging configuration: The `--log-level (-l)` flag sets the
            log level for both file and stderr logging.
            To configure different log levels for file and stderr logging,
            remove the `--log-level` flag and use `--stderr-log-level` and/or
            `--file-log-level` separately.
            Alternatively, remove the `--stderr-log-level` and/or `--file-log-level`
            flags to share one single log level across both handlers.
            """)

    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    stderr_logger, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level or common_log_level)

    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        trinity_config = TrinityConfig.from_parser_args(
            args, app_identifier, sub_configs)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path")

    file_logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        stderr_logger,
        handler_stream,
        trinity_config.logfile_path,
        args.file_log_level or common_log_level,
    )

    display_launch_logs(trinity_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(stderr_logger.level, file_logger.level,
                                   *(args.log_levels or {}).values())

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'log_levels': args.log_levels if args.log_levels else {},
        'profile': args.profile,
    }

    # Components can provide a subcommand with a `func` which then takes control
    # of the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
        return

    if hasattr(args, 'munge_func'):
        args.munge_func(args, trinity_config)

    processes = trinity_boot(
        args,
        trinity_config,
        extra_kwargs,
        listener,
        stderr_logger,
    )

    def kill_trinity_with_reason(reason: str) -> None:
        kill_trinity_gracefully(trinity_config,
                                stderr_logger,
                                processes,
                                component_manager_service,
                                reason=reason)

    boot_info = TrinityBootInfo(args, trinity_config, extra_kwargs)
    component_manager_service = ComponentManagerService(
        boot_info, components, kill_trinity_with_reason)

    try:
        loop = asyncio.get_event_loop()
        asyncio.ensure_future(exit_with_services(component_manager_service))
        asyncio.ensure_future(component_manager_service.run())
        loop.add_signal_handler(signal.SIGTERM,
                                lambda: kill_trinity_with_reason("SIGTERM"))
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_trinity_with_reason("CTRL+C / Keyboard Interrupt")
    finally:
        if trinity_config.trinity_tmp_root_dir:
            import shutil
            shutil.rmtree(trinity_config.trinity_root_dir)
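
The `min()` over handler levels and per-module overrides works because stdlib logging levels are plain integers where smaller means more verbose (DEBUG=10, INFO=20, WARNING=30, ...), so `min_configured_log_level` ends up being the most verbose level any part of the configuration asked for. A quick check with illustrative values:

import logging

stderr_level = logging.INFO              # 20
file_level = logging.DEBUG               # 10
log_levels = {'p2p': logging.WARNING}    # hypothetical per-module overrides

min_configured_log_level = min(stderr_level, file_level, *log_levels.values())
assert min_configured_log_level == logging.DEBUG   # the most verbose wins
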
Example #11
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    pool_class = PeerPool
    if args.local_geth:
        pool_class = LocalGethPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = FakeAsyncChainDB(LevelDB(chain_config.database_dir))
        if not is_database_initialized(chaindb):
            initialize_database(chain_config, chaindb)
        peer_pool = pool_class(LESPeer, chaindb, chain_config.network_id, chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        console(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs={'log_queue': log_queue}
    )

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(
        target=run_networking_process,
        args=(chain_config, sync_mode, pool_class),
        kwargs={'log_queue': log_queue}
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
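
Passing `log_queue` into each `ctx.Process` and starting a listener in the parent matches the standard library's queue-based multiprocessing logging pattern: children push records through a `QueueHandler`, and a `QueueListener` thread in the parent drains them into real handlers. Below is a minimal stdlib-only sketch of that pattern; Trinity's own `setup_trinity_logging` helpers may differ in detail.

import logging
import logging.handlers
import multiprocessing

log_queue = multiprocessing.Queue()

# Parent side: a background thread drains the queue into a real handler.
listener = logging.handlers.QueueListener(log_queue, logging.StreamHandler())
listener.start()


def setup_child_logging(queue):
    # Child side: route every record into the shared queue.
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    root.addHandler(logging.handlers.QueueHandler(queue))


setup_child_logging(log_queue)
logging.getLogger('example').info("relayed through the queue")
listener.stop()
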
Example #12
def main_entry(trinity_boot: BootFn,
               app_identifier: str,
               component_types: Tuple[Type[BaseComponentAPI], ...],
               sub_configs: Sequence[Type[BaseAppConfig]]) -> None:
    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    for component_cls in component_types:
        component_cls.configure_parser(parser, subparser)

    argcomplete.autocomplete(parser)

    args = parser.parse_args()

    if not args.genesis and args.network_id not in PRECONFIGURED_NETWORKS:
        parser.error(
            f"Unsupported network id: {args.network_id}. To use a network besides "
            "mainnet or ropsten, you must supply a genesis file with a flag, like "
            "`--genesis path/to/genesis.json`, and you must also specify a data "
            "directory with `--data-dir path/to/data/directory`"
        )

    # The `common_log_level` is derived from `--log-level <Level>` / `-l <Level>` without
    # specifying any module. If present, it is used for both `stderr` and `file` logging.
    common_log_level = args.log_levels and args.log_levels.get(None)
    has_ambigous_logging_config = ((
        common_log_level is not None and
        args.stderr_log_level is not None
    ) or (
        common_log_level is not None and
        args.file_log_level is not None
    ))

    if has_ambigous_logging_config:
        parser.error(
            f"""\n
            Ambiguous logging configuration: The `--log-level (-l)` flag sets the
            log level for both file and stderr logging.
            To configure different log levels for file and stderr logging,
            remove the `--log-level` flag and use `--stderr-log-level` and/or
            `--file-log-level` separately.
            Alternatively, remove the `--stderr-log-level` and/or `--file-log-level`
            flags to share one single log level across both handlers.
            """
        )

    try:
        trinity_config = TrinityConfig.from_parser_args(args, app_identifier, sub_configs)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path"
            )

    # +---------------+
    # | LOGGING SETUP |
    # +---------------+

    # Setup logging to stderr
    stderr_logger_level = (
        args.stderr_log_level
        if args.stderr_log_level is not None
        else (common_log_level if common_log_level is not None else logging.INFO)
    )
    handler_stderr = setup_stderr_logging(stderr_logger_level)

    # Setup file based logging
    file_logger_level = (
        args.file_log_level
        if args.file_log_level is not None
        else (common_log_level if common_log_level is not None else logging.DEBUG)
    )
    handler_file = setup_file_logging(trinity_config.logfile_path, file_logger_level)

    # Set the individual logger levels that have been specified.
    logger_levels = {} if args.log_levels is None else args.log_levels
    set_logger_levels(logger_levels)

    # get the root logger and set it to the level of the stderr logger.
    logger = logging.getLogger()
    logger.setLevel(stderr_logger_level)

    # This prints out the ASCII "trinity" header in the terminal
    display_launch_logs(trinity_config)

    # Setup the log listener which child processes relay their logs through
    log_listener = IPCListener(handler_stderr, handler_file)

    # Determine what logging level child processes should use.
    child_process_log_level = min(
        stderr_logger_level,
        file_logger_level,
        *logger_levels.values(),
    )

    boot_info = BootInfo(
        args=args,
        trinity_config=trinity_config,
        child_process_log_level=child_process_log_level,
        logger_levels=logger_levels,
        profile=bool(args.profile),
    )

    # Let the components do runtime validation
    for component_cls in component_types:
        component_cls.validate_cli(boot_info)

    # Components can provide a subcommand with a `func` which then takes control
    # of the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
        return

    if hasattr(args, 'munge_func'):
        args.munge_func(args, trinity_config)

    runtime_component_types = tuple(
        component_cls
        for component_cls in component_types
        if issubclass(component_cls, ComponentAPI)
    )

    with log_listener.run(trinity_config.logging_ipc_path):

        processes = trinity_boot(boot_info)

        loop = asyncio.get_event_loop()

        def kill_trinity_with_reason(reason: str) -> None:
            kill_trinity_gracefully(
                trinity_config,
                logger,
                processes,
                reason=reason
            )

        component_manager_service = ComponentManager(
            boot_info,
            runtime_component_types,
            kill_trinity_with_reason,
        )
        manager = AsyncioManager(component_manager_service)

        loop.add_signal_handler(
            signal.SIGTERM,
            manager.cancel,
            'SIGTERM',
        )
        loop.add_signal_handler(
            signal.SIGINT,
            component_manager_service.shutdown,
            'CTRL+C',
        )

        try:
            loop.run_until_complete(manager.run())
        except BaseException as err:
            logger.error("Error during trinity run: %r", err)
            raise
        finally:
            kill_trinity_with_reason(component_manager_service.reason)
            if trinity_config.trinity_tmp_root_dir:
                shutil.rmtree(trinity_config.trinity_root_dir)
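
`loop.add_signal_handler` registers the shutdown callback on the asyncio event loop itself, so SIGTERM and CTRL+C funnel through one graceful-shutdown path instead of raising `KeyboardInterrupt` in the middle of a coroutine. Here is a minimal, self-contained version of that pattern (Unix only, since `add_signal_handler` is not available on Windows):

import asyncio
import signal


async def main() -> None:
    loop = asyncio.get_running_loop()
    stop = asyncio.Event()

    # Route both signals through a single graceful-shutdown path.
    loop.add_signal_handler(signal.SIGTERM, stop.set)
    loop.add_signal_handler(signal.SIGINT, stop.set)

    await stop.wait()
    print("shutting down gracefully")


if __name__ == '__main__':
    asyncio.run(main())
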
Example #13
async def run_standalone_component(
    component: Union[Type['TrioIsolatedComponent'],
                     Type['AsyncioIsolatedComponent']],
    app_identifier: str,
) -> None:
    from trinity.extensibility.trio import TrioIsolatedComponent  # noqa: F811
    from trinity.extensibility.asyncio import AsyncioIsolatedComponent  # noqa: F811
    if issubclass(component, TrioIsolatedComponent):
        endpoint_type: Union[Type[TrioEndpoint],
                             Type[AsyncioEndpoint]] = TrioEndpoint
    elif issubclass(component, AsyncioIsolatedComponent):
        endpoint_type = AsyncioEndpoint
    else:
        raise ValueError(f"Unknown component type: {type(component)}")

    if app_identifier == APP_IDENTIFIER_ETH1:
        app_cfg: Type[BaseAppConfig] = Eth1AppConfig
    elif app_identifier == APP_IDENTIFIER_BEACON:
        app_cfg = BeaconAppConfig
    else:
        raise ValueError(f"Unknown app identifier: {app_identifier}")

    # Require a root dir to be specified as we don't want to mess with the default one.
    for action in parser._actions:
        if action.dest == 'trinity_root_dir':
            action.required = True
            break

    component.configure_parser(parser, subparser)
    parser.add_argument(
        '--connect-to-endpoints',
        help="A list of event bus IPC files for components we should connect to",
        nargs='+',
        default=tuple(),
    )
    args = parser.parse_args()
    # FIXME: Figure out a way to avoid having to set this.
    args.sync_mode = SYNC_FULL

    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s %(levelname)s: %(message)s',
                        datefmt='%H:%M:%S')
    if args.log_levels is not None:
        for name, level in args.log_levels.items():
            get_logger(name).setLevel(level)

    trinity_config = TrinityConfig.from_parser_args(args, app_identifier,
                                                    (app_cfg, ))
    trinity_config.trinity_root_dir.mkdir(exist_ok=True)
    if not is_data_dir_initialized(trinity_config):
        initialize_data_dir(trinity_config)
    boot_info = BootInfo(
        args=args,
        trinity_config=trinity_config,
        child_process_log_level=None,
        logger_levels=None,
        profile=False,
    )
    conn_config = ConnectionConfig.from_name(component.get_endpoint_name(),
                                             trinity_config.ipc_dir)

    async with endpoint_type.serve(conn_config) as event_bus:
        for endpoint in args.connect_to_endpoints:
            path = pathlib.Path(endpoint)
            if not path.is_socket():
                raise ValueError(f"Invalid IPC path: {path}")
            connection_config = ConnectionConfig(name=path.stem, path=path)
            logger.info("Attempting to connect to eventbus endpoint at %s",
                        connection_config)
            await event_bus.connect_to_endpoints(connection_config)
        await component.do_run(boot_info, event_bus)
Example #14
def main_entry(trinity_boot: BootFn, app_identifier: str,
               plugins: Iterable[Type[BasePlugin]],
               sub_configs: Iterable[Type[BaseAppConfig]]) -> None:

    main_endpoint = TrinityMainEventBusEndpoint()

    plugin_manager = PluginManager(MainAndIsolatedProcessScope(main_endpoint),
                                   plugins)
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if not args.genesis and args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            f"Unsupported network id: {args.network_id}. To use a network besides "
            "mainnet or ropsten, you must supply a genesis file with a flag, like "
            "`--genesis path/to/genesis.json`, and you must also specify a data "
            "directory with `--data-dir path/to/data/directory`")

    has_ambigous_logging_config = (args.log_levels is not None
                                   and None in args.log_levels
                                   and args.stderr_log_level is not None)
    if has_ambigous_logging_config:
        parser.error(
            "\n"
            "Ambiguous logging configuration: The logging level for stderr was "
            "configured with both `--stderr-log-level` and `--log-level`. "
            "Please remove one of these flags", )

    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    stderr_logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level
        or (args.log_levels and args.log_levels.get(None)))

    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        trinity_config = TrinityConfig.from_parser_args(
            args, app_identifier, sub_configs)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path")

    file_logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        stderr_logger,
        formatter,
        handler_stream,
        trinity_config.logfile_path,
        args.file_log_level,
    )

    display_launch_logs(trinity_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(stderr_logger.level, file_logger.level,
                                   *(args.log_levels or {}).values())

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'log_levels': args.log_levels if args.log_levels else {},
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then takes control
    # of the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
        return

    processes = trinity_boot(
        args,
        trinity_config,
        extra_kwargs,
        plugin_manager,
        listener,
        main_endpoint,
        stderr_logger,
    )

    def kill_trinity_with_reason(reason: str) -> None:
        kill_trinity_gracefully(trinity_config,
                                stderr_logger,
                                processes,
                                plugin_manager,
                                main_endpoint,
                                reason=reason)

    try:
        loop = asyncio.get_event_loop()
        asyncio.ensure_future(
            trinity_boot_coro(
                kill_trinity_with_reason,
                main_endpoint,
                trinity_config,
                plugin_manager,
                args,
                extra_kwargs,
            ))
        loop.add_signal_handler(signal.SIGTERM,
                                lambda: kill_trinity_with_reason("SIGTERM"))
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_trinity_with_reason("CTRL+C / Keyboard Interrupt")
Example #15
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()

    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint)
    )
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id)
        )

    has_ambigous_logging_config = (
        args.log_levels is not None and
        None in args.log_levels and
        args.stderr_log_level is not None
    )
    if has_ambigous_logging_config:
        parser.error(
            "\n"
            "Ambiguous logging configuration: The logging level for stderr was "
            "configured with both `--stderr-log-level` and `--log-level`. "
            "Please remove one of these flags",
        )

    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    stderr_logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level or (args.log_levels and args.log_levels.get(None))
    )

    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        trinity_config = TrinityConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path"
            )

    file_logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        stderr_logger,
        formatter,
        handler_stream,
        trinity_config,
        args.file_log_level,
    )

    display_launch_logs(trinity_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(
        stderr_logger.level,
        file_logger.level,
        *(args.log_levels or {}).values()
    )

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then takes control
    # of the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
    else:
        trinity_boot(
            args,
            trinity_config,
            extra_kwargs,
            plugin_manager,
            listener,
            event_bus,
            main_endpoint,
            stderr_logger,
        )
Example #16
def main() -> None:
    args = parser.parse_args()

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    if args.sync_mode != SYNC_LIGHT:
        raise NotImplementedError(
            "Only light sync is supported.  Run with `--sync-mode=light` or `--light`"
        )

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, args.sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
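
`wait_for_ipc` is presumably a small poll loop that blocks until the database process has created its IPC socket before the networking process is started. A hedged sketch of what such a helper might look like follows; the real Trinity helper may use a different timeout, polling interval, or error type.

import pathlib
import time


def wait_for_ipc(ipc_path: pathlib.Path, timeout: float = 10.0) -> None:
    # Poll until the socket file appears, or give up after `timeout` seconds.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if ipc_path.exists():
            return
        time.sleep(0.05)
    raise TimeoutError(f"IPC socket never appeared at {ipc_path}")
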
Example #17
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    pool_class = PeerPool
    if args.local_geth:
        pool_class = LocalGethPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = FakeAsyncChainDB(LevelDB(chain_config.database_dir))
        if not is_database_initialized(chaindb):
            initialize_database(chain_config, chaindb)
        peer_pool = pool_class(LESPeer, chaindb, chain_config.network_id,
                               chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        console(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #18
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stdout_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(chain_config, ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #19
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
def test_cli_log_level_module_value(level, expected):
    ns = parser.parse_args(['--log-level', "module={0}".format(level)])
    assert ns.log_levels == {'module': expected}
Example #21
def main() -> None:
    plugin_manager = setup_plugins()
    plugin_manager.amend_argparser_config(parser)
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    display_launch_logs(chain_config)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        run_console(chain_config, not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(
            args,
            chain_config,
        ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)",
                database_server_process.pid)
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    try:
        if args.subcommand == 'console':
            run_console(chain_config, not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
        # foreground *process group*, so both our networking and database processes will terminate
        # at the same time and not sequentially as we'd like. That shouldn't be a problem but if
        # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
        # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
        # process' signal handler to wait until the DB process has terminated before doing its
        # thing.
        # Notice that we still need the kill_process_gracefully() calls here, for when the user
        # simply uses 'kill' to send a signal to the main process, but also because they will
        # perform a non-graceful shutdown if the process takes too long to terminate.
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(database_server_process, logger)
        logger.info('DB server process (pid=%d) terminated',
                    database_server_process.pid)
        # XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
        # join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
        import time
        time.sleep(0.2)  # noqa: E702
        kill_process_gracefully(networking_process, logger)
        logger.info('Networking process (pid=%d) terminated',
                    networking_process.pid)
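
The long comment above notes that CTRL+C delivers SIGINT to every process in the foreground process group at once, which is why the children can die out of order. One common way to keep shutdown ordering in the parent's hands is to have each child ignore SIGINT and rely on the parent to terminate it explicitly; a minimal sketch of that approach follows, which is not necessarily what Trinity settled on.

import signal
import time
from multiprocessing import Process


def child_main() -> None:
    # Ignore CTRL+C in the child; the parent decides when this process dies.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    while True:
        time.sleep(1)


if __name__ == '__main__':
    proc = Process(target=child_main)
    proc.start()
    try:
        proc.join()
    except KeyboardInterrupt:
        # Only the parent reacts to CTRL+C and shuts the child down in order.
        proc.terminate()
        proc.join()
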
Example #22
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()

    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint))
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level)
    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger,
        formatter,
        handler_stream,
        chain_config,
        args.file_log_level,
    )

    display_launch_logs(chain_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(args.stderr_log_level, args.file_log_level,
                                   *(args.log_levels or {}).values())

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then takes control
    # of the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        trinity_boot(args, chain_config, extra_kwargs, plugin_manager,
                     listener, event_bus, main_endpoint, logger)
def test_cli_log_level_not_specified():
    ns = parser.parse_args([])
    assert ns.log_levels is None
Example #24
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    logger, log_queue, listener = setup_trinity_logging(
        chain_config, log_level)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logging_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=logging_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(chain_config, ),
        kwargs=logging_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #25
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())
    logger, log_queue, listener = setup_trinity_logging(log_level)

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logging_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=logging_kwargs,
    )

    # TODO: Combine run_fullnode_process/run_lightnode_process into a single function that simply
    # passes the sync mode to p2p.Server, which then selects the appropriate sync service.
    networking_proc_fn = run_fullnode_process
    if args.sync_mode == SYNC_LIGHT:
        networking_proc_fn = run_lightnode_process
    networking_process = ctx.Process(
        target=networking_proc_fn,
        args=(chain_config, pool_class),
        kwargs=logging_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #26
def main_entry(trinity_boot: BootFn, app_identifier: str,
               plugins: Iterable[BasePlugin],
               sub_configs: Iterable[Type[BaseAppConfig]]) -> None:

    main_endpoint = TrinityMainEventBusEndpoint()

    plugin_manager = setup_plugins(MainAndIsolatedProcessScope(main_endpoint),
                                   plugins)
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            f"Unsupported network id: {args.network_id}.  Only the ropsten and mainnet "
            "networks are supported.")

    has_ambigous_logging_config = (args.log_levels is not None
                                   and None in args.log_levels
                                   and args.stderr_log_level is not None)
    if has_ambigous_logging_config:
        parser.error(
            "\n"
            "Ambiguous logging configuration: The logging level for stderr was "
            "configured with both `--stderr-log-level` and `--log-level`. "
            "Please remove one of these flags", )

    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    stderr_logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level
        or (args.log_levels and args.log_levels.get(None)))

    if args.log_levels:
        setup_log_levels(args.log_levels)

    main_endpoint.track_and_propagate_available_endpoints()
    try:
        trinity_config = TrinityConfig.from_parser_args(
            args, app_identifier, sub_configs)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path")

    file_logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        stderr_logger,
        formatter,
        handler_stream,
        trinity_config.logfile_path,
        args.file_log_level,
    )

    display_launch_logs(trinity_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(stderr_logger.level, file_logger.level,
                                   *(args.log_levels or {}).values())

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then takes control
    # of the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
    else:
        # We postpone EventBus connection until here because we don't want one in cases where
        # a plugin just redefines the `trinity` command such as `trinity fix-unclean-shutdown`
        main_connection_config = ConnectionConfig.from_name(
            MAIN_EVENTBUS_ENDPOINT, trinity_config.ipc_dir)
        main_endpoint.start_serving_nowait(main_connection_config)

        # We listen on events such as `ShutdownRequested` which may or may not originate on
        # the `main_endpoint` which is why we connect to our own endpoint here
        main_endpoint.connect_to_endpoints_blocking(main_connection_config)
        trinity_boot(
            args,
            trinity_config,
            extra_kwargs,
            plugin_manager,
            listener,
            main_endpoint,
            stderr_logger,
        )