Example #1
def test_initializing_data_dir_from_nothing(chain_config):
    assert not os.path.exists(chain_config.data_dir)
    assert not is_data_dir_initialized(chain_config)

    initialize_data_dir(chain_config)

    assert is_data_dir_initialized(chain_config)
Example #2
def test_initializing_data_dir_from_empty_data_dir(chain_config, data_dir):
    assert not os.path.exists(chain_config.database_dir)
    assert not is_data_dir_initialized(chain_config)

    initialize_data_dir(chain_config)

    assert is_data_dir_initialized(chain_config)
Example #3
def test_initializing_data_dir_with_missing_nodekey(chain_config, data_dir, database_dir):
    assert not os.path.exists(chain_config.nodekey_path)
    assert not is_data_dir_initialized(chain_config)

    initialize_data_dir(chain_config)

    assert is_data_dir_initialized(chain_config)
Example #4
def test_initializing_data_dir_from_empty_data_dir(trinity_config, data_dir):
    assert not os.path.exists(trinity_config.database_dir)
    assert not is_data_dir_initialized(trinity_config)

    initialize_data_dir(trinity_config)

    assert is_data_dir_initialized(trinity_config)
Example #5
def test_full_initialized_data_dir(chain_config, data_dir, database_dir,
                                   nodekey):
    assert os.path.exists(chain_config.data_dir)
    assert os.path.exists(chain_config.database_dir)
    assert chain_config.nodekey is not None

    assert is_data_dir_initialized(chain_config)
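
The chain_config, data_dir, database_dir and nodekey arguments in the tests above are pytest fixtures that are not part of these listings. Below is a minimal sketch of how such fixtures could be wired against a temporary directory; the fixture bodies, the ChainConfig import path and the XDG_TRINITY_ROOT handling are assumptions, not the actual Trinity conftest.

# Hypothetical fixture sketch; names and import paths are assumptions.
import os

import pytest

from trinity.config import ChainConfig  # assumed import location


@pytest.fixture
def chain_config(tmp_path, monkeypatch):
    # Point Trinity's root at a throwaway directory so no real paths are touched.
    monkeypatch.setenv('XDG_TRINITY_ROOT', str(tmp_path))
    return ChainConfig(network_id=1)


@pytest.fixture
def data_dir(chain_config):
    # Create only the top-level data dir; nested paths stay missing so the
    # tests can exercise partially initialized states.
    os.makedirs(chain_config.data_dir, exist_ok=True)
    return chain_config.data_dir
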
Example #6
def main() -> None:
    plugin_manager = setup_plugins()
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    # if cleanup command, try to shutdown dangling processes and exit
    if args.subcommand == 'fix-unclean-shutdown':
        fix_unclean_shutdown(chain_config, logger)
        sys.exit(0)

    display_launch_logs(chain_config)

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which does then control
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        trinity_boot(args, chain_config, extra_kwargs, listener, logger)
Example #7
def test_full_initialized_data_dir_with_custom_nodekey():
    chain_config = ChainConfig('test_chain', nodekey=NODEKEY)

    os.makedirs(chain_config.data_dir, exist_ok=True)
    os.makedirs(chain_config.database_dir, exist_ok=True)

    assert chain_config.nodekey_path is None
    assert chain_config.nodekey is not None

    assert is_data_dir_initialized(chain_config)
Example #8
def test_full_initialized_data_dir_with_custom_nodekey():
    chain_config = ChainConfig(network_id=1, nodekey=NODEKEY)

    os.makedirs(chain_config.data_dir, exist_ok=True)
    os.makedirs(chain_config.database_dir, exist_ok=True)

    assert chain_config.nodekey_path is None
    assert chain_config.nodekey is not None

    assert is_data_dir_initialized(chain_config)
Example #9
def test_full_initialized_data_dir_with_custom_nodekey():
    trinity_config = TrinityConfig(network_id=1, nodekey=NODEKEY)

    os.makedirs(trinity_config.data_dir, exist_ok=True)
    os.makedirs(trinity_config.database_dir, exist_ok=True)
    os.makedirs(trinity_config.logfile_path, exist_ok=True)
    trinity_config.logfile_path.touch()

    assert trinity_config.nodekey_path is None
    assert trinity_config.nodekey is not None

    assert is_data_dir_initialized(trinity_config)
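
Read together, the assertions in these tests pin down what is_data_dir_initialized has to check: every required on-disk artifact must exist, and a nodekey must be available either on disk or in memory. The following is a sketch inferred from those assertions, not the actual Trinity implementation; the exact set of checks also varied across versions (the logfile check only shows up in the TrinityConfig-era tests).

import os


def is_data_dir_initialized_sketch(config) -> bool:
    # Inferred behavior only. A nodekey may come from a file on disk or be
    # supplied directly on the config object (Examples 7-9 pass nodekey=NODEKEY
    # and end up with nodekey_path set to None).
    if config.nodekey_path is None:
        has_nodekey = config.nodekey is not None
    else:
        has_nodekey = os.path.exists(config.nodekey_path)

    # Later versions also require the log file to exist (see Examples 15 and 21);
    # older configs had no logfile_path attribute at all.
    logfile_ok = (
        os.path.exists(config.logfile_path)
        if hasattr(config, 'logfile_path')
        else True
    )

    return all((
        os.path.exists(config.data_dir),
        os.path.exists(config.database_dir),
        has_nodekey,
        logfile_ok,
    ))
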
Example #10
def run_networking_process(chain_config, sync_mode):
    class DBManager(BaseManager):
        pass

    DBManager.register('get_db', proxytype=DBProxy)
    DBManager.register('get_chaindb', proxytype=ChainDBProxy)

    manager = DBManager(address=chain_config.database_ipc_path)
    manager.connect()

    chaindb = manager.get_chaindb()

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    if not is_database_initialized(chaindb):
        initialize_database(chain_config, chaindb)

    chain_class = get_chain_protocol_class(chain_config, sync_mode=sync_mode)
    peer_pool = PeerPool(LESPeer, chaindb, chain_config.network_id,
                         chain_config.nodekey)

    async def run():
        try:
            asyncio.ensure_future(peer_pool.run())
            # chain.run() will run in a loop until our atexit handler is called, at which point it returns
            # and we cleanly stop the pool and chain.
            await chain.run()
        finally:
            await peer_pool.stop()
            await chain.stop()

    chain = chain_class(chaindb, peer_pool)

    loop = asyncio.get_event_loop()

    try:
        loop.run_until_complete(run())
    except KeyboardInterrupt:
        pass

    def cleanup():
        # This is to instruct chain.run() to exit, which will cause the event loop to stop.
        chain._should_stop.set()

        loop.close()

    atexit.register(cleanup)
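
Example #10's run_networking_process reaches the database process through multiprocessing.managers proxies. The snippet below is a self-contained sketch of that pattern with a plain dict standing in for the LevelDB-backed store; the DBProxy/ChainDBProxy classes and the real server side are Trinity-specific and not reproduced here.

# Stand-alone sketch of the BaseManager proxy pattern; the dict store and the
# authkey are illustrative details, not Trinity's actual wiring.
from multiprocessing.managers import BaseManager


class SketchDBManager(BaseManager):
    pass


def serve_db(ipc_path: str) -> None:
    db = {}
    # The server side registers a callable that returns the shared object.
    SketchDBManager.register('get_db', callable=lambda: db)
    manager = SketchDBManager(address=ipc_path, authkey=b'sketch')
    manager.get_server().serve_forever()


def connect_db(ipc_path: str):
    # Clients register only the name and receive a proxy, which is what
    # run_networking_process() does for get_db/get_chaindb above.
    SketchDBManager.register('get_db')
    manager = SketchDBManager(address=ipc_path, authkey=b'sketch')
    manager.connect()
    return manager.get_db()
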
Example #11
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    pool_class = PeerPool
    if args.local_geth:
        pool_class = LocalGethPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = FakeAsyncChainDB(LevelDB(chain_config.database_dir))
        if not is_database_initialized(chaindb):
            initialize_database(chain_config, chaindb)
        peer_pool = pool_class(LESPeer, chaindb, chain_config.network_id,
                               chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        console(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
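
wait_for_ipc() is called right after starting the database server process in Example #11 and in several of the main() listings that follow, but its body is not shown here. A plausible sketch, assuming it simply polls for the IPC socket file to appear, is:

import os
import time


def wait_for_ipc_sketch(ipc_path, timeout: float = 10.0) -> None:
    # Assumed behavior: block until the database process has created its
    # unix socket so clients can connect without racing the server.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if os.path.exists(ipc_path):
            return
        time.sleep(0.05)
    raise TimeoutError("IPC socket {} never appeared".format(ipc_path))
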
Example #12
def test_not_initialized_without_database_dir(chain_config, data_dir):
    assert not os.path.exists(chain_config.database_dir)
    assert not is_data_dir_initialized(chain_config)
Example #13
def test_not_initialized_without_data_dir(trinity_config):
    assert not os.path.exists(trinity_config.data_dir)
    assert not is_data_dir_initialized(trinity_config)
Example #14
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stdout_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(chain_config, ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #15
def test_not_initialized_without_logfile_dir(trinity_config, data_dir,
                                             database_dir, nodekey):
    assert not os.path.exists(trinity_config.logfile_path.parent)
    assert not is_data_dir_initialized(trinity_config)
Example #16
def test_not_initialized_without_nodekey_file(trinity_config, data_dir,
                                              database_dir):
    assert not os.path.exists(trinity_config.nodekey_path)
    assert not is_data_dir_initialized(trinity_config)
Example #17
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()

    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint))
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level)
    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger,
        formatter,
        handler_stream,
        chain_config,
        args.file_log_level,
    )

    display_launch_logs(chain_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(args.stderr_log_level, args.file_log_level,
                                   *(args.log_levels or {}).values())

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which does then control
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        trinity_boot(args, chain_config, extra_kwargs, plugin_manager,
                     listener, event_bus, main_endpoint, logger)
Example #18
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()

    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint)
    )
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id)
        )

    has_ambigous_logging_config = (
        args.log_levels is not None and
        None in args.log_levels and
        args.stderr_log_level is not None
    )
    if has_ambigous_logging_config:
        parser.error(
            "\n"
            "Ambiguous logging configuration: The logging level for stderr was "
            "configured with both `--stderr-log-level` and `--log-level`. "
            "Please remove one of these flags",
        )

    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    stderr_logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level or (args.log_levels and args.log_levels.get(None))
    )

    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        trinity_config = TrinityConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path"
            )

    file_logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        stderr_logger,
        formatter,
        handler_stream,
        trinity_config,
        args.file_log_level,
    )

    display_launch_logs(trinity_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(
        stderr_logger.level,
        file_logger.level,
        *(args.log_levels or {}).values()
    )

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which does then control
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
    else:
        trinity_boot(
            args,
            trinity_config,
            extra_kwargs,
            plugin_manager,
            listener,
            event_bus,
            main_endpoint,
            stderr_logger,
        )
Example #19
def main() -> None:
    args = parser.parse_args()

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    if args.sync_mode != SYNC_LIGHT:
        raise NotImplementedError(
            "Only light sync is supported.  Run with `--sync-mode=light` or `--light`"
        )

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, args.sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #20
def main() -> None:
    plugin_manager = setup_plugins()
    plugin_manager.amend_argparser_config(parser)
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    display_launch_logs(chain_config)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        run_console(chain_config, not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(
            args,
            chain_config,
        ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)",
                database_server_process.pid)
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    try:
        if args.subcommand == 'console':
            run_console(chain_config, not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
        # foreground *process group*, so both our networking and database processes will terminate
        # at the same time and not sequentially as we'd like. That shouldn't be a problem but if
        # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
        # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
        # process' signal handler to wait until the DB process has terminated before doing its
        # thing.
        # Notice that we still need the kill_process_gracefully() calls here, for when the user
        # simply uses 'kill' to send a signal to the main process, but also because they will
        # perform a non-gracefull shutdown if the process takes too long to terminate.
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(database_server_process, logger)
        logger.info('DB server process (pid=%d) terminated',
                    database_server_process.pid)
        # XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
        # join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
        import time
        time.sleep(0.2)  # noqa: E702
        kill_process_gracefully(networking_process, logger)
        logger.info('Networking process (pid=%d) terminated',
                    networking_process.pid)
Example #21
def test_not_initialized_without_logfile_path(chain_config, data_dir,
                                              database_dir, nodekey,
                                              logfile_dir):
    assert not os.path.exists(chain_config.logfile_path)
    assert not is_data_dir_initialized(chain_config)
Example #22
def test_full_initialized_data_dir(chain_config, data_dir, database_dir,
                                   nodekey, logfile_dir, logfile_path):
    assert is_data_dir_initialized(chain_config)
Example #23
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process,
                                          args=(
                                              chain_config,
                                              LevelDB,
                                          ),
                                          kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode,
                                           pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #24
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    logger, log_queue, listener = setup_trinity_logging(
        chain_config, log_level)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logging_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=logging_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(chain_config, ),
        kwargs=logging_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #25
def test_not_initialized_without_database_dir(chain_config, data_dir):
    assert not os.path.exists(chain_config.database_dir)
    assert not is_data_dir_initialized(chain_config)
Example #26
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    pool_class = PeerPool
    if args.local_geth:
        pool_class = LocalGethPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = FakeAsyncChainDB(LevelDB(chain_config.database_dir))
        if not is_database_initialized(chaindb):
            initialize_database(chain_config, chaindb)
        peer_pool = pool_class(LESPeer, chaindb, chain_config.network_id, chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        console(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs={'log_queue': log_queue}
    )

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(
        target=run_networking_process,
        args=(chain_config, sync_mode, pool_class),
        kwargs={'log_queue': log_queue}
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #27
def test_not_initialized_without_nodekey_file(chain_config, data_dir, database_dir):
    assert not os.path.exists(chain_config.nodekey_path)
    assert not is_data_dir_initialized(chain_config)
Example #28
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())
    logger, log_queue, listener = setup_trinity_logging(log_level)

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logging_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=logging_kwargs,
    )

    # TODO: Combine run_fullnode_process/run_lightnode_process into a single function that simply
    # passes the sync mode to p2p.Server, which then selects the appropriate sync service.
    networking_proc_fn = run_fullnode_process
    if args.sync_mode == SYNC_LIGHT:
        networking_proc_fn = run_lightnode_process
    networking_process = ctx.Process(
        target=networking_proc_fn,
        args=(chain_config, pool_class),
        kwargs=logging_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #29
def test_full_initialized_data_dir(chain_config, data_dir, database_dir, nodekey):
    assert os.path.exists(chain_config.data_dir)
    assert os.path.exists(chain_config.database_dir)
    assert chain_config.nodekey is not None

    assert is_data_dir_initialized(chain_config)
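
Across all of the main() variants above the same idiom repeats: build the config, test is_data_dir_initialized, and call initialize_data_dir only when needed, turning filesystem problems into a clean exit. A condensed sketch of that idiom, using the exception names from the listings (the import paths are assumptions and may differ between Trinity versions):

import sys

# Assumed import locations; adjust to the Trinity version in use.
from trinity.exceptions import AmbigiousFileSystem, MissingPath
from trinity.utils.chains import is_data_dir_initialized, initialize_data_dir


def ensure_data_dir(chain_config, logger) -> None:
    # Initialize the on-disk layout at most once; ambiguous layouts and
    # missing parent directories are treated as fatal configuration errors,
    # mirroring the error handling in the main() listings above.
    if is_data_dir_initialized(chain_config):
        return
    try:
        initialize_data_dir(chain_config)
    except AmbigiousFileSystem:
        logger.error("Ambiguous file system layout; refusing to initialize")
        sys.exit(1)
    except MissingPath as exc:
        logger.error(
            "It appears that %s does not exist; create it manually or use a "
            "data directory inside the XDG_TRINITY_ROOT path", exc.path)
        sys.exit(1)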