Example #1
def initialize_database(chain_config: ChainConfig,
                        chaindb: BaseChainDB,
                        base_db: BaseAtomicDB) -> None:
    try:
        chaindb.get_canonical_head()
    except CanonicalHeadNotFound:
        chain_config.initialize_chain(base_db)
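A minimal sketch of how initialize_database might be driven, using the in-memory database classes that appear elsewhere on this page; ChainConfig.from_preconfigured_network is borrowed from a later example, and the exact wiring is an assumption rather than Trinity's actual startup path:

base_db = AtomicDB(MemoryDB())
chaindb = ChainDB(base_db)
chain_config = ChainConfig.from_preconfigured_network(ROPSTEN_NETWORK_ID)

# Writes the genesis header only when no canonical head exists yet, so a
# second call against the same database is a no-op.
initialize_database(chain_config, chaindb, base_db)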
Example #2
def database_server_ipc_path():
    core_db = MemoryDB()
    core_db[b'key-a'] = b'value-a'

    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)

    with tempfile.TemporaryDirectory() as temp_dir:
        chain_config = ChainConfig(network_id=ROPSTEN_NETWORK_ID,
                                   data_dir=temp_dir)

        manager = get_chaindb_manager(chain_config, core_db)
        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(manager, ),
        )
        chaindb_server_process.start()

        wait_for_ipc(chain_config.database_ipc_path)

        try:
            yield chain_config.database_ipc_path
        finally:
            kill_process_gracefully(chaindb_server_process,
                                    logging.getLogger())
Example #3
def test_chain_config_explicit_properties():
    chain_config = ChainConfig(network_id=1,
                               data_dir='./data-dir',
                               nodekey_path='./nodekey')

    assert is_same_path(chain_config.data_dir, './data-dir')
    assert is_same_path(chain_config.nodekey_path, './nodekey')
Example #4
def test_chain_config_explictely_provided_nodekey(nodekey_bytes, as_bytes):
    chain_config = ChainConfig(
        network_id=1,
        nodekey=nodekey_bytes if as_bytes else keys.PrivateKey(nodekey_bytes),
    )

    assert chain_config.nodekey.to_bytes() == nodekey_bytes
Example #5
def test_chain_config_nodekey_loading(nodekey_bytes, nodekey_path):
    chain_config = ChainConfig(
        network_id=1,
        nodekey_path=nodekey_path,
    )

    assert chain_config.nodekey.to_bytes() == nodekey_bytes
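This test leans on nodekey_bytes and nodekey_path fixtures that are not shown. A plausible sketch, assuming pytest's built-in tmp_path fixture and a raw 32-byte secp256k1 private key, could be:

import pytest

@pytest.fixture
def nodekey_bytes():
    # any non-zero 32-byte value below the curve order works as a private key
    return b'\x01' * 32

@pytest.fixture
def nodekey_path(tmp_path, nodekey_bytes):
    # ChainConfig is expected to read the raw key bytes back from this file
    path = tmp_path / 'nodekey'
    path.write_bytes(nodekey_bytes)
    return path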
Example #6
def test_chain_config_explicit_properties():
    chain_config = ChainConfig(network_id=1,
                               data_dir='./data-dir',
                               nodekey_path='./nodekey')

    assert chain_config.data_dir == Path('./data-dir').resolve()
    assert chain_config.nodekey_path == Path('./nodekey').resolve()
Example #7
def launch_node(args: Namespace, chain_config: ChainConfig,
                endpoint: Endpoint) -> None:
    with chain_config.process_id_file('networking'):

        endpoint.connect()

        NodeClass = chain_config.node_class
        # Temporary hack: We set up a second instance of the PluginManager.
        # The first instance was only to configure the ArgumentParser whereas
        # for now, the second instance that lives inside the networking process
        # performs the bulk of the work. In the future, the PluginManager
        # should probably live in its own process and manage whether plugins
        # run in the shared plugin process or spawn their own.

        plugin_manager = setup_plugins(SharedProcessScope(endpoint))
        plugin_manager.prepare(args, chain_config)
        plugin_manager.broadcast(TrinityStartupEvent(args, chain_config))

        node = NodeClass(plugin_manager, chain_config)
        loop = node.get_event_loop()
        asyncio.ensure_future(handle_networking_exit(node, plugin_manager,
                                                     endpoint),
                              loop=loop)
        asyncio.ensure_future(node.run(), loop=loop)
        loop.run_forever()
        loop.close()
Example #8
def main() -> None:
    plugin_manager = setup_plugins()
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    # if cleanup command, try to shutdown dangling processes and exit
    if args.subcommand == 'fix-unclean-shutdown':
        fix_unclean_shutdown(chain_config, logger)
        sys.exit(0)

    display_launch_logs(chain_config)

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then controls
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        trinity_boot(args, chain_config, extra_kwargs, listener, logger)
Example #9
def test_chain_config_computed_properties(xdg_trinity_root):
    data_dir = get_local_data_dir('muffin', xdg_trinity_root)
    chain_config = ChainConfig(network_id=1234, data_dir=data_dir)

    assert chain_config.network_id == 1234
    assert chain_config.data_dir == data_dir
    assert chain_config.database_dir == data_dir / DATABASE_DIR_NAME / "full"
    assert chain_config.nodekey_path == get_nodekey_path(data_dir)
Example #10
def test_chain_config_computed_properties_custom_xdg(tmpdir, xdg_trinity_root):
    alt_xdg_root = tmpdir.mkdir('trinity-custom')
    assert not is_under_path(alt_xdg_root, xdg_trinity_root)

    data_dir = get_data_dir_for_network_id(1, alt_xdg_root)
    chain_config = ChainConfig(trinity_root_dir=alt_xdg_root, network_id=1)

    assert chain_config.network_id == 1
    assert chain_config.data_dir == data_dir
    assert chain_config.database_dir == data_dir / DATABASE_DIR_NAME / "full"
    assert chain_config.nodekey_path == get_nodekey_path(data_dir)
Example #11
def test_full_initialized_data_dir_with_custom_nodekey():
    chain_config = ChainConfig(network_id=1, max_peers=1, nodekey=NODEKEY)

    os.makedirs(chain_config.data_dir, exist_ok=True)
    os.makedirs(chain_config.database_dir, exist_ok=True)
    os.makedirs(chain_config.logfile_path, exist_ok=True)
    chain_config.logfile_path.touch()

    assert chain_config.nodekey_path is None
    assert chain_config.nodekey is not None

    assert is_data_dir_initialized(chain_config)
Example #12
def launch_node(args: Namespace, chain_config: ChainConfig) -> None:
    with chain_config.process_id_file('networking'):
        NodeClass = chain_config.node_class
        # Temporary hack: We set up a second instance of the PluginManager.
        # The first instance was only to configure the ArgumentParser whereas
        # for now, the second instance that lives inside the networking process
        # performs the bulk of the work. In the future, the PluginManager
        # should probably live in its own process and manage whether plugins
        # run in the shared plugin process or spawn their own.
        plugin_manager = setup_plugins()
        plugin_manager.broadcast(TrinityStartupEvent(args, chain_config))

        node = NodeClass(plugin_manager, chain_config)

        run_service_until_quit(node)
Example #13
def test_chain_config_from_preconfigured_network(network_id):
    chain_config = ChainConfig.from_preconfigured_network(network_id)
    chain = chain_config.initialize_chain(AtomicDB(MemoryDB()))

    if network_id == MAINNET_NETWORK_ID:
        assert chain_config.chain_id == MainnetChain.chain_id
        assert_vm_configuration_equal(chain_config.vm_configuration,
                                      MainnetChain.vm_configuration)
        assert chain.get_canonical_head() == MAINNET_GENESIS_HEADER
    elif network_id == ROPSTEN_NETWORK_ID:
        assert chain_config.chain_id == RopstenChain.chain_id
        assert_vm_configuration_equal(chain_config.vm_configuration,
                                      RopstenChain.vm_configuration)
        assert chain.get_canonical_head() == ROPSTEN_GENESIS_HEADER
    else:
        assert False, "Invariant: unreachable code path"
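The network_id argument above is presumably injected by a parametrize decorator along these lines (an assumption, not the actual source):

@pytest.mark.parametrize('network_id', (MAINNET_NETWORK_ID, ROPSTEN_NETWORK_ID))
def test_chain_config_from_preconfigured_network(network_id):
    ...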
Example #14
def run_database_process(chain_config: ChainConfig, db_class: Type[BaseDB]) -> None:
    with chain_config.process_id_file('database'):
        base_db = db_class(db_path=chain_config.database_dir)

        manager = get_chaindb_manager(chain_config, base_db)
        server = manager.get_server()  # type: ignore

        def _sigint_handler(*args: Any) -> None:
            server.stop_event.set()

        signal.signal(signal.SIGINT, _sigint_handler)

        try:
            server.serve_forever()
        except SystemExit:
            server.stop_event.set()
            raise
Example #15
def test_chain_config_from_eip1085_genesis_config():
    chain_config = ChainConfig.from_eip1085_genesis_config(
        EIP1085_GENESIS_CONFIG)

    assert chain_config.chain_id == 1234
    assert chain_config.vm_configuration == ((0, ConstantinopleVM), )

    params = chain_config.genesis_params

    assert params.nonce == decode_hex(
        EIP1085_GENESIS_CONFIG['genesis']['nonce'])
    assert params.difficulty == to_int(
        hexstr=EIP1085_GENESIS_CONFIG['genesis']['difficulty'])
    assert params.coinbase == decode_hex(
        EIP1085_GENESIS_CONFIG['genesis']['author'])
    assert params.timestamp == to_int(
        hexstr=EIP1085_GENESIS_CONFIG['genesis']['timestamp'])
    assert params.extra_data == decode_hex(
        EIP1085_GENESIS_CONFIG['genesis']['extraData'])
    assert params.gas_limit == to_int(
        hexstr=EIP1085_GENESIS_CONFIG['genesis']['gasLimit'])
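EIP1085_GENESIS_CONFIG itself is not shown. Based solely on the assertions above, a dict of roughly the right shape would look like the following; the key names under 'params' and every value are made-up placeholders, not the project's real fixture:

EIP1085_GENESIS_CONFIG = {
    'params': {
        'chainId': '0x4d2',                 # 1234 == chain_config.chain_id
        'constantinopleForkBlock': '0x0',   # -> ((0, ConstantinopleVM),)
    },
    'genesis': {
        'nonce': '0x0000000000000042',
        'difficulty': '0x400000000',
        'author': '0x' + '00' * 20,
        'timestamp': '0x0',
        'extraData': '0x' + '00' * 32,
        'gasLimit': '0x1388',
    },
}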
Example #16
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()

    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint))
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level)
    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger,
        formatter,
        handler_stream,
        chain_config,
        args.file_log_level,
    )

    display_launch_logs(chain_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(args.stderr_log_level, args.file_log_level,
                                   *(args.log_levels or {}).values())

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then controls
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        trinity_boot(args, chain_config, extra_kwargs, plugin_manager,
                     listener, event_bus, main_endpoint, logger)
Example #17
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()

    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint))
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    has_ambigous_logging_config = (args.log_levels is not None
                                   and None in args.log_levels
                                   and args.stderr_log_level is not None)
    if has_ambigous_logging_config:
        parser.error(
            "\n"
            "Ambiguous logging configuration: The logging level for stderr was "
            "configured with both `--stderr-log-level` and `--log-level`. "
            "Please remove one of these flags", )

    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    stderr_logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level
        or (args.log_levels and args.log_levels.get(None)))

    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path")

    file_logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        stderr_logger,
        formatter,
        handler_stream,
        chain_config,
        args.file_log_level,
    )

    display_launch_logs(chain_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(stderr_logger.level, file_logger.level,
                                   *(args.log_levels or {}).values())

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then controls
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        trinity_boot(
            args,
            chain_config,
            extra_kwargs,
            plugin_manager,
            listener,
            event_bus,
            main_endpoint,
            stderr_logger,
        )
Example #18
def chain_config():
    _chain_config = ChainConfig(network_id=1, max_peers=1)
    initialize_data_dir(_chain_config)
    return _chain_config
Example #19
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stdout_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(chain_config, ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #20
def chain_config():
    return ChainConfig(network_id=1)
Example #21
import argparse

from eth_utils import encode_hex

from eth.chains.mainnet import MAINNET_NETWORK_ID
from eth.chains.ropsten import ROPSTEN_NETWORK_ID
from eth.db.chain import ChainDB
from eth.db.backends.level import LevelDB

from trinity.config import ChainConfig
from trinity.constants import SYNC_FULL, SYNC_LIGHT

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-ropsten', action='store_true')
    parser.add_argument('-light', action='store_true')
    args = parser.parse_args()

    network_id = MAINNET_NETWORK_ID
    if args.ropsten:
        network_id = ROPSTEN_NETWORK_ID
    sync_mode = SYNC_FULL
    if args.light:
        sync_mode = SYNC_LIGHT

    cfg = ChainConfig(network_id, sync_mode=sync_mode)
    chaindb = ChainDB(LevelDB(cfg.database_dir))
    head = chaindb.get_canonical_head()
    print("Head #%d; hash: %s, state_root: %s" %
          (head.block_number, head.hex_hash, encode_hex(head.state_root)))
Example #22
def run_database_process(chain_config: ChainConfig,
                         db_class: Type[BaseDB]) -> None:
    with chain_config.process_id_file('database'):
        base_db = db_class(db_path=chain_config.database_dir)

        serve_chaindb(chain_config, base_db)
Example #23
def chain_config():
    return ChainConfig(network_id=1, max_peers=1)
Example #24
def main() -> None:
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    logger, log_queue, listener = setup_trinity_logging(
        chain_config, log_level)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path,
                use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logging_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=logging_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(chain_config, ),
        kwargs=logging_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path,
                    use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
Example #25
def main() -> None:
    plugin_manager = setup_plugins()
    plugin_manager.amend_argparser_config(parser)
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}.  Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger, formatter, handler_stream, chain_config, log_level)

    display_launch_logs(chain_config)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        run_console(chain_config, not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(
            args,
            chain_config,
        ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)",
                database_server_process.pid)
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    try:
        if args.subcommand == 'console':
            run_console(chain_config, not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
        # foreground *process group*, so both our networking and database processes will terminate
        # at the same time and not sequentially as we'd like. That shouldn't be a problem but if
        # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
        # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
        # process' signal handler to wait until the DB process has terminated before doing its
        # thing.
        # Notice that we still need the kill_process_gracefully() calls here, for when the user
        # simply uses 'kill' to send a signal to the main process, but also because they will
        # perform a non-graceful shutdown if the process takes too long to terminate.
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(database_server_process, logger)
        logger.info('DB server process (pid=%d) terminated',
                    database_server_process.pid)
        # XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
        # join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
        import time
        time.sleep(0.2)  # noqa: E702
        kill_process_gracefully(networking_process, logger)
        logger.info('Networking process (pid=%d) terminated',
                    networking_process.pid)