def kill_trinity_gracefully(logger: logging.Logger,
                            processes: Iterable[multiprocessing.Process],
                            plugin_manager: PluginManager,
                            main_endpoint: Endpoint,
                            event_bus: EventBus,
                            reason: str = None) -> None:
    # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
    # foreground *process group*, so both our networking and database processes will terminate
    # at the same time and not sequentially as we'd like. That shouldn't be a problem but if
    # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
    # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
    # process' signal handler to wait until the DB process has terminated before doing its
    # thing.
    # Notice that we still need the kill_process_gracefully() calls here, for when the user
    # simply uses 'kill' to send a signal to the main process, but also because they will
    # perform a non-graceful shutdown if the process takes too long to terminate.
    hint = f"({reason})" if reason else ""
    logger.info('Shutting down Trinity %s', hint)
    plugin_manager.shutdown_blocking()
    main_endpoint.stop()
    event_bus.stop()
    for process in processes:
        # Our sub-processes will have received a SIGINT already (see comment above), so here we
        # wait 2s for them to finish cleanly, and if they fail we kill them for real.
        process.join(2)
        if process.is_alive():
            kill_process_gracefully(process, logger)
        logger.info('%s process (pid=%d) terminated', process.name, process.pid)

    ArgumentParser().exit(message=f"Trinity shutdown complete {hint}\n")
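# The shutdown paths in this module all lean on kill_process_gracefully(). A minimal
# sketch of what such a helper might look like follows, assuming it escalates from
# SIGINT to SIGTERM to SIGKILL with a bounded wait at each step. The escalation order,
# timeouts, and signature here are illustrative assumptions, not Trinity's actual
# implementation.
import logging
import multiprocessing
import os
import signal


def _sketch_kill_process_gracefully(process: multiprocessing.Process,
                                    logger: logging.Logger,
                                    wait_seconds: float = 2.0) -> None:
    # Ask the process to shut itself down first.
    os.kill(process.pid, signal.SIGINT)
    process.join(wait_seconds)
    if not process.is_alive():
        return
    # Escalate: SIGTERM, then SIGKILL if it still refuses to exit.
    logger.info('Process (pid=%d) ignored SIGINT, sending SIGTERM', process.pid)
    process.terminate()
    process.join(wait_seconds)
    if process.is_alive():
        logger.info('Process (pid=%d) ignored SIGTERM, sending SIGKILL', process.pid)
        os.kill(process.pid, signal.SIGKILL)
        process.join()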
def database_server_ipc_path():
    core_db = MemoryDB()
    core_db[b'key-a'] = b'value-a'

    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)

    with tempfile.TemporaryDirectory() as temp_dir:
        chain_config = ChainConfig(network_id=ROPSTEN_NETWORK_ID, data_dir=temp_dir)

        manager = get_chaindb_manager(chain_config, core_db)
        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(manager,),
        )
        chaindb_server_process.start()

        wait_for_ipc(chain_config.database_ipc_path)

        try:
            yield chain_config.database_ipc_path
        finally:
            kill_process_gracefully(chaindb_server_process, logging.getLogger())
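# database_server_ipc_path reads like a generator-style pytest fixture: it yields the
# IPC path and kills the DB server process in the finally block. A hedged usage sketch
# follows; wrapping the generator with @pytest.fixture and the test body are
# illustrative assumptions, not part of the original module.
import os

import pytest


@pytest.fixture
def db_ipc_path():
    # Delegate to the generator above so pytest drives setup/teardown around the yield.
    yield from database_server_ipc_path()


def test_database_server_creates_ipc_socket(db_ipc_path):
    # By the time the fixture yields, wait_for_ipc() has confirmed the socket exists.
    assert os.path.exists(str(db_ipc_path))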
def kill_trinity_gracefully(logger: logging.Logger,
                            database_server_process: Any,
                            networking_process: Any,
                            plugin_manager: PluginManager,
                            main_endpoint: Endpoint,
                            event_bus: EventBus,
                            message: str = "Trinity shutdown complete\n") -> None:
    # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
    # foreground *process group*, so both our networking and database processes will terminate
    # at the same time and not sequentially as we'd like. That shouldn't be a problem but if
    # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
    # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
    # process' signal handler to wait until the DB process has terminated before doing its
    # thing.
    # Notice that we still need the kill_process_gracefully() calls here, for when the user
    # simply uses 'kill' to send a signal to the main process, but also because they will
    # perform a non-graceful shutdown if the process takes too long to terminate.
    logger.info('Keyboard Interrupt: Stopping')
    plugin_manager.shutdown_blocking()
    main_endpoint.stop()
    event_bus.stop()
    for name, process in [("DB", database_server_process), ("Networking", networking_process)]:
        # Our sub-processes will have received a SIGINT already (see comment above), so here we
        # wait 2s for them to finish cleanly, and if they fail we kill them for real.
        process.join(2)
        if process.is_alive():
            kill_process_gracefully(process, logger)
        logger.info('%s process (pid=%d) terminated', name, process.pid)

    # This is required to be within the `kill_trinity_gracefully` so that
    # plugins can trigger a shutdown of the trinity process.
    ArgumentParser().exit(message=message)
def kill_trinity_gracefully(logger: logging.Logger,
                            database_server_process: Any,
                            networking_process: Any,
                            plugin_manager: PluginManager,
                            event_bus: EventBus,
                            message: str = "Trinity shutdown complete\n") -> None:
    # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
    # foreground *process group*, so both our networking and database processes will terminate
    # at the same time and not sequentially as we'd like. That shouldn't be a problem but if
    # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
    # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
    # process' signal handler to wait until the DB process has terminated before doing its
    # thing.
    # Notice that we still need the kill_process_gracefully() calls here, for when the user
    # simply uses 'kill' to send a signal to the main process, but also because they will
    # perform a non-graceful shutdown if the process takes too long to terminate.
    logger.info('Keyboard Interrupt: Stopping')
    plugin_manager.shutdown()
    event_bus.shutdown()
    kill_process_gracefully(database_server_process, logger)
    logger.info('DB server process (pid=%d) terminated', database_server_process.pid)
    # XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
    # join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
    time.sleep(0.2)
    kill_process_gracefully(networking_process, logger)
    logger.info('Networking process (pid=%d) terminated', networking_process.pid)

    # This is required to be within the `kill_trinity_gracefully` so that
    # plugins can trigger a shutdown of the trinity process.
    ArgumentParser().exit(message=message)
def trinity_boot(args: Namespace,
                 chain_config: ChainConfig,
                 extra_kwargs: Dict[str, Any],
                 listener: logging.handlers.QueueListener,
                 logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(
            args,
            chain_config,
        ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)", database_server_process.pid)
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    try:
        networking_process.join()
    except KeyboardInterrupt:
        # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
        # foreground *process group*, so both our networking and database processes will terminate
        # at the same time and not sequentially as we'd like. That shouldn't be a problem but if
        # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
        # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
        # process' signal handler to wait until the DB process has terminated before doing its
        # thing.
        # Notice that we still need the kill_process_gracefully() calls here, for when the user
        # simply uses 'kill' to send a signal to the main process, but also because they will
        # perform a non-graceful shutdown if the process takes too long to terminate.
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(database_server_process, logger)
        logger.info('DB server process (pid=%d) terminated', database_server_process.pid)
        # XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
        # join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
        time.sleep(0.2)
        kill_process_gracefully(networking_process, logger)
        logger.info('Networking process (pid=%d) terminated', networking_process.pid)
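# The boot paths above block on wait_for_ipc() before starting the networking process,
# because that process needs the database IPC socket to exist. A minimal sketch of such
# a helper follows, assuming it simply polls the filesystem and raises TimeoutError if
# the socket never appears; the timeout and poll interval are illustrative assumptions,
# not Trinity's actual values.
import os
import time
from pathlib import Path


def _sketch_wait_for_ipc(ipc_path: Path, timeout: float = 10.0, interval: float = 0.05) -> None:
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if os.path.exists(str(ipc_path)):
            return
        time.sleep(interval)
    raise TimeoutError(f"IPC socket {ipc_path} did not appear within {timeout} seconds")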
def database_server_ipc_path():
    core_db = MemoryDB()
    core_db[b'key-a'] = b'value-a'

    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header_to_db(ROPSTEN_GENESIS_HEADER)

    with tempfile.TemporaryDirectory() as temp_dir:
        ipc_path = os.path.join(temp_dir, 'chaindb.ipc')
        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(core_db, ipc_path),
        )
        chaindb_server_process.start()
        wait_for_ipc(ipc_path)
        try:
            yield ipc_path
        finally:
            kill_process_gracefully(chaindb_server_process)
def trinity_boot(args: Namespace,
                 trinity_config: TrinityConfig,
                 extra_kwargs: Dict[str, Any],
                 plugin_manager: PluginManager,
                 listener: logging.handlers.QueueListener,
                 event_bus: EventBus,
                 main_endpoint: Endpoint,
                 logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    networking_endpoint = event_bus.create_endpoint(NETWORKING_EVENTBUS_ENDPOINT)
    event_bus.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            trinity_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(args, trinity_config, networking_endpoint,),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)", database_server_process.pid)

    # networking process needs the IPC socket file provided by the database process
    try:
        wait_for_ipc(trinity_config.database_ipc_path)
    except TimeoutError as e:
        logger.error("Timeout waiting for database to start. Exiting...")
        kill_process_gracefully(database_server_process, logger)
        ArgumentParser().error(message="Timed out waiting for database start")

    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    main_endpoint.subscribe(
        ShutdownRequest,
        lambda ev: kill_trinity_gracefully(
            logger,
            database_server_process,
            networking_process,
            plugin_manager,
            main_endpoint,
            event_bus
        )
    )

    plugin_manager.prepare(args, trinity_config, extra_kwargs)
    plugin_manager.broadcast(TrinityStartupEvent(
        args,
        trinity_config
    ))

    try:
        loop = asyncio.get_event_loop()
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_trinity_gracefully(
            logger,
            database_server_process,
            networking_process,
            plugin_manager,
            main_endpoint,
            event_bus
        )
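# trinity_boot above wires main_endpoint so that any ShutdownRequest event on the bus
# tears the whole node down via kill_trinity_gracefully. A hedged sketch of how a plugin
# might trigger that from its own endpoint follows; the import location and the
# ShutdownRequest constructor arguments are illustrative assumptions.
from trinity.events import ShutdownRequest  # assumed import path


def request_shutdown(endpoint) -> None:
    # Broadcasting on the plugin's endpoint reaches main_endpoint's subscription,
    # which then calls kill_trinity_gracefully in the main process.
    endpoint.broadcast(ShutdownRequest())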
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    pool_class = PeerPool
    if args.local_geth:
        pool_class = LocalGethPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = FakeAsyncChainDB(LevelDB(chain_config.database_dir))
        if not is_database_initialized(chaindb):
            initialize_database(chain_config, chaindb)
        peer_pool = pool_class(LESPeer, chaindb, chain_config.network_id, chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        console(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs={'log_queue': log_queue},
    )

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(
        target=run_networking_process,
        args=(chain_config, sync_mode, pool_class),
        kwargs={'log_queue': log_queue},
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
def do_stop(self) -> None:
    self.context.event_bus.stop()
    kill_process_gracefully(self._process, self.logger)
def main() -> None:
    args = parser.parse_args()
    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}. Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    logger, log_queue, listener = setup_trinity_logging(chain_config, log_level)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logging_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=logging_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(chain_config, ),
        kwargs=logging_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
def main() -> None:
    args = parser.parse_args()

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}. Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    if args.sync_mode != SYNC_LIGHT:
        raise NotImplementedError(
            "Only light sync is supported. Run with `--sync-mode=light` or `--light`")

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process, args=(
        chain_config,
        LevelDB,
    ), kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, args.sync_mode, pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    pool_class = PeerPool
    if args.local_geth:
        pool_class = LocalGethPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'console':
        use_ipython = not args.vanilla_shell
        debug = args.log_level.upper() == 'DEBUG'

        # TODO: this should use the base `Chain` class rather than the protocol
        # class since it's just a repl with access to the chain.
        chain_class = get_chain_protocol_class(chain_config, sync_mode)
        chaindb = FakeAsyncChainDB(LevelDB(chain_config.database_dir))
        if not is_database_initialized(chaindb):
            initialize_database(chain_config, chaindb)
        peer_pool = pool_class(LESPeer, chaindb, chain_config.network_id, chain_config.nodekey)

        chain = chain_class(chaindb, peer_pool)
        console(chain, use_ipython=use_ipython, debug=debug)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process, args=(
        chain_config,
        LevelDB,
    ), kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode, pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
def main() -> None:
    args = parser.parse_args()
    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}. Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stdout_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger,
        formatter,
        handler_stream,
        chain_config,
        log_level)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(chain_config, ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
def main() -> None:
    args = parser.parse_args()

    if args.ropsten:
        chain_identifier = ROPSTEN
    else:
        # TODO: mainnet
        chain_identifier = ROPSTEN

    if args.light:
        sync_mode = SYNC_LIGHT
    else:
        # TODO: actually use args.sync_mode (--sync-mode)
        sync_mode = SYNC_LIGHT

    chain_config = ChainConfig.from_parser_args(
        chain_identifier,
        args,
    )

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        sys.exit(0)

    logger, log_queue, listener = setup_trinity_logging(args.log_level.upper())

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    # First initialize the database process.
    database_server_process = ctx.Process(target=run_database_process, args=(
        chain_config,
        LevelDB,
    ), kwargs={'log_queue': log_queue})

    # For now we just run the light sync against ropsten by default.
    networking_process = ctx.Process(target=run_networking_process,
                                     args=(chain_config, sync_mode, pool_class),
                                     kwargs={'log_queue': log_queue})

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
def trinity_boot(args: Namespace,
                 trinity_config: TrinityConfig,
                 extra_kwargs: Dict[str, Any],
                 plugin_manager: PluginManager,
                 listener: logging.handlers.QueueListener,
                 event_bus: EventBus,
                 main_endpoint: Endpoint,
                 logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    event_bus.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        name="DB",
        target=run_database_process,
        args=(
            trinity_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)", database_server_process.pid)

    # networking process needs the IPC socket file provided by the database process
    try:
        wait_for_ipc(trinity_config.database_ipc_path)
    except TimeoutError as e:
        logger.error("Timeout waiting for database to start. Exiting...")
        kill_process_gracefully(database_server_process, logger)
        ArgumentParser().error(message="Timed out waiting for database start")

    def kill_trinity_with_reason(reason: str) -> None:
        kill_trinity_gracefully(
            logger,
            (database_server_process,),
            plugin_manager,
            main_endpoint,
            event_bus,
            reason=reason
        )

    main_endpoint.subscribe(
        ShutdownRequest,
        lambda ev: kill_trinity_with_reason(ev.reason)
    )

    plugin_manager.prepare(args, trinity_config, extra_kwargs)

    kill_trinity_with_reason("No beacon support yet. SOON!")

    try:
        loop = asyncio.get_event_loop()
        loop.add_signal_handler(signal.SIGTERM, lambda: kill_trinity_with_reason("SIGTERM"))
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_trinity_with_reason("CTRL+C / Keyboard Interrupt")
def main() -> None:
    args = parser.parse_args()
    log_level = getattr(logging, args.log_level.upper())

    logger, log_queue, listener = setup_trinity_logging(log_level)

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}. Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    chain_config = ChainConfig.from_parser_args(args)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        initialize_data_dir(chain_config)

    # TODO: needs to be made generic once we have non-light modes.
    pool_class = HardCodedNodesPeerPool

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logging_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=logging_kwargs,
    )

    # TODO: Combine run_fullnode_process/run_lightnode_process into a single function that simply
    # passes the sync mode to p2p.Server, which then selects the appropriate sync service.
    networking_proc_fn = run_fullnode_process
    if args.sync_mode == SYNC_LIGHT:
        networking_proc_fn = run_lightnode_process
    networking_process = ctx.Process(
        target=networking_proc_fn,
        args=(chain_config, pool_class),
        kwargs=logging_kwargs,
    )

    # start the processes
    database_server_process.start()
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()

    try:
        if args.subcommand == 'console':
            console(chain_config.jsonrpc_ipc_path, use_ipython=not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(networking_process)
        logger.info('KILLED networking_process')
        kill_process_gracefully(database_server_process)
        logger.info('KILLED database_server_process')
def main() -> None:
    plugin_manager = setup_plugins()
    plugin_manager.amend_argparser_config(parser)
    args = parser.parse_args()

    log_level = getattr(logging, args.log_level.upper())

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}. Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id))

    logger, formatter, handler_stream = setup_trinity_stderr_logging(log_level)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path").format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger,
        formatter,
        handler_stream,
        chain_config,
        log_level)

    display_launch_logs(chain_config)

    # if console command, run the trinity CLI
    if args.subcommand == 'attach':
        run_console(chain_config, not args.vanilla_shell)
        sys.exit(0)

    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': log_level,
        'profile': args.profile,
    }

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(
            args,
            chain_config,
        ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)", database_server_process.pid)
    wait_for_ipc(chain_config.database_ipc_path)

    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    try:
        if args.subcommand == 'console':
            run_console(chain_config, not args.vanilla_shell)
        else:
            networking_process.join()
    except KeyboardInterrupt:
        # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
        # foreground *process group*, so both our networking and database processes will terminate
        # at the same time and not sequentially as we'd like. That shouldn't be a problem but if
        # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like reported in
        # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
        # process' signal handler to wait until the DB process has terminated before doing its
        # thing.
        # Notice that we still need the kill_process_gracefully() calls here, for when the user
        # simply uses 'kill' to send a signal to the main process, but also because they will
        # perform a non-graceful shutdown if the process takes too long to terminate.
        logger.info('Keyboard Interrupt: Stopping')
        kill_process_gracefully(database_server_process, logger)
        logger.info('DB server process (pid=%d) terminated', database_server_process.pid)
        # XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
        # join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
        import time; time.sleep(0.2)  # noqa: E702
        kill_process_gracefully(networking_process, logger)
        logger.info('Networking process (pid=%d) terminated', networking_process.pid)