def kill_trinity_gracefully(logger: logging.Logger,
                            database_server_process: Any,
                            networking_process: Any,
                            plugin_manager: PluginManager,
                            event_bus: EventBus,
                            message: str = "Trinity shutdown complete\n") -> None:
    # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
    # foreground *process group*, so both our networking and database processes will terminate
    # at the same time and not sequentially as we'd like. That shouldn't be a problem, but if
    # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like those reported in
    # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
    # process' signal handler to wait until the DB process has terminated before doing its
    # thing.
    # Notice that we still need the kill_process_gracefully() calls here, both for when the
    # user simply uses 'kill' to send a signal to the main process, and because they will
    # perform a non-graceful shutdown if the process takes too long to terminate.
    logger.info('Keyboard Interrupt: Stopping')
    plugin_manager.shutdown()
    event_bus.shutdown()
    kill_process_gracefully(database_server_process, logger)
    logger.info('DB server process (pid=%d) terminated', database_server_process.pid)
    # XXX: This short sleep here seems to avoid us hitting a deadlock when attempting to
    # join() the networking subprocess: https://github.com/ethereum/py-evm/issues/940
    time.sleep(0.2)
    kill_process_gracefully(networking_process, logger)
    logger.info('Networking process (pid=%d) terminated', networking_process.pid)
    # This is required to be within `kill_trinity_gracefully` so that
    # plugins can trigger a shutdown of the trinity process.
    ArgumentParser().exit(message=message)
def kill_trinity_gracefully(logger: logging.Logger,
                            database_server_process: Any,
                            networking_process: Any,
                            plugin_manager: PluginManager,
                            main_endpoint: Endpoint,
                            event_bus: EventBus,
                            message: str = "Trinity shutdown complete\n") -> None:
    # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
    # foreground *process group*, so both our networking and database processes will terminate
    # at the same time and not sequentially as we'd like. That shouldn't be a problem, but if
    # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like those reported in
    # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
    # process' signal handler to wait until the DB process has terminated before doing its
    # thing.
    # Notice that we still need the kill_process_gracefully() calls here, both for when the
    # user simply uses 'kill' to send a signal to the main process, and because they will
    # perform a non-graceful shutdown if the process takes too long to terminate.
    logger.info('Keyboard Interrupt: Stopping')
    plugin_manager.shutdown_blocking()
    main_endpoint.stop()
    event_bus.stop()
    for name, process in [("DB", database_server_process), ("Networking", networking_process)]:
        # Our sub-processes will have received a SIGINT already (see comment above), so here we
        # wait 2s for them to finish cleanly, and if they fail we kill them for real.
        process.join(2)
        if process.is_alive():
            kill_process_gracefully(process, logger)
        logger.info('%s process (pid=%d) terminated', name, process.pid)
    # This is required to be within `kill_trinity_gracefully` so that
    # plugins can trigger a shutdown of the trinity process.
    ArgumentParser().exit(message=message)
def kill_trinity_gracefully(logger: logging.Logger,
                            processes: Iterable[multiprocessing.Process],
                            plugin_manager: PluginManager,
                            main_endpoint: Endpoint,
                            event_bus: EventBus,
                            reason: Optional[str] = None) -> None:
    # When a user hits Ctrl+C in the terminal, the SIGINT is sent to all processes in the
    # foreground *process group*, so both our networking and database processes will terminate
    # at the same time and not sequentially as we'd like. That shouldn't be a problem, but if
    # we keep getting unhandled BrokenPipeErrors/ConnectionResetErrors like those reported in
    # https://github.com/ethereum/py-evm/issues/827, we might want to change the networking
    # process' signal handler to wait until the DB process has terminated before doing its
    # thing.
    # Notice that we still need the kill_process_gracefully() calls here, both for when the
    # user simply uses 'kill' to send a signal to the main process, and because they will
    # perform a non-graceful shutdown if the process takes too long to terminate.
    hint = f"({reason})" if reason else ""
    logger.info('Shutting down Trinity %s', hint)

    plugin_manager.shutdown_blocking()
    main_endpoint.stop()
    event_bus.stop()
    for process in processes:
        # Our sub-processes will have received a SIGINT already (see comment above), so here we
        # wait 2s for them to finish cleanly, and if they fail we kill them for real.
        process.join(2)
        if process.is_alive():
            kill_process_gracefully(process, logger)
        logger.info('%s process (pid=%d) terminated', process.name, process.pid)

    ArgumentParser().exit(message=f"Trinity shutdown complete {hint}\n")
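# All three variants above delegate the forceful path to a `kill_process_gracefully`
# helper that is not part of this section. The following is a minimal sketch of what
# such a helper could look like, inferred only from how it is called (a process plus
# a logger); the escalation ladder and the timeout value are assumptions, not the
# actual trinity implementation.
import logging
import os
import signal
from multiprocessing import Process


def kill_process_gracefully_sketch(process: Process,
                                   logger: logging.Logger,
                                   timeout: float = 2.0) -> None:
    # Walk through increasingly forceful signals, giving the child `timeout`
    # seconds to exit cleanly after each one before escalating.
    for sig in (signal.SIGINT, signal.SIGTERM, signal.SIGKILL):
        if not process.is_alive():
            return
        logger.debug('Sending %s to process (pid=%d)', sig.name, process.pid)
        os.kill(process.pid, sig)
        process.join(timeout)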
def trinity_boot(args: Namespace,
                 trinity_config: TrinityConfig,
                 extra_kwargs: Dict[str, Any],
                 plugin_manager: PluginManager,
                 listener: logging.handlers.QueueListener,
                 event_bus: EventBus,
                 main_endpoint: Endpoint,
                 logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    event_bus.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        name="DB",
        target=run_database_process,
        args=(
            trinity_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)", database_server_process.pid)

    # networking process needs the IPC socket file provided by the database process
    try:
        wait_for_ipc(trinity_config.database_ipc_path)
    except TimeoutError:
        logger.error("Timeout waiting for database to start. Exiting...")
        kill_process_gracefully(database_server_process, logger)
        ArgumentParser().error(message="Timed out waiting for database start")

    def kill_trinity_with_reason(reason: str) -> None:
        kill_trinity_gracefully(
            logger,
            (database_server_process,),
            plugin_manager,
            main_endpoint,
            event_bus,
            reason=reason
        )

    main_endpoint.subscribe(
        ShutdownRequest,
        lambda ev: kill_trinity_with_reason(ev.reason)
    )

    plugin_manager.prepare(args, trinity_config, extra_kwargs)

    kill_trinity_with_reason("No beacon support yet. SOON!")

    try:
        loop = asyncio.get_event_loop()
        loop.add_signal_handler(signal.SIGTERM, lambda: kill_trinity_with_reason("SIGTERM"))
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_trinity_with_reason("CTRL+C / Keyboard Interrupt")
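# `wait_for_ipc` above blocks until the database process has created its IPC
# socket file, raising TimeoutError otherwise. A minimal sketch under the
# assumption that it simply polls the filesystem for the socket path; the real
# helper may differ.
import time
from pathlib import Path


def wait_for_ipc_sketch(ipc_path: Path, timeout: float = 10.0) -> None:
    # The DB process creates the socket once it is ready to serve; poll for it.
    deadline = time.monotonic() + timeout
    while not ipc_path.exists():
        if time.monotonic() > deadline:
            raise TimeoutError(f"No IPC socket at {ipc_path} after {timeout}s")
        time.sleep(0.05)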
async def test_stream_with_max() -> None:
    bus = EventBus()
    endpoint = bus.create_endpoint('test')
    bus.start()
    endpoint.connect()

    stream_counter = 0

    async def stream_response() -> None:
        nonlocal stream_counter
        async for event in endpoint.stream(DummyRequest, max=2):
            # Accessing `event.property_of_dummy_request` here allows us to validate
            # that mypy has the type information we think it has. We run mypy on the tests.
            print(event.property_of_dummy_request)
            stream_counter += 1

    asyncio.ensure_future(stream_response())

    # we broadcast one more item than we consume and test for that
    for i in range(3):
        endpoint.broadcast(DummyRequest())

    await asyncio.sleep(0.01)
    endpoint.stop()
    bus.stop()
    assert stream_counter == 2
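# The tests in this section use a `DummyRequest` event type that is not defined
# here. A plausible sketch, assuming lahja's `BaseEvent` base class; the
# attribute name comes from the tests above, everything else is an assumption.
from lahja import BaseEvent


class DummyRequest(BaseEvent):
    # The single payload attribute the tests read back off the event.
    property_of_dummy_request = 'dummy_request'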
async def event_bus(event_loop):
    bus = EventBus()
    endpoint = bus.create_endpoint(NETWORKING_EVENTBUS_ENDPOINT)
    bus.start(event_loop)
    await endpoint.connect(event_loop)
    try:
        yield endpoint
    finally:
        endpoint.stop()
        bus.stop()
async def event_bus(event_loop):
    bus = EventBus()
    endpoint = bus.create_endpoint('test')
    bus.start(event_loop)
    await endpoint.connect(event_loop)
    try:
        yield endpoint
    finally:
        endpoint.stop()
        bus.stop()
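# Both `event_bus` coroutines above follow the pytest async-fixture shape
# (setup, yield, teardown), so in the source they are presumably decorated with
# `@pytest.fixture`. A hedged usage sketch assuming pytest-asyncio; the test
# body and event type are illustrative only.
import pytest


@pytest.mark.asyncio
async def test_can_broadcast(event_bus):
    # `event_bus` is the connected Endpoint yielded by the fixture above.
    event_bus.broadcast(DummyRequest())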
def trinity_boot(args: Namespace,
                 trinity_config: TrinityConfig,
                 extra_kwargs: Dict[str, Any],
                 plugin_manager: PluginManager,
                 listener: logging.handlers.QueueListener,
                 event_bus: EventBus,
                 main_endpoint: Endpoint,
                 logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    event_bus.start()

    def kill_trinity_with_reason(reason: str) -> None:
        kill_trinity_gracefully(
            logger,
            (),
            plugin_manager,
            main_endpoint,
            event_bus,
            reason=reason
        )

    main_endpoint.subscribe(
        ShutdownRequest,
        lambda ev: kill_trinity_with_reason(ev.reason)
    )

    plugin_manager.prepare(args, trinity_config, extra_kwargs)

    try:
        loop = asyncio.get_event_loop()
        loop.add_signal_handler(signal.SIGTERM, lambda: kill_trinity_with_reason("SIGTERM"))
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_trinity_with_reason("CTRL+C / Keyboard Interrupt")
async def test_response_must_match() -> None:
    bus = EventBus()
    endpoint = bus.create_endpoint('test')
    bus.start()
    endpoint.connect()

    endpoint.subscribe(
        DummyRequestPair,
        lambda ev: endpoint.broadcast(
            # We intentionally broadcast an unexpected response. Mypy can't catch
            # this, but we ensure it is caught and raised during processing.
            DummyRequest(),
            ev.broadcast_config()
        )
    )

    with pytest.raises(UnexpectedResponse):
        await endpoint.request(DummyRequestPair())

    endpoint.stop()
    bus.stop()
async def test_request() -> None:
    bus = EventBus()
    endpoint = bus.create_endpoint('test')
    bus.start()
    endpoint.connect()

    endpoint.subscribe(
        DummyRequestPair,
        lambda ev: endpoint.broadcast(
            # Accessing `ev.property_of_dummy_request_pair` here allows us to validate
            # that mypy has the type information we think it has. We run mypy on the tests.
            DummyResponse(ev.property_of_dummy_request_pair),
            ev.broadcast_config()
        )
    )

    response = await endpoint.request(DummyRequestPair())
    # Accessing `response.property_of_dummy_response` here allows us to validate
    # that mypy has the type information we think it has. We run mypy on the tests.
    print(response.property_of_dummy_response)
    assert isinstance(response, DummyResponse)

    endpoint.stop()
    bus.stop()
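# `DummyRequestPair` and `DummyResponse` form the request/response pair used by
# the two tests above. A sketch assuming lahja's `BaseRequestResponseEvent`,
# whose `expected_response_type()` hook is what lets the endpoint raise
# `UnexpectedResponse` when the wrong event comes back; the attribute names are
# taken from the tests, the rest is an assumption.
from typing import Type

from lahja import BaseEvent, BaseRequestResponseEvent


class DummyResponse(BaseEvent):
    def __init__(self, payload: str) -> None:
        self.property_of_dummy_response = payload


class DummyRequestPair(BaseRequestResponseEvent[DummyResponse]):
    property_of_dummy_request_pair = 'dummy_request_pair'

    @staticmethod
    def expected_response_type() -> Type[DummyResponse]:
        return DummyResponse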
async def test_wait_for() -> None:
    bus = EventBus()
    endpoint = bus.create_endpoint('test')
    bus.start()
    endpoint.connect()

    received = None

    async def stream_response() -> None:
        nonlocal received
        request = await endpoint.wait_for(DummyRequest)
        # Accessing `request.property_of_dummy_request` here allows us to validate
        # that mypy has the type information we think it has. We run mypy on the tests.
        print(request.property_of_dummy_request)
        received = request

    asyncio.ensure_future(stream_response())
    endpoint.broadcast(DummyRequest())

    await asyncio.sleep(0.01)
    endpoint.stop()
    bus.stop()
    assert isinstance(received, DummyRequest)
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()
    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint)
    )
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}. Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id)
        )

    logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level
    )

    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        exit_because_ambigious_filesystem(logger)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            exit_because_ambigious_filesystem(logger)
        except MissingPath as e:
            msg = (
                "\n"
                "It appears that {} does not exist.\n"
                "Trinity does not attempt to create directories outside of its root path\n"
                "Either manually create the path or ensure you are using a data directory\n"
                "inside the XDG_TRINITY_ROOT path"
            ).format(e.path)
            logger.error(msg)
            sys.exit(1)

    logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        logger,
        formatter,
        handler_stream,
        chain_config,
        args.file_log_level,
    )

    display_launch_logs(chain_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(
        args.stderr_log_level,
        args.file_log_level,
        *(args.log_levels or {}).values()
    )

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then controls
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        trinity_boot(
            args,
            chain_config,
            extra_kwargs,
            plugin_manager,
            listener,
            event_bus,
            main_endpoint,
            logger
        )
def helios_boot(args: Namespace,
                chain_config: ChainConfig,
                extra_kwargs: Dict[str, Any],
                plugin_manager: PluginManager,
                listener: logging.handlers.QueueListener,
                event_bus: EventBus,
                main_endpoint: Endpoint,
                logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    logger.info(
        "Checking for any already running Helios Protocol processes that need "
        "shutting down. Remember that you can only run 1 instance at a time."
    )
    fix_unclean_shutdown(chain_config, logger)

    with chain_config.process_id_file('main'):
        networking_endpoint = event_bus.create_endpoint(NETWORKING_EVENTBUS_ENDPOINT)
        event_bus.start()

        # First initialize the database process.
        database_server_process = ctx.Process(
            target=run_database_process,
            args=(
                chain_config,
                LevelDB,
            ),
            kwargs=extra_kwargs,
        )

        chain_processes = []
        for i in range(chain_config.num_chain_processes):
            chain_process = ctx.Process(
                target=run_chain_process,
                args=(chain_config, i),
                kwargs=extra_kwargs,
            )
            chain_processes.append(chain_process)

        networking_process = ctx.Process(
            target=launch_node,
            args=(
                args,
                chain_config,
                networking_endpoint,
            ),
            kwargs=extra_kwargs,
        )

        # start the processes
        database_server_process.start()
        logger.info("Started DB server process (pid=%d)", database_server_process.pid)

        # networking process needs the IPC socket file provided by the database process
        try:
            wait_for_ipc(chain_config.database_ipc_path)
        except TimeoutError:
            logger.error("Timeout waiting for database to start. Exiting...")
            kill_process_gracefully(database_server_process, logger)
            ArgumentParser().error(message="Timed out waiting for database start")

        for i in range(chain_config.num_chain_processes):
            chain_process = chain_processes[i]
            chain_process.start()
            logger.info("Started chain instance {} process (pid={})".format(i, chain_process.pid))
            try:
                wait_for_ipc(chain_config.get_chain_ipc_path(i))
            except TimeoutError:
                logger.error("Timeout waiting for chain instance {} to start. Exiting...".format(i))
                kill_process_gracefully(chain_process, logger)
                for j in range(i + 1):
                    kill_process_gracefully(chain_processes[j], logger)
                ArgumentParser().error(message="Timed out waiting for chain instance {} start".format(i))

        networking_process.start()
        logger.info("Started networking process (pid=%d)", networking_process.pid)

        main_endpoint.subscribe(
            ShutdownRequest,
            lambda ev: kill_helios_gracefully(
                logger,
                database_server_process,
                chain_processes,
                networking_process,
                plugin_manager,
                main_endpoint,
                event_bus
            )
        )

        plugin_manager.prepare(args, chain_config, extra_kwargs)
        plugin_manager.broadcast(HeliosStartupEvent(
            args,
            chain_config
        ))

        try:
            loop = asyncio.get_event_loop()
            loop.run_forever()
            loop.close()
        except KeyboardInterrupt:
            kill_helios_gracefully(
                logger,
                database_server_process,
                chain_processes,
                networking_process,
                plugin_manager,
                main_endpoint,
                event_bus
            )
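# `kill_helios_gracefully` is referenced above but not part of this section. By
# analogy with the `kill_trinity_gracefully` variants earlier, it plausibly stops
# the plugin manager, endpoint and event bus, then joins each child process and
# falls back to a forceful kill. The following is a sketch under those
# assumptions, not the actual helios implementation.
def kill_helios_gracefully_sketch(logger, database_server_process, chain_processes,
                                  networking_process, plugin_manager,
                                  main_endpoint, event_bus) -> None:
    logger.info('Keyboard Interrupt: Stopping')
    plugin_manager.shutdown_blocking()
    main_endpoint.stop()
    event_bus.stop()
    for process in (database_server_process, *chain_processes, networking_process):
        # Children already received SIGINT via the foreground process group, so
        # give them a moment to exit cleanly before killing them for real.
        process.join(2)
        if process.is_alive():
            kill_process_gracefully(process, logger)
        logger.info('Process (pid=%d) terminated', process.pid)
    ArgumentParser().exit(message="Helios shutdown complete\n")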
def trinity_boot(args: Namespace,
                 trinity_config: TrinityConfig,
                 extra_kwargs: Dict[str, Any],
                 plugin_manager: PluginManager,
                 listener: logging.handlers.QueueListener,
                 event_bus: EventBus,
                 main_endpoint: Endpoint,
                 logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    networking_endpoint = event_bus.create_endpoint(NETWORKING_EVENTBUS_ENDPOINT)
    event_bus.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            trinity_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    networking_process = ctx.Process(
        target=launch_node,
        args=(args, trinity_config, networking_endpoint,),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)", database_server_process.pid)

    # networking process needs the IPC socket file provided by the database process
    try:
        wait_for_ipc(trinity_config.database_ipc_path)
    except TimeoutError:
        logger.error("Timeout waiting for database to start. Exiting...")
        kill_process_gracefully(database_server_process, logger)
        ArgumentParser().error(message="Timed out waiting for database start")

    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    main_endpoint.subscribe(
        ShutdownRequest,
        lambda ev: kill_trinity_gracefully(
            logger,
            database_server_process,
            networking_process,
            plugin_manager,
            main_endpoint,
            event_bus
        )
    )

    plugin_manager.prepare(args, trinity_config, extra_kwargs)
    plugin_manager.broadcast(TrinityStartupEvent(
        args,
        trinity_config
    ))

    try:
        loop = asyncio.get_event_loop()
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_trinity_gracefully(
            logger,
            database_server_process,
            networking_process,
            plugin_manager,
            main_endpoint,
            event_bus
        )
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()
    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint)
    )
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    if args.network_id not in PRECONFIGURED_NETWORKS:
        raise NotImplementedError(
            "Unsupported network id: {0}. Only the ropsten and mainnet "
            "networks are supported.".format(args.network_id)
        )

    has_ambigous_logging_config = (
        args.log_levels is not None and
        None in args.log_levels and
        args.stderr_log_level is not None
    )
    if has_ambigous_logging_config:
        parser.error(
            "\n"
            "Ambiguous logging configuration: The logging level for stderr was "
            "configured with both `--stderr-log-level` and `--log-level`. "
            "Please remove one of these flags",
        )

    if is_prerelease():
        # this modifies the asyncio logger, but will be overridden by any custom settings below
        enable_warnings_by_default()

    stderr_logger, formatter, handler_stream = setup_trinity_stderr_logging(
        args.stderr_log_level or (args.log_levels and args.log_levels.get(None))
    )

    if args.log_levels:
        setup_log_levels(args.log_levels)

    try:
        trinity_config = TrinityConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(trinity_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(trinity_config)
        except AmbigiousFileSystem:
            parser.error(TRINITY_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Trinity does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_TRINITY_ROOT path"
            )

    file_logger, log_queue, listener = setup_trinity_file_and_queue_logging(
        stderr_logger,
        formatter,
        handler_stream,
        trinity_config,
        args.file_log_level,
    )

    display_launch_logs(trinity_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(
        stderr_logger.level,
        file_logger.level,
        *(args.log_levels or {}).values()
    )

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then controls
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, trinity_config)
    else:
        trinity_boot(
            args,
            trinity_config,
            extra_kwargs,
            plugin_manager,
            listener,
            event_bus,
            main_endpoint,
            stderr_logger,
        )
def main() -> None:
    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()
    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint)
    )
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    #
    # Dev testing stuff
    #
    if args.start_memory_profile:
        os.environ["PYTHONTRACEMALLOC"] = '1'
    if args.rand_db:
        os.environ["GENERATE_RANDOM_DATABASE"] = 'true'
    if args.instance is not None:
        from helios.utils.xdg import get_xdg_helios_root
        args.port = args.port + args.instance * 2

        if args.instance != 0:
            args.do_rpc_http_server = False

        subdir = 'instance_' + str(args.instance)
        absolute_path = get_xdg_helios_root() / subdir

        absolute_dir = os.path.dirname(os.path.realpath(__file__))
        absolute_keystore_path = absolute_dir + '/keystore/'
        args.keystore_path = absolute_keystore_path + subdir

        args.keystore_password = '******'

        os.environ["HELIOS_DATA_DIR"] = str(absolute_path.resolve())
        os.environ["INSTANCE_NUMBER"] = str(args.instance)
    #
    #
    #

    # NOTE: a span of the source was masked out here ('******'); the prompt's
    # closing parenthesis, the statements that consume the password, the
    # creation of the log_levels dict and the condition selecting the TRACE
    # branch below were lost to the mask.
    if not args.keystore_password and not hasattr(args, 'func'):
        password = getpass.getpass(prompt='Keystore Password: '******'default'] = TRACE_LEVEL_NUM
        log_levels['hvm'] = TRACE_LEVEL_NUM
        log_levels['hp2p'] = TRACE_LEVEL_NUM
        log_levels['helios'] = TRACE_LEVEL_NUM
        log_levels['urllib3'] = TRACE_LEVEL_NUM
        log_levels['ssdp'] = TRACE_LEVEL_NUM
        log_levels['Service'] = TRACE_LEVEL_NUM
        log_levels['Action'] = TRACE_LEVEL_NUM
        log_levels['Device'] = TRACE_LEVEL_NUM
        log_levels['helios.extensibility'] = TRACE_LEVEL_NUM
    else:
        log_levels['default'] = logging.INFO
        log_levels['urllib3'] = logging.INFO
        log_levels['ssdp'] = logging.INFO
        log_levels['Service'] = logging.INFO

        log_levels['hvm'] = logging.DEBUG  # sets all of hvm
        log_levels['hvm.db.account.AccountDB'] = logging.DEBUG
        log_levels['hvm.vm.base.VM.VM'] = logging.DEBUG
        log_levels['hvm.chain'] = logging.DEBUG
        # log_levels['hvm.chain.chain.Chain'] = logging.DEBUG
        log_levels['hvm.db.chain_head.ChainHeadDB'] = logging.DEBUG
        log_levels['hvm.db.chain_db.ChainDB'] = logging.DEBUG
        log_levels['hvm.db.consensus'] = logging.DEBUG
        log_levels['hvm.memoryLogger'] = logging.DEBUG

        # log_levels['hp2p'] = logging.INFO
        log_levels['hp2p.peer'] = logging.DEBUG
        log_levels['hp2p.peer.PeerPool'] = logging.DEBUG
        log_levels['hp2p.consensus.Consensus'] = logging.DEBUG
        log_levels['hp2p.SmartContractChainManager'] = logging.DEBUG
        log_levels['hp2p.kademlia.KademliaProtocol'] = logging.DEBUG
        log_levels['hp2p.discovery.DiscoveryProtocol'] = logging.INFO
        log_levels['hp2p.discovery.DiscoveryService'] = logging.INFO
        log_levels['hp2p.nat.UPnPService'] = logging.CRITICAL
        log_levels['connectionpool'] = logging.CRITICAL
        log_levels['hp2p.protocol'] = logging.DEBUG
        log_levels['hp2p.protocol.Protocol'] = logging.DEBUG

        # log_levels['helios'] = logging.INFO
        log_levels['helios.rpc.ipc'] = logging.INFO
        log_levels['helios.Node'] = logging.INFO
        log_levels['helios.sync'] = logging.DEBUG
        log_levels['helios.protocol'] = logging.INFO
        log_levels['helios.protocol.common'] = logging.DEBUG
        log_levels['helios.protocol.hls.peer.HLSPeer'] = 5
        log_levels['helios.memoryLogger'] = logging.DEBUG
        log_levels['hp2p.hls'] = logging.INFO
        log_levels['helios.server.FullServer'] = logging.DEBUG

        log_levels['Action'] = logging.INFO
        log_levels['Device'] = logging.INFO
        log_levels['helios.extensibility'] = logging.INFO

    setup_log_levels(log_levels=log_levels)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        parser.error(HELIOS_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters. Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            parser.error(HELIOS_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Helios does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_HELIOS_ROOT path"
            )

    file_logger, log_queue, listener = setup_helios_file_and_queue_logging(
        stderr_logger,
        formatter,
        handler_stream,
        chain_config,
        args.file_log_level,
    )

    display_launch_logs(chain_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(
        stderr_logger.level,
        file_logger.level,
        *(args.log_levels or {}).values(),
        *(log_levels or {}).values()
    )

    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'log_levels': log_levels,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then controls
    # the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        helios_boot(
            args,
            chain_config,
            extra_kwargs,
            plugin_manager,
            listener,
            event_bus,
            main_endpoint,
            stderr_logger,
        )
def run_proc2(endpoint):
    loop = asyncio.get_event_loop()
    endpoint.connect()
    loop.run_until_complete(proc2_worker(endpoint))


async def proc2_worker(endpoint):
    for i in range(3):
        result = await endpoint.request(GetSomethingRequest())
        print(result.payload)


if __name__ == "__main__":
    # Configure and start event bus
    ctx = multiprocessing.get_context('spawn')
    event_bus = EventBus(ctx)
    e1 = event_bus.create_endpoint('e1')
    e2 = event_bus.create_endpoint('e2')
    event_bus.start()

    # Start two processes and pass in event bus endpoints
    p1 = ctx.Process(target=run_proc1, args=(e1,))
    p1.start()

    p2 = ctx.Process(target=run_proc2, args=(e2,))
    p2.start()

    asyncio.get_event_loop().run_forever()
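# `run_proc1` and the event types are not included in the snippet above. A sketch
# of the missing half, written against the same endpoint API used throughout this
# section; the `DeliverSomethingResponse` name and the payload string are
# assumptions for illustration.
import asyncio
from typing import Type

from lahja import BaseEvent, BaseRequestResponseEvent


class DeliverSomethingResponse(BaseEvent):
    def __init__(self, payload: str) -> None:
        self.payload = payload


class GetSomethingRequest(BaseRequestResponseEvent[DeliverSomethingResponse]):
    @staticmethod
    def expected_response_type() -> Type[DeliverSomethingResponse]:
        return DeliverSomethingResponse


def run_proc1(endpoint):
    loop = asyncio.get_event_loop()
    endpoint.connect()
    # Answer every GetSomethingRequest with a response routed back to the
    # requester via the event's broadcast config.
    endpoint.subscribe(
        GetSomethingRequest,
        lambda event: endpoint.broadcast(
            DeliverSomethingResponse("something"), event.broadcast_config()
        ),
    )
    loop.run_forever()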