def get_chain_manager(chain_config: ChainConfig,
                      base_db: AsyncBaseDB,
                      instance=0) -> BaseManager:
    # TODO: consider using an async chain here. It depends on which process we would like the threaded work to happen in.
    # There may be a performance saving from doing the threaded work in this process to avoid one process hop.
    if chain_config.network_id == MAINNET_NETWORK_ID:
        chain_class = MainnetChain
    else:
        raise NotImplementedError(
            "Only the mainnet chain is currently supported")

    chain = chain_class(base_db, chain_config.node_wallet_address,
                        chain_config.node_private_helios_key)  # type: ignore

    class ChainManager(BaseManager):
        pass

    ChainManager.register(  # type: ignore
        'get_chain',
        callable=lambda: TracebackRecorder(chain),
        proxytype=ChainProxy)

    manager = ChainManager(address=str(
        chain_config.get_chain_ipc_path(instance)))  # type: ignore
    return manager
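The object returned here is a stock multiprocessing.managers.BaseManager, so serving it uses the standard library API. A minimal server-side sketch under that assumption (the serve_chain_manager helper name is illustrative, not part of the codebase; ChainConfig and AsyncBaseDB are the types imported above):

def serve_chain_manager(chain_config: ChainConfig,
                        base_db: AsyncBaseDB,
                        instance: int = 0) -> None:
    # Hypothetical helper: build the manager and block, serving 'get_chain'
    # requests over the per-instance chain IPC socket until the process exits.
    manager = get_chain_manager(chain_config, base_db, instance)
    server = manager.get_server()
    server.serve_forever()

Client processes register the same 'get_chain' key against their own BaseManager subclass, call connect(), and work with the chain through a ChainProxy; a client-side sketch follows Example #2.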
Example #2
    def __init__(self, plugin_manager: PluginManager,
                 chain_config: ChainConfig) -> None:
        super().__init__()
        self.chain_config: ChainConfig = chain_config
        self.private_helios_key = chain_config.node_private_helios_key
        self.wallet_address = chain_config.node_wallet_address
        self._plugin_manager = plugin_manager
        self._db_manager = create_db_manager(chain_config.database_ipc_path)
        self._db_manager.connect()  # type: ignore

        self._chain_managers = []  # one manager proxy per chain process
        for i in range(chain_config.num_chain_processes):
            chain_manager = create_chain_manager(
                chain_config.get_chain_ipc_path(i))
            chain_manager.connect()
            self._chain_managers.append(chain_manager)

        self._chain_head_db = self._db_manager.get_chain_head_db()  # type: ignore
        self._jsonrpc_ipc_path: Path = chain_config.jsonrpc_ipc_path
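create_chain_manager is not shown in this example. Mirroring get_chain_manager from Example #1, a plausible client-side sketch (an assumption, not the actual implementation) registers the same 'get_chain' key with the ChainProxy proxy type and points the manager at the per-instance IPC socket:

from multiprocessing.managers import BaseManager

def create_chain_manager(ipc_path) -> BaseManager:
    # Hypothetical client-side counterpart of get_chain_manager: no callable is
    # registered, only the proxy type, since the chain object lives in the chain process.
    class ChainClientManager(BaseManager):
        pass

    ChainClientManager.register('get_chain', proxytype=ChainProxy)  # type: ignore
    return ChainClientManager(address=str(ipc_path))  # type: ignore

The constructor above then calls connect() on the returned manager before using it.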
Example #3
def helios_boot(args: Namespace,
                chain_config: ChainConfig,
                extra_kwargs: Dict[str, Any],
                plugin_manager: PluginManager,
                listener: logging.handlers.QueueListener,
                event_bus: EventBus,
                main_endpoint: Endpoint,
                logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    networking_endpoint = event_bus.create_endpoint(NETWORKING_EVENTBUS_ENDPOINT)
    event_bus.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    chain_processes = []
    for i in range(chain_config.num_chain_processes):
        chain_process = ctx.Process(
            target=run_chain_process,
            args=(
                chain_config,
                i
            ),
            kwargs=extra_kwargs,
        )
        chain_processes.append(chain_process)


    networking_process = ctx.Process(
        target=launch_node,
        args=(args, chain_config, networking_endpoint,),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)", database_server_process.pid)

    # networking process needs the IPC socket file provided by the database process
    try:
        wait_for_ipc(chain_config.database_ipc_path)
    except TimeoutError:
        logger.error("Timed out waiting for the database to start. Exiting...")
        kill_process_gracefully(database_server_process, logger)
        ArgumentParser().error(message="Timed out waiting for the database to start")


    for i in range(chain_config.num_chain_processes):
        chain_process = chain_processes[i]
        chain_process.start()
        logger.info("Started chain instance {} process (pid={})".format(i,database_server_process.pid))
        try:
            wait_for_ipc(chain_config.get_chain_ipc_path(i))
        except TimeoutError:
            logger.error("Timed out waiting for chain instance %d to start. Exiting...", i)
            kill_process_gracefully(database_server_process, logger)
            for j in range(i + 1):
                kill_process_gracefully(chain_processes[j], logger)
            ArgumentParser().error(
                message="Timed out waiting for chain instance {} to start".format(i))


    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    main_endpoint.subscribe(
        ShutdownRequest,
        lambda ev: kill_helios_gracefully(
            logger,
            database_server_process,
            chain_processes,
            networking_process,
            plugin_manager,
            main_endpoint,
            event_bus
        )
    )

    plugin_manager.prepare(args, chain_config, extra_kwargs)
    plugin_manager.broadcast(HeliosStartupEvent(
        args,
        chain_config
    ))
    try:
        loop = asyncio.get_event_loop()
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_helios_gracefully(
            logger,
            database_server_process,
            chain_processes,
            networking_process,
            plugin_manager,
            main_endpoint,
            event_bus
        )
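wait_for_ipc is imported from the codebase and not shown here; conceptually it just blocks until the IPC socket file appears and raises TimeoutError otherwise. A minimal stand-in under that assumption (not the actual implementation):

import time
from pathlib import Path

def wait_for_ipc(ipc_path: Path, timeout: float = 30.0) -> None:
    # Hypothetical stand-in: poll for the socket file until it exists or the timeout expires.
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        if ipc_path.exists():
            return
        time.sleep(0.05)
    raise TimeoutError("No IPC socket file appeared at {}".format(ipc_path))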
Example #4
    def fix_unclean_shutdown(self, args: Namespace,
                             chain_config: ChainConfig) -> None:
        self.logger.info("Cleaning up unclean shutdown...")

        self.logger.info("Searching for process id files in %s..." %
                         chain_config.data_dir)
        pidfiles = tuple(chain_config.data_dir.glob('*.pid'))
        if len(pidfiles) > 1:
            self.logger.info(
                'Found %d processes from a previous run. Closing...' %
                len(pidfiles))
        elif len(pidfiles) == 1:
            self.logger.info('Found 1 process from a previous run. Closing...')
        else:
            self.logger.info(
                'Found 0 processes from a previous run. No processes to kill.')

        for pidfile in pidfiles:
            process_id = int(pidfile.read_text())
            kill_process_id_gracefully(process_id, time.sleep, self.logger)
            try:
                pidfile.unlink()
                self.logger.info(
                    'Manually removed %s after killing process id %d',
                    pidfile, process_id)
            except FileNotFoundError:
                self.logger.debug(
                    'pidfile %s was gone after killing process id %d',
                    pidfile, process_id)

        db_ipc = chain_config.database_ipc_path
        try:
            db_ipc.unlink()
            self.logger.info(
                'Removed a dangling IPC socket file for database connections at %s',
                db_ipc)
        except FileNotFoundError:
            self.logger.debug(
                'The IPC socket file for database connections at %s was already gone',
                db_ipc)

        for i in range(chain_config.num_chain_processes):
            chain_ipc = chain_config.get_chain_ipc_path(i)
            try:
                chain_ipc.unlink()
                self.logger.info(
                    'Removed a dangling IPC socket file for chain instance %d process at %s',
                    i, chain_ipc)
            except FileNotFoundError:
                self.logger.debug(
                    'The IPC socket file for chain instance %d process at %s was already gone',
                    i, chain_ipc)

        jsonrpc_ipc = chain_config.jsonrpc_ipc_path
        try:
            jsonrpc_ipc.unlink()
            self.logger.info(
                'Removed a dangling IPC socket file for JSON-RPC connections at %s',
                jsonrpc_ipc,
            )
        except FileNotFoundError:
            self.logger.debug(
                'The IPC socket file for JSON-RPC connections at %s was already gone',
                jsonrpc_ipc,
            )
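On Python 3.8 and later, each try/except FileNotFoundError block above can be collapsed with Path.unlink(missing_ok=True). A small sketch of that variant (the remove_dangling_ipc helper is illustrative, not part of the codebase):

import logging
from pathlib import Path

def remove_dangling_ipc(ipc_path: Path, description: str, logger: logging.Logger) -> None:
    # Requires Python 3.8+ for the missing_ok keyword.
    existed = ipc_path.exists()
    ipc_path.unlink(missing_ok=True)
    if existed:
        logger.info('Removed a dangling IPC socket file for %s at %s', description, ipc_path)
    else:
        logger.debug('The IPC socket file for %s at %s was already gone', description, ipc_path)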