def get_chain_context(base_db, privkey):
    chain = FakeAsyncTestnetChain(base_db,
                                  privkey.public_key.to_canonical_address(),
                                  privkey)
    chaindb = FakeAsyncChainDB(base_db)
    chain_head_db = FakeAsyncChainHeadDB.load_from_saved_root_hash(base_db)
    consensus_db = FakeAsyncConsensusDB(chaindb)

    chain_config = ChainConfig(network_id=TESTNET_NETWORK_ID)
    chain_config._node_private_helios_key = privkey
    chain_config.num_chain_processes = 1

    network_id = TESTNET_NETWORK_ID
    vm_configuration = tuple()

    chain_context = ChainContext(
        base_db=base_db,
        chains=[chain],
        chaindb=chaindb,
        chain_head_db=chain_head_db,
        consensus_db=consensus_db,
        chain_config=chain_config,
        network_id=network_id,
        vm_configuration=vm_configuration,
    )
    return chain_context
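A minimal usage sketch for the helper above, assuming an in-memory hvm database backend and an arbitrary 32-byte test key (the MemoryDB import path and the key value are assumptions, not part of the example):

from eth_keys import keys
from hvm.db.backends.memory import MemoryDB  # assumed in-memory backend

def make_test_chain_context():
    # Build a throwaway context for a single test peer.
    base_db = MemoryDB()
    privkey = keys.PrivateKey(b'\x01' * 32)
    return get_chain_context(base_db, privkey)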
Example #2
def launch_node(args: Namespace, chain_config: ChainConfig, endpoint: Endpoint) -> None:
    with chain_config.process_id_file('networking'):

        endpoint.connect()

        NodeClass = chain_config.node_class
        # Temporary hack: We set up a second instance of the PluginManager.
        # The first instance was only to configure the ArgumentParser whereas
        # for now, the second instance that lives inside the networking process
        # performs the bulk of the work. In the future, the PluginManager
        # should probably live in its own process and manage whether plugins
        # run in the shared plugin process or spawn their own.

        plugin_manager = setup_plugins(SharedProcessScope(endpoint))
        plugin_manager.prepare(args, chain_config)
        plugin_manager.broadcast(HeliosStartupEvent(
            args,
            chain_config
        ))

        node = NodeClass(plugin_manager, chain_config)
        loop = node.get_event_loop()
        asyncio.ensure_future(handle_networking_exit(node, plugin_manager, endpoint), loop=loop)
        asyncio.ensure_future(node.run(), loop=loop)
        loop.run_forever()
        loop.close()
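launch_node is normally the target of a dedicated networking process rather than being called inline; a sketch of that call pattern, mirroring the boot code in Example #17 below (ctx is assumed to be a multiprocessing context and event_bus an already-created EventBus):

# Sketch only: run launch_node in its own process with a fresh endpoint.
networking_endpoint = event_bus.create_endpoint(NETWORKING_EVENTBUS_ENDPOINT)
networking_process = ctx.Process(
    target=launch_node,
    args=(args, chain_config, networking_endpoint),
)
networking_process.start()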
def test_chain_config_explictely_provided_nodekey(nodekey_bytes, as_bytes):
    chain_config = ChainConfig(
        network_id=1,
        nodekey=nodekey_bytes if as_bytes else keys.PrivateKey(nodekey_bytes),
    )

    assert chain_config.nodekey.to_bytes() == nodekey_bytes
def test_chain_config_explicit_properties():
    chain_config = ChainConfig(network_id=1,
                               data_dir='./data-dir',
                               nodekey_path='./nodekey')

    assert chain_config.data_dir == Path('./data-dir').resolve()
    assert chain_config.nodekey_path == Path('./nodekey').resolve()
def test_chain_config_nodekey_loading(nodekey_bytes, nodekey_path):
    chain_config = ChainConfig(
        network_id=1,
        nodekey_path=nodekey_path,
    )

    assert chain_config.nodekey.to_bytes() == nodekey_bytes
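The nodekey tests above rely on nodekey_bytes and nodekey_path pytest fixtures that are not shown in this listing; one plausible shape for them (the exact key value and file handling are assumptions):

import pytest

@pytest.fixture
def nodekey_bytes():
    # Any fixed 32-byte value below the secp256k1 curve order works as a test key.
    return b'\x01' * 32

@pytest.fixture
def nodekey_path(tmp_path, nodekey_bytes):
    # Write the raw key bytes to a temporary file for ChainConfig to load.
    path = tmp_path / 'nodekey'
    path.write_bytes(nodekey_bytes)
    return str(path)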
Example #6
def run_database_process(chain_config: ChainConfig, db_class: Type[BaseDB]) -> None:
    with chain_config.process_id_file('database'):

        if chain_config.report_memory_usage:
            from threading import Thread
            memory_logger = logging.getLogger('hvm.memoryLogger')

            t = Thread(target=sync_periodically_report_memory_stats, args=(chain_config.memory_usage_report_interval, memory_logger))
            t.start()


        base_db = db_class(db_path=chain_config.database_dir)

        # TODO: remove
        #base_db = JournalDB(base_db)

        manager = get_chaindb_manager(chain_config, base_db)
        server = manager.get_server()  # type: ignore

        def _sigint_handler(*args: Any) -> None:
            server.stop_event.set()

        signal.signal(signal.SIGINT, _sigint_handler)
        try:
            server.serve_forever()
        except SystemExit:
            server.stop_event.set()
            raise
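sync_periodically_report_memory_stats is imported from the Helios utilities and not shown here; as a rough illustration only (a psutil-based stand-in, not the real helper), such a reporter could look like this:

import time
import psutil

def report_memory_stats_sketch(interval: int, logger) -> None:
    # Illustrative stand-in: log this process's resident set size every
    # `interval` seconds until the process exits.
    process = psutil.Process()
    while True:
        rss_mb = process.memory_info().rss / (1024 * 1024)
        logger.info("Resident memory: %.1f MB", rss_mb)
        time.sleep(interval)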
Example #7
def get_chain_manager(chain_config: ChainConfig,
                      base_db: AsyncBaseDB,
                      instance=0) -> BaseManager:
    # TODO: think about using an async chain here. Depends on which process we would like the threaded work to happen in.
    # There might be a performance saving from doing the threaded work in this process to avoid one process hop.
    if chain_config.network_id == MAINNET_NETWORK_ID:
        chain_class = MainnetChain
    else:
        raise NotImplementedError(
            "Only the mainnet chain is currently supported")

    chain = chain_class(base_db, chain_config.node_wallet_address,
                        chain_config.node_private_helios_key)  # type: ignore

    class ChainManager(BaseManager):
        pass

    ChainManager.register(  # type: ignore
        'get_chain',
        callable=lambda: TracebackRecorder(chain),
        proxytype=ChainProxy)

    manager = ChainManager(address=str(
        chain_config.get_chain_ipc_path(instance)))  # type: ignore
    return manager
def test_chain_config_computed_properties(xdg_helios_root):
    data_dir = get_local_data_dir('muffin', xdg_helios_root)
    chain_config = ChainConfig(network_id=1234, data_dir=data_dir)

    assert chain_config.network_id == 1234
    assert chain_config.data_dir == data_dir
    assert chain_config.database_dir == data_dir / DATABASE_DIR_NAME / "full"
    assert chain_config.nodekey_path == get_nodekey_path(data_dir)
def test_chain_config_computed_properties_custom_xdg(tmpdir, xdg_helios_root):
    alt_xdg_root = tmpdir.mkdir('helios-custom')
    assert not is_under_path(alt_xdg_root, xdg_helios_root)

    data_dir = get_data_dir_for_network_id(1, alt_xdg_root)
    chain_config = ChainConfig(helios_root_dir=alt_xdg_root, network_id=1)

    assert chain_config.network_id == 1
    assert chain_config.data_dir == data_dir
    assert chain_config.database_dir == data_dir / DATABASE_DIR_NAME / "full"
    assert chain_config.nodekey_path == get_nodekey_path(data_dir)
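Both computed-properties tests assume an xdg_helios_root fixture; a plausible minimal version simply provides an isolated temporary root (whether the real fixture also sets the XDG_HELIOS_ROOT environment variable is not shown here):

import pytest

@pytest.fixture
def xdg_helios_root(tmp_path):
    # Isolated stand-in root so computed data_dir/nodekey paths never touch
    # the real home directory.
    root = tmp_path / 'helios'
    root.mkdir()
    return root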
Example #10
    def __init__(self, plugin_manager: PluginManager,
                 chain_config: ChainConfig) -> None:
        super().__init__()
        self.chain_config: ChainConfig = chain_config
        self.private_helios_key = chain_config.node_private_helios_key
        self.wallet_address = chain_config.node_wallet_address
        self._plugin_manager = plugin_manager
        self._db_manager = create_db_manager(chain_config.database_ipc_path)
        self._db_manager.connect()  # type: ignore

        self._chain_managers = []  # proxy managers, one per chain process
        for i in range(chain_config.num_chain_processes):
            chain_manager = create_chain_manager(
                chain_config.get_chain_ipc_path(i))
            chain_manager.connect()
            self._chain_managers.append(chain_manager)

        self._chain_head_db = self._db_manager.get_chain_head_db()  # type: ignore
        self._jsonrpc_ipc_path: Path = chain_config.jsonrpc_ipc_path
Example #11
def run_chain_process(chain_config: ChainConfig, instance = 0) -> None:
    with chain_config.process_id_file('database_{}'.format(instance)):
        # connect with database process
        db_manager = create_db_manager(chain_config.database_ipc_path)
        db_manager.connect()

        base_db = db_manager.get_db()

        # start chain process
        manager = get_chain_manager(chain_config, base_db, instance)
        server = manager.get_server()  # type: ignore

        def _sigint_handler(*args: Any) -> None:
            server.stop_event.set()

        signal.signal(signal.SIGINT, _sigint_handler)
        try:
            server.serve_forever()
        except SystemExit:
            server.stop_event.set()
            raise
def database_server_ipc_path():
    core_db = AtomicDB()
    core_db[b'key-a'] = b'value-a'

    chaindb = ChainDB(core_db)
    # TODO: use a custom chain class only for testing.
    chaindb.persist_header(ROPSTEN_GENESIS_HEADER)

    with tempfile.TemporaryDirectory() as temp_dir:
        chain_config = ChainConfig(network_id=ROPSTEN_NETWORK_ID, max_peers=1, data_dir=temp_dir)

        manager = get_chaindb_manager(chain_config, core_db)
        chaindb_server_process = multiprocessing.Process(
            target=serve_chaindb,
            args=(manager,),
        )
        chaindb_server_process.start()

        wait_for_ipc(chain_config.database_ipc_path)

        try:
            yield chain_config.database_ipc_path
        finally:
            kill_process_gracefully(chaindb_server_process, logging.getLogger())
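The yield-based function above reads like a pytest fixture whose decorator was dropped by the listing, and it hands a serve_chaindb target to the child process without showing it. Judging from run_database_process in Example #6, that target probably just serves the manager until stopped; a hedged sketch:

def serve_chaindb_sketch(manager) -> None:
    # Illustrative only: mirror the serving loop used by run_database_process.
    server = manager.get_server()
    try:
        server.serve_forever()
    except SystemExit:
        server.stop_event.set()
        raise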
Example #13
def chain_config():
    return ChainConfig(network_id=1, max_peers=1)
def chain_config():
    _chain_config = ChainConfig(network_id=1, max_peers=1)
    initialize_data_dir(_chain_config)
    return _chain_config
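Both chain_config variants above appear to be pytest fixtures whose @pytest.fixture decorators were stripped by the listing; with the decorator restored, a test consumes them by parameter name (the test body here is only an illustration):

import pytest

@pytest.fixture
def chain_config():
    # Second variant: also create the data directory layout on disk.
    _chain_config = ChainConfig(network_id=1, max_peers=1)
    initialize_data_dir(_chain_config)
    return _chain_config

def test_uses_chain_config(chain_config):
    assert chain_config.network_id == 1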
Example #15
def main() -> None:

    event_bus = EventBus(ctx)
    main_endpoint = event_bus.create_endpoint(MAIN_EVENTBUS_ENDPOINT)
    main_endpoint.connect()

    plugin_manager = setup_plugins(
        MainAndIsolatedProcessScope(event_bus, main_endpoint)
    )
    plugin_manager.amend_argparser_config(parser, subparser)
    args = parser.parse_args()

    #
    # Dev testing stuff
    #
    if args.start_memory_profile:
        os.environ["PYTHONTRACEMALLOC"] = '1'
    if args.rand_db:
        os.environ["GENERATE_RANDOM_DATABASE"] = 'true'
    if args.instance is not None:

        from helios.utils.xdg import get_xdg_helios_root
        args.port = args.port + args.instance * 2

        if args.instance != 0:
            args.do_rpc_http_server = False
        subdir = 'instance_' + str(args.instance)
        absolute_path = get_xdg_helios_root() / subdir

        absolute_dir = os.path.dirname(os.path.realpath(__file__))
        absolute_keystore_path = absolute_dir + '/keystore/'
        args.keystore_path = absolute_keystore_path + subdir

        args.keystore_password = '******'

        os.environ["HELIOS_DATA_DIR"] = str(absolute_path.resolve())
        os.environ["INSTANCE_NUMBER"] = str(args.instance)


    #
    #
    #
    if not args.keystore_password and not hasattr(args, 'func'):
        password = getpass.getpass(prompt='Keystore Password: ')
        args.keystore_password = password

    log_levels = {}
    # NOTE: the condition guarding this trace-level branch was lost to the
    # listing's password masking; a trace/verbosity flag on `args` is assumed.
    if getattr(args, 'enable_trace_logging', False):
        log_levels['default'] = TRACE_LEVEL_NUM
        log_levels['hvm'] = TRACE_LEVEL_NUM
        log_levels['hp2p'] = TRACE_LEVEL_NUM
        log_levels['helios'] = TRACE_LEVEL_NUM

        log_levels['urllib3'] = TRACE_LEVEL_NUM
        log_levels['ssdp'] = TRACE_LEVEL_NUM
        log_levels['Service'] = TRACE_LEVEL_NUM

        log_levels['Action'] = TRACE_LEVEL_NUM
        log_levels['Device'] = TRACE_LEVEL_NUM
        log_levels['helios.extensibility'] = TRACE_LEVEL_NUM

    else:
        log_levels['default'] = logging.INFO

        log_levels['urllib3'] = logging.INFO
        log_levels['ssdp'] = logging.INFO
        log_levels['Service'] = logging.INFO

        log_levels['hvm'] = logging.DEBUG  #sets all of hvm
        log_levels['hvm.db.account.AccountDB'] = logging.DEBUG
        log_levels['hvm.vm.base.VM.VM'] = logging.DEBUG
        log_levels['hvm.chain'] = logging.DEBUG
        #log_levels['hvm.chain.chain.Chain'] = logging.DEBUG
        log_levels['hvm.db.chain_head.ChainHeadDB'] = logging.DEBUG
        log_levels['hvm.db.chain_db.ChainDB'] = logging.DEBUG
        log_levels['hvm.db.consensus'] = logging.DEBUG
        log_levels['hvm.memoryLogger'] = logging.DEBUG

        #log_levels['hp2p'] = logging.INFO


        log_levels['hp2p.peer'] = logging.DEBUG
        log_levels['hp2p.peer.PeerPool'] = logging.DEBUG
        log_levels['hp2p.consensus.Consensus'] = logging.DEBUG
        log_levels['hp2p.SmartContractChainManager'] = logging.DEBUG
        log_levels['hp2p.kademlia.KademliaProtocol'] = logging.DEBUG
        log_levels['hp2p.discovery.DiscoveryProtocol'] = logging.INFO
        log_levels['hp2p.discovery.DiscoveryService'] = logging.INFO
        log_levels['hp2p.nat.UPnPService'] = logging.CRITICAL
        log_levels['connectionpool'] = logging.CRITICAL
        log_levels['hp2p.protocol'] = logging.DEBUG
        log_levels['hp2p.protocol.Protocol'] = logging.DEBUG


        #log_levels['helios'] = logging.INFO
        log_levels['helios.rpc.ipc'] = logging.INFO
        log_levels['helios.Node'] = logging.INFO
        log_levels['helios.sync'] = logging.DEBUG
        log_levels['helios.protocol'] = logging.INFO
        log_levels['helios.protocol.common'] = logging.DEBUG
        log_levels['helios.protocol.hls.peer.HLSPeer'] = 5
        log_levels['helios.memoryLogger'] = logging.DEBUG

        log_levels['hp2p.hls'] = logging.INFO
        log_levels['helios.server.FullServer'] = logging.DEBUG

        log_levels['Action'] = logging.INFO
        log_levels['Device'] = logging.INFO
        log_levels['helios.extensibility'] = logging.INFO


        setup_log_levels(log_levels=log_levels)

    try:
        chain_config = ChainConfig.from_parser_args(args)
    except AmbigiousFileSystem:
        parser.error(HELIOS_AMBIGIOUS_FILESYSTEM_INFO)

    if not is_data_dir_initialized(chain_config):
        # TODO: this will only work as is for chains with known genesis
        # parameters.  Need to flesh out how genesis parameters for custom
        # chains are defined and passed around.
        try:
            initialize_data_dir(chain_config)
        except AmbigiousFileSystem:
            parser.error(HELIOS_AMBIGIOUS_FILESYSTEM_INFO)
        except MissingPath as e:
            parser.error(
                "\n"
                f"It appears that {e.path} does not exist. "
                "Helios does not attempt to create directories outside of its root path. "
                "Either manually create the path or ensure you are using a data directory "
                "inside the XDG_HELIOS_ROOT path"
            )

    file_logger, log_queue, listener = setup_helios_file_and_queue_logging(
        stderr_logger,
        formatter,
        handler_stream,
        chain_config,
        args.file_log_level,
    )

    display_launch_logs(chain_config)

    # compute the minimum configured log level across all configured loggers.
    min_configured_log_level = min(
        stderr_logger.level,
        file_logger.level,
        *(args.log_levels or {}).values(),
        *(log_levels or {}).values()
    )


    extra_kwargs = {
        'log_queue': log_queue,
        'log_level': min_configured_log_level,
        'log_levels': log_levels,
        'profile': args.profile,
    }

    # Plugins can provide a subcommand with a `func` which then takes control
    # of the entire process from here.
    if hasattr(args, 'func'):
        args.func(args, chain_config)
    else:
        helios_boot(
            args,
            chain_config,
            extra_kwargs,
            plugin_manager,
            listener,
            event_bus,
            main_endpoint,
            stderr_logger,
        )
Example #16
    def fix_unclean_shutdown(self, args: Namespace,
                             chain_config: ChainConfig) -> None:
        self.logger.info("Cleaning up unclean shutdown...")

        self.logger.info("Searching for process id files in %s..." %
                         chain_config.data_dir)
        pidfiles = tuple(chain_config.data_dir.glob('*.pid'))
        if len(pidfiles) > 1:
            self.logger.info(
                'Found %d processes from a previous run. Closing...' %
                len(pidfiles))
        elif len(pidfiles) == 1:
            self.logger.info('Found 1 process from a previous run. Closing...')
        else:
            self.logger.info(
                'Found 0 processes from a previous run. No processes to kill.')

        for pidfile in pidfiles:
            process_id = int(pidfile.read_text())
            kill_process_id_gracefully(process_id, time.sleep, self.logger)
            try:
                pidfile.unlink()
                self.logger.info(
                    'Manually removed %s after killing process id %d' %
                    (pidfile, process_id))
            except FileNotFoundError:
                self.logger.debug(
                    'pidfile %s was gone after killing process id %d' %
                    (pidfile, process_id))

        db_ipc = chain_config.database_ipc_path
        try:
            db_ipc.unlink()
            self.logger.info(
                'Removed a dangling IPC socket file for database connections at %s',
                db_ipc)
        except FileNotFoundError:
            self.logger.debug(
                'The IPC socket file for database connections at %s was already gone',
                db_ipc)

        for i in range(chain_config.num_chain_processes):
            chain_ipc = chain_config.get_chain_ipc_path(i)
            try:
                chain_ipc.unlink()
                self.logger.info(
                    'Removed a dangling IPC socket file for chain instance {} process at {}'
                    .format(i, chain_ipc))
            except FileNotFoundError:
                self.logger.debug(
                    'The IPC socket file for chain instance {} process at {} was already gone'
                    .format(i, chain_ipc))

        jsonrpc_ipc = chain_config.jsonrpc_ipc_path
        try:
            jsonrpc_ipc.unlink()
            self.logger.info(
                'Removed a dangling IPC socket file for JSON-RPC connections at %s',
                jsonrpc_ipc,
            )
        except FileNotFoundError:
            self.logger.debug(
                'The IPC socket file for JSON-RPC connections at %s was already gone',
                jsonrpc_ipc,
            )
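kill_process_id_gracefully is imported from the Helios utilities and called above as kill_process_id_gracefully(process_id, time.sleep, self.logger); a rough sketch of what such an escalating helper might do (signal choice and timing are assumptions):

import os
import signal

def kill_process_id_gracefully_sketch(process_id, sleep, logger, wait_seconds=5):
    # Illustrative escalation: ask politely with SIGINT, then SIGTERM,
    # giving the process a moment to exit after each signal.
    for sig in (signal.SIGINT, signal.SIGTERM):
        try:
            os.kill(process_id, sig)
        except ProcessLookupError:
            logger.debug('Process %d already exited', process_id)
            return
        sleep(wait_seconds)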
Example #17
def helios_boot(args: Namespace,
                chain_config: ChainConfig,
                extra_kwargs: Dict[str, Any],
                plugin_manager: PluginManager,
                listener: logging.handlers.QueueListener,
                event_bus: EventBus,
                main_endpoint: Endpoint,
                logger: logging.Logger) -> None:
    # start the listener thread to handle logs produced by other processes in
    # the local logger.
    listener.start()

    networking_endpoint = event_bus.create_endpoint(NETWORKING_EVENTBUS_ENDPOINT)
    event_bus.start()

    # First initialize the database process.
    database_server_process = ctx.Process(
        target=run_database_process,
        args=(
            chain_config,
            LevelDB,
        ),
        kwargs=extra_kwargs,
    )

    chain_processes = []
    for i in range(chain_config.num_chain_processes):
        chain_process = ctx.Process(
            target=run_chain_process,
            args=(
                chain_config,
                i
            ),
            kwargs=extra_kwargs,
        )
        chain_processes.append(chain_process)


    networking_process = ctx.Process(
        target=launch_node,
        args=(args, chain_config, networking_endpoint,),
        kwargs=extra_kwargs,
    )

    # start the processes
    database_server_process.start()
    logger.info("Started DB server process (pid=%d)", database_server_process.pid)

    # networking process needs the IPC socket file provided by the database process
    try:
        wait_for_ipc(chain_config.database_ipc_path)
    except TimeoutError as e:
        logger.error("Timeout waiting for database to start.  Exiting...")
        kill_process_gracefully(database_server_process, logger)
        ArgumentParser().error(message="Timed out waiting for the database to start")


    for i in range(chain_config.num_chain_processes):
        chain_process = chain_processes[i]
        chain_process.start()
        logger.info("Started chain instance {} process (pid={})".format(i,database_server_process.pid))
        try:
            wait_for_ipc(chain_config.get_chain_ipc_path(i))
        except TimeoutError as e:
            logger.error("Timeout waiting for chain instance {} to start.  Exiting...".format(i))
            kill_process_gracefully(database_server_process, logger)
            for j in range(i+1):
                kill_process_gracefully(chain_processes[j], logger)
            ArgumentParser().error(message="Timed out waiting for chain instance {} to start".format(i))


    networking_process.start()
    logger.info("Started networking process (pid=%d)", networking_process.pid)

    main_endpoint.subscribe(
        ShutdownRequest,
        lambda ev: kill_helios_gracefully(
            logger,
            database_server_process,
            chain_processes,
            networking_process,
            plugin_manager,
            main_endpoint,
            event_bus
        )
    )

    plugin_manager.prepare(args, chain_config, extra_kwargs)
    plugin_manager.broadcast(HeliosStartupEvent(
        args,
        chain_config
    ))
    try:
        loop = asyncio.get_event_loop()
        loop.run_forever()
        loop.close()
    except KeyboardInterrupt:
        kill_helios_gracefully(
            logger,
            database_server_process,
            chain_processes,
            networking_process,
            plugin_manager,
            main_endpoint,
            event_bus
        )
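kill_helios_gracefully is not part of this listing; presumably it tears the processes down roughly in reverse start order. A hedged sketch of that ordering only (plugin manager, endpoint, and event bus teardown are omitted because their exact APIs are not shown here):

def kill_helios_gracefully_sketch(logger, database_server_process, chain_processes,
                                  networking_process, plugin_manager, main_endpoint,
                                  event_bus) -> None:
    # Illustrative shutdown order: stop the consumers before the database
    # process they all depend on.
    kill_process_gracefully(networking_process, logger)
    for chain_process in chain_processes:
        kill_process_gracefully(chain_process, logger)
    kill_process_gracefully(database_server_process, logger)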