Example 1
def start_cluster(cmd: str, args: List[str]) -> clusterlib.ClusterLib:
    """Start cluster."""
    args_str = " ".join(args)
    args_str = f" {args_str}" if args_str else ""
    LOGGER.info(f"Starting cluster with `{cmd}{args_str}`.")
    helpers.run_command(f"{cmd}{args_str}", workdir=get_cluster_env().work_dir)
    LOGGER.info("Cluster started.")
    return get_cluster_type().get_cluster_obj()
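A usage sketch; the start script path below is hypothetical (in the framework it comes from `prepare_scripts_files`, as in Example 15):

# hypothetical script path, for illustration only
cluster = start_cluster(cmd="./state-cluster0/start-cluster", args=[])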
Example 2
def restart_all_nodes(instance_num: Optional[int] = None) -> None:
    """Restart all Cardano nodes of the running cluster."""
    LOGGER.info("Restarting all cluster nodes.")

    if instance_num is None:
        instance_num = get_cluster_env().instance_num

    supervisor_port = get_cluster_type().cluster_scripts.get_instance_ports(instance_num).supervisor
    try:
        helpers.run_command(f"supervisorctl -s http://localhost:{supervisor_port} restart nodes:")
    except Exception as exc:
        LOGGER.debug(f"Failed to restart cluster nodes: {exc}")
Example 3
def restart_all_nodes() -> None:
    """Restart all Cardano nodes of the running cluster."""
    LOGGER.info("Restarting all cluster nodes.")
    cluster_env = get_cluster_env()
    supervisor_port = (
        get_cluster_type().cluster_scripts.get_instance_ports(cluster_env.instance_num).supervisor
    )
    try:
        helpers.run_command(
            f"supervisorctl -s http://localhost:{supervisor_port} restart nodes:",
            workdir=cluster_env.work_dir,
        )
    except Exception as exc:
        LOGGER.debug(f"Failed to restart cluster nodes: {exc}")
Example 4
def restart_node(node_name: str) -> None:
    """Restart single node of the running cluster."""
    LOGGER.info(f"Restarting cluster node `{node_name}`.")
    cluster_env = get_cluster_env()
    supervisor_port = (
        get_cluster_type().cluster_scripts.get_instance_ports(cluster_env.instance_num).supervisor
    )
    try:
        helpers.run_command(
            f"supervisorctl -s http://localhost:{supervisor_port} restart {node_name}",
            workdir=cluster_env.work_dir,
        )
    except Exception as exc:
        LOGGER.debug(f"Failed to restart cluster node `{node_name}`: {exc}")
Example 5
def cli_has(command: str) -> bool:
    """Check if a cardano-cli subcommand or argument is available.

    E.g. `cli_has("query leadership-schedule --next")`
    """
    err_str = ""
    try:
        helpers.run_command(f"cardano-cli {command}")
    except AssertionError as err:
        err_str = str(err)
    else:
        return True

    # the assertion message carries the command's stderr after the first colon;
    # an unknown subcommand or argument makes cardano-cli print an "Invalid ..." usage error
    cmd_err = err_str.split(":", maxsplit=1)[1].strip()
    return not cmd_err.startswith("Invalid")
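A common companion pattern is feature-gating tests on CLI capabilities. A minimal sketch, assuming `pytest` and the `cli_has` helper above are importable (the test itself is hypothetical):

import pytest

# skip when the installed cardano-cli doesn't support the option yet
@pytest.mark.skipif(
    not cli_has("query leadership-schedule --next"),
    reason="`query leadership-schedule --next` is not available",
)
def test_leadership_schedule_next():
    ...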
Example 6
def restart_nodes(node_names: List[str]) -> None:
    """Restart list of Cardano nodes of the running cluster."""
    LOGGER.info(f"Restarting cluster nodes {node_names}.")
    cluster_env = get_cluster_env()
    supervisor_port = (
        get_cluster_type().cluster_scripts.get_instance_ports(cluster_env.instance_num).supervisor
    )
    for node_name in node_names:
        try:
            helpers.run_command(
                f"supervisorctl -s http://localhost:{supervisor_port} restart nodes:{node_name}",
                workdir=cluster_env.work_dir,
            )
        except Exception as exc:
            LOGGER.debug(f"Failed to restart cluster node `{node_name}`: {exc}")
Example 7
    def stop_all_clusters(self) -> None:
        """Stop all cluster instances."""
        self._log("called `stop_all_clusters`")

        # don't stop cluster if it was started outside of test framework
        if configuration.DEV_CLUSTER_RUNNING:
            LOGGER.warning(
                "Ignoring request to stop clusters as 'DEV_CLUSTER_RUNNING' is set."
            )
            return

        work_dir = cluster_nodes.get_cluster_env().work_dir

        for instance_num in range(self.num_of_instances):
            instance_dir = self.pytest_tmp_dir / f"{CLUSTER_DIR_TEMPLATE}{instance_num}"
            if (not (instance_dir / CLUSTER_RUNNING_FILE).exists()
                    or (instance_dir / CLUSTER_STOPPED_FILE).exists()):
                self._log(f"c{instance_num}: cluster instance not running")
                continue

            state_dir = work_dir / f"{cluster_nodes.STATE_CLUSTER}{instance_num}"

            stop_script = state_dir / cluster_scripts.STOP_SCRIPT
            if not stop_script.exists():
                self._log(f"c{instance_num}: stop script doesn't exist!")
                continue

            self._log(
                f"c{instance_num}: stopping cluster instance with `{stop_script}`"
            )
            try:
                helpers.run_command(str(stop_script))
            except Exception as err:
                self._log(f"c{instance_num}: failed to stop cluster:\n{err}")

            artifacts.save_start_script_coverage(
                log_file=state_dir / CLUSTER_START_CMDS_LOG,
                pytest_config=self.pytest_config,
            )
            artifacts.save_cluster_artifacts(save_dir=self.pytest_tmp_dir,
                                             state_dir=state_dir)

            shutil.rmtree(state_dir, ignore_errors=True)

            helpers.touch(instance_dir / CLUSTER_STOPPED_FILE)
            self._log(f"c{instance_num}: stopped cluster instance")
Example 8
def services_action(
    service_names: List[str], action: str, instance_num: Optional[int] = None
) -> None:
    """Restart list of services running on the running cluster."""
    LOGGER.info(f"Performing '{action}' action on services {service_names}.")

    if instance_num is None:
        instance_num = get_cluster_env().instance_num

    supervisor_port = get_cluster_type().cluster_scripts.get_instance_ports(instance_num).supervisor
    for service_name in service_names:
        try:
            helpers.run_command(
                f"supervisorctl -s http://localhost:{supervisor_port} {action} {service_name}"
            )
        except Exception as exc:
            LOGGER.debug(f"Failed to restart service `{service_name}`: {exc}")
Example 9
def create_script_context(
    cluster_obj: clusterlib.ClusterLib, redeemer_file: Path, tx_file: Optional[Path] = None
) -> None:
    """Run the `create-script-context` command (available in plutus-examples)."""
    if tx_file:
        cmd_args = [
            "create-script-context",
            "--generate-tx",
            str(tx_file),
            "--out-file",
            str(redeemer_file),
            f"--{cluster_obj.protocol}-mode",
            *cluster_obj.magic_args,
        ]
    else:
        cmd_args = ["create-script-context", "--out-file", str(redeemer_file)]

    helpers.run_command(cmd_args)
    assert redeemer_file.exists()
Example 10
def _kill_supervisor(instance_num: int) -> None:
    """Kill supervisor process."""
    port = f":{cluster_instances.get_instance_ports(instance_num).supervisor}"
    netstat = helpers.run_command("netstat -plnt").decode().splitlines()
    for line in netstat:
        if port not in line:
            continue
        # last column of `netstat -plnt` output is "PID/Program name"
        pid = line.split()[-1].split("/")[0]
        os.kill(int(pid), 15)  # 15 = SIGTERM
        return
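For comparison, a sketch of the same port-to-PID lookup without parsing `netstat` output, using the third-party `psutil` package (an assumption; the excerpt above does not use it):

import os
import signal

import psutil  # third-party dependency, assumed for this sketch only

def _kill_supervisor_psutil(instance_num: int) -> None:
    """Kill the supervisor process listening on the instance's supervisor port."""
    port = cluster_instances.get_instance_ports(instance_num).supervisor
    for conn in psutil.net_connections(kind="tcp"):
        # `conn.pid` can be None when we lack permission to see the owning process
        if conn.status == psutil.CONN_LISTEN and conn.laddr.port == port and conn.pid:
            os.kill(conn.pid, signal.SIGTERM)
            return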
Example 11
    def get_cardano_version(self) -> dict:
        """Return version info for cardano-node."""
        # expected output shape (an assumption, inferred from the parsing below):
        #   cardano-node <version> - <platform> - <ghc version>
        #   git rev <commit hash>
        out = helpers.run_command("cardano-node --version").decode().strip()
        env_info, git_info, *__ = out.splitlines()
        node, platform, ghc, *__ = env_info.split(" - ")
        version_db = {
            "cardano-node": node.split(" ")[-1],
            "platform": platform,
            "ghc": ghc,
            "git_rev": git_info.split(" ")[-1],
        }
        return version_db
Example 12
    def get_dbsync_version(self) -> dict:
        """Return version info for db-sync."""
        out = helpers.run_command(f"{configuration.DBSYNC_BIN} --version").decode().strip()
        env_info, git_info, *__ = out.splitlines()
        dbsync, platform, ghc, *__ = env_info.split(" - ")
        version_db = {
            "version": dbsync.split(" ")[-1],
            "platform": platform,
            "ghc": ghc,
            "git_rev": git_info.split(" ")[-1],
        }
        return version_db
Example 13
def filtered_ledger_state(cluster_obj: clusterlib.ClusterLib) -> str:
    """Get filtered output of `query ledger-state`."""
    cardano_cmd = " ".join([
        "cardano-cli",
        "query",
        "ledger-state",
        *cluster_obj.magic_args,
        f"--{cluster_obj.protocol}-mode",
    ])
    # drop the huge "esLState" subtree, which we have no use for
    cmd = (
        f"{cardano_cmd} | jq -n --stream -c "
        "'fromstream(inputs|select((length == 2 and .[0][1] == \"esLState\")|not))'"
    )

    return helpers.run_command(cmd, shell=True).decode("utf-8").strip()
Example 14
def services_status(
    service_names: Optional[List[str]] = None, instance_num: Optional[int] = None
) -> List[ServiceStatus]:
    """Return status info for list of services running on the running cluster (all by default)."""
    if instance_num is None:
        instance_num = get_cluster_env().instance_num

    supervisor_port = get_cluster_type().cluster_scripts.get_instance_ports(instance_num).supervisor
    service_names_arg = " ".join(service_names) if service_names else "all"

    status_out = (
        helpers.run_command(
            f"supervisorctl -s http://localhost:{supervisor_port} status {service_names_arg}",
            ignore_fail=True,
        )
        .decode()
        .strip()
        .split("\n")
    )

    statuses = []
    for status_line in status_out:
        service_name, status, *running_status = status_line.split()
        if running_status and running_status[0] == "pid":
            _pid, pid, _uptime, uptime, *other = running_status
            message = " ".join(other)
        else:
            pid, uptime = "", ""
            message = " ".join(running_status)
        statuses.append(
            ServiceStatus(
                name=service_name,
                status=status,
                pid=int(pid.rstrip(",")) if pid else None,
                uptime=uptime or None,
                message=message,
            )
        )

    return statuses
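A usage sketch, printing a quick report for all services of the default instance (field names come from the `ServiceStatus` records built above):

for svc in services_status():
    print(f"{svc.name}: {svc.status} pid={svc.pid} uptime={svc.uptime}")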
Example 15
    def _restart(self, start_cmd: str = "", stop_cmd: str = "") -> bool:  # noqa: C901
        """Restart cluster.

        Not called under global lock!
        """
        # pylint: disable=too-many-branches
        cluster_running_file = self.cm.instance_dir / CLUSTER_RUNNING_FILE

        # don't restart cluster if it was started outside of test framework
        if configuration.DEV_CLUSTER_RUNNING:
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: ignoring restart, dev cluster is running"
            )
            if cluster_running_file.exists():
                LOGGER.warning(
                    "Ignoring requested cluster restart as 'DEV_CLUSTER_RUNNING' is set."
                )
            else:
                helpers.touch(cluster_running_file)
            return True

        # fail if cluster restart is forbidden and it was already started
        if configuration.FORBID_RESTART and cluster_running_file.exists():
            raise RuntimeError(
                "Cannot restart cluster when 'FORBID_RESTART' is set.")

        self.cm._log(
            f"c{self.cm.cluster_instance_num}: called `_restart`, start_cmd='{start_cmd}', "
            f"stop_cmd='{stop_cmd}'")

        startup_files = cluster_nodes.get_cluster_type().cluster_scripts.prepare_scripts_files(
            destdir=self.cm._create_startup_files_dir(self.cm.cluster_instance_num),
            instance_num=self.cm.cluster_instance_num,
            start_script=start_cmd,
            stop_script=stop_cmd,
        )

        state_dir = cluster_nodes.get_cluster_env().state_dir

        self.cm._log(
            f"c{self.cm.cluster_instance_num}: in `_restart`, new files "
            f"start_cmd='{startup_files.start_script}', "
            f"stop_cmd='{startup_files.stop_script}'")

        excp: Optional[Exception] = None
        # attempt a full stop/start cycle up to two times before declaring the cluster dead
        for i in range(2):
            if i > 0:
                self.cm._log(
                    f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\nretrying"
                )
                time.sleep(0.2)

            try:
                LOGGER.info(f"Stopping cluster with `{startup_files.stop_script}`.")
                helpers.run_command(str(startup_files.stop_script))
            except Exception as err:
                self.cm._log(
                    f"c{self.cm.cluster_instance_num}: failed to stop cluster:\n{err}"
                )

            # save artifacts only when produced during this test run
            if cluster_running_file.exists():
                artifacts.save_start_script_coverage(
                    log_file=state_dir / CLUSTER_START_CMDS_LOG,
                    pytest_config=self.cm.pytest_config,
                )
                artifacts.save_cluster_artifacts(
                    save_dir=self.cm.pytest_tmp_dir, state_dir=state_dir)

            shutil.rmtree(state_dir, ignore_errors=True)

            with contextlib.suppress(Exception):
                _kill_supervisor(self.cm.cluster_instance_num)

            try:
                cluster_obj = cluster_nodes.start_cluster(
                    cmd=str(startup_files.start_script),
                    args=startup_files.start_script_args)
            except Exception as err:
                LOGGER.error(f"Failed to start cluster: {err}")
                excp = err
            else:
                break
        else:
            # the loop exhausted both attempts without a successful start
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\ncluster dead"
            )
            if not configuration.IS_XDIST:
                pytest.exit(msg=f"Failed to start cluster, exception: {excp}", returncode=1)
            helpers.touch(self.cm.instance_dir / CLUSTER_DEAD_FILE)
            return False

        # Create temp dir for faucet addresses data.
        # Pytest's mktemp adds number to the end of the dir name, so keep the trailing '_'
        # as separator. Resulting dir name is e.g. 'addrs_data_ci3_0'.
        tmp_path = Path(
            self.cm.tmp_path_factory.mktemp(f"addrs_data_ci{self.cm.cluster_instance_num}_")
        )
        # setup faucet addresses
        cluster_nodes.setup_test_addrs(cluster_obj=cluster_obj, destination_dir=tmp_path)

        # create file that indicates that the cluster is running
        if not cluster_running_file.exists():
            helpers.touch(cluster_running_file)

        return True
Example 16
def stop_cluster(cmd: str) -> None:
    """Stop cluster."""
    LOGGER.info(f"Stopping cluster with `{cmd}`.")
    helpers.run_command(cmd, workdir=get_cluster_env().work_dir)