Example #1
0
def _testnet_cleanup(pytest_root_tmp: Path) -> None:
    """Perform testnet cleanup at the end of session."""
    if cluster_nodes.get_cluster_type(
    ).type != cluster_nodes.ClusterType.TESTNET_NOPOOLS:
        return

    # there's only one cluster instance for testnets, so we don't need to use the cluster manager
    cluster_obj = cluster_nodes.get_cluster_type().get_cluster_obj()

    destdir = pytest_root_tmp.parent / f"cleanup-{pytest_root_tmp.stem}-{helpers.get_rand_str(8)}"
    destdir.mkdir(parents=True, exist_ok=True)

    with helpers.change_cwd(dir_path=destdir):
        testnet_cleanup.cleanup(cluster_obj=cluster_obj,
                                location=pytest_root_tmp)
Example #2
0
    def stop_all_clusters(self) -> None:
        """Stop all cluster instances."""
        self._log("called `stop_all_clusters`")
        for instance_num in range(self.num_of_instances):
            instance_dir = self.lock_dir / f"{CLUSTER_DIR_TEMPLATE}{instance_num}"
            if (not (instance_dir / CLUSTER_RUNNING_FILE).exists()
                    or (instance_dir / CLUSTER_STOPPED_FILE).exists()):
                self._log(f"cluster instance {instance_num} not running")
                continue

            startup_files = cluster_nodes.get_cluster_type(
            ).cluster_scripts.prepare_scripts_files(
                destdir=self._create_startup_files_dir(instance_num),
                instance_num=instance_num,
            )
            cluster_nodes.set_cluster_env(instance_num)
            self._log(
                f"stopping cluster instance {instance_num} with `{startup_files.stop_script}`"
            )

            state_dir = cluster_nodes.get_cluster_env().state_dir

            try:
                cluster_nodes.stop_cluster(cmd=str(startup_files.stop_script))
            except Exception as exc:
                LOGGER.error(f"While stopping cluster: {exc}")

            cli_coverage.save_start_script_coverage(
                log_file=state_dir / CLUSTER_START_CMDS_LOG,
                pytest_config=self.pytest_config,
            )
            cluster_nodes.save_cluster_artifacts(
                artifacts_dir=self.pytest_tmp_dir, clean=True)
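            # create the "stopped" status file (an empty file that acts as a flag)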
            open(instance_dir / CLUSTER_STOPPED_FILE, "a").close()
            self._log(f"stopped cluster instance {instance_num}")
Example #3
0
def prepare_scripts_files(
    destdir: FileType,
    scriptsdir: FileType = "",
    instance_num: int = 0,
) -> cluster_scripts.InstanceFiles:
    """Prepare scripts files for starting and stopping cluster instance."""
    start_script: FileType = ""
    stop_script: FileType = ""

    if scriptsdir:
        scriptsdir = Path(scriptsdir)
        start_script = next(scriptsdir.glob("start-cluster*"), "")
        stop_script = next(scriptsdir.glob("stop-cluster*"), "")
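        # `next(..., "")` falls back to an empty string when no matching script is found,
        # which makes the check below raise `RuntimeError`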
        if not (start_script and stop_script):
            raise RuntimeError(
                f"Start/stop scripts not found in '{scriptsdir}'")

    startup_files = cluster_nodes.get_cluster_type(
    ).cluster_scripts.prepare_scripts_files(
        destdir=destdir,
        instance_num=instance_num,
        start_script=start_script,
        stop_script=stop_script,
    )
    return startup_files
Example #4
0
def epoch_length_start_cluster(tmp_path_factory: TempdirFactory) -> Path:
    """Update *epochLength* to 1200."""
    pytest_globaltemp = helpers.get_pytest_globaltemp(tmp_path_factory)

    # need to lock because this same fixture can run on several workers in parallel
    with helpers.FileLockIfXdist(
            f"{pytest_globaltemp}/startup_files_epoch_1200.lock"):
        destdir = pytest_globaltemp / "startup_files_epoch_1200"
        destdir.mkdir(exist_ok=True)

        # return the existing script if it was already generated by another worker
        destdir_ls = list(destdir.glob("start-cluster*"))
        if destdir_ls:
            return destdir_ls[0]

        startup_files = cluster_nodes.get_cluster_type(
        ).cluster_scripts.copy_scripts_files(destdir=destdir)
        with open(startup_files.genesis_spec) as fp_in:
            genesis_spec = json.load(fp_in)

        genesis_spec["epochLength"] = 1500

        with open(startup_files.genesis_spec, "w") as fp_out:
            json.dump(genesis_spec, fp_out)

        return startup_files.start_script
Example #5
0
def slot_length_start_cluster(tmp_path_factory: TempdirFactory) -> Path:
    """Update *slotLength* to 0.3."""
    shared_tmp = temptools.get_pytest_shared_tmp(tmp_path_factory)

    # need to lock because this same fixture can run on several workers in parallel
    with locking.FileLockIfXdist(f"{shared_tmp}/startup_files_slot_03.lock"):
        destdir = shared_tmp / "startup_files_slot_03"
        destdir.mkdir(exist_ok=True)

        # return the existing script if it was already generated by another worker
        destdir_ls = list(destdir.glob("start-cluster*"))
        if destdir_ls:
            return destdir_ls[0]

        startup_files = cluster_nodes.get_cluster_type(
        ).cluster_scripts.copy_scripts_files(destdir=destdir)
        with open(startup_files.genesis_spec, encoding="utf-8") as fp_in:
            genesis_spec = json.load(fp_in)

        genesis_spec["slotLength"] = 0.3

        with open(startup_files.genesis_spec, "w", encoding="utf-8") as fp_out:
            json.dump(genesis_spec, fp_out)

        return startup_files.start_script
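# Usage note (not part of the original snippet): the start script path returned by this
# fixture is meant to be used when (re)starting the cluster with the modified genesis -
# compare Example #3, where start/stop scripts found in `scriptsdir` are passed on to
# `prepare_scripts_files()`, and Example #18, where `_restart(start_cmd=...)` hands such
# a script to `cluster_nodes.start_cluster()`.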
Example #6
0
    def _reuse_dev_cluster(self) -> clusterlib.ClusterLib:
        """Reuse cluster that was already started outside of test framework."""
        instance_num = 0
        self.cm._cluster_instance_num = instance_num
        cluster_nodes.set_cluster_env(instance_num)
        state_dir = cluster_nodes.get_cluster_env().state_dir

        # make sure instance dir exists
        instance_dir = self.cm.lock_dir / f"{CLUSTER_DIR_TEMPLATE}{instance_num}"
        instance_dir.mkdir(exist_ok=True, parents=True)

        cluster_obj = self.cm.cache.cluster_obj
        if not cluster_obj:
            cluster_obj = cluster_nodes.get_cluster_type().get_cluster_obj()

        # setup faucet addresses
        if not (state_dir / cluster_nodes.ADDRS_DATA).exists():
            tmp_path = state_dir / "addrs_data"
            tmp_path.mkdir(exist_ok=True, parents=True)
            cluster_nodes.setup_test_addrs(cluster_obj, tmp_path)

        # check if it is necessary to reload data
        self._reload_cluster_obj(state_dir=state_dir)

        return cluster_obj
Example #7
0
def short_kes_start_cluster(tmp_path_factory: TempdirFactory) -> Path:
    """Update *slotsPerKESPeriod* and *maxKESEvolutions*."""
    pytest_globaltemp = helpers.get_pytest_globaltemp(tmp_path_factory)

    # need to lock because this same fixture can run on several workers in parallel
    with helpers.FileLockIfXdist(
            f"{pytest_globaltemp}/startup_files_short_kes.lock"):
        destdir = pytest_globaltemp / "startup_files_short_kes"
        destdir.mkdir(exist_ok=True)

        # return the existing script if it was already generated by another worker
        destdir_ls = list(destdir.glob("start-cluster*"))
        if destdir_ls:
            return destdir_ls[0]

        startup_files = cluster_nodes.get_cluster_type(
        ).cluster_scripts.copy_scripts_files(destdir=destdir)
        with open(startup_files.genesis_spec) as fp_in:
            genesis_spec = json.load(fp_in)

        genesis_spec["slotsPerKESPeriod"] = 700
        genesis_spec["maxKESEvolutions"] = 5

        with open(startup_files.genesis_spec, "w") as fp_out:
            json.dump(genesis_spec, fp_out)

        return startup_files.start_script
Example #8
0
def main() -> None:
    logging.basicConfig(
        format="%(name)s:%(levelname)s:%(message)s",
        level=logging.INFO,
    )
    args = get_args()

    cluster_obj = cluster_nodes.get_cluster_type().get_cluster_obj()
    cleanup(cluster_obj=cluster_obj, location=args.artifacts_base_dir)
Example #9
0
    def test_available_metrics(
        self,
        wait_epochs,
    ):
        """Test that available EKG metrics matches the expected schema."""
        # pylint: disable=unused-argument
        ekg_port = (cluster_nodes.get_cluster_type().cluster_scripts.
                    get_instance_ports(
                        cluster_nodes.get_instance_num()).ekg_pool1)

        response = get_ekg_metrics(ekg_port)
        model_ekg.Model.validate(response.json())
Example #10
0
def _kill_supervisor(instance_num: int) -> None:
    """Kill supervisor process."""
    port_num = (cluster_nodes.get_cluster_type().cluster_scripts.
                get_instance_ports(instance_num).supervisor)
    port_str = f":{port_num}"
    netstat = helpers.run_command("netstat -plnt").decode().splitlines()
    for line in netstat:
        if port_str not in line:
            continue
        line = line.replace("  ", " ").strip()
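        # the last column of `netstat -plnt` output is "PID/Program name";
        # take the PID part and terminate the process with SIGTERM (signal 15)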
        pid = line.split()[-1].split("/")[0]
        os.kill(int(pid), 15)
        return
Example #11
0
def cluster_and_pool(
    cluster_manager: cluster_management.ClusterManager,
) -> Tuple[clusterlib.ClusterLib, str]:
    """Return instance of `clusterlib.ClusterLib`, and pool id to delegate to.

    We need to mark the pool as "in use" when requesting local cluster
    instance, that's why cluster instance and pool id are tied together in
    single fixture.
    """
    cluster_type = cluster_nodes.get_cluster_type()
    if cluster_type.type == cluster_nodes.ClusterType.TESTNET_NOPOOLS:
        cluster_obj: clusterlib.ClusterLib = cluster_manager.get()

        # getting the ledger state on the official testnet is too expensive,
        # so use one of the hardcoded pool IDs if possible
        if cluster_type.testnet_type == cluster_nodes.Testnets.testnet:  # type: ignore
            stake_pools = cluster_obj.get_stake_pools()
            for pool_id in configuration.TESTNET_POOL_IDS:
                if pool_id in stake_pools:
                    return cluster_obj, pool_id

        blocks_before = clusterlib_utils.get_blocks_before(cluster_obj)
        # sort pools by how many blocks they produce
        pool_ids_s = sorted(blocks_before, key=blocks_before.get,
                            reverse=True)  # type: ignore
        # select a pool with reasonable margin
        for pool_id in pool_ids_s:
            pool_params = cluster_obj.get_pool_params(pool_id)
            if pool_params.pool_params[
                    "margin"] <= 0.5 and not pool_params.retiring:
                break
        else:
            pytest.skip("Cannot find any usable pool.")
    elif cluster_type.type == cluster_nodes.ClusterType.TESTNET:
        # the "testnet" cluster has just single pool, "node-pool1"
        cluster_obj = cluster_manager.get(
            use_resources=[cluster_management.Resources.POOL1])
        pool_id = get_pool_id(
            cluster_obj=cluster_obj,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=cluster_management.Resources.POOL1,
        )
    else:
        cluster_obj = cluster_manager.get(
            use_resources=[cluster_management.Resources.POOL3])
        pool_id = get_pool_id(
            cluster_obj=cluster_obj,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=cluster_management.Resources.POOL3,
        )
    return cluster_obj, pool_id
Example #12
0
    def _reload_cluster_obj(self, state_dir: Path) -> None:
        """Reload cluster data if necessary."""
        addrs_data_checksum = helpers.checksum(state_dir /
                                               cluster_nodes.ADDRS_DATA)
        if addrs_data_checksum == self.cm.cache.last_checksum:
            return

        # save CLI coverage collected by the old `cluster_obj` instance
        self._save_cli_coverage()
        # replace the old `cluster_obj` instance and reload data
        self.cm.cache.cluster_obj = cluster_nodes.get_cluster_type(
        ).get_cluster_obj()
        self.cm.cache.test_data = {}
        self.cm.cache.addrs_data = cluster_nodes.load_addrs_data()
        self.cm.cache.last_checksum = addrs_data_checksum
Example #13
0
    def test_available_metrics(
        self,
        wait_epochs,
    ):
        """Test that list of available metrics == list of expected metrics."""
        # pylint: disable=unused-argument
        prometheus_port = (cluster_nodes.get_cluster_type(
        ).cluster_scripts.get_instance_ports(
            cluster_nodes.get_instance_num()).prometheus_pool1)

        response = get_prometheus_metrics(prometheus_port)

        metrics = response.text.strip().split("\n")
        metrics_keys = sorted(m.split()[0] for m in metrics)
        assert metrics_keys == self.EXPECTED_METRICS, "Metrics differ"
Example #14
0
    def _setup_dev_cluster(self) -> None:
        """Set up cluster instance that was already started outside of test framework."""
        work_dir = cluster_nodes.get_cluster_env().work_dir
        state_dir = work_dir / f"{cluster_nodes.STATE_CLUSTER}0"
        if (state_dir / cluster_nodes.ADDRS_DATA).exists():
            return

        self.cm._log("c0: setting up dev cluster")

        # Create "addrs_data" directly in the cluster state dir, so it can be reused
        # (in normal non-`DEV_CLUSTER_RUNNING` setup we want "addrs_data" stored among
        # test artifacts, so it can be used during cleanup etc.).
        tmp_path = state_dir / "addrs_data"
        tmp_path.mkdir(exist_ok=True, parents=True)
        cluster_obj = cluster_nodes.get_cluster_type().get_cluster_obj()
        cluster_nodes.setup_test_addrs(cluster_obj=cluster_obj,
                                       destination_dir=tmp_path)
Example #15
0
def short_kes_start_cluster(tmp_path_factory: TempdirFactory) -> Path:
    """Update *slotsPerKESPeriod* and *maxKESEvolutions*."""
    shared_tmp = temptools.get_pytest_shared_tmp(tmp_path_factory)
    max_kes_evolutions = 10

    # need to lock because this same fixture can run on several workers in parallel
    with locking.FileLockIfXdist(f"{shared_tmp}/startup_files_short_kes.lock"):
        destdir = shared_tmp / "startup_files_short_kes"
        destdir.mkdir(exist_ok=True)

        # return the existing script if it was already generated by another worker
        destdir_ls = list(destdir.glob("start-cluster*"))
        if destdir_ls:
            return destdir_ls[0]

        startup_files = cluster_nodes.get_cluster_type().cluster_scripts.copy_scripts_files(
            destdir=destdir
        )
        with open(startup_files.genesis_spec, encoding="utf-8") as fp_in:
            genesis_spec = json.load(fp_in)

        # KES needs to be valid at least until the local cluster is fully started.
        # We need to calculate how many slots there are from the start of the Shelley
        # epoch until the cluster is fully started.
        # Assume k=10, i.e. k * 10 = 100 slots in the Byron era.
        # Subtract one Byron epoch and the current (last) epoch when calculating slots
        # in Shelley epochs.
        epoch_length = genesis_spec["epochLength"]
        cluster_start_time_slots = int((NUM_OF_EPOCHS - 2) * epoch_length + 100)
        exact_kes_period_slots = int(cluster_start_time_slots / max_kes_evolutions)
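        # Worked example (illustrative numbers): with NUM_OF_EPOCHS=5 and epochLength=1500,
        # cluster_start_time_slots = (5 - 2) * 1500 + 100 = 4600,
        # exact_kes_period_slots = 4600 / 10 = 460,
        # and slotsPerKESPeriod = int(460 * 1.2) = 552.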

        genesis_spec["slotsPerKESPeriod"] = int(exact_kes_period_slots * 1.2)  # add buffer
        genesis_spec["maxKESEvolutions"] = max_kes_evolutions

        with open(startup_files.genesis_spec, "w", encoding="utf-8") as fp_out:
            json.dump(genesis_spec, fp_out)

        return startup_files.start_script
Example #16
0
class TestCLI:
    """Tests for cardano-cli."""

    TX_BODY_FILE = DATA_DIR / "test_tx_metadata_both_tx.body"
    TX_FILE = DATA_DIR / "test_tx_metadata_both_tx.signed"
    TX_OUT = DATA_DIR / "test_tx_metadata_both_tx.out"

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.testnets
    @pytest.mark.skipif(
        VERSIONS.cluster_era != VERSIONS.transaction_era,
        reason="different TX eras doesn't affect this test",
    )
    def test_protocol_mode(self, cluster: clusterlib.ClusterLib):
        """Check the default protocol mode - command works even without specifying protocol mode."""
        if cluster.protocol != clusterlib.Protocols.CARDANO:
            pytest.skip("runs on cluster in full cardano mode")
        cluster.cli([
            "query",
            "utxo",
            "--address",
            "addr_test1vpst87uzwafqkxumyf446zr2jsyn44cfpu9fe8yqanyuh6glj2hkl",
            *cluster.magic_args,
        ])

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.skipif(
        VERSIONS.cluster_era != VERSIONS.transaction_era,
        reason="different TX eras doesn't affect this test",
    )
    def test_whole_utxo(self, cluster: clusterlib.ClusterLib):
        """Check that it is possible to return the whole UTxO on local cluster."""
        if cluster.protocol != clusterlib.Protocols.CARDANO:
            pytest.skip("runs on cluster in full cardano mode")
        cluster.cli([
            "query",
            "utxo",
            "--whole-utxo",
            *cluster.magic_args,
        ])

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.skipif(
        VERSIONS.cluster_era != VERSIONS.transaction_era,
        reason="different TX eras doesn't affect this test",
    )
    @pytest.mark.skipif(
        cluster_nodes.get_cluster_type().type ==
        cluster_nodes.ClusterType.LOCAL,
        reason="supposed to run on testnet",
    )
    def test_testnet_whole_utxo(self, cluster: clusterlib.ClusterLib):
        """Check that it is possible to return the whole UTxO on testnets."""
        magic_args = " ".join(cluster.magic_args)
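        # the whole UTxO on a testnet can be very large, so the output is discarded and
        # only a successful exit of the command is checked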
        helpers.run_in_bash(
            f"cardano-cli query utxo --whole-utxo {magic_args} > /dev/null")

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.testnets
    def test_tx_view(self, cluster: clusterlib.ClusterLib):
        """Check that the output of `transaction view` is as expected."""
        tx_body = cluster.view_tx(tx_body_file=self.TX_BODY_FILE)
        tx = cluster.view_tx(tx_file=self.TX_FILE)
        assert tx_body == tx

        with open(self.TX_OUT) as infile:
            tx_view_out = infile.read()
        assert tx == tx_view_out.strip()
Example #17
0
    def test_stake_snapshot(self, cluster: clusterlib.ClusterLib):  # noqa: C901
        """Test the `stake-snapshot` and `ledger-state` commands and ledger state values."""
        # pylint: disable=too-many-statements,too-many-locals,too-many-branches
        temp_template = common.get_test_id(cluster)

        # make sure the queries can be finished in a single epoch
        stop = (
            20 if cluster_nodes.get_cluster_type().type == cluster_nodes.ClusterType.LOCAL else 200
        )
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-stop)

        stake_pool_ids = cluster.get_stake_pools()
        if not stake_pool_ids:
            pytest.skip("No stake pools are available.")
        if len(stake_pool_ids) > 200:
            pytest.skip("Skipping on this testnet, there's too many pools.")

        ledger_state = clusterlib_utils.get_ledger_state(cluster_obj=cluster)
        clusterlib_utils.save_ledger_state(
            cluster_obj=cluster,
            state_name=temp_template,
            ledger_state=ledger_state,
        )
        es_snapshot: dict = ledger_state["stateBefore"]["esSnapshots"]

        def _get_hashes(snapshot: str) -> Dict[str, int]:
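            """Return stake amounts from the given snapshot, aggregated per key/script hash."""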
            hashes: Dict[str, int] = {}
            for r in es_snapshot[snapshot]["stake"]:
                r_hash_rec = r[0]
                r_hash = r_hash_rec.get("script hash") or r_hash_rec.get("key hash")
                if r_hash in hashes:
                    hashes[r_hash] += r[1]
                else:
                    hashes[r_hash] = r[1]
            return hashes

        def _get_delegations(snapshot: str) -> Dict[str, List[str]]:
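            """Return delegated key/script hashes from the given snapshot, grouped by pool id."""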
            delegations: Dict[str, List[str]] = {}
            for r in es_snapshot[snapshot]["delegations"]:
                r_hash_rec = r[0]
                r_hash = r_hash_rec.get("script hash") or r_hash_rec.get("key hash")
                r_pool_id = r[1]
                if r_pool_id in delegations:
                    delegations[r_pool_id].append(r_hash)
                else:
                    delegations[r_pool_id] = [r_hash]
            return delegations

        errors = []

        ledger_state_keys = set(ledger_state)
        if ledger_state_keys != LEDGER_STATE_KEYS:
            errors.append(
                "unexpected ledger state keys: "
                f"{ledger_state_keys.difference(LEDGER_STATE_KEYS)} and "
                f"{LEDGER_STATE_KEYS.difference(ledger_state_keys)}"
            )

        # stake addresses (hashes) and corresponding amounts
        stake_mark = _get_hashes("pstakeMark")
        stake_set = _get_hashes("pstakeSet")
        stake_go = _get_hashes("pstakeGo")

        # pools (hashes) and stake addresses (hashes) delegated to corresponding pool
        delegations_mark = _get_delegations("pstakeMark")
        delegations_set = _get_delegations("pstakeSet")
        delegations_go = _get_delegations("pstakeGo")

        # all delegated stake addresses (hashes)
        delegated_hashes_mark = set(itertools.chain.from_iterable(delegations_mark.values()))
        delegated_hashes_set = set(itertools.chain.from_iterable(delegations_set.values()))
        delegated_hashes_go = set(itertools.chain.from_iterable(delegations_go.values()))

        # check if all delegated addresses are listed among stake addresses
        stake_hashes_mark = set(stake_mark)
        if not delegated_hashes_mark.issubset(stake_hashes_mark):
            errors.append(
                "for 'mark', some delegations are not listed in 'stake': "
                f"{delegated_hashes_mark.difference(stake_hashes_mark)}"
            )

        stake_hashes_set = set(stake_set)
        if not delegated_hashes_set.issubset(stake_hashes_set):
            errors.append(
                "for 'set', some delegations are not listed in 'stake': "
                f"{delegated_hashes_set.difference(stake_hashes_set)}"
            )

        stake_hashes_go = set(stake_go)
        if not delegated_hashes_go.issubset(stake_hashes_go):
            errors.append(
                "for 'go', some delegations are not listed in 'stake': "
                f"{delegated_hashes_go.difference(stake_hashes_go)}"
            )

        sum_mark = sum_set = sum_go = 0
        seen_hashes_mark: Set[str] = set()
        seen_hashes_set: Set[str] = set()
        seen_hashes_go: Set[str] = set()
        delegation_pool_ids = {*delegations_mark, *delegations_set, *delegations_go}
        for pool_id_dec in delegation_pool_ids:
            pool_id = helpers.encode_bech32(prefix="pool", data=pool_id_dec)

            # get stake info from ledger state
            pstake_hashes_mark = delegations_mark.get(pool_id_dec) or ()
            seen_hashes_mark.update(pstake_hashes_mark)
            pstake_amounts_mark = [stake_mark[h] for h in pstake_hashes_mark]
            pstake_sum_mark = functools.reduce(lambda x, y: x + y, pstake_amounts_mark, 0)

            pstake_hashes_set = delegations_set.get(pool_id_dec) or ()
            seen_hashes_set.update(pstake_hashes_set)
            pstake_amounts_set = [stake_set[h] for h in pstake_hashes_set]
            pstake_sum_set = functools.reduce(lambda x, y: x + y, pstake_amounts_set, 0)

            pstake_hashes_go = delegations_go.get(pool_id_dec) or ()
            seen_hashes_go.update(pstake_hashes_go)
            pstake_amounts_go = [stake_go[h] for h in pstake_hashes_go]
            pstake_sum_go = functools.reduce(lambda x, y: x + y, pstake_amounts_go, 0)

            # get stake info from `stake-snapshot` command
            stake_snapshot = cluster.get_stake_snapshot(stake_pool_id=pool_id)
            pstake_mark_cmd = stake_snapshot["poolStakeMark"]
            pstake_set_cmd = stake_snapshot["poolStakeSet"]
            pstake_go_cmd = stake_snapshot["poolStakeGo"]

            if pstake_sum_mark != pstake_mark_cmd:
                errors.append(f"pool: {pool_id}, mark:\n  {pstake_sum_mark} != {pstake_mark_cmd}")
            if pstake_sum_set != pstake_set_cmd:
                errors.append(f"pool: {pool_id}, set:\n  {pstake_sum_set} != {pstake_set_cmd}")
            if pstake_sum_go != pstake_go_cmd:
                errors.append(f"pool: {pool_id}, go:\n  {pstake_sum_go} != {pstake_go_cmd}")

            sum_mark += pstake_mark_cmd
            sum_set += pstake_set_cmd
            sum_go += pstake_go_cmd

        if seen_hashes_mark != delegated_hashes_mark:
            errors.append(
                "seen hashes and existing hashes differ for 'mark': "
                f"{seen_hashes_mark.difference(delegated_hashes_mark)} and "
                f"{delegated_hashes_mark.difference(seen_hashes_mark)}"
            )

        if seen_hashes_set != delegated_hashes_set:
            errors.append(
                "seen hashes and existing hashes differ for 'set': "
                f"{seen_hashes_set.difference(delegated_hashes_set)} and "
                f"{delegated_hashes_set.difference(seen_hashes_set)}"
            )

        if seen_hashes_go != delegated_hashes_go:
            errors.append(
                "seen hashes and existing hashes differ for 'go': "
                f"{seen_hashes_go.difference(delegated_hashes_go)} and "
                f"{delegated_hashes_go.difference(seen_hashes_go)}"
            )

        # active stake can be lower than sum of stakes, as some pools may not be running
        # and minting blocks
        if sum_mark < stake_snapshot["activeStakeMark"]:
            errors.append(f"active_mark: {sum_mark} < {stake_snapshot['activeStakeMark']}")
        if sum_set < stake_snapshot["activeStakeSet"]:
            errors.append(f"active_set: {sum_set} < {stake_snapshot['activeStakeSet']}")
        if sum_go < stake_snapshot["activeStakeGo"]:
            errors.append(f"active_go: {sum_go} < {stake_snapshot['activeStakeGo']}")

        if errors:
            err_joined = "\n".join(errors)
            pytest.fail(f"Errors:\n{err_joined}")
Example #18
0
    def _restart(self,
                 start_cmd: str = "",
                 stop_cmd: str = "") -> bool:  # noqa: C901
        """Restart cluster.

        Not called under global lock!
        """
        # don't restart the cluster if it was started outside of the test framework
        if self.cm.num_of_instances == 1 and DEV_CLUSTER_RUNNING:
            return True

        # using `_locked_log` because restart is not called under global lock
        self.cm._locked_log(
            f"c{self.cm.cluster_instance}: called `_restart`, start_cmd='{start_cmd}', "
            f"stop_cmd='{stop_cmd}'")

        startup_files = cluster_nodes.get_cluster_type(
        ).cluster_scripts.prepare_scripts_files(
            destdir=self.cm._create_startup_files_dir(
                self.cm.cluster_instance),
            instance_num=self.cm.cluster_instance,
            start_script=start_cmd,
            stop_script=stop_cmd,
        )

        self.cm._locked_log(
            f"c{self.cm.cluster_instance}: in `_restart`, new files "
            f"start_cmd='{startup_files.start_script}', "
            f"stop_cmd='{startup_files.stop_script}'")

        excp: Optional[Exception]
        for i in range(2):
            excp = None
            if i > 0:
                self.cm._locked_log(
                    f"c{self.cm.cluster_instance}: failed to start cluster, retrying"
                )
                time.sleep(0.2)

            try:
                cluster_nodes.stop_cluster(cmd=str(startup_files.stop_script))
            except Exception:
                pass

            self._restart_save_cluster_artifacts(clean=True)
            try:
                _kill_supervisor(self.cm.cluster_instance)
            except Exception:
                pass

            try:
                cluster_obj = cluster_nodes.start_cluster(
                    cmd=str(startup_files.start_script),
                    args=startup_files.start_script_args)
            except Exception as err:
                LOGGER.error(f"Failed to start cluster: {err}")
                excp = err
            else:
                break
        else:
            if not helpers.IS_XDIST:
                pytest.exit(msg=f"Failed to start cluster, exception: {excp}",
                            returncode=1)
            open(self.cm.instance_dir / CLUSTER_DEAD_FILE, "a").close()
            return False

        # setup faucet addresses
        tmp_path = Path(self.cm.tmp_path_factory.mktemp("addrs_data"))
        cluster_nodes.setup_test_addrs(cluster_obj, tmp_path)

        # create file that indicates that the cluster is running
        cluster_running_file = self.cm.instance_dir / CLUSTER_RUNNING_FILE
        if not cluster_running_file.exists():
            open(cluster_running_file, "a").close()

        return True
    def _restart(self,
                 start_cmd: str = "",
                 stop_cmd: str = "") -> bool:  # noqa: C901
        """Restart cluster.

        Not called under global lock!
        """
        # pylint: disable=too-many-branches
        cluster_running_file = self.cm.instance_dir / CLUSTER_RUNNING_FILE

        # don't restart the cluster if it was started outside of the test framework
        if configuration.DEV_CLUSTER_RUNNING:
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: ignoring restart, dev cluster is running"
            )
            if cluster_running_file.exists():
                LOGGER.warning(
                    "Ignoring requested cluster restart as 'DEV_CLUSTER_RUNNING' is set."
                )
            else:
                helpers.touch(cluster_running_file)
            return True

        # fail if cluster restart is forbidden and it was already started
        if configuration.FORBID_RESTART and cluster_running_file.exists():
            raise RuntimeError(
                "Cannot restart cluster when 'FORBID_RESTART' is set.")

        self.cm._log(
            f"c{self.cm.cluster_instance_num}: called `_restart`, start_cmd='{start_cmd}', "
            f"stop_cmd='{stop_cmd}'")

        startup_files = cluster_nodes.get_cluster_type(
        ).cluster_scripts.prepare_scripts_files(
            destdir=self.cm._create_startup_files_dir(
                self.cm.cluster_instance_num),
            instance_num=self.cm.cluster_instance_num,
            start_script=start_cmd,
            stop_script=stop_cmd,
        )

        state_dir = cluster_nodes.get_cluster_env().state_dir

        self.cm._log(
            f"c{self.cm.cluster_instance_num}: in `_restart`, new files "
            f"start_cmd='{startup_files.start_script}', "
            f"stop_cmd='{startup_files.stop_script}'")

        excp: Optional[Exception] = None
        for i in range(2):
            if i > 0:
                self.cm._log(
                    f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\nretrying"
                )
                time.sleep(0.2)

            try:
                LOGGER.info(
                    f"Stopping cluster with `{startup_files.stop_script}`.")
                helpers.run_command(str(startup_files.stop_script))
            except Exception as err:
                self.cm._log(
                    f"c{self.cm.cluster_instance_num}: failed to stop cluster:\n{err}"
                )

            # save artifacts only when produced during this test run
            if cluster_running_file.exists():
                artifacts.save_start_script_coverage(
                    log_file=state_dir / CLUSTER_START_CMDS_LOG,
                    pytest_config=self.cm.pytest_config,
                )
                artifacts.save_cluster_artifacts(
                    save_dir=self.cm.pytest_tmp_dir, state_dir=state_dir)

            shutil.rmtree(state_dir, ignore_errors=True)

            with contextlib.suppress(Exception):
                _kill_supervisor(self.cm.cluster_instance_num)

            try:
                cluster_obj = cluster_nodes.start_cluster(
                    cmd=str(startup_files.start_script),
                    args=startup_files.start_script_args)
            except Exception as err:
                LOGGER.error(f"Failed to start cluster: {err}")
                excp = err
            else:
                break
        else:
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\ncluster dead"
            )
            if not configuration.IS_XDIST:
                pytest.exit(msg=f"Failed to start cluster, exception: {excp}",
                            returncode=1)
            helpers.touch(self.cm.instance_dir / CLUSTER_DEAD_FILE)
            return False

        # Create temp dir for faucet addresses data.
        # Pytest's mktemp adds number to the end of the dir name, so keep the trailing '_'
        # as separator. Resulting dir name is e.g. 'addrs_data_ci3_0'.
        tmp_path = Path(
            self.cm.tmp_path_factory.mktemp(
                f"addrs_data_ci{self.cm.cluster_instance_num}_"))
        # setup faucet addresses
        cluster_nodes.setup_test_addrs(cluster_obj=cluster_obj,
                                       destination_dir=tmp_path)

        # create file that indicates that the cluster is running
        if not cluster_running_file.exists():
            helpers.touch(cluster_running_file)

        return True
    def ports(self) -> cluster_scripts.InstancePorts:
        """Return port mappings for current cluster instance."""
        return cluster_nodes.get_cluster_type(
        ).cluster_scripts.get_instance_ports(self.cluster_instance_num)
class TestDelegateAddr:
    """Tests for address delegation to stake pools."""
    @allure.link(helpers.get_vcs_link())
    @pytest.mark.parametrize(
        "use_build_cmd",
        (
            False,
            pytest.param(
                True,
                marks=pytest.mark.skipif(not common.BUILD_USABLE,
                                         reason=common.BUILD_SKIP_MSG),
            ),
        ),
        ids=("build_raw", "build"),
    )
    @pytest.mark.dbsync
    @pytest.mark.smoke
    def test_delegate_using_pool_id(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
        use_build_cmd: bool,
    ):
        """Submit registration certificate and delegate to pool using pool id.

        * register stake address and delegate it to pool
        * check that the stake address was delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = f"{common.get_test_id(cluster)}_{use_build_cmd}"

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_id=pool_id,
            use_build_cmd=use_build_cmd,
        )

        tx_db_record = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_record,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.parametrize(
        "use_build_cmd",
        (
            False,
            pytest.param(
                True,
                marks=pytest.mark.skipif(not common.BUILD_USABLE,
                                         reason=common.BUILD_SKIP_MSG),
            ),
        ),
        ids=("build_raw", "build"),
    )
    @pytest.mark.dbsync
    @pytest.mark.smoke
    @pytest.mark.skipif(
        cluster_nodes.get_cluster_type().type ==
        cluster_nodes.ClusterType.TESTNET_NOPOOLS,
        reason="supposed to run on cluster with pools",
    )
    def test_delegate_using_vkey(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_use_pool1: clusterlib.ClusterLib,
        use_build_cmd: bool,
    ):
        """Submit registration certificate and delegate to pool using cold vkey.

        * register stake address and delegate it to pool
        * check that the stake address was delegated
        * (optional) check records in db-sync
        """
        pool_name = cluster_management.Resources.POOL1
        cluster = cluster_use_pool1
        temp_template = f"{common.get_test_id(cluster)}_{use_build_cmd}"

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        node_cold = cluster_manager.cache.addrs_data[pool_name][
            "cold_key_pair"]
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            cold_vkey=node_cold.vkey_file,
            use_build_cmd=use_build_cmd,
        )

        tx_db_record = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_record,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.order(7)
    @pytest.mark.dbsync
    @pytest.mark.long
    def test_deregister(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
    ):
        """Deregister stake address.

        * create two payment addresses that share a single stake address
        * register and delegate the stake address to pool
        * attempt to deregister the stake address - deregistration is expected to fail
          because there are rewards in the stake address
        * withdraw rewards to payment address and deregister stake address
        * check that the key deposit was returned and rewards withdrawn
        * check that the stake address is no longer delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = common.get_test_id(cluster)

        # create two payment addresses that share a single stake address (just to test that
        # delegation works as expected even under such circumstances)
        stake_addr_rec = clusterlib_utils.create_stake_addr_records(
            f"{temp_template}_addr0", cluster_obj=cluster)[0]
        payment_addr_recs = clusterlib_utils.create_payment_addr_records(
            f"{temp_template}_addr0",
            f"{temp_template}_addr1",
            cluster_obj=cluster,
            stake_vkey_file=stake_addr_rec.vkey_file,
        )

        # fund payment address
        clusterlib_utils.fund_from_faucet(
            *payment_addr_recs,
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
        )

        pool_user = clusterlib.PoolUser(payment=payment_addr_recs[1],
                                        stake=stake_addr_rec)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_user=pool_user,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        tx_db_deleg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        if tx_db_deleg:
            # check in db-sync that both payment addresses share single stake address
            assert (dbsync_utils.get_utxo(
                address=payment_addr_recs[0].address).stake_address ==
                    stake_addr_rec.address)
            assert (dbsync_utils.get_utxo(
                address=payment_addr_recs[1].address).stake_address ==
                    stake_addr_rec.address)

        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_deleg,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )

        src_address = delegation_out.pool_user.payment.address

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_id}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # files for deregistering stake address
        stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        tx_files_deregister = clusterlib.TxFiles(
            certificate_files=[stake_addr_dereg_cert],
            signing_key_files=[
                delegation_out.pool_user.payment.skey_file,
                delegation_out.pool_user.stake.skey_file,
            ],
        )

        # attempt to deregister the stake address - deregistration is expected to fail
        # because there are rewards in the stake address
        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.send_tx(
                src_address=src_address,
                tx_name=f"{temp_template}_dereg_fail",
                tx_files=tx_files_deregister,
            )
        assert "StakeKeyNonZeroAccountBalanceDELEG" in str(excinfo.value)

        src_payment_balance = cluster.get_address_balance(src_address)
        reward_balance = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address).reward_account_balance

        # withdraw rewards to payment address, deregister stake address
        tx_raw_deregister_output = cluster.send_tx(
            src_address=src_address,
            tx_name=f"{temp_template}_dereg_withdraw",
            tx_files=tx_files_deregister,
            withdrawals=[
                clusterlib.TxOut(
                    address=delegation_out.pool_user.stake.address, amount=-1)
            ],
        )

        # check that the key deposit was returned and rewards withdrawn
        assert (
            cluster.get_address_balance(src_address) == src_payment_balance -
            tx_raw_deregister_output.fee + reward_balance +
            cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{src_address}`"

        # check that the stake address is no longer delegated
        stake_addr_info = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address)
        assert (not stake_addr_info.delegation
                ), f"Stake address is still delegated: {stake_addr_info}"

        tx_db_dereg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=tx_raw_deregister_output)
        if tx_db_dereg:
            assert delegation_out.pool_user.stake.address in tx_db_dereg.stake_deregistration
            assert (
                cluster.get_address_balance(src_address) ==
                dbsync_utils.get_utxo(address=src_address).amount_sum
            ), f"Unexpected balance for source address `{src_address}` in db-sync"

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.order(7)
    @pytest.mark.dbsync
    @pytest.mark.long
    def test_undelegate(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
    ):
        """Undelegate stake address.

        * submit registration certificate and delegate to pool
        * wait for first reward
        * undelegate stake address:

           - withdraw rewards to payment address
           - deregister stake address
           - re-register stake address

        * check that the key deposit was not returned
        * check that rewards were withdrawn
        * check that the stake address is still registered
        * check that the stake address is no longer delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = common.get_test_id(cluster)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        # check records in db-sync
        tx_db_deleg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_deleg,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )

        src_address = delegation_out.pool_user.payment.address

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_id}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # files for deregistering / re-registering stake address
        stake_addr_dereg_cert_file = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_undeleg_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        stake_addr_reg_cert_file = cluster.gen_stake_addr_registration_cert(
            addr_name=f"{temp_template}_undeleg_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        tx_files_undeleg = clusterlib.TxFiles(
            certificate_files=[
                stake_addr_dereg_cert_file, stake_addr_reg_cert_file
            ],
            signing_key_files=[
                delegation_out.pool_user.payment.skey_file,
                delegation_out.pool_user.stake.skey_file,
            ],
        )

        src_payment_balance = cluster.get_address_balance(src_address)
        reward_balance = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address).reward_account_balance

        # withdraw rewards to payment address; deregister and re-register stake address
        tx_raw_undeleg = cluster.send_tx(
            src_address=src_address,
            tx_name=f"{temp_template}_undeleg_withdraw",
            tx_files=tx_files_undeleg,
            withdrawals=[
                clusterlib.TxOut(
                    address=delegation_out.pool_user.stake.address, amount=-1)
            ],
        )

        # check that the key deposit was NOT returned and rewards were withdrawn
        assert (
            cluster.get_address_balance(src_address) == src_payment_balance -
            tx_raw_undeleg.fee + reward_balance
        ), f"Incorrect balance for source address `{src_address}`"

        # check that the stake address is no longer delegated
        stake_addr_info = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address)
        assert stake_addr_info.address, f"Reward address is not registered: {stake_addr_info}"
        assert (not stake_addr_info.delegation
                ), f"Stake address is still delegated: {stake_addr_info}"

        this_epoch = cluster.wait_for_new_epoch(padding_seconds=20)
        assert cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address
        ).reward_account_balance, "No reward was received next epoch after undelegation"

        # check `transaction view` command
        tx_view.check_tx_view(cluster_obj=cluster,
                              tx_raw_output=tx_raw_undeleg)

        # check records in db-sync
        tx_db_undeleg = dbsync_utils.check_tx(cluster_obj=cluster,
                                              tx_raw_output=tx_raw_undeleg)
        if tx_db_undeleg:
            assert delegation_out.pool_user.stake.address in tx_db_undeleg.stake_deregistration
            assert delegation_out.pool_user.stake.address in tx_db_undeleg.stake_registration

            db_rewards = dbsync_utils.check_address_reward(
                address=delegation_out.pool_user.stake.address,
                epoch_from=init_epoch)
            assert db_rewards
            db_reward_epochs = sorted(r.spendable_epoch
                                      for r in db_rewards.rewards)
            assert db_reward_epochs[0] == init_epoch + 4
            assert this_epoch in db_reward_epochs

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.parametrize(
        "use_build_cmd",
        (
            False,
            pytest.param(
                True,
                marks=pytest.mark.skipif(not common.BUILD_USABLE,
                                         reason=common.BUILD_SKIP_MSG),
            ),
        ),
        ids=("build_raw", "build"),
    )
    @pytest.mark.dbsync
    @pytest.mark.smoke
    def test_addr_registration_deregistration(
        self,
        cluster: clusterlib.ClusterLib,
        pool_users: List[clusterlib.PoolUser],
        pool_users_disposable: List[clusterlib.PoolUser],
        use_build_cmd: bool,
    ):
        """Submit registration and deregistration certificates in single TX.

        * create stake address registration cert
        * create stake address deregistration cert
        * register and deregister stake address in single TX
        * check that the balance for source address was correctly updated and that key deposit
          was not needed
        * (optional) check records in db-sync
        """
        temp_template = f"{common.get_test_id(cluster)}_{use_build_cmd}"

        user_registered = pool_users_disposable[0]
        user_payment = pool_users[0].payment
        src_init_balance = cluster.get_address_balance(user_payment.address)

        # create stake address registration cert
        stake_addr_reg_cert_file = cluster.gen_stake_addr_registration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # create stake address deregistration cert
        stake_addr_dereg_cert_file = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # register and deregister stake address in single TX
        tx_files = clusterlib.TxFiles(
            certificate_files=[
                stake_addr_reg_cert_file,
                stake_addr_dereg_cert_file,
            ] * 3,
            signing_key_files=[
                user_payment.skey_file, user_registered.stake.skey_file
            ],
        )

        if use_build_cmd:
            tx_raw_output = cluster.build_tx(
                src_address=user_payment.address,
                tx_name=f"{temp_template}_reg_deleg",
                tx_files=tx_files,
                fee_buffer=2_000_000,
                deposit=0,
                witness_override=len(tx_files.signing_key_files),
            )
            tx_signed = cluster.sign_tx(
                tx_body_file=tx_raw_output.out_file,
                signing_key_files=tx_files.signing_key_files,
                tx_name=f"{temp_template}_reg_deleg",
            )
            cluster.submit_tx(tx_file=tx_signed, txins=tx_raw_output.txins)
        else:
            tx_raw_output = cluster.send_tx(
                src_address=user_payment.address,
                tx_name=f"{temp_template}_reg_dereg",
                tx_files=tx_files,
                deposit=0,
            )

        # check that the balance for source address was correctly updated and that key deposit
        # was not needed
        assert (
            cluster.get_address_balance(
                user_payment.address) == src_init_balance - tx_raw_output.fee
        ), f"Incorrect balance for source address `{user_payment.address}`"

        tx_db_record = dbsync_utils.check_tx(cluster_obj=cluster,
                                             tx_raw_output=tx_raw_output)
        if tx_db_record:
            assert user_registered.stake.address in tx_db_record.stake_registration
            assert user_registered.stake.address in tx_db_record.stake_deregistration

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.parametrize(
        "use_build_cmd",
        (
            False,
            pytest.param(
                True,
                marks=pytest.mark.skipif(not common.BUILD_USABLE,
                                         reason=common.BUILD_SKIP_MSG),
            ),
        ),
        ids=("build_raw", "build"),
    )
    @pytest.mark.dbsync
    @pytest.mark.smoke
    def test_addr_delegation_deregistration(
        self,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
        pool_users: List[clusterlib.PoolUser],
        pool_users_disposable: List[clusterlib.PoolUser],
        use_build_cmd: bool,
    ):
        """Submit delegation and deregistration certificates in single TX.

        * create stake address registration cert
        * create stake address deregistration cert
        * register stake address
        * create stake address delegation cert
        * delegate and deregister stake address in single TX
        * check that the balance for source address was correctly updated and that the key
          deposit was returned
        * check that the stake address was NOT delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = f"{common.get_test_id(cluster)}_{use_build_cmd}"

        user_registered = pool_users_disposable[0]
        user_payment = pool_users[0].payment
        src_init_balance = cluster.get_address_balance(user_payment.address)

        # create stake address registration cert
        stake_addr_reg_cert_file = cluster.gen_stake_addr_registration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # create stake address deregistration cert
        stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # register stake address
        tx_files = clusterlib.TxFiles(
            certificate_files=[stake_addr_reg_cert_file],
            signing_key_files=[user_payment.skey_file],
        )
        tx_raw_output_reg = cluster.send_tx(
            src_address=user_payment.address,
            tx_name=f"{temp_template}_reg",
            tx_files=tx_files,
        )

        tx_db_reg = dbsync_utils.check_tx(cluster_obj=cluster,
                                          tx_raw_output=tx_raw_output_reg)
        if tx_db_reg:
            assert user_registered.stake.address in tx_db_reg.stake_registration

        # check that the balance for source address was correctly updated
        assert (
            cluster.get_address_balance(
                user_payment.address) == src_init_balance -
            tx_raw_output_reg.fee - cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{user_payment.address}`"

        src_registered_balance = cluster.get_address_balance(
            user_payment.address)

        # create stake address delegation cert
        stake_addr_deleg_cert_file = cluster.gen_stake_addr_delegation_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file,
            stake_pool_id=pool_id,
        )

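        # wait for a time window within the current epoch so the delegation TX is submitted
        # in the same epoch that `init_epoch` records (used for the db-sync
        # `active_epoch_no` check below)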
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # delegate and deregister stake address in single TX
        tx_files = clusterlib.TxFiles(
            certificate_files=[
                stake_addr_deleg_cert_file, stake_addr_dereg_cert
            ],
            signing_key_files=[
                user_payment.skey_file, user_registered.stake.skey_file
            ],
        )

        if use_build_cmd:
            tx_raw_output_deleg = cluster.build_tx(
                src_address=user_payment.address,
                tx_name=f"{temp_template}_deleg_dereg",
                tx_files=tx_files,
                fee_buffer=2_000_000,
                witness_override=len(tx_files.signing_key_files),
            )
            tx_signed = cluster.sign_tx(
                tx_body_file=tx_raw_output_deleg.out_file,
                signing_key_files=tx_files.signing_key_files,
                tx_name=f"{temp_template}_deleg_dereg",
            )
            cluster.submit_tx(tx_file=tx_signed,
                              txins=tx_raw_output_deleg.txins)
        else:
            tx_raw_output_deleg = cluster.send_tx(
                src_address=user_payment.address,
                tx_name=f"{temp_template}_deleg_dereg",
                tx_files=tx_files,
            )

        # check that the balance for source address was correctly updated and that the key
        # deposit was returned
        assert (
            cluster.get_address_balance(
                user_payment.address) == src_registered_balance -
            tx_raw_output_deleg.fee + cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{user_payment.address}`"

        # check that the stake address was NOT delegated
        stake_addr_info = cluster.get_stake_addr_info(
            user_registered.stake.address)
        assert not stake_addr_info.delegation, f"Stake address was delegated: {stake_addr_info}"

        tx_db_deleg = dbsync_utils.check_tx(cluster_obj=cluster,
                                            tx_raw_output=tx_raw_output_deleg)
        if tx_db_deleg:
            assert user_registered.stake.address in tx_db_deleg.stake_deregistration
            assert user_registered.stake.address == tx_db_deleg.stake_delegation[
                0].address
            assert tx_db_deleg.stake_delegation[
                0].active_epoch_no == init_epoch + 2
            assert pool_id == tx_db_deleg.stake_delegation[0].pool_id

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.parametrize(
        "use_build_cmd",
        (
            False,
            pytest.param(
                True,
                marks=pytest.mark.skipif(not common.BUILD_USABLE,
                                         reason=common.BUILD_SKIP_MSG),
            ),
        ),
        ids=("build_raw", "build"),
    )
    @pytest.mark.dbsync
    @pytest.mark.smoke
    def test_addr_registration_certificate_order(
        self,
        cluster: clusterlib.ClusterLib,
        pool_users: List[clusterlib.PoolUser],
        pool_users_disposable: List[clusterlib.PoolUser],
        use_build_cmd: bool,
    ):
        """Submit (de)registration certificates in single TX and check that the order matter.

        * create stake address registration cert
        * create stake address deregistration cert
        * register, deregister, register, deregister and register stake address in single TX
        * check that the address is registered
        * check that the balance for source address was correctly updated and that key deposit
          was needed
        * (optional) check records in db-sync
        """
        temp_template = f"{common.get_test_id(cluster)}_{use_build_cmd}"

        user_registered = pool_users_disposable[0]
        user_payment = pool_users[0].payment
        src_init_balance = cluster.get_address_balance(user_payment.address)

        # create stake address registration cert
        stake_addr_reg_cert_file = cluster.gen_stake_addr_registration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # create stake address deregistration cert
        stake_addr_dereg_cert_file = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # register, deregister, register, deregister and register stake address in single TX
        # prove that the order matters
        tx_files = clusterlib.TxFiles(
            certificate_files=[
                stake_addr_reg_cert_file,
                stake_addr_dereg_cert_file,
                stake_addr_reg_cert_file,
                stake_addr_dereg_cert_file,
                stake_addr_reg_cert_file,
            ],
            signing_key_files=[
                user_payment.skey_file, user_registered.stake.skey_file
            ],
        )

        deposit = cluster.get_address_deposit()

        if use_build_cmd:
            tx_raw_output = cluster.build_tx(
                src_address=user_payment.address,
                tx_name=f"{temp_template}_reg_dereg_cert_order",
                tx_files=tx_files,
                fee_buffer=2_000_000,
                witness_override=len(tx_files.signing_key_files),
                deposit=deposit,
            )
            tx_signed = cluster.sign_tx(
                tx_body_file=tx_raw_output.out_file,
                signing_key_files=tx_files.signing_key_files,
                tx_name=f"{temp_template}_reg_dereg_cert_order",
            )
            cluster.submit_tx(tx_file=tx_signed, txins=tx_raw_output.txins)
        else:
            tx_raw_output = cluster.send_tx(
                src_address=user_payment.address,
                tx_name=f"{temp_template}_reg_dereg",
                tx_files=tx_files,
                deposit=deposit,
            )

        # check that the stake address is registered
        assert cluster.get_stake_addr_info(
            user_registered.stake.address).address

        # check that the balance for source address was correctly updated and that key deposit
        # was needed
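        # (only one deposit is charged - the five certificates net out
        # to a single registration)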
        assert (
            cluster.get_address_balance(
                user_payment.address) == src_init_balance - tx_raw_output.fee -
            deposit
        ), f"Incorrect balance for source address `{user_payment.address}`"

        tx_db_record = dbsync_utils.check_tx(cluster_obj=cluster,
                                             tx_raw_output=tx_raw_output)
        if tx_db_record:
            assert user_registered.stake.address in tx_db_record.stake_registration
            assert user_registered.stake.address in tx_db_record.stake_deregistration
Example #22
0
    def get(  # noqa: C901
        self,
        singleton: bool = False,
        mark: str = "",
        lock_resources: UnpackableSequence = (),
        use_resources: UnpackableSequence = (),
        cleanup: bool = False,
        start_cmd: str = "",
    ) -> clusterlib.ClusterLib:
        """Return the `clusterlib.ClusterLib` instance once we can start the test.

        It checks current conditions and waits if the conditions don't allow the test
        to start right away.
        """
        # pylint: disable=too-many-statements,too-many-branches,too-many-locals

        # don't start new cluster if it was already started outside of test framework
        if DEV_CLUSTER_RUNNING:
            if start_cmd:
                LOGGER.warning(
                    f"Ignoring the '{start_cmd}' cluster start command as "
                    "'DEV_CLUSTER_RUNNING' is set.")
            return self._reuse_dev_cluster()

        if FORBID_RESTART and start_cmd:
            raise RuntimeError(
                "Cannot use custom start command when 'FORBID_RESTART' is set."
            )

        selected_instance = -1
        restart_here = False
        restart_ready = False
        first_iteration = True
        sleep_delay = 1
        marked_tests_cache: Dict[int, MarkedTestsStatus] = {}

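        # a custom start command is allowed only for tests that get exclusive access
        # to the cluster instance (singleton or marked group of tests)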
        if start_cmd:
            if not (singleton or mark):
                raise AssertionError(
                    "Custom start command can be used only together with `singleton` or `mark`"
                )
            # always clean after test(s) that started cluster with custom configuration
            cleanup = True

        # iterate until it is possible to start the test
        while True:
            if restart_ready:
                self._restart(start_cmd=start_cmd)

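            # random sleep to stagger the workers so they don't all retry at once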
            if not first_iteration:
                helpers.xdist_sleep(random.random() * sleep_delay)

            # nothing time-consuming can go under this lock, as it would block all other workers
            with helpers.FileLockIfXdist(self.cm.cluster_lock):
                test_on_worker = list(
                    self.cm.lock_dir.glob(
                        f"{CLUSTER_DIR_TEMPLATE}*/{TEST_RUNNING_GLOB}_{self.cm.worker_id}"
                    ))

                # test is already running, nothing to set up
                if (first_iteration and test_on_worker
                        and self.cm._cluster_instance_num != -1
                        and self.cm.cache.cluster_obj):
                    self.cm._log(f"{test_on_worker[0]} already exists")
                    return self.cm.cache.cluster_obj

                first_iteration = False  # needs to be set here, before the first `continue`
                self.cm._cluster_instance_num = -1

                # try all existing cluster instances
                for instance_num in range(self.cm.num_of_instances):
                    # if instance to run the test on was already decided, skip all other instances
                    # pylint: disable=consider-using-in
                    if selected_instance != -1 and instance_num != selected_instance:
                        continue

                    instance_dir = self.cm.lock_dir / f"{CLUSTER_DIR_TEMPLATE}{instance_num}"
                    instance_dir.mkdir(exist_ok=True)

                    # if the selected instance failed to start, move on to other instance
                    if (instance_dir / CLUSTER_DEAD_FILE).exists():
                        selected_instance = -1
                        restart_here = False
                        restart_ready = False
                        # remove status files that are checked by other workers
                        for sf in (
                                *instance_dir.glob(f"{TEST_CURR_MARK_GLOB}_*"),
                                *instance_dir.glob(
                                    f"{TEST_MARK_STARTING_GLOB}_*"),
                        ):
                            os.remove(sf)

                        dead_clusters = list(
                            self.cm.lock_dir.glob(
                                f"{CLUSTER_DIR_TEMPLATE}*/{CLUSTER_DEAD_FILE}")
                        )
                        if len(dead_clusters) == self.cm.num_of_instances:
                            raise RuntimeError(
                                "All clusters are dead, cannot run.")
                        continue

                    # singleton test is running, so no other test can be started
                    if (instance_dir / TEST_SINGLETON_FILE).exists():
                        self.cm._log(
                            f"c{instance_num}: singleton test in progress, cannot run"
                        )
                        sleep_delay = 5
                        continue

                    restart_in_progress = list(
                        instance_dir.glob(f"{RESTART_IN_PROGRESS_GLOB}_*"))
                    # cluster restart planned, no new tests can start
                    if not restart_here and restart_in_progress:
                        # no log message here, there would be too many of them
                        sleep_delay = 5
                        continue

                    started_tests = list(
                        instance_dir.glob(f"{TEST_RUNNING_GLOB}_*"))

                    # "marked tests" = group of tests marked with a specific mark.
                    # While these tests are running, no unmarked test can start.
                    marked_starting = list(
                        instance_dir.glob(f"{TEST_MARK_STARTING_GLOB}_*"))
                    marked_running = list(
                        instance_dir.glob(f"{TEST_CURR_MARK_GLOB}_*"))

                    if mark:
                        marked_running_my = (
                            instance_dir /
                            f"{TEST_CURR_MARK_GLOB}_{mark}").exists()
                        marked_starting_my = list(
                            instance_dir.glob(
                                f"{TEST_MARK_STARTING_GLOB}_{mark}_*"))

                        marked_running_my_anywhere = list(
                            self.cm.lock_dir.glob(
                                f"{CLUSTER_DIR_TEMPLATE}*/{TEST_CURR_MARK_GLOB}_{mark}"
                            ))
                        # check if tests with my mark are running on some other cluster instance
                        if not marked_running_my and marked_running_my_anywhere:
                            self.cm._log(
                                f"c{instance_num}: tests marked with my mark '{mark}' "
                                "already running on other cluster instance, cannot run"
                            )
                            continue

                        marked_starting_my_anywhere = list(
                            self.cm.lock_dir.glob(
                                f"{CLUSTER_DIR_TEMPLATE}*/{TEST_MARK_STARTING_GLOB}_{mark}_*"
                            ))
                        # check if tests with my mark are starting on some other cluster instance
                        if not marked_starting_my and marked_starting_my_anywhere:
                            self.cm._log(
                                f"c{instance_num}: tests marked with my mark '{mark}' starting "
                                "on other cluster instance, cannot run")
                            continue

                        # check if this test has the same mark as currently running marked tests
                        if marked_running_my or marked_starting_my:
                            # lock to this cluster instance
                            selected_instance = instance_num
                        elif marked_running or marked_starting:
                            self.cm._log(
                                f"c{instance_num}: tests marked with other mark starting "
                                f"or running, I have different mark '{mark}'")
                            continue

                        # check if we need to wait until marked tests can run
                        if marked_starting_my and started_tests:
                            self.cm._log(
                                f"c{instance_num}: unmarked tests running, wants to start '{mark}'"
                            )
                            sleep_delay = 2
                            continue

                    # no unmarked test can run while marked tests are starting or running
                    elif marked_running or marked_starting:
                        self.cm._log(
                            f"c{instance_num}: marked tests starting or running, "
                            "I don't have a mark")
                        sleep_delay = 5
                        continue

                    # is this the first marked test that wants to run?
                    initial_marked_test = bool(mark and not marked_running)

                    # indicate that it is planned to start marked tests as soon as
                    # all currently running tests are finished or the cluster is restarted
                    if initial_marked_test:
                        # lock to this cluster instance
                        selected_instance = instance_num
                        mark_starting_file = (
                            instance_dir /
                            f"{TEST_MARK_STARTING_GLOB}_{mark}_{self.cm.worker_id}"
                        )
                        if not mark_starting_file.exists():
                            open(
                                mark_starting_file,
                                "a",
                            ).close()
                        if started_tests:
                            self.cm._log(
                                f"c{instance_num}: unmarked tests running, wants to start '{mark}'"
                            )
                            sleep_delay = 3
                            continue

                    # get marked tests status
                    marked_tests_status = self._get_marked_tests_status(
                        cache=marked_tests_cache, instance_num=instance_num)

                    # marked tests are already running
                    if marked_running:
                        active_mark_file = marked_running[0].name

                        # update marked tests status
                        self._update_marked_tests(
                            marked_tests_status=marked_tests_status,
                            active_mark_name=active_mark_file,
                            started_tests=started_tests,
                            instance_num=instance_num,
                        )

                        self.cm._log(
                            f"c{instance_num}: in marked tests branch, "
                            f"I have required mark '{mark}'")

                    # reset counter of cycles with no marked test running
                    marked_tests_status.no_marked_tests_iter = 0

                    # this test is a singleton - no other test can run while this one is running
                    if singleton and started_tests:
                        self.cm._log(
                            f"c{instance_num}: tests are running, cannot start singleton"
                        )
                        sleep_delay = 5
                        continue

                    # this test wants to lock some resources, check if these are not
                    # locked or in use
                    if lock_resources:
                        res_usable = self._are_resources_usable(
                            resources=lock_resources,
                            instance_dir=instance_dir,
                            instance_num=instance_num,
                        )
                        if not res_usable:
                            sleep_delay = 5
                            continue

                    # filter out `lock_resources` from the list of `use_resources`
                    if use_resources and lock_resources:
                        use_resources = list(
                            set(use_resources) - set(lock_resources))

                    # this test wants to use some resources, check if these are not locked
                    if use_resources:
                        res_locked = self._are_resources_locked(
                            resources=use_resources,
                            instance_dir=instance_dir,
                            instance_num=instance_num,
                        )
                        if res_locked:
                            sleep_delay = 5
                            continue

                    # indicate that the cluster will be restarted
                    new_cmd_restart = bool(start_cmd and
                                           (initial_marked_test or singleton))
                    if not restart_here and (
                            new_cmd_restart
                            or self._is_restart_needed(instance_num)):
                        if started_tests:
                            self.cm._log(
                                f"c{instance_num}: tests are running, cannot restart"
                            )
                            continue

                        # Cluster restart will be performed by this worker.
                        # By setting `restart_here`, we make sure this worker continues on
                        # this cluster instance after restart. This is important because
                        # the `start_cmd` used for starting the cluster might be specific
                        # to the test.
                        restart_here = True
                        self.cm._log(
                            f"c{instance_num}: setting to restart cluster")
                        selected_instance = instance_num
                        restart_in_progress_file = (
                            instance_dir /
                            f"{RESTART_IN_PROGRESS_GLOB}_{self.cm.worker_id}")
                        if not restart_in_progress_file.exists():
                            open(restart_in_progress_file, "a").close()

                    # we've found a suitable cluster instance
                    selected_instance = instance_num
                    self.cm._cluster_instance_num = instance_num
                    cluster_nodes.set_cluster_env(instance_num)

                    if restart_here:
                        if restart_ready:
                            # The cluster was already restarted if we are here and
                            # `restart_ready` is still True.
                            restart_ready = False

                            # Remove status files that are no longer valid after restart.
                            for f in instance_dir.glob(
                                    f"{RESTART_IN_PROGRESS_GLOB}_*"):
                                os.remove(f)
                            for f in instance_dir.glob(
                                    f"{RESTART_NEEDED_GLOB}_*"):
                                os.remove(f)
                        else:
                            self.cm._log(f"c{instance_num}: calling restart")
                            # the actual `_restart` function will be called outside
                            # of global lock
                            restart_ready = True
                            continue

                    # from this point on, all conditions needed to start the test are met

                    # this test is a singleton
                    if singleton:
                        self.cm._log(f"c{instance_num}: starting singleton")
                        open(self.cm.instance_dir / TEST_SINGLETON_FILE,
                             "a").close()

                    # this test is a first marked test
                    if initial_marked_test:
                        self.cm._log(
                            f"c{instance_num}: starting '{mark}' tests")
                        open(
                            self.cm.instance_dir /
                            f"{TEST_CURR_MARK_GLOB}_{mark}", "a").close()
                        for sf in marked_starting:
                            os.remove(sf)

                    # create status file for each in-use resource
                    _ = [
                        open(
                            self.cm.instance_dir /
                            f"{RESOURCE_IN_USE_GLOB}_{r}_{self.cm.worker_id}",
                            "a",
                        ).close() for r in use_resources
                    ]

                    # create status file for each locked resource
                    _ = [
                        open(
                            self.cm.instance_dir /
                            f"{RESOURCE_LOCKED_GLOB}_{r}_{self.cm.worker_id}",
                            "a",
                        ).close() for r in lock_resources
                    ]

                    # cleanup = cluster restart after test (group of tests) is finished
                    if cleanup:
                        # cleanup after a group of tests that are marked with a marker
                        if mark:
                            self.cm._log(f"c{instance_num}: cleanup and mark")
                            open(
                                self.cm.instance_dir /
                                f"{RESTART_AFTER_MARK_GLOB}_{self.cm.worker_id}",
                                "a",
                            ).close()
                        # cleanup after single test (e.g. singleton)
                        else:
                            self.cm._log(
                                f"c{instance_num}: cleanup and not mark")
                            open(
                                self.cm.instance_dir /
                                f"{RESTART_NEEDED_GLOB}_{self.cm.worker_id}",
                                "a",
                            ).close()

                    break
                else:
                    # if the test cannot start on any instance, return to top-level loop
                    continue

                test_running_file = (
                    self.cm.instance_dir /
                    f"{TEST_RUNNING_GLOB}_{self.cm.worker_id}")
                self.cm._log(
                    f"c{self.cm.cluster_instance_num}: creating {test_running_file}"
                )
                open(test_running_file, "a").close()

                # check if it is necessary to reload data
                state_dir = cluster_nodes.get_cluster_env().state_dir
                self._reload_cluster_obj(state_dir=state_dir)

                cluster_obj = self.cm.cache.cluster_obj
                if not cluster_obj:
                    cluster_obj = cluster_nodes.get_cluster_type(
                    ).get_cluster_obj()

                # `cluster_obj` is ready, we can start the test
                break

        return cluster_obj
Example #23
0
    def _restart(self,
                 start_cmd: str = "",
                 stop_cmd: str = "") -> bool:  # noqa: C901
        """Restart cluster.

        Not called under global lock!
        """
        # pylint: disable=too-many-branches
        cluster_running_file = self.cm.instance_dir / CLUSTER_RUNNING_FILE

        # don't restart cluster if it was started outside of test framework
        if DEV_CLUSTER_RUNNING:
            if cluster_running_file.exists():
                LOGGER.warning(
                    "Ignoring requested cluster restart as 'DEV_CLUSTER_RUNNING' is set."
                )
            else:
                open(cluster_running_file, "a").close()
            return True

        # fail if cluster restart is forbidden and it was already started
        if FORBID_RESTART and cluster_running_file.exists():
            raise RuntimeError(
                "Cannot restart cluster when 'FORBID_RESTART' is set.")

        self.cm._log(
            f"c{self.cm.cluster_instance_num}: called `_restart`, start_cmd='{start_cmd}', "
            f"stop_cmd='{stop_cmd}'")

        startup_files = cluster_nodes.get_cluster_type(
        ).cluster_scripts.prepare_scripts_files(
            destdir=self.cm._create_startup_files_dir(
                self.cm.cluster_instance_num),
            instance_num=self.cm.cluster_instance_num,
            start_script=start_cmd,
            stop_script=stop_cmd,
        )

        state_dir = cluster_nodes.get_cluster_env().state_dir

        self.cm._log(
            f"c{self.cm.cluster_instance_num}: in `_restart`, new files "
            f"start_cmd='{startup_files.start_script}', "
            f"stop_cmd='{startup_files.stop_script}'")

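        # try to start the cluster; if the first attempt fails, retry once before
        # giving up and marking the cluster instance as dead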
        excp: Optional[Exception] = None
        for i in range(2):
            if i > 0:
                self.cm._log(
                    f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\nretrying"
                )
                time.sleep(0.2)

            try:
                cluster_nodes.stop_cluster(cmd=str(startup_files.stop_script))
            except Exception as err:
                self.cm._log(
                    f"c{self.cm.cluster_instance_num}: failed to stop cluster:\n{err}"
                )

            # save artifacts only when produced during this test run
            if cluster_running_file.exists():
                cli_coverage.save_start_script_coverage(
                    log_file=state_dir / CLUSTER_START_CMDS_LOG,
                    pytest_config=self.cm.pytest_config,
                )
                self._restart_save_cluster_artifacts(clean=True)

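            # make sure no stale supervisor process is left behind; failures here
            # are not fatal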
            try:
                _kill_supervisor(self.cm.cluster_instance_num)
            except Exception:
                pass

            try:
                cluster_obj = cluster_nodes.start_cluster(
                    cmd=str(startup_files.start_script),
                    args=startup_files.start_script_args)
            except Exception as err:
                LOGGER.error(f"Failed to start cluster: {err}")
                excp = err
            else:
                break
        else:
            self.cm._log(
                f"c{self.cm.cluster_instance_num}: failed to start cluster:\n{excp}\ncluster dead"
            )
            if not helpers.IS_XDIST:
                pytest.exit(msg=f"Failed to start cluster, exception: {excp}",
                            returncode=1)
            open(self.cm.instance_dir / CLUSTER_DEAD_FILE, "a").close()
            return False

        # setup faucet addresses
        tmp_path = Path(self.cm.tmp_path_factory.mktemp("addrs_data"))
        cluster_nodes.setup_test_addrs(cluster_obj, tmp_path)

        # create file that indicates that the cluster is running
        if not cluster_running_file.exists():
            open(cluster_running_file, "a").close()

        return True
Example #24
0
class TestCLI:
    """Tests for cardano-cli."""

    TX_BODY_FILE = DATA_DIR / "test_tx_metadata_both_tx.body"
    TX_FILE = DATA_DIR / "test_tx_metadata_both_tx.signed"
    TX_BODY_OUT = DATA_DIR / "test_tx_metadata_both_tx_body.out"
    TX_OUT = DATA_DIR / "test_tx_metadata_both_tx.out"

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.testnets
    @pytest.mark.skipif(
        VERSIONS.transaction_era != VERSIONS.DEFAULT_TX_ERA,
        reason="different TX eras don't affect this test",
    )
    def test_protocol_mode(self, cluster: clusterlib.ClusterLib):
        """Check the default protocol mode - command works even without specifying protocol mode."""
        if cluster.protocol != clusterlib.Protocols.CARDANO:
            pytest.skip("runs on cluster in full cardano mode")

        common.get_test_id(cluster)

        cluster.cli(
            [
                "query",
                "utxo",
                "--address",
                "addr_test1vpst87uzwafqkxumyf446zr2jsyn44cfpu9fe8yqanyuh6glj2hkl",
                *cluster.magic_args,
            ]
        )

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.skipif(
        VERSIONS.transaction_era != VERSIONS.DEFAULT_TX_ERA,
        reason="different TX eras don't affect this test",
    )
    def test_whole_utxo(self, cluster: clusterlib.ClusterLib):
        """Check that it is possible to return the whole UTxO on local cluster."""
        if cluster.protocol != clusterlib.Protocols.CARDANO:
            pytest.skip("runs on cluster in full cardano mode")

        common.get_test_id(cluster)

        cluster.cli(
            [
                "query",
                "utxo",
                "--whole-utxo",
                *cluster.magic_args,
            ]
        )

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.skipif(
        VERSIONS.transaction_era != VERSIONS.DEFAULT_TX_ERA,
        reason="different TX eras doesn't affect this test",
    )
    @pytest.mark.skipif(
        cluster_nodes.get_cluster_type().type == cluster_nodes.ClusterType.LOCAL,
        reason="supposed to run on testnet",
    )
    def test_testnet_whole_utxo(self, cluster: clusterlib.ClusterLib):
        """Check that it is possible to return the whole UTxO on testnets."""
        common.get_test_id(cluster)

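        # the whole UTxO on a testnet can be huge, so discard the output and only
        # check that the command succeeds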
        magic_args = " ".join(cluster.magic_args)
        helpers.run_in_bash(f"cardano-cli query utxo --whole-utxo {magic_args} > /dev/null")

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.testnets
    @pytest.mark.skipif(
        VERSIONS.transaction_era != VERSIONS.LAST_KNOWN_ERA,
        reason="works only with the latest TX era",
    )
    def test_pretty_utxo(
        self, cluster_manager: cluster_management.ClusterManager, cluster: clusterlib.ClusterLib
    ):
        """Check that pretty printed `query utxo` output looks as expected."""
        temp_template = common.get_test_id(cluster)
        amount1 = 2_000_000
        amount2 = 2_500_000

        # create source and destination payment addresses
        payment_addrs = clusterlib_utils.create_payment_addr_records(
            f"{temp_template}_src",
            f"{temp_template}_dst",
            cluster_obj=cluster,
        )

        # fund source addresses
        clusterlib_utils.fund_from_faucet(
            payment_addrs[0],
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
            amount=amount1 + amount2 + 10_000_000,
        )

        src_address = payment_addrs[0].address
        dst_address = payment_addrs[1].address

        txouts = [
            clusterlib.TxOut(address=dst_address, amount=amount1),
            clusterlib.TxOut(address=dst_address, amount=amount2),
        ]
        tx_files = clusterlib.TxFiles(signing_key_files=[payment_addrs[0].skey_file])
        tx_raw_output = cluster.send_tx(
            src_address=src_address,
            tx_name=temp_template,
            txouts=txouts,
            tx_files=tx_files,
            join_txouts=False,
        )

        utxo_out = (
            cluster.cli(
                [
                    "query",
                    "utxo",
                    "--address",
                    dst_address,
                    *cluster.magic_args,
                ]
            )
            .stdout.decode("utf-8")
            .split()
        )

        txid = cluster.get_txid(tx_body_file=tx_raw_output.out_file)
        expected_out = [
            "TxHash",
            "TxIx",
            "Amount",
            "--------------------------------------------------------------------------------"
            "------",
            txid,
            "0",
            str(amount1),
            "lovelace",
            "+",
            "TxOutDatumNone",
            txid,
            "1",
            str(amount2),
            "lovelace",
            "+",
            "TxOutDatumNone",
        ]

        assert utxo_out == expected_out

    @allure.link(helpers.get_vcs_link())
    @pytest.mark.skipif(
        VERSIONS.transaction_era != VERSIONS.DEFAULT_TX_ERA,
        reason="different TX eras don't affect this test",
    )
    @pytest.mark.testnets
    def test_tx_view(self, cluster: clusterlib.ClusterLib):
        """Check that the output of `transaction view` is as expected."""
        common.get_test_id(cluster)

        tx_body = cluster.view_tx(tx_body_file=self.TX_BODY_FILE)
        tx = cluster.view_tx(tx_file=self.TX_FILE)

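        # compare with the stored expected output only when the `view` output contains
        # the markers the stored files were created with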
        if "payment credential key hash" in tx_body:
            with open(self.TX_BODY_OUT, encoding="utf-8") as infile:
                tx_body_view_out = infile.read()
            assert tx_body == tx_body_view_out.strip()

        if "witnesses:" in tx:
            with open(self.TX_OUT, encoding="utf-8") as infile:
                tx_view_out = infile.read()
            assert tx == tx_view_out.strip()
        else:
            assert tx == tx_body