def cluster_and_pool(
    cluster_manager: cluster_management.ClusterManager,
) -> Tuple[clusterlib.ClusterLib, str]:
    """Return instance of `clusterlib.ClusterLib`, and pool id to delegate to.

    We need to mark the pool as "in use" when requesting local cluster
    instance, that's why cluster instance and pool id are tied together in
    single fixture.
    """
    cluster_type = cluster_nodes.get_cluster_type()
    if cluster_type.type == cluster_nodes.ClusterType.TESTNET_NOPOOLS:
        cluster_obj: clusterlib.ClusterLib = cluster_manager.get()

        # getting ledger state on the official testnet is too expensive,
        # use one of the hardcoded pool IDs if possible
        if cluster_type.testnet_type == cluster_nodes.Testnets.testnet:  # type: ignore
            stake_pools = cluster_obj.get_stake_pools()
            for pool_id in configuration.TESTNET_POOL_IDS:
                if pool_id in stake_pools:
                    return cluster_obj, pool_id

        blocks_before = clusterlib_utils.get_blocks_before(cluster_obj)
        # sort pools by how many blocks they produce
        pool_ids_s = sorted(blocks_before, key=blocks_before.get, reverse=True)  # type: ignore
        # select a pool with reasonable margin
        for pool_id in pool_ids_s:
            pool_params = cluster_obj.get_pool_params(pool_id)
            if pool_params.pool_params["margin"] <= 0.5 and not pool_params.retiring:
                break
        else:
            pytest.skip("Cannot find any usable pool.")
    elif cluster_type.type == cluster_nodes.ClusterType.TESTNET:
        # the "testnet" cluster has just single pool, "node-pool1"
        cluster_obj = cluster_manager.get(
            use_resources=[cluster_management.Resources.POOL1])
        pool_id = get_pool_id(
            cluster_obj=cluster_obj,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=cluster_management.Resources.POOL1,
        )
    else:
        cluster_obj = cluster_manager.get(
            use_resources=[cluster_management.Resources.POOL3])
        pool_id = get_pool_id(
            cluster_obj=cluster_obj,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=cluster_management.Resources.POOL3,
        )
    return cluster_obj, pool_id
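
The functions in this listing appear to be pytest fixtures whose decorators are not shown. Assuming `cluster_and_pool` is registered with `@pytest.fixture`, a test consuming it might look like the hypothetical sketch below; the `delegation.delegate_stake_addr` call and the amount are illustrative, mirroring Example #27.

def test_delegate_to_selected_pool(
    cluster_manager: cluster_management.ClusterManager,
    cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
):
    """Hypothetical test: delegate a stake address to the pool chosen by the fixture."""
    cluster, pool_id = cluster_and_pool

    delegation_out = delegation.delegate_stake_addr(
        cluster_obj=cluster,
        addrs_data=cluster_manager.cache.addrs_data,
        temp_template="test_delegate_to_selected_pool",
        pool_id=pool_id,
        amount=100_000_000,  # arbitrary illustrative amount
    )
    assert delegation_out.pool_user.stake.address, "Delegation did not return a stake address"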
Example #2
def issuers_addrs(
    cluster_manager: cluster_management.ClusterManager,
    cluster: clusterlib.ClusterLib,
) -> List[clusterlib.AddressRecord]:
    """Create new issuers addresses."""
    with cluster_manager.cache_fixture() as fixture_cache:
        if fixture_cache.value:
            return fixture_cache.value  # type: ignore

        addrs = clusterlib_utils.create_payment_addr_records(
            *[
                f"token_minting_ci{cluster_manager.cluster_instance}_{i}"
                for i in range(3)
            ],
            cluster_obj=cluster,
        )
        fixture_cache.value = addrs

    # fund source addresses
    clusterlib_utils.fund_from_faucet(
        addrs[0],
        cluster_obj=cluster,
        faucet_data=cluster_manager.cache.addrs_data["user1"],
        amount=900_000_000,
    )

    return addrs
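
The `with cluster_manager.cache_fixture() as fixture_cache:` pattern above recurs throughout this listing: a cached value is returned early when present, otherwise the fixture computes it and assigns it to `fixture_cache.value` before the `with` block exits. The real implementation lives in `cluster_management`; purely as an illustration of the contract, a minimal stand-in could look like this:

import contextlib
from typing import Any, Iterator, Optional


class FixtureCacheDemo:
    """Holder for a value cached across invocations of a fixture (illustrative only)."""

    def __init__(self, value: Optional[Any] = None):
        self.value = value


@contextlib.contextmanager
def cache_fixture_demo(store: dict, key: str) -> Iterator[FixtureCacheDemo]:
    """Yield a cache slot for `key`; persist whatever the caller assigns to `.value`."""
    cached = FixtureCacheDemo(value=store.get(key))
    yield cached
    # persist the (possibly updated) value once the `with` block finishes
    store[key] = cached.value

Returning from inside the `with` block still runs the code after `yield`, which is why the early return on a cache hit is safe.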
    def past_horizon_funds(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
        payment_addrs: List[clusterlib.AddressRecord],
    ) -> Tuple[List[clusterlib.UTXOData], List[clusterlib.UTXOData],
               clusterlib.TxRawOutput]:
        """Create UTxOs for `test_past_horizon`."""
        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore

            temp_template = common.get_test_id(cluster)
            payment_addr = payment_addrs[0]
            issuer_addr = payment_addrs[1]

            script_fund = 200_000_000

            minting_cost = plutus_common.compute_cost(
                execution_cost=plutus_common.MINTING_WITNESS_REDEEMER_COST,
                protocol_params=cluster.get_protocol_params(),
            )
            mint_utxos, collateral_utxos, tx_raw_output = _fund_issuer(
                cluster_obj=cluster,
                temp_template=temp_template,
                payment_addr=payment_addr,
                issuer_addr=issuer_addr,
                minting_cost=minting_cost,
                amount=script_fund,
            )

            retval = mint_utxos, collateral_utxos, tx_raw_output
            fixture_cache.value = retval

        return retval
Example #4
    def new_token(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
        payment_addrs: List[clusterlib.AddressRecord],
    ) -> clusterlib_utils.TokenRecord:
        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore

            rand = clusterlib.get_rand_str(4)
            temp_template = f"test_tx_new_token_{rand}"
            asset_name = f"couttscoin{rand}"

            new_tokens = clusterlib_utils.new_tokens(
                asset_name,
                cluster_obj=cluster,
                temp_template=temp_template,
                token_mint_addr=payment_addrs[0],
                issuer_addr=payment_addrs[1],
                amount=20_000_000,
            )
            new_token = new_tokens[0]
            fixture_cache.value = new_token

        return new_token
def pool_users(
    cluster_manager: cluster_management.ClusterManager,
    cluster: clusterlib.ClusterLib,
) -> List[clusterlib.PoolUser]:
    """Create pool users."""
    with cluster_manager.cache_fixture() as fixture_cache:
        if fixture_cache.value:
            return fixture_cache.value  # type: ignore

        created_users = clusterlib_utils.create_pool_users(
            cluster_obj=cluster,
            name_template=f"test_delegation_pool_user_ci{cluster_manager.cluster_instance_num}",
            no_of_addr=2,
        )
        fixture_cache.value = created_users

    # fund source addresses
    clusterlib_utils.fund_from_faucet(
        created_users[0],
        cluster_obj=cluster,
        faucet_data=cluster_manager.cache.addrs_data["user1"],
    )

    return created_users
    def cluster_update_proposal(
        self,
        cluster_manager: cluster_management.ClusterManager,
    ) -> clusterlib.ClusterLib:
        return cluster_manager.get(
            lock_resources=[cluster_management.Resources.CLUSTER],
            cleanup=True,
        )
    def registered_user(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
        pool_user: clusterlib.PoolUser,
    ) -> clusterlib.PoolUser:
        """Register pool user's stake address."""
        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore
            fixture_cache.value = pool_user

        temp_template = f"test_mir_certs_ci{cluster_manager.cluster_instance}"

        addr_reg_cert = cluster.gen_stake_addr_registration_cert(
            addr_name=temp_template,
            stake_vkey_file=pool_user.stake.vkey_file,
        )
        tx_files = clusterlib.TxFiles(
            certificate_files=[addr_reg_cert],
            signing_key_files=[
                pool_user.payment.skey_file, pool_user.stake.skey_file
            ],
        )
        cluster.send_tx(src_address=pool_user.payment.address,
                        tx_name=f"{temp_template}_reg",
                        tx_files=tx_files)
        assert cluster.get_stake_addr_info(
            pool_user.stake.address
        ), f"The address {pool_user.stake.address} was not registered"

        return pool_user
    def pool_user(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
    ) -> clusterlib.PoolUser:
        """Create pool user."""
        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore

            created_user = clusterlib_utils.create_pool_users(
                cluster_obj=cluster,
                name_template=f"test_mir_certs_ci{cluster_manager.cluster_instance}",
                no_of_addr=1,
            )[0]
            fixture_cache.value = created_user

        # fund source addresses
        clusterlib_utils.fund_from_faucet(
            created_user,
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
        )

        return created_user
    def payment_addr(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_update_proposal: clusterlib.ClusterLib,
    ) -> clusterlib.AddressRecord:
        """Create new payment address."""
        cluster = cluster_update_proposal

        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore

            addr = clusterlib_utils.create_payment_addr_records(
                f"addr_test_update_proposal_ci{cluster_manager.cluster_instance_num}_0",
                cluster_obj=cluster,
            )[0]
            fixture_cache.value = addr

        # fund source addresses
        clusterlib_utils.fund_from_faucet(
            addr,
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
        )

        return addr
Example #10
    def payment_addrs(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
    ) -> List[clusterlib.AddressRecord]:
        """Create 2 new payment addresses."""
        if cluster.use_cddl:
            pytest.skip("runs only when `cluster.use_cddl == False`")

        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore

            addrs = clusterlib_utils.create_payment_addr_records(
                f"addr_shelley_cddl_ci{cluster_manager.cluster_instance_num}_0",
                f"addr_shelley_cddl_ci{cluster_manager.cluster_instance_num}_1",
                cluster_obj=cluster,
            )
            fixture_cache.value = addrs

        # fund source addresses
        clusterlib_utils.fund_from_faucet(
            addrs[0],
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
        )
        return addrs
    def payment_addrs(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
    ) -> List[clusterlib.AddressRecord]:
        """Create 2 new payment addresses."""
        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore

            addrs = clusterlib_utils.create_payment_addr_records(
                f"addr_test_fee_ci{cluster_manager.cluster_instance}_0",
                f"addr_test_fee_ci{cluster_manager.cluster_instance}_1",
                cluster_obj=cluster,
            )
            fixture_cache.value = addrs

        # fund source addresses
        clusterlib_utils.fund_from_faucet(
            addrs[0],
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
        )

        return addrs
Example #12
    def cluster_pots(
        self,
        cluster_manager: cluster_management.ClusterManager,
    ) -> clusterlib.ClusterLib:
        return cluster_manager.get(
            lock_resources=[
                cluster_management.Resources.RESERVES,
                cluster_management.Resources.TREASURY,
            ]
        )
Example #13
def cluster_epoch_length(
        cluster_manager: cluster_management.ClusterManager,
        epoch_length_start_cluster: Path) -> clusterlib.ClusterLib:
    return cluster_manager.get(
        lock_resources=[cluster_management.Resources.CLUSTER],
        cleanup=True,
        start_cmd=str(epoch_length_start_cluster),
    )
Example #14
def cluster_kes(
    cluster_manager: cluster_management.ClusterManager, short_kes_start_cluster: Path
) -> clusterlib.ClusterLib:
    return cluster_manager.get(
        lock_resources=[cluster_management.Resources.CLUSTER],
        cleanup=True,
        start_cmd=str(short_kes_start_cluster),
    )
Example #15
def cluster_lock_pools(
    cluster_manager: cluster_management.ClusterManager
) -> clusterlib.ClusterLib:
    return cluster_manager.get(lock_resources=[
        cluster_management.Resources.POOL1,
        cluster_management.Resources.POOL2,
        cluster_management.Resources.POOL3,
    ])
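
The fixtures above differ mainly in whether they pass `lock_resources` or `use_resources` to `cluster_manager.get()`. Judging from the docstrings in this listing, locking appears to give the test exclusive access to a resource (a pool it will restart, retire or otherwise disturb), while `use_resources` only marks the resource as "in use" so a parallel test cannot lock it. A hypothetical fixture combining both, modeled on `cluster_lock_42stake` in Example #17:

def cluster_lock_pool2_use_pool1(
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    """Lock "pool2" for exclusive use and mark "pool1" as shared (hypothetical example)."""
    return cluster_manager.get(
        lock_resources=[cluster_management.Resources.POOL2],
        use_resources=[cluster_management.Resources.POOL1],
    )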
Example #16
    def registered_user(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_pots: clusterlib.ClusterLib,
        pool_users: List[clusterlib.PoolUser],
    ) -> clusterlib.PoolUser:
        """Register pool user's stake address."""
        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore
            fixture_cache.value = pool_users[1]

        temp_template = f"test_mir_certs_ci{cluster_manager.cluster_instance_num}"
        pool_user = pool_users[1]
        clusterlib_utils.register_stake_address(
            cluster_obj=cluster_pots, pool_user=pool_users[1], name_template=temp_template
        )
        return pool_user
Example #17
def cluster_lock_42stake(
    cluster_manager: cluster_management.ClusterManager,
) -> Tuple[clusterlib.ClusterLib, str]:
    """Make sure just one staking Plutus test run at a time.

    Plutus script always has the same address. When one script is used in multiple
    tests that are running in parallel, the balances etc. don't add up.
    """
    cluster_obj = cluster_manager.get(
        lock_resources=[str(plutus_common.STAKE_GUESS_42_PLUTUS_V1.stem)],
        use_resources=[cluster_management.Resources.POOL3],
    )
    pool_id = delegation.get_pool_id(
        cluster_obj=cluster_obj,
        addrs_data=cluster_manager.cache.addrs_data,
        pool_name=cluster_management.Resources.POOL3,
    )
    return cluster_obj, pool_id
Example #18
    def registered_users(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_pots: clusterlib.ClusterLib,
        pool_users: List[clusterlib.PoolUser],
    ) -> List[clusterlib.PoolUser]:
        """Register pool user's stake address."""
        registered = pool_users[1:3]

        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore
            fixture_cache.value = registered

        for i, pool_user in enumerate(registered):
            temp_template = f"test_mir_certs_{i}_ci{cluster_manager.cluster_instance_num}"
            clusterlib_utils.register_stake_address(
                cluster_obj=cluster_pots,
                pool_user=pool_user,
                name_template=temp_template)

        return registered
Example #19
def multisig_script_policyid(
    cluster_manager: cluster_management.ClusterManager,
    cluster: clusterlib.ClusterLib,
    issuers_addrs: List[clusterlib.AddressRecord],
) -> Tuple[Path, str]:
    """Return multisig script and it's PolicyId."""
    with cluster_manager.cache_fixture() as fixture_cache:
        if fixture_cache.value:
            return fixture_cache.value  # type: ignore

    temp_template = "test_native_tokens_multisig"
    payment_vkey_files = [p.vkey_file for p in issuers_addrs]

    # create multisig script
    multisig_script = cluster.build_multisig_script(
        script_name=temp_template,
        script_type_arg=clusterlib.MultiSigTypeArgs.ALL,
        payment_vkey_files=payment_vkey_files[1:],
    )
    policyid = cluster.get_policyid(multisig_script)

    return multisig_script, policyid
Example #20
def simple_script_policyid(
    cluster_manager: cluster_management.ClusterManager,
    cluster: clusterlib.ClusterLib,
    issuers_addrs: List[clusterlib.AddressRecord],
) -> Tuple[Path, str]:
    """Return script and it's PolicyId."""
    with cluster_manager.cache_fixture() as fixture_cache:
        if fixture_cache.value:
            return fixture_cache.value  # type: ignore

    temp_template = "test_native_tokens_simple"
    issuer_addr = issuers_addrs[1]

    # create simple script
    keyhash = cluster.get_payment_vkey_hash(issuer_addr.vkey_file)
    script_content = {"keyHash": keyhash, "type": "sig"}
    script = Path(f"{temp_template}.script")
    with open(f"{temp_template}.script", "w") as out_json:
        json.dump(script_content, out_json)

    policyid = cluster.get_policyid(script)

    return script, policyid
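
The fixture above hand-writes a single-signature (`"sig"`) native script. The multisig counterpart built by `cluster.build_multisig_script(..., script_type_arg=clusterlib.MultiSigTypeArgs.ALL, ...)` in Example #19 corresponds to an `"all"` script wrapping one `"sig"` entry per key hash. A hand-rolled sketch of that JSON shape (illustrative only; the fixtures rely on clusterlib and `cardano-cli` to produce and hash the real script, and the typing imports are assumed as in the snippets above):

def build_all_multisig_script_json(key_hashes: List[str]) -> dict:
    """Return an "all"-type native script requiring a signature for every key hash."""
    return {
        "type": "all",
        "scripts": [{"type": "sig", "keyHash": keyhash} for keyhash in key_hashes],
    }

# e.g. json.dump(build_all_multisig_script_json([keyhash]), out_json) would produce the
# single-signature script above wrapped in an "all" clause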
Example #21
    def test_update_valid_opcert(
        self,
        cluster_lock_pool2: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Update a valid operational certificate with another valid operational certificate.

        * generate new operational certificate with valid `--kes-period`
        * restart the node with the new operational certificate
        * check that the pool is still producing blocks
        """
        pool_name = "node-pool2"
        node_name = "pool2"
        cluster = cluster_lock_pool2

        temp_template = helpers.get_func_name()
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        node_cold = pool_rec["cold_key_pair"]
        stake_pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)
        stake_pool_id_dec = helpers.decode_bech32(stake_pool_id)

        opcert_file = pool_rec["pool_operational_cert"]

        with cluster_manager.restart_on_failure():
            # generate new operational certificate with valid `--kes-period`
            new_opcert_file = cluster.gen_node_operational_cert(
                node_name=node_name,
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=pool_rec["cold_key_pair"].counter_file,
                kes_period=cluster.get_kes_period(),
            )

            # restart the node with the new operational certificate
            logfiles.add_ignore_rule("*.stdout", "MuxBearerClosed")
            shutil.copy(new_opcert_file, opcert_file)
            cluster_nodes.restart_node(node_name)

            LOGGER.info("Checking blocks production for 5 epochs.")
            blocks_made_db = []
            this_epoch = -1
            updated_epoch = cluster.get_epoch()
            for __ in range(5):
                # wait for next epoch
                if cluster.get_epoch() == this_epoch:
                    cluster.wait_for_new_epoch()

                # wait for the end of the epoch
                clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                         start=-19,
                                                         stop=-9)
                this_epoch = cluster.get_epoch()

                ledger_state = clusterlib_utils.get_ledger_state(
                    cluster_obj=cluster)

                # save ledger state
                clusterlib_utils.save_ledger_state(
                    cluster_obj=cluster,
                    state_name=f"{temp_template}_{this_epoch}",
                    ledger_state=ledger_state,
                )

                # check that the pool is still producing blocks
                blocks_made = ledger_state["blocksCurrent"]
                blocks_made_db.append(stake_pool_id_dec in blocks_made)

            assert any(blocks_made_db), (
                f"The pool '{pool_name}' has not produced any blocks "
                f"since epoch {updated_epoch}")
Example #22
    def test_opcert_past_kes_period(
        self,
        cluster_lock_pool2: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Start a stake pool with an operational certificate created with expired `--kes-period`.

        * generate new operational certificate with `--kes-period` in the past
        * restart the node with the new operational certificate
        * check that the pool is not producing any blocks
        * generate new operational certificate with valid `--kes-period` and restart the node
        * check that the pool is producing blocks again
        """
        pool_name = "node-pool2"
        node_name = "pool2"
        cluster = cluster_lock_pool2

        temp_template = helpers.get_func_name()
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        node_cold = pool_rec["cold_key_pair"]
        stake_pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)
        stake_pool_id_dec = helpers.decode_bech32(stake_pool_id)

        opcert_file: Path = pool_rec["pool_operational_cert"]

        def _wait_epoch_chores(this_epoch: int):
            # wait for next epoch
            if cluster.get_epoch() == this_epoch:
                cluster.wait_for_new_epoch()

            # wait for the end of the epoch
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                     start=-19,
                                                     stop=-9)

            # save ledger state
            clusterlib_utils.save_ledger_state(
                cluster_obj=cluster,
                state_name=f"{temp_template}_{cluster.get_epoch()}",
            )

        with cluster_manager.restart_on_failure():
            # generate new operational certificate with `--kes-period` in the past
            invalid_opcert_file = cluster.gen_node_operational_cert(
                node_name=node_name,
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=pool_rec["cold_key_pair"].counter_file,
                kes_period=cluster.get_kes_period() - 5,
            )

            expected_errors = [
                (f"{node_name}.stdout", "TPraosCannotForgeKeyNotUsableYet"),
            ]
            with logfiles.expect_errors(expected_errors):
                # restart the node with the new operational certificate
                logfiles.add_ignore_rule("*.stdout", "MuxBearerClosed")
                shutil.copy(invalid_opcert_file, opcert_file)
                cluster_nodes.restart_node(node_name)
                cluster.wait_for_new_epoch()

                LOGGER.info("Checking blocks production for 5 epochs.")
                this_epoch = -1
                for __ in range(5):
                    _wait_epoch_chores(this_epoch)
                    this_epoch = cluster.get_epoch()

                    # check that the pool is not producing any blocks
                    blocks_made = clusterlib_utils.get_ledger_state(
                        cluster_obj=cluster)["blocksCurrent"]
                    if blocks_made:
                        assert (
                            stake_pool_id_dec not in blocks_made
                        ), f"The pool '{pool_name}' has produced blocks in epoch {this_epoch}"

            # generate new operational certificate with valid `--kes-period`
            os.remove(opcert_file)
            valid_opcert_file = cluster.gen_node_operational_cert(
                node_name=node_name,
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=pool_rec["cold_key_pair"].counter_file,
                kes_period=cluster.get_kes_period(),
            )
            # copy the new certificate and restart the node
            shutil.move(str(valid_opcert_file), str(opcert_file))
            cluster_nodes.restart_node(node_name)
            cluster.wait_for_new_epoch()

            LOGGER.info("Checking blocks production for another 5 epochs.")
            blocks_made_db = []
            this_epoch = cluster.get_epoch()
            active_again_epoch = this_epoch
            for __ in range(5):
                _wait_epoch_chores(this_epoch)
                this_epoch = cluster.get_epoch()

                # check that the pool is producing blocks
                blocks_made = clusterlib_utils.get_ledger_state(
                    cluster_obj=cluster)["blocksCurrent"]
                blocks_made_db.append(stake_pool_id_dec in blocks_made)

            assert any(blocks_made_db), (
                f"The pool '{pool_name}' has not produced any blocks "
                f"since epoch {active_again_epoch}")
Example #23
def cluster_epoch_length(
        cluster_manager: cluster_management.ClusterManager,
        epoch_length_start_cluster: Path) -> clusterlib.ClusterLib:
    return cluster_manager.get(singleton=True,
                               cleanup=True,
                               start_cmd=str(epoch_length_start_cluster))
def cluster_use_pool1(
    cluster_manager: cluster_management.ClusterManager
) -> clusterlib.ClusterLib:
    return cluster_manager.get(
        use_resources=[cluster_management.Resources.POOL1])
Example #25
    def test_update_valid_opcert(
        self,
        cluster_lock_pool2: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Update a valid operational certificate with another valid operational certificate.

        * generate new operational certificate with valid `--kes-period`
        * copy new operational certificate to the node
        * stop the node so the corresponding pool is not minting new blocks
        * check `kes-period-info` while the pool is not minting blocks
        * start the node with the new operational certificate
        * check that the pool is minting blocks again
        * check that metrics reported by `kes-period-info` got updated once the pool started
          minting blocks again
        * check `kes-period-info` with the old (replaced) operational certificate
        """
        # pylint: disable=too-many-statements
        pool_name = cluster_management.Resources.POOL2
        node_name = "pool2"
        cluster = cluster_lock_pool2

        temp_template = common.get_test_id(cluster)
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        node_cold = pool_rec["cold_key_pair"]
        stake_pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)
        stake_pool_id_dec = helpers.decode_bech32(stake_pool_id)

        opcert_file = pool_rec["pool_operational_cert"]
        opcert_file_old = shutil.copy(opcert_file, f"{opcert_file}_old")

        with cluster_manager.restart_on_failure():
            # generate new operational certificate with valid `--kes-period`
            new_opcert_file = cluster.gen_node_operational_cert(
                node_name=f"{node_name}_new_opcert_file",
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=pool_rec["cold_key_pair"].counter_file,
                kes_period=cluster.get_kes_period(),
            )

            # copy new operational certificate to the node
            logfiles.add_ignore_rule(
                files_glob="*.stdout",
                regex="MuxBearerClosed",
                ignore_file_id=cluster_manager.worker_id,
            )
            shutil.copy(new_opcert_file, opcert_file)

            # stop the node so the corresponding pool is not minting new blocks
            cluster_nodes.stop_nodes([node_name])

            time.sleep(10)

            # check kes-period-info while the pool is not minting blocks
            # TODO: the query is currently broken
            kes_query_currently_broken = False
            try:
                kes_period_info_new = cluster.get_kes_period_info(opcert_file)
            except clusterlib.CLIError as err:
                if "currentlyBroken" not in str(err):
                    raise
                kes_query_currently_broken = True

            if not kes_query_currently_broken:
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info_new, expected_scenario=kes.KesScenarios.ALL_VALID
                )
                kes_period_info_old = cluster.get_kes_period_info(opcert_file_old)
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info_old, expected_scenario=kes.KesScenarios.ALL_VALID
                )
                assert (
                    kes_period_info_new["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                    == kes_period_info_old["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                )

            # start the node with the new operational certificate
            cluster_nodes.start_nodes([node_name])

            # make sure we are not at the very end of an epoch so we still have time for
            # the first block production check
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-18)

            LOGGER.info("Checking blocks production for 5 epochs.")
            blocks_made_db = []
            this_epoch = -1
            updated_epoch = cluster.get_epoch()
            for __ in range(5):
                # wait for next epoch
                if cluster.get_epoch() == this_epoch:
                    cluster.wait_for_new_epoch()

                # wait for the end of the epoch
                clusterlib_utils.wait_for_epoch_interval(
                    cluster_obj=cluster, start=-19, stop=-15, force_epoch=True
                )
                this_epoch = cluster.get_epoch()

                ledger_state = clusterlib_utils.get_ledger_state(cluster_obj=cluster)

                # save ledger state
                clusterlib_utils.save_ledger_state(
                    cluster_obj=cluster,
                    state_name=f"{temp_template}_{this_epoch}",
                    ledger_state=ledger_state,
                )

                # check that the pool is minting blocks
                blocks_made = ledger_state["blocksCurrent"]
                blocks_made_db.append(stake_pool_id_dec in blocks_made)

            assert any(
                blocks_made_db
            ), f"The pool '{pool_name}' has not minted any blocks since epoch {updated_epoch}"

        if kes_query_currently_broken:
            pytest.xfail("`query kes-period-info` is currently broken")
        else:
            # check that metrics reported by kes-period-info got updated once the pool started
            # minting blocks again
            kes_period_info_updated = cluster.get_kes_period_info(opcert_file)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info_updated, expected_scenario=kes.KesScenarios.ALL_VALID
            )
            assert (
                kes_period_info_updated["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                != kes_period_info_old["metrics"]["qKesNodeStateOperationalCertificateNumber"]
            )

            # check kes-period-info with operational certificate with a wrong counter
            kes_period_info_invalid = cluster.get_kes_period_info(opcert_file_old)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info_invalid,
                expected_scenario=kes.KesScenarios.INVALID_COUNTERS,
            )
Example #26
    def test_opcert_future_kes_period(  # noqa: C901
        self,
        cluster_lock_pool2: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Start a stake pool with an operational certificate created with invalid `--kes-period`.

        * generate new operational certificate with `--kes-period` in the future
        * restart the node with the new operational certificate
        * check that the pool is not producing any blocks
        * if network era > Alonzo

            - generate new operational certificate with valid `--kes-period`, but counter value +2
              from the last used operational certificate
            - restart the node
            - check that the pool is not producing any blocks

        * generate new operational certificate with valid `--kes-period` and restart the node
        * check that the pool is producing blocks again
        """
        # pylint: disable=too-many-statements,too-many-branches
        pool_name = cluster_management.Resources.POOL2
        node_name = "pool2"
        cluster = cluster_lock_pool2

        temp_template = common.get_test_id(cluster)
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        node_cold = pool_rec["cold_key_pair"]
        stake_pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)
        stake_pool_id_dec = helpers.decode_bech32(stake_pool_id)

        opcert_file: Path = pool_rec["pool_operational_cert"]
        cold_counter_file: Path = pool_rec["cold_key_pair"].counter_file

        expected_errors = [
            (f"{node_name}.stdout", "PraosCannotForgeKeyNotUsableYet"),
        ]

        if VERSIONS.cluster_era > VERSIONS.ALONZO:
            expected_errors.append((f"{node_name}.stdout", "CounterOverIncrementedOCERT"))
            # In Babbage we get a `CounterOverIncrementedOCERT` error if the counter for the new
            # opcert is not exactly +1 from the last used opcert. We'll back up the original
            # counter file so we can use it for issuing the next valid opcert.
            cold_counter_file_orig = Path(
                f"{cold_counter_file.stem}_orig{cold_counter_file.suffix}"
            ).resolve()
            shutil.copy(cold_counter_file, cold_counter_file_orig)

        logfiles.add_ignore_rule(
            files_glob="*.stdout",
            regex="MuxBearerClosed|CounterOverIncrementedOCERT",
            ignore_file_id=cluster_manager.worker_id,
        )

        # generate new operational certificate with `--kes-period` in the future
        invalid_opcert_file = cluster.gen_node_operational_cert(
            node_name=f"{node_name}_invalid_opcert_file",
            kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
            cold_skey_file=pool_rec["cold_key_pair"].skey_file,
            cold_counter_file=cold_counter_file,
            kes_period=cluster.get_kes_period() + 100,
        )

        kes_query_currently_broken = False

        with cluster_manager.restart_on_failure():
            with logfiles.expect_errors(expected_errors, ignore_file_id=cluster_manager.worker_id):
                # restart the node with the new operational certificate
                shutil.copy(invalid_opcert_file, opcert_file)
                cluster_nodes.restart_nodes([node_name])
                cluster.wait_for_new_epoch()

                LOGGER.info("Checking blocks production for 4 epochs.")
                this_epoch = -1
                for invalid_opcert_epoch in range(4):
                    _wait_epoch_chores(
                        cluster_obj=cluster, temp_template=temp_template, this_epoch=this_epoch
                    )
                    this_epoch = cluster.get_epoch()

                    # check that the pool is not producing any blocks
                    blocks_made = clusterlib_utils.get_ledger_state(cluster_obj=cluster)[
                        "blocksCurrent"
                    ]
                    if blocks_made:
                        assert (
                            stake_pool_id_dec not in blocks_made
                        ), f"The pool '{pool_name}' has produced blocks in epoch {this_epoch}"

                    if invalid_opcert_epoch == 1:
                        # check kes-period-info with operational certificate with
                        # invalid `--kes-period`
                        # TODO: the query is currently broken
                        try:
                            kes_period_info = cluster.get_kes_period_info(invalid_opcert_file)
                        except clusterlib.CLIError as err:
                            if "currentlyBroken" not in str(err):
                                raise
                            kes_query_currently_broken = True

                        if not kes_query_currently_broken:
                            kes.check_kes_period_info_result(
                                kes_output=kes_period_info,
                                expected_scenario=kes.KesScenarios.INVALID_KES_PERIOD,
                            )

                    # test the `CounterOverIncrementedOCERT` error - the counter will now be +2 from
                    # last used opcert counter value
                    if invalid_opcert_epoch == 2 and VERSIONS.cluster_era > VERSIONS.ALONZO:
                        overincrement_opcert_file = cluster.gen_node_operational_cert(
                            node_name=f"{node_name}_overincrement_opcert_file",
                            kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                            cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                            cold_counter_file=cold_counter_file,
                            kes_period=cluster.get_kes_period(),
                        )
                        # copy the new certificate and restart the node
                        shutil.copy(overincrement_opcert_file, opcert_file)
                        cluster_nodes.restart_nodes([node_name])

                    if invalid_opcert_epoch == 3:
                        # check kes-period-info with operational certificate with
                        # invalid counter
                        # TODO: the query is currently broken, implement once it is fixed
                        pass

            # in Babbage we'll use the original counter for issuing the new valid opcert, so the
            # counter value of the new valid opcert equals the counter value of the original
            # opcert + 1
            if VERSIONS.cluster_era > VERSIONS.ALONZO:
                shutil.copy(cold_counter_file_orig, cold_counter_file)

            # generate new operational certificate with valid `--kes-period`
            valid_opcert_file = cluster.gen_node_operational_cert(
                node_name=f"{node_name}_valid_opcert_file",
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=cold_counter_file,
                kes_period=cluster.get_kes_period(),
            )
            # copy the new certificate and restart the node
            shutil.copy(valid_opcert_file, opcert_file)
            cluster_nodes.restart_nodes([node_name])
            this_epoch = cluster.wait_for_new_epoch()

            LOGGER.info("Checking blocks production for another 2 epochs.")
            blocks_made_db = []
            active_again_epoch = this_epoch
            for __ in range(2):
                _wait_epoch_chores(
                    cluster_obj=cluster, temp_template=temp_template, this_epoch=this_epoch
                )
                this_epoch = cluster.get_epoch()

                # check that the pool is producing blocks
                blocks_made = clusterlib_utils.get_ledger_state(cluster_obj=cluster)[
                    "blocksCurrent"
                ]
                blocks_made_db.append(stake_pool_id_dec in blocks_made)

            assert any(blocks_made_db), (
                f"The pool '{pool_name}' has not produced any blocks "
                f"since epoch {active_again_epoch}"
            )

        if kes_query_currently_broken:
            pytest.xfail("`query kes-period-info` is currently broken")
        else:
            # check kes-period-info with valid operational certificate
            kes_period_info = cluster.get_kes_period_info(valid_opcert_file)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info, expected_scenario=kes.KesScenarios.ALL_VALID
            )

            # check kes-period-info with invalid operational certificate, wrong counter and period
            kes_period_info = cluster.get_kes_period_info(invalid_opcert_file)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info,
                expected_scenario=kes.KesScenarios.INVALID_KES_PERIOD
                if VERSIONS.cluster_era > VERSIONS.ALONZO
                else kes.KesScenarios.ALL_INVALID,
            )
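
The test above calls a module-level `_wait_epoch_chores(cluster_obj=..., temp_template=..., this_epoch=...)` helper that is not included in this listing. Judging by the nested helper of the same name in Example #22, it presumably looks roughly like this (a sketch, not the original definition):

def _wait_epoch_chores(
    cluster_obj: clusterlib.ClusterLib, temp_template: str, this_epoch: int
) -> None:
    # wait for next epoch
    if cluster_obj.get_epoch() == this_epoch:
        cluster_obj.wait_for_new_epoch()

    # wait for the end of the epoch
    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster_obj, start=-19, stop=-9)

    # save ledger state
    clusterlib_utils.save_ledger_state(
        cluster_obj=cluster_obj,
        state_name=f"{temp_template}_{cluster_obj.get_epoch()}",
    )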
Example #27
    def test_oversaturated(  # noqa: C901
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_lock_pools: clusterlib.ClusterLib,
    ):
        """Check diminished rewards when stake pool is oversaturated.

        The stake pool continues to operate normally and those who delegate to that pool receive
        rewards, but the rewards are proportionally lower than those received from a stake pool
        that is not oversaturated.

        * register and delegate stake address in "init epoch", for all available pools
        * in "init epoch" + 2, saturate all available pools (block distribution remains balanced
          among pools)
        * in "init epoch" + 3, oversaturate one pool
        * in "init epoch" + 5, for all available pools, withdraw rewards and transfer funds
          from delegated addresses so pools are no longer (over)saturated
        * while doing the steps above, collect rewards data for 9 epochs
        * compare proportionality of rewards in epochs where pools were non-saturated,
          saturated and oversaturated
        """
        # pylint: disable=too-many-statements,too-many-locals,too-many-branches
        epoch_saturate = 2
        epoch_oversaturate = 4
        epoch_withdrawal = 6

        cluster = cluster_lock_pools
        temp_template = common.get_test_id(cluster)
        initial_balance = 1_000_000_000

        faucet_rec = cluster_manager.cache.addrs_data["byron000"]
        pool_records: Dict[int, PoolRecord] = {}

        # make sure we have enough time to finish the delegation in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)
        init_epoch = cluster.get_epoch()

        # submit registration certificates and delegate to pools
        for idx, res in enumerate(
            [
                cluster_management.Resources.POOL1,
                cluster_management.Resources.POOL2,
                cluster_management.Resources.POOL3,
            ],
                start=1,
        ):
            pool_addrs_data = cluster_manager.cache.addrs_data[res]
            reward_addr = clusterlib.PoolUser(
                payment=pool_addrs_data["payment"],
                stake=pool_addrs_data["reward"])
            pool_id = delegation.get_pool_id(
                cluster_obj=cluster,
                addrs_data=cluster_manager.cache.addrs_data,
                pool_name=res,
            )
            pool_id_dec = helpers.decode_bech32(bech32=pool_id)

            delegation_out = delegation.delegate_stake_addr(
                cluster_obj=cluster,
                addrs_data=cluster_manager.cache.addrs_data,
                temp_template=f"{temp_template}_pool{idx}",
                pool_id=pool_id,
                amount=initial_balance,
            )

            pool_records[idx] = PoolRecord(
                name=res,
                id=pool_id,
                id_dec=pool_id_dec,
                reward_addr=reward_addr,
                delegation_out=delegation_out,
                user_rewards=[],
                owner_rewards=[],
                blocks_minted={},
                saturation_amounts={},
            )

        # record initial reward balance for each pool
        for pool_rec in pool_records.values():
            user_payment_balance = cluster.get_address_balance(
                pool_rec.delegation_out.pool_user.payment.address)
            owner_payment_balance = cluster.get_address_balance(
                pool_rec.reward_addr.payment.address)
            pool_rec.user_rewards.append(
                RewardRecord(
                    epoch_no=init_epoch,
                    reward_total=0,
                    reward_per_epoch=0,
                    stake_total=user_payment_balance,
                ))
            pool_rec.owner_rewards.append(
                RewardRecord(
                    epoch_no=init_epoch,
                    reward_total=cluster.get_stake_addr_info(
                        pool_rec.reward_addr.stake.address).
                    reward_account_balance,
                    reward_per_epoch=0,
                    stake_total=owner_payment_balance,
                ))

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        LOGGER.info("Checking rewards for 10 epochs.")
        for __ in range(10):
            # wait for new epoch
            if cluster.get_epoch() == pool_records[2].owner_rewards[-1].epoch_no:
                cluster.wait_for_new_epoch()

            # sleep till the end of epoch
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                     start=-50,
                                                     stop=-40,
                                                     force_epoch=True)
            this_epoch = cluster.get_epoch()

            ledger_state = clusterlib_utils.get_ledger_state(
                cluster_obj=cluster)
            clusterlib_utils.save_ledger_state(
                cluster_obj=cluster,
                state_name=f"{temp_template}_{this_epoch}",
                ledger_state=ledger_state,
            )

            for pool_rec in pool_records.values():
                # reward balance in previous epoch
                prev_user_reward = pool_rec.user_rewards[-1].reward_total
                prev_owner_reward = pool_rec.owner_rewards[-1].reward_total

                pool_rec.blocks_minted[this_epoch - 1] = (
                    ledger_state["blocksBefore"].get(pool_rec.id_dec) or 0
                )

                # current reward balance
                user_reward = cluster.get_stake_addr_info(
                    pool_rec.delegation_out.pool_user.stake.address
                ).reward_account_balance
                owner_reward = cluster.get_stake_addr_info(
                    pool_rec.reward_addr.stake.address).reward_account_balance

                # total reward amounts received this epoch
                owner_reward_epoch = owner_reward - prev_owner_reward
                # We cannot compare with previous rewards in epochs where
                # `this_epoch >= init_epoch + epoch_withdrawal`.
                # There's a withdrawal of rewards at the end of these epochs.
                if this_epoch > init_epoch + epoch_withdrawal:
                    user_reward_epoch = user_reward
                else:
                    user_reward_epoch = user_reward - prev_user_reward

                # store collected rewards info
                user_payment_balance = cluster.get_address_balance(
                    pool_rec.delegation_out.pool_user.payment.address)
                owner_payment_balance = cluster.get_address_balance(
                    pool_rec.reward_addr.payment.address)
                pool_rec.user_rewards.append(
                    RewardRecord(
                        epoch_no=this_epoch,
                        reward_total=user_reward,
                        reward_per_epoch=user_reward_epoch,
                        stake_total=user_payment_balance + user_reward,
                    ))
                pool_rec.owner_rewards.append(
                    RewardRecord(
                        epoch_no=this_epoch,
                        reward_total=owner_reward,
                        reward_per_epoch=owner_reward_epoch,
                        stake_total=owner_payment_balance,
                    ))

                pool_rec.saturation_amounts[this_epoch] = _get_saturation_threshold(
                    cluster_obj=cluster,
                    ledger_state=ledger_state,
                    pool_id=pool_rec.id,
                )

            # fund the delegated addresses - saturate all pools
            if this_epoch == init_epoch + epoch_saturate:
                clusterlib_utils.fund_from_faucet(
                    *[
                        p.delegation_out.pool_user.payment
                        for p in pool_records.values()
                    ],
                    cluster_obj=cluster,
                    faucet_data=faucet_rec,
                    amount=[
                        p.saturation_amounts[this_epoch] - 100_000_000_000
                        for p in pool_records.values()
                    ],
                    tx_name=f"{temp_template}_saturate_pools_ep{this_epoch}",
                    force=True,
                )

            with cluster_manager.restart_on_failure():
                # Fund the address delegated to "pool2" to oversaturate the pool.
                # New stake amount will be current (saturated) stake * 2.
                if this_epoch == init_epoch + epoch_oversaturate:
                    assert (
                        pool_records[2].saturation_amounts[this_epoch] > 0
                    ), "Pool is already saturated"
                    current_stake = int(
                        cluster.get_stake_snapshot(pool_records[2].id)["poolStakeMark"]
                    )
                    oversaturate_amount = current_stake * 2
                    saturation_threshold = pool_records[2].saturation_amounts[this_epoch]
                    assert oversaturate_amount > saturation_threshold, (
                        f"{oversaturate_amount} Lovelace is not enough to oversaturate the pool "
                        f"({saturation_threshold} is needed)")
                    clusterlib_utils.fund_from_faucet(
                        pool_records[2].delegation_out.pool_user.payment,
                        cluster_obj=cluster,
                        faucet_data=faucet_rec,
                        amount=oversaturate_amount,
                        tx_name=f"{temp_template}_oversaturate_pool2",
                        force=True,
                    )

                # wait 4 epochs for first rewards
                if this_epoch >= init_epoch + 4:
                    assert (owner_reward > prev_owner_reward
                            ), "New reward was not received by pool owner"

                # transfer funds back to faucet so the pools are no longer (over)saturated
                # and staked amount is +- same as the `initial_balance`
                if this_epoch >= init_epoch + epoch_withdrawal:
                    _withdraw_rewards(
                        *[
                            p.delegation_out.pool_user
                            for p in pool_records.values()
                        ],
                        cluster_obj=cluster,
                        tx_name=f"{temp_template}_ep{this_epoch}",
                    )

                    return_to_addrs = []
                    return_amounts = []
                    for idx, pool_rec in pool_records.items():
                        deleg_payment_balance = cluster.get_address_balance(
                            pool_rec.delegation_out.pool_user.payment.address)
                        if deleg_payment_balance > initial_balance + 10_000_000:
                            return_to_addrs.append(
                                pool_rec.delegation_out.pool_user.payment)
                            return_amounts.append(deleg_payment_balance -
                                                  initial_balance)

                    clusterlib_utils.return_funds_to_faucet(
                        *return_to_addrs,
                        cluster_obj=cluster,
                        faucet_addr=faucet_rec["payment"].address,
                        amount=return_amounts,
                        tx_name=f"{temp_template}_ep{this_epoch}",
                    )

                    for return_addr in return_to_addrs:
                        deleg_payment_balance = cluster.get_address_balance(
                            return_addr.address)
                        assert (
                            deleg_payment_balance <= initial_balance
                        ), f"Unexpected funds in payment address '{return_addr}'"

                assert (
                    cluster.get_epoch() == this_epoch
                ), "Failed to finish actions in single epoch, it would affect other checks"

        pool1_user_rewards_per_block = _get_reward_per_block(pool_records[1])
        pool2_user_rewards_per_block = _get_reward_per_block(pool_records[2])
        pool3_user_rewards_per_block = _get_reward_per_block(pool_records[3])

        pool1_owner_rewards_per_block = _get_reward_per_block(
            pool_records[1], owner_rewards=True)
        pool2_owner_rewards_per_block = _get_reward_per_block(
            pool_records[2], owner_rewards=True)
        pool3_owner_rewards_per_block = _get_reward_per_block(
            pool_records[3], owner_rewards=True)

        oversaturated_epoch = max(
            e for e, r in pool_records[2].saturation_amounts.items() if r < 0)
        saturated_epoch = oversaturated_epoch - 2
        nonsaturated_epoch = oversaturated_epoch - 4

        try:
            # check that rewards per block per stake for "pool2" in the epoch where the pool is
            # oversaturated is lower than in epochs where pools are not oversaturated
            assert (pool1_user_rewards_per_block[nonsaturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])
            assert (pool2_user_rewards_per_block[nonsaturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])
            assert (pool3_user_rewards_per_block[nonsaturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])

            assert (pool1_user_rewards_per_block[saturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])
            assert (pool2_user_rewards_per_block[saturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])
            assert (pool3_user_rewards_per_block[saturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])

            # check that oversaturated pool doesn't lead to increased rewards for pool owner
            # when compared to saturated pool, i.e. total pool margin amount is not increased
            pool1_rew_fraction_sat = pool1_owner_rewards_per_block[
                saturated_epoch]
            pool2_rew_fraction_sat = pool2_owner_rewards_per_block[
                saturated_epoch]
            pool3_rew_fraction_sat = pool3_owner_rewards_per_block[
                saturated_epoch]

            pool2_rew_fraction_over = pool2_owner_rewards_per_block[
                oversaturated_epoch]

            assert pool2_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
                pool2_rew_fraction_sat,
                pool2_rew_fraction_over,
                frac=0.4,
            )
            assert pool1_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
                pool1_rew_fraction_sat,
                pool2_rew_fraction_over,
                frac=0.4,
            )
            assert pool3_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
                pool3_rew_fraction_sat,
                pool2_rew_fraction_over,
                frac=0.4,
            )

            # Compare rewards in last (non-saturated) epoch to rewards in next-to-last
            # (saturated / over-saturated) epoch.
            # This way check that staked amount for each pool was restored to `initial_balance`
            # and that rewards correspond to the restored amounts.
            for pool_rec in pool_records.values():
                assert (pool_rec.user_rewards[-1].reward_per_epoch * 100 <
                        pool_rec.user_rewards[-2].reward_per_epoch)
        except Exception:
            # save debugging data in case of test failure
            with open(f"{temp_template}_pool_records.pickle",
                      "wb") as out_data:
                pickle.dump(pool_records, out_data)
            raise
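The comparisons in the example above rely on a `_get_reward_per_block` helper that is not shown in this excerpt. The sketch below only illustrates the kind of normalisation ("reward per block per staked amount") the assertions assume; the record type, field names and function name are hypothetical, not the original implementation.

# A minimal sketch, assuming a record layout with per-epoch rewards, blocks minted and
# stake amounts. The names below (`EpochReward`, `PoolRecordSketch`,
# `get_reward_per_block_sketch`) are illustrative only.
from typing import Dict, List, NamedTuple


class EpochReward(NamedTuple):
    epoch_no: int
    reward_per_epoch: int  # lovelace received for this epoch


class PoolRecordSketch(NamedTuple):
    user_rewards: List[EpochReward]
    owner_rewards: List[EpochReward]
    blocks_minted: Dict[int, int]   # epoch -> blocks minted by the pool
    stake_amounts: Dict[int, int]   # epoch -> active stake backing the rewards


def get_reward_per_block_sketch(
    pool_record: PoolRecordSketch, owner_rewards: bool = False
) -> Dict[int, float]:
    """Return reward per block per staked lovelace for each epoch (illustrative only)."""
    rewards = pool_record.owner_rewards if owner_rewards else pool_record.user_rewards
    per_block: Dict[int, float] = {}
    for rec in rewards:
        blocks = pool_record.blocks_minted.get(rec.epoch_no, 0) or 1
        stake = pool_record.stake_amounts.get(rec.epoch_no, 0) or 1
        per_block[rec.epoch_no] = rec.reward_per_epoch / blocks / stake
    return per_block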
Example #28
def cluster_lock_pool2(
    cluster_manager: cluster_management.ClusterManager
) -> clusterlib.ClusterLib:
    return cluster_manager.get(lock_resources=["node-pool2"])
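Example #28 locks the "node-pool2" resource, giving the requesting test exclusive access to that pool. For contrast, the following is a hypothetical counterpart (not part of the original test suite) that would only mark the resource as "in use", assuming the manager also accepts a `use_resources` argument, so other tests could share the pool concurrently:

# Hypothetical fixture sketch; the name and the sharing semantics described in the
# comments are assumptions for illustration.
def cluster_use_pool2(
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    # "in use" allows concurrent use by other tests; `lock_resources` (Example #28)
    # would instead block them until this test finishes
    return cluster_manager.get(use_resources=["node-pool2"])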
Example #29
def cluster_kes(cluster_manager: cluster_management.ClusterManager,
                short_kes_start_cluster: Path) -> clusterlib.ClusterLib:
    return cluster_manager.get(singleton=True,
                               cleanup=True,
                               start_cmd=str(short_kes_start_cluster))
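A hypothetical consumer of the `cluster_kes` fixture might look like the sketch below; the test name and body are placeholders, not taken from the original test suite. It only demonstrates that the fixture hands back a singleton cluster instance started via `short_kes_start_cluster`:

# Hypothetical usage sketch of the `cluster_kes` fixture.
def test_kes_cluster_sketch(cluster_kes: clusterlib.ClusterLib):
    cluster = cluster_kes  # singleton instance, started with the custom KES start command
    start_epoch = cluster.get_epoch()
    cluster.wait_for_new_epoch(padding_seconds=10)
    assert cluster.get_epoch() >= start_epoch + 1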
Example #30
    def test_deregister_reward_addr_retire_pool(  # noqa: C901
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_lock_pool2: clusterlib.ClusterLib,
    ):
        """Test deregistering reward address and retiring stake pool.

        The pool deposit is lost when the reward address is deregistered before the pool is retired.

        * wait for first reward for the pool
        * withdraw pool rewards to payment address
        * deregister the pool reward address
        * check that the key deposit was returned
        * check that pool owner is NOT receiving rewards
        * deregister stake pool
        * check that the pool deposit was NOT returned to reward or stake address
        * return the pool to the original state - reregister the pool, register
          the reward address, delegate the stake address to the pool
        * check that the pool deposit had to be paid again
        * check that pool owner is receiving rewards
        """
        # pylint: disable=too-many-statements,too-many-locals
        __: Any  # mypy workaround
        pool_name = cluster_management.Resources.POOL2
        cluster = cluster_lock_pool2

        pool_rec = cluster_manager.cache.addrs_data[pool_name]
        pool_reward = clusterlib.PoolUser(payment=pool_rec["payment"],
                                          stake=pool_rec["reward"])
        pool_owner = clusterlib.PoolUser(payment=pool_rec["payment"],
                                         stake=pool_rec["stake"])
        pool_opcert_file: Path = pool_rec["pool_operational_cert"]
        temp_template = common.get_test_id(cluster)

        LOGGER.info("Waiting up to 4 full epochs for first reward.")
        for i in range(5):
            if i > 0:
                cluster.wait_for_new_epoch(padding_seconds=10)
            if cluster.get_stake_addr_info(
                    pool_reward.stake.address).reward_account_balance:
                break
        else:
            pytest.skip(
                f"Pool '{pool_name}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish reward address deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # withdraw pool rewards to payment address
        cluster.withdraw_reward(
            stake_addr_record=pool_reward.stake,
            dst_addr_record=pool_reward.payment,
            tx_name=temp_template,
        )

        # deregister the pool reward address
        stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=pool_reward.stake.vkey_file)
        tx_files_deregister = clusterlib.TxFiles(
            certificate_files=[stake_addr_dereg_cert],
            signing_key_files=[
                pool_reward.payment.skey_file, pool_reward.stake.skey_file
            ],
        )

        src_init_balance = cluster.get_address_balance(
            pool_reward.payment.address)

        tx_raw_deregister_output = cluster.send_tx(
            src_address=pool_reward.payment.address,
            tx_name=f"{temp_template}_dereg_reward",
            tx_files=tx_files_deregister,
        )

        with cluster_manager.restart_on_failure():
            # check that the key deposit was returned
            assert (
                cluster.get_address_balance(
                    pool_reward.payment.address) == src_init_balance -
                tx_raw_deregister_output.fee + cluster.get_address_deposit()
            ), f"Incorrect balance for source address `{pool_reward.payment.address}`"

            # check that the reward address is no longer delegated
            assert not cluster.get_stake_addr_info(
                pool_reward.stake.address), "Stake address still delegated"

            cluster.wait_for_new_epoch(3)

            # check that pool owner is NOT receiving rewards
            assert (cluster.get_stake_addr_info(
                pool_reward.stake.address).reward_account_balance == 0
                    ), "Pool owner received unexpected rewards"

            # fund the pool owner's addresses so the balance stays higher than the pool pledge
            # even after fees etc. are deducted
            clusterlib_utils.fund_from_faucet(
                pool_owner,
                cluster_obj=cluster,
                faucet_data=cluster_manager.cache.addrs_data["user1"],
                amount=900_000_000,
                force=True,
            )

            # make sure we have enough time to finish pool deregistration in one epoch
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                     start=5,
                                                     stop=-40)

            src_dereg_balance = cluster.get_address_balance(
                pool_owner.payment.address)
            stake_account_balance = cluster.get_stake_addr_info(
                pool_owner.stake.address).reward_account_balance
            reward_account_balance = cluster.get_stake_addr_info(
                pool_reward.stake.address).reward_account_balance

            node_cold = pool_rec["cold_key_pair"]
            pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)

            # deregister stake pool
            depoch = cluster.get_epoch() + 1
            __, tx_raw_output = cluster.deregister_stake_pool(
                pool_owners=[pool_owner],
                cold_key_pair=node_cold,
                epoch=depoch,
                pool_name=pool_name,
                tx_name=temp_template,
            )
            assert cluster.get_pool_params(pool_id).retiring == depoch

            # check that the pool was deregistered
            cluster.wait_for_new_epoch()
            assert not cluster.get_pool_params(
                pool_id
            ).pool_params, f"The pool {pool_id} was not deregistered"

            # check command kes-period-info case: de-register pool
            # TODO: the query is currently broken
            kes_query_currently_broken = False
            try:
                kes_period_info = cluster.get_kes_period_info(pool_opcert_file)
            except clusterlib.CLIError as err:
                if "currentlyBroken" not in str(err):
                    raise
                kes_query_currently_broken = True

            if not kes_query_currently_broken:
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info,
                    expected_scenario=kes.KesScenarios.ALL_VALID)

            # check that the balance for source address was correctly updated
            assert src_dereg_balance - tx_raw_output.fee == cluster.get_address_balance(
                pool_owner.payment.address)

            # check that the pool deposit was NOT returned to reward or stake address
            assert (cluster.get_stake_addr_info(
                pool_owner.stake.address).reward_account_balance ==
                    stake_account_balance)
            assert (cluster.get_stake_addr_info(
                pool_reward.stake.address).reward_account_balance ==
                    reward_account_balance)

            # Return the pool to the original state - reregister the pool, register
            # the reward address, delegate the stake address to the pool.

            src_updated_balance = cluster.get_address_balance(
                pool_reward.payment.address)

            # reregister the pool by resubmitting the pool registration certificate,
            # delegate stake address to pool again, reregister reward address
            tx_files = clusterlib.TxFiles(
                certificate_files=[
                    pool_rec["reward_addr_registration_cert"],
                    pool_rec["pool_registration_cert"],
                    pool_rec["stake_addr_delegation_cert"],
                ],
                signing_key_files=[
                    pool_rec["payment"].skey_file,
                    pool_rec["stake"].skey_file,
                    pool_rec["reward"].skey_file,
                    node_cold.skey_file,
                ],
            )
            tx_raw_output = cluster.send_tx(
                src_address=pool_reward.payment.address,
                tx_name=f"{temp_template}_rereg_pool",
                tx_files=tx_files,
            )

            # check command kes-period-info case: re-register pool, checked immediately,
            # before the re-registration takes effect
            if not kes_query_currently_broken:
                kes_period_info = cluster.get_kes_period_info(pool_opcert_file)
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info,
                    expected_scenario=kes.KesScenarios.ALL_VALID)

            # check that the balance for source address was correctly updated and that the
            # pool deposit was needed
            assert (
                cluster.get_address_balance(
                    pool_reward.payment.address) == src_updated_balance -
                tx_raw_output.fee - cluster.get_pool_deposit() -
                cluster.get_address_deposit()
            ), f"Incorrect balance for source address `{pool_reward.payment.address}`"

            LOGGER.info(
                "Waiting up to 5 epochs for stake pool to be reregistered.")
            for __ in range(5):
                cluster.wait_for_new_epoch(padding_seconds=10)
                if pool_id in cluster.get_stake_distribution():
                    break
            else:
                raise AssertionError(
                    f"Stake pool `{pool_id}` not registered even after 5 epochs."
                )

            # check command kes-period-info case: re-register pool
            if not kes_query_currently_broken:
                kes_period_info = cluster.get_kes_period_info(pool_opcert_file)
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info,
                    expected_scenario=kes.KesScenarios.ALL_VALID)

            # wait before checking delegation and rewards
            cluster.wait_for_new_epoch(3, padding_seconds=30)

            # check that the stake address was delegated
            stake_addr_info = cluster.get_stake_addr_info(
                pool_owner.stake.address)
            assert (
                stake_addr_info.delegation
            ), f"Stake address was not delegated yet: {stake_addr_info}"

            assert pool_id == stake_addr_info.delegation, "Stake address delegated to wrong pool"

            # check that pool owner is receiving rewards
            assert cluster.get_stake_addr_info(
                pool_reward.stake.address
            ).reward_account_balance, "New reward was not received by pool reward address"

        # check that pledge is still met after the owner address was used to pay for Txs
        pool_data = clusterlib_utils.load_registered_pool_data(
            cluster_obj=cluster, pool_name=pool_name, pool_id=pool_id)
        owner_payment_balance = cluster.get_address_balance(
            pool_owner.payment.address)
        assert (owner_payment_balance >= pool_data.pool_pledge
                ), f"Pledge is not met for pool '{pool_name}'!"