Example #1
def _wait_epoch_chores(cluster_obj: clusterlib.ClusterLib, temp_template: str, this_epoch: int):
    if cluster_obj.get_epoch() == this_epoch:
        LOGGER.info(f"{datetime.datetime.now()}: Waiting for next epoch.")
        cluster_obj.wait_for_new_epoch()

    LOGGER.info(f"{datetime.datetime.now()}: Waiting for the end of current epoch.")
    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster_obj, start=-19, stop=-15)

    # save ledger state
    clusterlib_utils.save_ledger_state(
        cluster_obj=cluster_obj,
        state_name=f"{temp_template}_{cluster_obj.get_epoch()}",
    )
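
This helper (and the closure variant in Example #2 below) implements the per-epoch chore pattern that the longer tests build on. A minimal driving loop, sketched under the assumption that `cluster_obj` is a live `clusterlib.ClusterLib` instance and `temp_template` a unique test prefix, as in the example above:

# sketch: run the chores for five consecutive epochs, as Example #6 does inline
this_epoch = -1
for __ in range(5):
    _wait_epoch_chores(cluster_obj, temp_template, this_epoch)
    this_epoch = cluster_obj.get_epoch()
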
Example #2
        def _wait_epoch_chores(this_epoch: int):
            # wait for next epoch
            if cluster.get_epoch() == this_epoch:
                cluster.wait_for_new_epoch()

            # wait for the end of the epoch
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                     start=-19,
                                                     stop=-9)

            # save ledger state
            clusterlib_utils.save_ledger_state(
                cluster_obj=cluster,
                state_name=f"{temp_template}_{cluster.get_epoch()}",
            )
Example #3
    def test_unstable_stake_distribution(
        self,
        skip_leadership_schedule: None,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
    ):
        """Try to query leadership schedule for next epoch when stake distribution is unstable.

        Expect failure.
        """
        # pylint: disable=unused-argument
        common.get_test_id(cluster)

        pool_name = cluster_management.Resources.POOL3
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        # wait for an epoch interval where the stake distribution for the next epoch
        # is unstable, i.e. any time before the last 300 slots of the current epoch
        clusterlib_utils.wait_for_epoch_interval(
            cluster_obj=cluster,
            start=5,
            stop=-int(300 * cluster.slot_length + 5),
        )

        # it should NOT be possible to query leadership schedule
        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.get_leadership_schedule(
                vrf_skey_file=pool_rec["vrf_key_pair"].skey_file,
                cold_vkey_file=pool_rec["cold_key_pair"].vkey_file,
                for_next=True,
            )
        err_str = str(excinfo.value)

        # TODO: the query is currently broken
        if "currently broken" in err_str:
            pytest.xfail("`query leadership-schedule` is currently broken")
        if "PastHorizon" in err_str:
            pytest.xfail(
                "`query leadership-schedule` is affected by cardano-node issue 4002"
            )

        assert "current stake distribution is currently unstable" in err_str, err_str
Example #4
    def test_delegate_using_vkey(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_use_pool1: clusterlib.ClusterLib,
        use_build_cmd: bool,
    ):
        """Submit registration certificate and delegate to pool using cold vkey.

        * register stake address and delegate it to pool
        * check that the stake address was delegated
        * (optional) check records in db-sync
        """
        pool_name = cluster_management.Resources.POOL1
        cluster = cluster_use_pool1
        temp_template = f"{common.get_test_id(cluster)}_{use_build_cmd}"

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        node_cold = cluster_manager.cache.addrs_data[pool_name][
            "cold_key_pair"]
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            cold_vkey=node_cold.vkey_file,
            use_build_cmd=use_build_cmd,
        )

        tx_db_record = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_record,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )
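
`init_epoch` is captured right after the wait because a delegation submitted in epoch N takes effect in epoch N + 2, which is what `db_check_delegation` verifies through `deleg_epoch` (compare the `active_epoch_no == init_epoch + 2` check in Example #8). The arithmetic, as a self-contained illustration:

init_epoch = 42  # epoch in which the delegation TX is submitted (invented number)
active_epoch = init_epoch + 2  # epoch from which db-sync reports the delegation active
print(active_epoch)  # 44
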
Example #5
    def test_delegate_using_pool_id(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
        use_build_cmd: bool,
    ):
        """Submit registration certificate and delegate to pool using pool id.

        * register stake address and delegate it to pool
        * check that the stake address was delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = f"{common.get_test_id(cluster)}_{use_build_cmd}"

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_id=pool_id,
            use_build_cmd=use_build_cmd,
        )

        tx_db_record = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_record,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )
Example #6
    def test_update_valid_opcert(
        self,
        cluster_lock_pool2: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Update a valid operational certificate with another valid operational certificate.

        * generate new operational certificate with valid `--kes-period`
        * restart the node with the new operational certificate
        * check that the pool is still producing blocks
        """
        pool_name = "node-pool2"
        node_name = "pool2"
        cluster = cluster_lock_pool2

        temp_template = helpers.get_func_name()
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        node_cold = pool_rec["cold_key_pair"]
        stake_pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)
        stake_pool_id_dec = helpers.decode_bech32(stake_pool_id)

        opcert_file = pool_rec["pool_operational_cert"]

        with cluster_manager.restart_on_failure():
            # generate new operational certificate with valid `--kes-period`
            new_opcert_file = cluster.gen_node_operational_cert(
                node_name=node_name,
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=pool_rec["cold_key_pair"].counter_file,
                kes_period=cluster.get_kes_period(),
            )

            # restart the node with the new operational certificate
            logfiles.add_ignore_rule("*.stdout", "MuxBearerClosed")
            shutil.copy(new_opcert_file, opcert_file)
            cluster_nodes.restart_node(node_name)

            LOGGER.info("Checking blocks production for 5 epochs.")
            blocks_made_db = []
            this_epoch = -1
            updated_epoch = cluster.get_epoch()
            for __ in range(5):
                # wait for next epoch
                if cluster.get_epoch() == this_epoch:
                    cluster.wait_for_new_epoch()

                # wait for the end of the epoch
                clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                         start=-19,
                                                         stop=-9)
                this_epoch = cluster.get_epoch()

                ledger_state = clusterlib_utils.get_ledger_state(
                    cluster_obj=cluster)

                # save ledger state
                clusterlib_utils.save_ledger_state(
                    cluster_obj=cluster,
                    state_name=f"{temp_template}_{this_epoch}",
                    ledger_state=ledger_state,
                )

                # check that the pool is still producing blocks
                blocks_made = ledger_state["blocksCurrent"]
                blocks_made_db.append(stake_pool_id_dec in blocks_made)

            assert any(blocks_made_db), (
                f"The pool '{pool_name}' has not produced any blocks "
                f"since epoch {updated_epoch}")
Example #7
    def test_stake_snapshot(self, cluster: clusterlib.ClusterLib):  # noqa: C901
        """Test the `stake-snapshot` and `ledger-state` commands and ledger state values."""
        # pylint: disable=too-many-statements,too-many-locals,too-many-branches
        temp_template = common.get_test_id(cluster)

        # make sure the queries can be finished in a single epoch
        stop = (
            20 if cluster_nodes.get_cluster_type().type == cluster_nodes.ClusterType.LOCAL else 200
        )
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-stop)

        stake_pool_ids = cluster.get_stake_pools()
        if not stake_pool_ids:
            pytest.skip("No stake pools are available.")
        if len(stake_pool_ids) > 200:
            pytest.skip("Skipping on this testnet, there's too many pools.")

        ledger_state = clusterlib_utils.get_ledger_state(cluster_obj=cluster)
        clusterlib_utils.save_ledger_state(
            cluster_obj=cluster,
            state_name=temp_template,
            ledger_state=ledger_state,
        )
        es_snapshot: dict = ledger_state["stateBefore"]["esSnapshots"]

        def _get_hashes(snapshot: str) -> Dict[str, int]:
            hashes: Dict[str, int] = {}
            for r in es_snapshot[snapshot]["stake"]:
                r_hash_rec = r[0]
                r_hash = r_hash_rec.get("script hash") or r_hash_rec.get("key hash")
                if r_hash in hashes:
                    hashes[r_hash] += r[1]
                else:
                    hashes[r_hash] = r[1]
            return hashes

        def _get_delegations(snapshot: str) -> Dict[str, List[str]]:
            delegations: Dict[str, List[str]] = {}
            for r in es_snapshot[snapshot]["delegations"]:
                r_hash_rec = r[0]
                r_hash = r_hash_rec.get("script hash") or r_hash_rec.get("key hash")
                r_pool_id = r[1]
                if r_pool_id in delegations:
                    delegations[r_pool_id].append(r_hash)
                else:
                    delegations[r_pool_id] = [r_hash]
            return delegations

        errors = []

        ledger_state_keys = set(ledger_state)
        if ledger_state_keys != LEDGER_STATE_KEYS:
            errors.append(
                "unexpected ledger state keys: "
                f"{ledger_state_keys.difference(LEDGER_STATE_KEYS)} and "
                f"{LEDGER_STATE_KEYS.difference(ledger_state_keys)}"
            )

        # stake addresses (hashes) and corresponding amounts
        stake_mark = _get_hashes("pstakeMark")
        stake_set = _get_hashes("pstakeSet")
        stake_go = _get_hashes("pstakeGo")

        # pools (hashes) and stake addresses (hashes) delegated to corresponding pool
        delegations_mark = _get_delegations("pstakeMark")
        delegations_set = _get_delegations("pstakeSet")
        delegations_go = _get_delegations("pstakeGo")

        # all delegated stake addresses (hashes)
        delegated_hashes_mark = set(itertools.chain.from_iterable(delegations_mark.values()))
        delegated_hashes_set = set(itertools.chain.from_iterable(delegations_set.values()))
        delegated_hashes_go = set(itertools.chain.from_iterable(delegations_go.values()))

        # check if all delegated addresses are listed among stake addresses
        stake_hashes_mark = set(stake_mark)
        if not delegated_hashes_mark.issubset(stake_hashes_mark):
            errors.append(
                "for 'mark', some delegations are not listed in 'stake': "
                f"{delegated_hashes_mark.difference(stake_hashes_mark)}"
            )

        stake_hashes_set = set(stake_set)
        if not delegated_hashes_set.issubset(stake_hashes_set):
            errors.append(
                "for 'set', some delegations are not listed in 'stake': "
                f"{delegated_hashes_set.difference(stake_hashes_set)}"
            )

        stake_hashes_go = set(stake_go)
        if not delegated_hashes_go.issubset(stake_hashes_go):
            errors.append(
                "for 'go', some delegations are not listed in 'stake': "
                f"{delegated_hashes_go.difference(stake_hashes_go)}"
            )

        sum_mark = sum_set = sum_go = 0
        seen_hashes_mark: Set[str] = set()
        seen_hashes_set: Set[str] = set()
        seen_hashes_go: Set[str] = set()
        delegation_pool_ids = {*delegations_mark, *delegations_set, *delegations_go}
        for pool_id_dec in delegation_pool_ids:
            pool_id = helpers.encode_bech32(prefix="pool", data=pool_id_dec)

            # get stake info from ledger state
            pstake_hashes_mark = delegations_mark.get(pool_id_dec) or ()
            seen_hashes_mark.update(pstake_hashes_mark)
            pstake_amounts_mark = [stake_mark[h] for h in pstake_hashes_mark]
            pstake_sum_mark = functools.reduce(lambda x, y: x + y, pstake_amounts_mark, 0)

            pstake_hashes_set = delegations_set.get(pool_id_dec) or ()
            seen_hashes_set.update(pstake_hashes_set)
            pstake_amounts_set = [stake_set[h] for h in pstake_hashes_set]
            pstake_sum_set = functools.reduce(lambda x, y: x + y, pstake_amounts_set, 0)

            pstake_hashes_go = delegations_go.get(pool_id_dec) or ()
            seen_hashes_go.update(pstake_hashes_go)
            pstake_amounts_go = [stake_go[h] for h in pstake_hashes_go]
            pstake_sum_go = functools.reduce(lambda x, y: x + y, pstake_amounts_go, 0)

            # get stake info from `stake-snapshot` command
            stake_snapshot = cluster.get_stake_snapshot(stake_pool_id=pool_id)
            pstake_mark_cmd = stake_snapshot["poolStakeMark"]
            pstake_set_cmd = stake_snapshot["poolStakeSet"]
            pstake_go_cmd = stake_snapshot["poolStakeGo"]

            if pstake_sum_mark != pstake_mark_cmd:
                errors.append(f"pool: {pool_id}, mark:\n  {pstake_sum_mark} != {pstake_mark_cmd}")
            if pstake_sum_set != pstake_set_cmd:
                errors.append(f"pool: {pool_id}, set:\n  {pstake_sum_set} != {pstake_set_cmd}")
            if pstake_sum_go != pstake_go_cmd:
                errors.append(f"pool: {pool_id}, go:\n  {pstake_sum_go} != {pstake_go_cmd}")

            sum_mark += pstake_mark_cmd
            sum_set += pstake_set_cmd
            sum_go += pstake_go_cmd

        if seen_hashes_mark != delegated_hashes_mark:
            errors.append(
                "seen hashes and existing hashes differ for 'mark': "
                f"{seen_hashes_mark.difference(delegated_hashes_mark)} and "
                f"{delegated_hashes_mark.difference(seen_hashes_mark)}"
            )

        if seen_hashes_set != delegated_hashes_set:
            errors.append(
                "seen hashes and existing hashes differ for 'set': "
                f"{seen_hashes_set.difference(delegated_hashes_set)} and "
                f"{delegated_hashes_set.difference(seen_hashes_set)}"
            )

        if seen_hashes_go != delegated_hashes_go:
            errors.append(
                "seen hashes and existing hashes differ for 'go': "
                f"{seen_hashes_go.difference(delegated_hashes_go)} and "
                f"{delegated_hashes_go.difference(seen_hashes_go)}"
            )

        # active stake can be lower than the sum of stakes, as some pools may not be
        # running and minting blocks; the `activeStake*` totals are pool-independent,
        # so the snapshot from the last loop iteration can be used here
        if sum_mark < stake_snapshot["activeStakeMark"]:
            errors.append(f"active_mark: {sum_mark} < {stake_snapshot['activeStakeMark']}")
        if sum_set < stake_snapshot["activeStakeSet"]:
            errors.append(f"active_set: {sum_set} < {stake_snapshot['activeStakeSet']}")
        if sum_go < stake_snapshot["activeStakeGo"]:
            errors.append(f"active_go: {sum_go} < {stake_snapshot['activeStakeGo']}")

        if errors:
            err_joined = "\n".join(errors)
            pytest.fail(f"Errors:\n{err_joined}")
Example #8
    def test_addr_delegation_deregistration(
        self,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
        pool_users: List[clusterlib.PoolUser],
        pool_users_disposable: List[clusterlib.PoolUser],
        use_build_cmd: bool,
    ):
        """Submit delegation and deregistration certificates in single TX.

        * create stake address registration cert
        * create stake address deregistration cert
        * register stake address
        * create stake address delegation cert
        * delegate and deregister stake address in single TX
        * check that the balance for source address was correctly updated and that the key
          deposit was returned
        * check that the stake address was NOT delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = f"{common.get_test_id(cluster)}_{use_build_cmd}"

        user_registered = pool_users_disposable[0]
        user_payment = pool_users[0].payment
        src_init_balance = cluster.get_address_balance(user_payment.address)

        # create stake address registration cert
        stake_addr_reg_cert_file = cluster.gen_stake_addr_registration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # create stake address deregistration cert
        stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # register stake address
        tx_files = clusterlib.TxFiles(
            certificate_files=[stake_addr_reg_cert_file],
            signing_key_files=[user_payment.skey_file],
        )
        tx_raw_output_reg = cluster.send_tx(
            src_address=user_payment.address,
            tx_name=f"{temp_template}_reg",
            tx_files=tx_files,
        )

        tx_db_reg = dbsync_utils.check_tx(cluster_obj=cluster,
                                          tx_raw_output=tx_raw_output_reg)
        if tx_db_reg:
            assert user_registered.stake.address in tx_db_reg.stake_registration

        # check that the balance for source address was correctly updated
        assert (
            cluster.get_address_balance(
                user_payment.address) == src_init_balance -
            tx_raw_output_reg.fee - cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{user_payment.address}`"

        src_registered_balance = cluster.get_address_balance(
            user_payment.address)

        # create stake address delegation cert
        stake_addr_deleg_cert_file = cluster.gen_stake_addr_delegation_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file,
            stake_pool_id=pool_id,
        )

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # delegate and deregister stake address in single TX
        tx_files = clusterlib.TxFiles(
            certificate_files=[
                stake_addr_deleg_cert_file, stake_addr_dereg_cert
            ],
            signing_key_files=[
                user_payment.skey_file, user_registered.stake.skey_file
            ],
        )

        if use_build_cmd:
            tx_raw_output_deleg = cluster.build_tx(
                src_address=user_payment.address,
                tx_name=f"{temp_template}_deleg_dereg",
                tx_files=tx_files,
                fee_buffer=2_000_000,
                witness_override=len(tx_files.signing_key_files),
            )
            tx_signed = cluster.sign_tx(
                tx_body_file=tx_raw_output_deleg.out_file,
                signing_key_files=tx_files.signing_key_files,
                tx_name=f"{temp_template}_deleg_dereg",
            )
            cluster.submit_tx(tx_file=tx_signed,
                              txins=tx_raw_output_deleg.txins)
        else:
            tx_raw_output_deleg = cluster.send_tx(
                src_address=user_payment.address,
                tx_name=f"{temp_template}_deleg_dereg",
                tx_files=tx_files,
            )

        # check that the balance for source address was correctly updated and that the key
        # deposit was returned
        assert (
            cluster.get_address_balance(
                user_payment.address) == src_registered_balance -
            tx_raw_output_deleg.fee + cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{user_payment.address}`"

        # check that the stake address was NOT delegated
        stake_addr_info = cluster.get_stake_addr_info(
            user_registered.stake.address)
        assert not stake_addr_info.delegation, f"Stake address was delegated: {stake_addr_info}"

        tx_db_deleg = dbsync_utils.check_tx(cluster_obj=cluster,
                                            tx_raw_output=tx_raw_output_deleg)
        if tx_db_deleg:
            assert user_registered.stake.address in tx_db_deleg.stake_deregistration
            assert user_registered.stake.address == tx_db_deleg.stake_delegation[0].address
            assert tx_db_deleg.stake_delegation[0].active_epoch_no == init_epoch + 2
            assert pool_id == tx_db_deleg.stake_delegation[0].pool_id
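
The final balance assertion is plain bookkeeping: the combined TX pays a fee, while the deregistration certificate releases the key deposit back to the payment address. The same check with invented Lovelace amounts:

src_registered_balance = 10_000_000  # balance after registration (invented)
fee = 180_000                        # fee of the delegate + deregister TX
key_deposit = 2_000_000              # deposit returned by the deregistration
expected = src_registered_balance - fee + key_deposit
print(expected)  # 11820000
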
Example #9
    def test_undelegate(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
    ):
        """Undelegate stake address.

        * submit registration certificate and delegate to pool
        * wait for first reward
        * undelegate stake address:

           - withdraw rewards to payment address
           - deregister stake address
           - re-register stake address

        * check that the key deposit was not returned
        * check that rewards were withdrawn
        * check that the stake address is still registered
        * check that the stake address is no longer delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = common.get_test_id(cluster)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        # check records in db-sync
        tx_db_deleg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_deleg,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )

        src_address = delegation_out.pool_user.payment.address

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_id}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # files for deregistering / re-registering stake address
        stake_addr_dereg_cert_file = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_undeleg_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        stake_addr_reg_cert_file = cluster.gen_stake_addr_registration_cert(
            addr_name=f"{temp_template}_undeleg_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        tx_files_undeleg = clusterlib.TxFiles(
            certificate_files=[
                stake_addr_dereg_cert_file, stake_addr_reg_cert_file
            ],
            signing_key_files=[
                delegation_out.pool_user.payment.skey_file,
                delegation_out.pool_user.stake.skey_file,
            ],
        )

        src_payment_balance = cluster.get_address_balance(src_address)
        reward_balance = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address).reward_account_balance

        # withdraw rewards to payment address; deregister and re-register stake address
        tx_raw_undeleg = cluster.send_tx(
            src_address=src_address,
            tx_name=f"{temp_template}_undeleg_withdraw",
            tx_files=tx_files_undeleg,
            withdrawals=[
                clusterlib.TxOut(
                    address=delegation_out.pool_user.stake.address, amount=-1)
            ],
        )

        # check that the key deposit was NOT returned and rewards were withdrawn
        assert (
            cluster.get_address_balance(src_address) == src_payment_balance -
            tx_raw_undeleg.fee + reward_balance
        ), f"Incorrect balance for source address `{src_address}`"

        # check that the stake address is no longer delegated
        stake_addr_info = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address)
        assert stake_addr_info.address, f"Reward address is not registered: {stake_addr_info}"
        assert (not stake_addr_info.delegation
                ), f"Stake address is still delegated: {stake_addr_info}"

        this_epoch = cluster.wait_for_new_epoch(padding_seconds=20)
        assert cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address
        ).reward_account_balance, "No reward was received in the next epoch after undelegation"

        # check `transaction view` command
        tx_view.check_tx_view(cluster_obj=cluster,
                              tx_raw_output=tx_raw_undeleg)

        # check records in db-sync
        tx_db_undeleg = dbsync_utils.check_tx(cluster_obj=cluster,
                                              tx_raw_output=tx_raw_undeleg)
        if tx_db_undeleg:
            assert delegation_out.pool_user.stake.address in tx_db_undeleg.stake_deregistration
            assert delegation_out.pool_user.stake.address in tx_db_undeleg.stake_registration

            db_rewards = dbsync_utils.check_address_reward(
                address=delegation_out.pool_user.stake.address,
                epoch_from=init_epoch)
            assert db_rewards
            db_reward_epochs = sorted(r.spendable_epoch
                                      for r in db_rewards.rewards)
            assert db_reward_epochs[0] == init_epoch + 4
            assert this_epoch in db_reward_epochs
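
The withdrawal above passes `amount=-1`, which clusterlib resolves to the full reward balance. A sketch of the equivalent explicit form, assuming the same `cluster` and `delegation_out` objects as in the test:

# sketch: withdraw an explicit amount instead of the -1 "withdraw all" sentinel
reward_balance = cluster.get_stake_addr_info(
    delegation_out.pool_user.stake.address).reward_account_balance
withdrawal = clusterlib.TxOut(
    address=delegation_out.pool_user.stake.address, amount=reward_balance)
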
Example #10
    def test_deregister(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
    ):
        """Deregister stake address.

        * create two payment addresses that share single stake address
        * register and delegate the stake address to pool
        * attempt to deregister the stake address - deregistration is expected to fail
          because there are rewards in the stake address
        * withdraw rewards to payment address and deregister stake address
        * check that the key deposit was returned and rewards withdrawn
        * check that the stake address is no longer delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = common.get_test_id(cluster)

        # create two payment addresses that share single stake address (just to test that
        # delegation works as expected even under such circumstances)
        stake_addr_rec = clusterlib_utils.create_stake_addr_records(
            f"{temp_template}_addr0", cluster_obj=cluster)[0]
        payment_addr_recs = clusterlib_utils.create_payment_addr_records(
            f"{temp_template}_addr0",
            f"{temp_template}_addr1",
            cluster_obj=cluster,
            stake_vkey_file=stake_addr_rec.vkey_file,
        )

        # fund payment address
        clusterlib_utils.fund_from_faucet(
            *payment_addr_recs,
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
        )

        pool_user = clusterlib.PoolUser(payment=payment_addr_recs[1],
                                        stake=stake_addr_rec)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_user=pool_user,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        tx_db_deleg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        if tx_db_deleg:
            # check in db-sync that both payment addresses share single stake address
            assert (dbsync_utils.get_utxo(
                address=payment_addr_recs[0].address).stake_address ==
                    stake_addr_rec.address)
            assert (dbsync_utils.get_utxo(
                address=payment_addr_recs[1].address).stake_address ==
                    stake_addr_rec.address)

        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_deleg,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )

        src_address = delegation_out.pool_user.payment.address

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_id}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # files for deregistering stake address
        stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        tx_files_deregister = clusterlib.TxFiles(
            certificate_files=[stake_addr_dereg_cert],
            signing_key_files=[
                delegation_out.pool_user.payment.skey_file,
                delegation_out.pool_user.stake.skey_file,
            ],
        )

        # attempt to deregister the stake address - deregistration is expected to fail
        # because there are rewards in the stake address
        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.send_tx(
                src_address=src_address,
                tx_name=f"{temp_template}_dereg_fail",
                tx_files=tx_files_deregister,
            )
        assert "StakeKeyNonZeroAccountBalanceDELEG" in str(excinfo.value)

        src_payment_balance = cluster.get_address_balance(src_address)
        reward_balance = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address).reward_account_balance

        # withdraw rewards to payment address, deregister stake address
        tx_raw_deregister_output = cluster.send_tx(
            src_address=src_address,
            tx_name=f"{temp_template}_dereg_withdraw",
            tx_files=tx_files_deregister,
            withdrawals=[
                clusterlib.TxOut(
                    address=delegation_out.pool_user.stake.address, amount=-1)
            ],
        )

        # check that the key deposit was returned and rewards withdrawn
        assert (
            cluster.get_address_balance(src_address) == src_payment_balance -
            tx_raw_deregister_output.fee + reward_balance +
            cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{src_address}`"

        # check that the stake address is no longer delegated
        stake_addr_info = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address)
        assert (not stake_addr_info.delegation
                ), f"Stake address is still delegated: {stake_addr_info}"

        tx_db_dereg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=tx_raw_deregister_output)
        if tx_db_dereg:
            assert delegation_out.pool_user.stake.address in tx_db_dereg.stake_deregistration
            assert (
                cluster.get_address_balance(src_address) ==
                dbsync_utils.get_utxo(address=src_address).amount_sum
            ), f"Unexpected balance for source address `{src_address}` in db-sync"
Example #11
    def test_update_valid_opcert(
        self,
        cluster_lock_pool2: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Update a valid operational certificate with another valid operational certificate.

        * generate new operational certificate with valid `--kes-period`
        * copy new operational certificate to the node
        * stop the node so the corresponding pool is not minting new blocks
        * check `kes-period-info` while the pool is not minting blocks
        * start the node with the new operational certificate
        * check that the pool is minting blocks again
        * check that metrics reported by `kes-period-info` got updated once the pool started
          minting blocks again
        * check `kes-period-info` with the old (replaced) operational certificate
        """
        # pylint: disable=too-many-statements
        pool_name = cluster_management.Resources.POOL2
        node_name = "pool2"
        cluster = cluster_lock_pool2

        temp_template = common.get_test_id(cluster)
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        node_cold = pool_rec["cold_key_pair"]
        stake_pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)
        stake_pool_id_dec = helpers.decode_bech32(stake_pool_id)

        opcert_file = pool_rec["pool_operational_cert"]
        opcert_file_old = shutil.copy(opcert_file, f"{opcert_file}_old")

        with cluster_manager.restart_on_failure():
            # generate new operational certificate with valid `--kes-period`
            new_opcert_file = cluster.gen_node_operational_cert(
                node_name=f"{node_name}_new_opcert_file",
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=pool_rec["cold_key_pair"].counter_file,
                kes_period=cluster.get_kes_period(),
            )

            # copy new operational certificate to the node
            logfiles.add_ignore_rule(
                files_glob="*.stdout",
                regex="MuxBearerClosed",
                ignore_file_id=cluster_manager.worker_id,
            )
            shutil.copy(new_opcert_file, opcert_file)

            # stop the node so the corresponding pool is not minting new blocks
            cluster_nodes.stop_nodes([node_name])

            time.sleep(10)

            # check kes-period-info while the pool is not minting blocks
            # TODO: the query is currently broken
            kes_query_currently_broken = False
            try:
                kes_period_info_new = cluster.get_kes_period_info(opcert_file)
            except clusterlib.CLIError as err:
                if "currentlyBroken" not in str(err):
                    raise
                kes_query_currently_broken = True

            if not kes_query_currently_broken:
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info_new, expected_scenario=kes.KesScenarios.ALL_VALID
                )
                kes_period_info_old = cluster.get_kes_period_info(opcert_file_old)
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info_old, expected_scenario=kes.KesScenarios.ALL_VALID
                )
                assert (
                    kes_period_info_new["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                    == kes_period_info_old["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                )

            # start the node with the new operational certificate
            cluster_nodes.start_nodes([node_name])

            # make sure we are not at the very end of an epoch so we still have time for
            # the first block production check
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-18)

            LOGGER.info("Checking blocks production for 5 epochs.")
            blocks_made_db = []
            this_epoch = -1
            updated_epoch = cluster.get_epoch()
            for __ in range(5):
                # wait for next epoch
                if cluster.get_epoch() == this_epoch:
                    cluster.wait_for_new_epoch()

                # wait for the end of the epoch
                clusterlib_utils.wait_for_epoch_interval(
                    cluster_obj=cluster, start=-19, stop=-15, force_epoch=True
                )
                this_epoch = cluster.get_epoch()

                ledger_state = clusterlib_utils.get_ledger_state(cluster_obj=cluster)

                # save ledger state
                clusterlib_utils.save_ledger_state(
                    cluster_obj=cluster,
                    state_name=f"{temp_template}_{this_epoch}",
                    ledger_state=ledger_state,
                )

                # check that the pool is minting blocks
                blocks_made = ledger_state["blocksCurrent"]
                blocks_made_db.append(stake_pool_id_dec in blocks_made)

            assert any(
                blocks_made_db
            ), f"The pool '{pool_name}' has not minted any blocks since epoch {updated_epoch}"

        if kes_query_currently_broken:
            pytest.xfail("`query kes-period-info` is currently broken")
        else:
            # check that metrics reported by kes-period-info got updated once the pool started
            # minting blocks again
            kes_period_info_updated = cluster.get_kes_period_info(opcert_file)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info_updated, expected_scenario=kes.KesScenarios.ALL_VALID
            )
            assert (
                kes_period_info_updated["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                != kes_period_info_old["metrics"]["qKesNodeStateOperationalCertificateNumber"]
            )

            # check kes-period-info with operational certificate with a wrong counter
            kes_period_info_invalid = cluster.get_kes_period_info(opcert_file_old)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info_invalid,
                expected_scenario=kes.KesScenarios.INVALID_COUNTERS,
            )
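
`get_kes_period` returns the KES period of the current tip, i.e. the tip slot divided by `slotsPerKESPeriod` from the Shelley genesis. The arithmetic, using the mainnet genesis value of 129600 purely for illustration:

slots_per_kes_period = 129_600  # slotsPerKESPeriod (mainnet value, illustrative)
current_slot = 52_003_800       # invented tip slot
kes_period = current_slot // slots_per_kes_period
print(kes_period)  # 401
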
Example #12
    def test_oversaturated(  # noqa: C901
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_lock_pools: clusterlib.ClusterLib,
    ):
        """Check diminished rewards when stake pool is oversaturated.

        The stake pool continues to operate normally and those who delegate to that pool receive
        rewards, but the rewards are proportionally lower than those received from a stake pool
        that is not oversaturated.

        * register and delegate stake address in "init epoch", for all available pools
        * in "init epoch" + 2, saturate all available pools (block distribution remains balanced
          among pools)
        * in "init epoch" + 3, oversaturate one pool
        * in "init epoch" + 5, for all available pools, withdraw rewards and transfer funds
          from delegated addresses so pools are no longer (over)saturated
        * while doing the steps above, collect rewards data for 9 epochs
        * compare proportionality of rewards in epochs where pools were non-saturated,
          saturated and oversaturated
        """
        # pylint: disable=too-many-statements,too-many-locals,too-many-branches
        epoch_saturate = 2
        epoch_oversaturate = 4
        epoch_withdrawal = 6

        cluster = cluster_lock_pools
        temp_template = common.get_test_id(cluster)
        initial_balance = 1_000_000_000

        faucet_rec = cluster_manager.cache.addrs_data["byron000"]
        pool_records: Dict[int, PoolRecord] = {}

        # make sure we have enough time to finish the delegation in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)
        init_epoch = cluster.get_epoch()

        # submit registration certificates and delegate to pools
        for idx, res in enumerate(
            [
                cluster_management.Resources.POOL1,
                cluster_management.Resources.POOL2,
                cluster_management.Resources.POOL3,
            ],
                start=1,
        ):
            pool_addrs_data = cluster_manager.cache.addrs_data[res]
            reward_addr = clusterlib.PoolUser(
                payment=pool_addrs_data["payment"],
                stake=pool_addrs_data["reward"])
            pool_id = delegation.get_pool_id(
                cluster_obj=cluster,
                addrs_data=cluster_manager.cache.addrs_data,
                pool_name=res,
            )
            pool_id_dec = helpers.decode_bech32(bech32=pool_id)

            delegation_out = delegation.delegate_stake_addr(
                cluster_obj=cluster,
                addrs_data=cluster_manager.cache.addrs_data,
                temp_template=f"{temp_template}_pool{idx}",
                pool_id=pool_id,
                amount=initial_balance,
            )

            pool_records[idx] = PoolRecord(
                name=res,
                id=pool_id,
                id_dec=pool_id_dec,
                reward_addr=reward_addr,
                delegation_out=delegation_out,
                user_rewards=[],
                owner_rewards=[],
                blocks_minted={},
                saturation_amounts={},
            )

        # record initial reward balance for each pool
        for pool_rec in pool_records.values():
            user_payment_balance = cluster.get_address_balance(
                pool_rec.delegation_out.pool_user.payment.address)
            owner_payment_balance = cluster.get_address_balance(
                pool_rec.reward_addr.payment.address)
            pool_rec.user_rewards.append(
                RewardRecord(
                    epoch_no=init_epoch,
                    reward_total=0,
                    reward_per_epoch=0,
                    stake_total=user_payment_balance,
                ))
            pool_rec.owner_rewards.append(
                RewardRecord(
                    epoch_no=init_epoch,
                    reward_total=cluster.get_stake_addr_info(
                        pool_rec.reward_addr.stake.address).
                    reward_account_balance,
                    reward_per_epoch=0,
                    stake_total=owner_payment_balance,
                ))

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        LOGGER.info("Checking rewards for 10 epochs.")
        for __ in range(10):
            # wait for new epoch
            if cluster.get_epoch() == pool_records[2].owner_rewards[-1].epoch_no:
                cluster.wait_for_new_epoch()

            # sleep till the end of epoch
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                     start=-50,
                                                     stop=-40,
                                                     force_epoch=True)
            this_epoch = cluster.get_epoch()

            ledger_state = clusterlib_utils.get_ledger_state(
                cluster_obj=cluster)
            clusterlib_utils.save_ledger_state(
                cluster_obj=cluster,
                state_name=f"{temp_template}_{this_epoch}",
                ledger_state=ledger_state,
            )

            for pool_rec in pool_records.values():
                # reward balance in previous epoch
                prev_user_reward = pool_rec.user_rewards[-1].reward_total
                prev_owner_reward = pool_rec.owner_rewards[-1].reward_total

                pool_rec.blocks_minted[this_epoch - 1] = (
                    ledger_state["blocksBefore"].get(pool_rec.id_dec) or 0)

                # current reward balance
                user_reward = cluster.get_stake_addr_info(
                    pool_rec.delegation_out.pool_user.stake.address
                ).reward_account_balance
                owner_reward = cluster.get_stake_addr_info(
                    pool_rec.reward_addr.stake.address).reward_account_balance

                # total reward amounts received this epoch
                owner_reward_epoch = owner_reward - prev_owner_reward
                # Rewards are withdrawn at the end of every epoch where
                # `this_epoch >= init_epoch + epoch_withdrawal`, so from the following
                # epoch on, the previous total is no longer comparable.
                if this_epoch > init_epoch + epoch_withdrawal:
                    user_reward_epoch = user_reward
                else:
                    user_reward_epoch = user_reward - prev_user_reward

                # store collected rewards info
                user_payment_balance = cluster.get_address_balance(
                    pool_rec.delegation_out.pool_user.payment.address)
                owner_payment_balance = cluster.get_address_balance(
                    pool_rec.reward_addr.payment.address)
                pool_rec.user_rewards.append(
                    RewardRecord(
                        epoch_no=this_epoch,
                        reward_total=user_reward,
                        reward_per_epoch=user_reward_epoch,
                        stake_total=user_payment_balance + user_reward,
                    ))
                pool_rec.owner_rewards.append(
                    RewardRecord(
                        epoch_no=this_epoch,
                        reward_total=owner_reward,
                        reward_per_epoch=owner_reward_epoch,
                        stake_total=owner_payment_balance,
                    ))

                pool_rec.saturation_amounts[
                    this_epoch] = _get_saturation_threshold(
                        cluster_obj=cluster,
                        ledger_state=ledger_state,
                        pool_id=pool_rec.id)

            # fund the delegated addresses - saturate all pools
            if this_epoch == init_epoch + epoch_saturate:
                clusterlib_utils.fund_from_faucet(
                    *[
                        p.delegation_out.pool_user.payment
                        for p in pool_records.values()
                    ],
                    cluster_obj=cluster,
                    faucet_data=faucet_rec,
                    amount=[
                        p.saturation_amounts[this_epoch] - 100_000_000_000
                        for p in pool_records.values()
                    ],
                    tx_name=f"{temp_template}_saturate_pools_ep{this_epoch}",
                    force=True,
                )

            with cluster_manager.restart_on_failure():
                # Fund the address delegated to "pool2" to oversaturate the pool.
                # New stake amount will be current (saturated) stake * 2.
                if this_epoch == init_epoch + epoch_oversaturate:
                    assert (pool_records[2].saturation_amounts[this_epoch] >
                            0), "Pool is already saturated"
                    current_stake = int(
                        cluster.get_stake_snapshot(
                            pool_records[2].id)["poolStakeMark"])
                    oversaturate_amount = current_stake * 2
                    saturation_threshold = pool_records[2].saturation_amounts[
                        this_epoch]
                    assert oversaturate_amount > saturation_threshold, (
                        f"{oversaturate_amount} Lovelace is not enough to oversaturate the pool "
                        f"({saturation_threshold} is needed)")
                    clusterlib_utils.fund_from_faucet(
                        pool_records[2].delegation_out.pool_user.payment,
                        cluster_obj=cluster,
                        faucet_data=faucet_rec,
                        amount=oversaturate_amount,
                        tx_name=f"{temp_template}_oversaturate_pool2",
                        force=True,
                    )

                # first rewards are expected after 4 epochs
                if this_epoch >= init_epoch + 4:
                    assert (owner_reward > prev_owner_reward
                            ), "New reward was not received by pool owner"

                # transfer funds back to the faucet so the pools are no longer
                # (over)saturated and the staked amount is roughly the same as
                # `initial_balance`
                if this_epoch >= init_epoch + epoch_withdrawal:
                    _withdraw_rewards(
                        *[
                            p.delegation_out.pool_user
                            for p in pool_records.values()
                        ],
                        cluster_obj=cluster,
                        tx_name=f"{temp_template}_ep{this_epoch}",
                    )

                    return_to_addrs = []
                    return_amounts = []
                    for idx, pool_rec in pool_records.items():
                        deleg_payment_balance = cluster.get_address_balance(
                            pool_rec.delegation_out.pool_user.payment.address)
                        if deleg_payment_balance > initial_balance + 10_000_000:
                            return_to_addrs.append(
                                pool_rec.delegation_out.pool_user.payment)
                            return_amounts.append(deleg_payment_balance -
                                                  initial_balance)

                    clusterlib_utils.return_funds_to_faucet(
                        *return_to_addrs,
                        cluster_obj=cluster,
                        faucet_addr=faucet_rec["payment"].address,
                        amount=return_amounts,
                        tx_name=f"{temp_template}_ep{this_epoch}",
                    )

                    for return_addr in return_to_addrs:
                        deleg_payment_balance = cluster.get_address_balance(
                            return_addr.address)
                        assert (
                            deleg_payment_balance <= initial_balance
                        ), "Unexpected funds in payment address '{return_addr}'"

                assert (
                    cluster.get_epoch() == this_epoch
                ), "Failed to finish actions in single epoch, it would affect other checks"

        pool1_user_rewards_per_block = _get_reward_per_block(pool_records[1])
        pool2_user_rewards_per_block = _get_reward_per_block(pool_records[2])
        pool3_user_rewards_per_block = _get_reward_per_block(pool_records[3])

        pool1_owner_rewards_per_block = _get_reward_per_block(
            pool_records[1], owner_rewards=True)
        pool2_owner_rewards_per_block = _get_reward_per_block(
            pool_records[2], owner_rewards=True)
        pool3_owner_rewards_per_block = _get_reward_per_block(
            pool_records[3], owner_rewards=True)

        oversaturated_epoch = max(
            e for e, r in pool_records[2].saturation_amounts.items() if r < 0)
        saturated_epoch = oversaturated_epoch - 2
        nonsaturated_epoch = oversaturated_epoch - 4

        try:
            # check that rewards per block per stake for "pool2" in the epoch where the pool is
            # oversaturated is lower than in epochs where pools are not oversaturated
            assert (pool1_user_rewards_per_block[nonsaturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])
            assert (pool2_user_rewards_per_block[nonsaturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])
            assert (pool3_user_rewards_per_block[nonsaturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])

            assert (pool1_user_rewards_per_block[saturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])
            assert (pool2_user_rewards_per_block[saturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])
            assert (pool3_user_rewards_per_block[saturated_epoch] >
                    pool2_user_rewards_per_block[oversaturated_epoch])

            # check that oversaturated pool doesn't lead to increased rewards for pool owner
            # when compared to saturated pool, i.e. total pool margin amount is not increased
            pool1_rew_fraction_sat = pool1_owner_rewards_per_block[
                saturated_epoch]
            pool2_rew_fraction_sat = pool2_owner_rewards_per_block[
                saturated_epoch]
            pool3_rew_fraction_sat = pool3_owner_rewards_per_block[
                saturated_epoch]

            pool2_rew_fraction_over = pool2_owner_rewards_per_block[
                oversaturated_epoch]

            assert pool2_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
                pool2_rew_fraction_sat,
                pool2_rew_fraction_over,
                frac=0.4,
            )
            assert pool1_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
                pool1_rew_fraction_sat,
                pool2_rew_fraction_over,
                frac=0.4,
            )
            assert pool3_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
                pool3_rew_fraction_sat,
                pool2_rew_fraction_over,
                frac=0.4,
            )

            # Compare rewards in the last (non-saturated) epoch to rewards in the next-to-last
            # (saturated / over-saturated) epoch.
            # This checks that the staked amount for each pool was restored to `initial_balance`
            # and that the rewards correspond to the restored amounts.
            for pool_rec in pool_records.values():
                assert (pool_rec.user_rewards[-1].reward_per_epoch * 100 <
                        pool_rec.user_rewards[-2].reward_per_epoch)
        except Exception:
            # save debugging data in case of test failure
            with open(f"{temp_template}_pool_records.pickle",
                      "wb") as out_data:
                pickle.dump(pool_records, out_data)
            raise
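
A note on the saturation math used above: `saturation_amounts` maps epochs to a value that goes negative once the pool is oversaturated (hence the `r < 0` filter). As a rough sketch (the helper below is illustrative and not part of clusterlib or this test suite), a pool counts as saturated once its live stake reaches the circulating supply divided by the nOpt ("k") protocol parameter:

def saturation_threshold(total_supply_lovelace: int, n_opt: int) -> int:
    """A pool is (over)saturated once its live stake exceeds supply / nOpt."""
    return total_supply_lovelace // n_opt

# e.g. with mainnet-like numbers: 45 billion ADA and nOpt=500
assert saturation_threshold(45_000_000_000_000_000, 500) == 90_000_000_000_000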
Example #13
    def test_pool_blocks(
        self,
        skip_leadership_schedule: None,
        cluster_manager: cluster_management.ClusterManager,
        cluster_use_pool3: clusterlib.ClusterLib,
        for_epoch: str,
    ):
        """Check that blocks were minted according to leadership schedule.

        * query leadership schedule for selected pool for current epoch or next epoch
        * wait for epoch that comes after the queried epoch
        * get info about minted blocks in queried epoch for the selected pool
        * compare leadership schedule with blocks that were actually minted
        * compare db-sync records with ledger state dump
        """
        # pylint: disable=unused-argument
        cluster = cluster_use_pool3
        temp_template = common.get_test_id(cluster)

        pool_name = cluster_management.Resources.POOL3
        pool_rec = cluster_manager.cache.addrs_data[pool_name]
        pool_id = cluster.get_stake_pool_id(
            pool_rec["cold_key_pair"].vkey_file)

        if for_epoch == "current":
            # wait for beginning of an epoch
            queried_epoch = cluster.wait_for_new_epoch(padding_seconds=5)
        else:
            # wait for stable stake distribution for next epoch, i.e. during the last 300 slots
            # of the current epoch
            clusterlib_utils.wait_for_epoch_interval(
                cluster_obj=cluster,
                start=-int(300 * cluster.slot_length),
                stop=-10,
                check_slot=True,
            )
            queried_epoch = cluster.get_epoch() + 1

        # query leadership schedule for selected pool
        # TODO: the query is currently broken
        query_currently_broken = False
        try:
            leadership_schedule = cluster.get_leadership_schedule(
                vrf_skey_file=pool_rec["vrf_key_pair"].skey_file,
                cold_vkey_file=pool_rec["cold_key_pair"].vkey_file,
                for_next=for_epoch != "current",
            )
        except clusterlib.CLIError as err:
            if "currently broken" not in str(err):
                raise
            query_currently_broken = True
        if query_currently_broken:
            pytest.xfail("`query leadership-schedule` is currently broken")

        # wait for epoch that comes after the queried epoch
        cluster.wait_for_new_epoch(
            new_epochs=1 if for_epoch == "current" else 2)

        # get info about minted blocks in queried epoch for the selected pool
        minted_blocks = list(
            dbsync_queries.query_blocks(pool_id_bech32=pool_id,
                                        epoch_from=queried_epoch,
                                        epoch_to=queried_epoch))
        slots_when_minted = {r.slot_no for r in minted_blocks}

        errors: List[str] = []

        # compare leadership schedule with blocks that were actually minted
        slots_when_scheduled = {r.slot_no for r in leadership_schedule}

        difference_scheduled = slots_when_minted.difference(
            slots_when_scheduled)
        if difference_scheduled:
            errors.append(
                f"Some blocks were minted in other slots than scheduled: {difference_scheduled}"
            )

        difference_minted = slots_when_scheduled.difference(slots_when_minted)
        if len(difference_minted) > len(leadership_schedule) // 2:
            errors.append(
                f"More than half of the scheduled slots were missed: {difference_minted}")

        # compare db-sync records with ledger state dump
        ledger_state = clusterlib_utils.get_ledger_state(cluster_obj=cluster)
        clusterlib_utils.save_ledger_state(
            cluster_obj=cluster,
            state_name=temp_template,
            ledger_state=ledger_state,
        )
        blocks_before: Dict[str, int] = ledger_state["blocksBefore"]
        pool_id_dec = helpers.decode_bech32(pool_id)
        minted_blocks_ledger = blocks_before.get(pool_id_dec) or 0
        minted_blocks_db = len(slots_when_minted)
        if minted_blocks_ledger != minted_blocks_db:
            errors.append(
                "Numbers of minted blocks reported by ledger state and db-sync don't match: "
                f"{minted_blocks_ledger} vs {minted_blocks_db}")

        if errors:
            err_joined = "\n".join(errors)
            pytest.fail(f"Errors:\n{err_joined}")
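
The `blocksBefore` map in the ledger state is keyed by hex-encoded pool ids, hence the `decode_bech32` call above. A minimal sketch of what that helper is assumed to do, written against the `bech32` PyPI package (the test suite's own implementation may differ):

import bech32

def decode_bech32(bech32_str: str) -> str:
    """Convert a bech32 string (e.g. a pool id) to its hex form."""
    _hrp, data = bech32.bech32_decode(bech32_str)
    decoded = bech32.convertbits(data, 5, 8, pad=False)
    return bytes(decoded).hex()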
Example #14
    def test_deregister_reward_addr_retire_pool(  # noqa: C901
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_lock_pool2: clusterlib.ClusterLib,
    ):
        """Test deregistering reward address and retiring stake pool.

        The pool deposit is lost when reward address is deregistered before the pool is retired.

        * wait for first reward for the pool
        * withdraw pool rewards to payment address
        * deregister the pool reward address
        * check that the key deposit was returned
        * check that pool owner is NOT receiving rewards
        * deregister stake pool
        * check that the pool deposit was NOT returned to reward or stake address
        * return the pool to the original state - reregister the pool, register
          the reward address, delegate the stake address to the pool
        * check that the pool deposit was needed again when reregistering the pool
        * check that pool owner is receiving rewards
        """
        # pylint: disable=too-many-statements,too-many-locals
        __: Any  # mypy workaround
        pool_name = cluster_management.Resources.POOL2
        cluster = cluster_lock_pool2

        pool_rec = cluster_manager.cache.addrs_data[pool_name]
        pool_reward = clusterlib.PoolUser(payment=pool_rec["payment"],
                                          stake=pool_rec["reward"])
        pool_owner = clusterlib.PoolUser(payment=pool_rec["payment"],
                                         stake=pool_rec["stake"])
        pool_opcert_file: Path = pool_rec["pool_operational_cert"]
        temp_template = common.get_test_id(cluster)

        LOGGER.info("Waiting up to 4 full epochs for first reward.")
        for i in range(5):
            if i > 0:
                cluster.wait_for_new_epoch(padding_seconds=10)
            if cluster.get_stake_addr_info(
                    pool_reward.stake.address).reward_account_balance:
                break
        else:
            pytest.skip(
                f"Pool '{pool_name}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish reward address deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # withdraw pool rewards to payment address
        cluster.withdraw_reward(
            stake_addr_record=pool_reward.stake,
            dst_addr_record=pool_reward.payment,
            tx_name=temp_template,
        )

        # deregister the pool reward address
        stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=pool_reward.stake.vkey_file)
        tx_files_deregister = clusterlib.TxFiles(
            certificate_files=[stake_addr_dereg_cert],
            signing_key_files=[
                pool_reward.payment.skey_file, pool_reward.stake.skey_file
            ],
        )

        src_init_balance = cluster.get_address_balance(
            pool_reward.payment.address)

        tx_raw_deregister_output = cluster.send_tx(
            src_address=pool_reward.payment.address,
            tx_name=f"{temp_template}_dereg_reward",
            tx_files=tx_files_deregister,
        )

        with cluster_manager.restart_on_failure():
            # check that the key deposit was returned
            assert (
                cluster.get_address_balance(
                    pool_reward.payment.address) == src_init_balance -
                tx_raw_deregister_output.fee + cluster.get_address_deposit()
            ), f"Incorrect balance for source address `{pool_reward.payment.address}`"

            # check that the reward address is no longer delegated
            assert not cluster.get_stake_addr_info(
                pool_reward.stake.address), "Stake address still delegated"

            cluster.wait_for_new_epoch(3)

            # check that pool owner is NOT receiving rewards
            assert (cluster.get_stake_addr_info(
                pool_reward.stake.address).reward_account_balance == 0
                    ), "Pool owner received unexpected rewards"

            # fund the pool owner's addresses so the balance stays higher than the pool pledge
            # after fees etc. are deducted
            clusterlib_utils.fund_from_faucet(
                pool_owner,
                cluster_obj=cluster,
                faucet_data=cluster_manager.cache.addrs_data["user1"],
                amount=900_000_000,
                force=True,
            )

            # make sure we have enough time to finish pool deregistration in one epoch
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                     start=5,
                                                     stop=-40)

            src_dereg_balance = cluster.get_address_balance(
                pool_owner.payment.address)
            stake_account_balance = cluster.get_stake_addr_info(
                pool_owner.stake.address).reward_account_balance
            reward_account_balance = cluster.get_stake_addr_info(
                pool_reward.stake.address).reward_account_balance

            node_cold = pool_rec["cold_key_pair"]
            pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)

            # deregister stake pool
            depoch = cluster.get_epoch() + 1
            __, tx_raw_output = cluster.deregister_stake_pool(
                pool_owners=[pool_owner],
                cold_key_pair=node_cold,
                epoch=depoch,
                pool_name=pool_name,
                tx_name=temp_template,
            )
            assert cluster.get_pool_params(pool_id).retiring == depoch

            # check that the pool was deregistered
            cluster.wait_for_new_epoch()
            assert not cluster.get_pool_params(
                pool_id
            ).pool_params, f"The pool {pool_id} was not deregistered"

            # check command kes-period-info case: de-register pool
            # TODO: the query is currently broken
            kes_query_currently_broken = False
            try:
                kes_period_info = cluster.get_kes_period_info(pool_opcert_file)
            except clusterlib.CLIError as err:
                if "currentlyBroken" not in str(err):
                    raise
                kes_query_currently_broken = True

            if not kes_query_currently_broken:
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info,
                    expected_scenario=kes.KesScenarios.ALL_VALID)

            # check that the balance for source address was correctly updated
            assert src_dereg_balance - tx_raw_output.fee == cluster.get_address_balance(
                pool_owner.payment.address)

            # check that the pool deposit was NOT returned to reward or stake address
            assert (cluster.get_stake_addr_info(
                pool_owner.stake.address).reward_account_balance ==
                    stake_account_balance)
            assert (cluster.get_stake_addr_info(
                pool_reward.stake.address).reward_account_balance ==
                    reward_account_balance)

            # Return the pool to the original state - reregister the pool, register
            # the reward address, delegate the stake address to the pool.

            src_updated_balance = cluster.get_address_balance(
                pool_reward.payment.address)

            # reregister the pool by resubmitting the pool registration certificate,
            # delegate stake address to pool again, reregister reward address
            tx_files = clusterlib.TxFiles(
                certificate_files=[
                    pool_rec["reward_addr_registration_cert"],
                    pool_rec["pool_registration_cert"],
                    pool_rec["stake_addr_delegation_cert"],
                ],
                signing_key_files=[
                    pool_rec["payment"].skey_file,
                    pool_rec["stake"].skey_file,
                    pool_rec["reward"].skey_file,
                    node_cold.skey_file,
                ],
            )
            tx_raw_output = cluster.send_tx(
                src_address=pool_reward.payment.address,
                tx_name=f"{temp_template}_rereg_pool",
                tx_files=tx_files,
            )

            # check command kes-period-info case: re-register pool, check without
            # waiting to take effect
            if not kes_query_currently_broken:
                kes_period_info = cluster.get_kes_period_info(pool_opcert_file)
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info,
                    expected_scenario=kes.KesScenarios.ALL_VALID)

            # check that the balance for source address was correctly updated and that the
            # pool deposit was needed
            assert (
                cluster.get_address_balance(
                    pool_reward.payment.address) == src_updated_balance -
                tx_raw_output.fee - cluster.get_pool_deposit() -
                cluster.get_address_deposit()
            ), f"Incorrect balance for source address `{pool_reward.payment.address}`"

            LOGGER.info(
                "Waiting up to 5 epochs for stake pool to be reregistered.")
            for __ in range(5):
                cluster.wait_for_new_epoch(padding_seconds=10)
                if pool_id in cluster.get_stake_distribution():
                    break
            else:
                raise AssertionError(
                    f"Stake pool `{pool_id}` not registered even after 5 epochs."
                )

            # check command kes-period-info case: re-register pool
            if not kes_query_currently_broken:
                kes_period_info = cluster.get_kes_period_info(pool_opcert_file)
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info,
                    expected_scenario=kes.KesScenarios.ALL_VALID)

            # wait before checking delegation and rewards
            cluster.wait_for_new_epoch(3, padding_seconds=30)

            # check that the stake address was delegated
            stake_addr_info = cluster.get_stake_addr_info(
                pool_owner.stake.address)
            assert (
                stake_addr_info.delegation
            ), f"Stake address was not delegated yet: {stake_addr_info}"

            assert pool_id == stake_addr_info.delegation, "Stake address delegated to wrong pool"

            # check that pool owner is receiving rewards
            assert cluster.get_stake_addr_info(
                pool_reward.stake.address
            ).reward_account_balance, "New reward was not received by pool reward address"

        # check that pledge is still met after the owner address was used to pay for Txs
        pool_data = clusterlib_utils.load_registered_pool_data(
            cluster_obj=cluster, pool_name=pool_name, pool_id=pool_id)
        owner_payment_balance = cluster.get_address_balance(
            pool_owner.payment.address)
        assert (owner_payment_balance >= pool_data.pool_pledge
                ), f"Pledge is not met for pool '{pool_name}'!"
Example #15
    def test_no_reward_deregistered_reward_addr(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_lock_pool2: clusterlib.ClusterLib,
    ):
        """Check that the reward address is not receiving rewards when deregistered.

        The stake pool continues to operate normally and those who delegate to that pool receive
        rewards.

        * delegate stake address
        * wait for first reward
        * withdraw pool rewards to payment address
        * deregister the pool reward address
        * check that the key deposit was returned
        * check that pool owner is NOT receiving rewards
        * check that new rewards are received by those delegating to the pool
        * return the pool to the original state - reregister reward address
        * check that pool owner is receiving rewards
        """
        pool_name = cluster_management.Resources.POOL2
        cluster = cluster_lock_pool2

        pool_rec = cluster_manager.cache.addrs_data[pool_name]
        pool_reward = clusterlib.PoolUser(payment=pool_rec["payment"],
                                          stake=pool_rec["reward"])
        temp_template = common.get_test_id(cluster)

        pool_id = delegation.get_pool_id(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=pool_name)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_name}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # withdraw pool rewards to payment address
        # use `transaction build` if possible
        if (VERSIONS.transaction_era >= VERSIONS.ALONZO
                and VERSIONS.transaction_era == VERSIONS.cluster_era):
            clusterlib_utils.withdraw_reward_w_build(
                cluster_obj=cluster,
                stake_addr_record=pool_reward.stake,
                dst_addr_record=pool_reward.payment,
                tx_name=temp_template,
            )
        else:
            cluster.withdraw_reward(
                stake_addr_record=pool_reward.stake,
                dst_addr_record=pool_reward.payment,
                tx_name=temp_template,
            )

        # deregister the pool reward address
        stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=pool_reward.stake.vkey_file)
        tx_files_deregister = clusterlib.TxFiles(
            certificate_files=[stake_addr_dereg_cert],
            signing_key_files=[
                pool_reward.payment.skey_file, pool_reward.stake.skey_file
            ],
        )

        src_init_balance = cluster.get_address_balance(
            pool_reward.payment.address)

        tx_raw_deregister_output = cluster.send_tx(
            src_address=pool_reward.payment.address,
            tx_name=f"{temp_template}_dereg_reward",
            tx_files=tx_files_deregister,
        )

        with cluster_manager.restart_on_failure():
            # check that the key deposit was returned
            assert (
                cluster.get_address_balance(
                    pool_reward.payment.address) == src_init_balance -
                tx_raw_deregister_output.fee + cluster.get_address_deposit()
            ), f"Incorrect balance for source address `{pool_reward.payment.address}`"

            # check that the reward address is no longer delegated
            assert not cluster.get_stake_addr_info(
                pool_reward.stake.address), "Stake address still delegated"

            orig_user_reward = cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance

            cluster.wait_for_new_epoch(3)

            # check that pool owner is NOT receiving rewards
            assert (cluster.get_stake_addr_info(
                pool_reward.stake.address).reward_account_balance == 0
                    ), "Pool owner received unexpected rewards"

            # check that new rewards are received by those delegating to the pool
            assert (orig_user_reward < cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance
                    ), "New reward was not received by stake address"

            # Return the pool to the original state - reregister reward address.

            # fund the pool owner's addresses so the balance stays higher than the pool pledge
            # after fees etc. are deducted
            clusterlib_utils.fund_from_faucet(
                pool_reward,
                cluster_obj=cluster,
                faucet_data=cluster_manager.cache.addrs_data["user1"],
                amount=900_000_000,
                force=True,
            )

            src_updated_balance = cluster.get_address_balance(
                pool_reward.payment.address)

            # reregister reward address
            tx_files = clusterlib.TxFiles(
                certificate_files=[
                    pool_rec["reward_addr_registration_cert"],
                ],
                signing_key_files=[
                    pool_reward.payment.skey_file, pool_reward.stake.skey_file
                ],
            )
            tx_raw_output = cluster.send_tx(
                src_address=pool_reward.payment.address,
                tx_name=f"{temp_template}_rereg_deleg",
                tx_files=tx_files,
            )

            # check that the balance for source address was correctly updated
            assert (
                cluster.get_address_balance(
                    pool_reward.payment.address) == src_updated_balance -
                tx_raw_output.fee - cluster.get_address_deposit()
            ), f"Incorrect balance for source address `{pool_reward.payment.address}`"

            cluster.wait_for_new_epoch(4, padding_seconds=30)

            # check that new rewards were received by those delegating to the pool
            assert (orig_user_reward < cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance
                    ), "New reward was not received by stake address"

            # check that pool owner is also receiving rewards
            assert (cluster.get_stake_addr_info(
                pool_reward.stake.address).reward_account_balance >
                    0), "New reward was not received by pool reward address"

        # check that pledge is still met after the owner address was used to pay for Txs
        pool_data = clusterlib_utils.load_registered_pool_data(
            cluster_obj=cluster, pool_name=pool_name, pool_id=pool_id)
        owner_payment_balance = cluster.get_address_balance(
            pool_reward.payment.address)
        assert (owner_payment_balance >= pool_data.pool_pledge
                ), f"Pledge is not met for pool '{pool_name}'!"
Example #16
    def test_no_reward_unmet_pledge1(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_lock_pool2: clusterlib.ClusterLib,
    ):
        """Check that the stake pool is not receiving rewards when pledge is not met.

        When the pledge is higher than available funds, neither pool owners nor those who
        delegate to that pool receive rewards.

        * delegate stake address
        * wait for first reward
        * increase the needed pledge amount - update the pool parameters by resubmitting the pool
          registration certificate - the funds are now lower than what is needed by the stake pool
        * check that NO new rewards were received by those delegating to the pool
        * check that pool owner is also NOT receiving rewards
        * return the pool to the original state - restore pledge settings
        * check that new rewards were received by those delegating to the pool
        * check that pool owner is also receiving rewards
        """
        pool_name = cluster_management.Resources.POOL2
        cluster = cluster_lock_pool2

        pool_rec = cluster_manager.cache.addrs_data[pool_name]
        pool_owner = clusterlib.PoolUser(payment=pool_rec["payment"],
                                         stake=pool_rec["stake"])
        temp_template = common.get_test_id(cluster)

        pool_id = delegation.get_pool_id(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=pool_name)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_name}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish the pool update in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # load and update original pool data
        loaded_data = clusterlib_utils.load_registered_pool_data(
            cluster_obj=cluster,
            pool_name=f"changed_{pool_name}",
            pool_id=pool_id)
        pool_data_updated = loaded_data._replace(
            pool_pledge=loaded_data.pool_pledge * 9)

        # increase the needed pledge amount - update the pool parameters by resubmitting the pool
        # registration certificate
        cluster.register_stake_pool(
            pool_data=pool_data_updated,
            pool_owners=[pool_owner],
            vrf_vkey_file=pool_rec["vrf_key_pair"].vkey_file,
            cold_key_pair=pool_rec["cold_key_pair"],
            tx_name=f"{temp_template}_update_param",
            reward_account_vkey_file=pool_rec["reward"].vkey_file,
            deposit=0,  # no additional deposit, the pool is already registered
        )

        cluster.wait_for_new_epoch(4, padding_seconds=30)

        orig_owner_reward = cluster.get_stake_addr_info(
            pool_rec["reward"].address).reward_account_balance
        orig_user_reward = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address).reward_account_balance

        cluster.wait_for_new_epoch(3)

        with cluster_manager.restart_on_failure():
            # check that NO new rewards were received by those delegating to the pool
            assert (orig_user_reward == cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance
                    ), "Received unexpected rewards"

            # check that pool owner is also NOT receiving rewards
            assert (orig_owner_reward == cluster.get_stake_addr_info(
                pool_rec["reward"].address).reward_account_balance
                    ), "Pool owner received unexpected rewards"

            # Return the pool to the original state - restore pledge settings.

            # fund the pool owner's addresses so the balance stays higher than the pool pledge
            # after fees etc. are deducted
            clusterlib_utils.fund_from_faucet(
                pool_owner,
                cluster_obj=cluster,
                faucet_data=cluster_manager.cache.addrs_data["user1"],
                amount=900_000_000,
                force=True,
            )

            # update the pool to original parameters by resubmitting
            # the pool registration certificate
            cluster.register_stake_pool(
                pool_data=loaded_data,
                pool_owners=[pool_owner],
                vrf_vkey_file=pool_rec["vrf_key_pair"].vkey_file,
                cold_key_pair=pool_rec["cold_key_pair"],
                tx_name=f"{temp_template}_update_to_orig",
                reward_account_vkey_file=pool_rec["reward"].vkey_file,
                deposit=0,  # no additional deposit, the pool is already registered
            )

            cluster.wait_for_new_epoch(5, padding_seconds=30)

            # check that new rewards were received by those delegating to the pool
            assert (orig_user_reward < cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance
                    ), "New reward was not received by stake address"

            # check that pool owner is also receiving rewards
            assert (orig_owner_reward < cluster.get_stake_addr_info(
                pool_rec["reward"].address).reward_account_balance
                    ), "New reward was not received by pool reward address"

        # check that pledge is still met after the owner address was used to pay for Txs
        pool_data = clusterlib_utils.load_registered_pool_data(
            cluster_obj=cluster, pool_name=pool_name, pool_id=pool_id)
        owner_payment_balance = cluster.get_address_balance(
            pool_owner.payment.address)
        assert (owner_payment_balance >= pool_data.pool_pledge
                ), f"Pledge is not met for pool '{pool_name}'!"
Example #17
    def test_no_reward_unmet_pledge2(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_lock_pool2: clusterlib.ClusterLib,
    ):
        """Check that the stake pool is not receiving rewards when pledge is not met.

        When the pledge is higher than available funds, neither pool owners nor those who
        delegate to that pool receive rewards.

        * delegate stake address
        * wait for first reward
        * withdraw part of the pledge - the funds are lower than what is needed by the stake pool
        * check that NO new rewards were received by those delegating to the pool
        * check that pool owner is also NOT receiving rewards
        * return the pool to the original state - restore pledge funds
        * check that new rewards were received by those delegating to the pool
        * check that pool owner is also receiving rewards
        """
        pool_name = cluster_management.Resources.POOL2
        cluster = cluster_lock_pool2

        pool_rec = cluster_manager.cache.addrs_data[pool_name]
        pool_owner = clusterlib.PoolUser(payment=pool_rec["payment"],
                                         stake=pool_rec["stake"])
        temp_template = common.get_test_id(cluster)

        pool_id = delegation.get_pool_id(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=pool_name)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_name}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to withdraw the pledge in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # load pool data
        loaded_data = clusterlib_utils.load_registered_pool_data(
            cluster_obj=cluster,
            pool_name=f"changed_{pool_name}",
            pool_id=pool_id)

        pledge_amount = loaded_data.pool_pledge // 2

        # withdraw part of the pledge
        destinations = [
            clusterlib.TxOut(address=delegation_out.pool_user.payment.address,
                             amount=pledge_amount)
        ]
        tx_files = clusterlib.TxFiles(
            signing_key_files=[pool_owner.payment.skey_file])
        cluster.send_funds(
            src_address=pool_owner.payment.address,
            destinations=destinations,
            tx_name=f"{temp_template}_withdraw_pledge",
            tx_files=tx_files,
        )

        assert cluster.get_address_balance(
            pool_owner.payment.address
        ) < loaded_data.pool_pledge, (
            f"Pledge still high - pledge: {loaded_data.pool_pledge}, "
            f"funds: {cluster.get_address_balance(pool_owner.payment.address)}"
        )

        cluster.wait_for_new_epoch(4, padding_seconds=30)

        orig_owner_reward = cluster.get_stake_addr_info(
            pool_rec["reward"].address).reward_account_balance
        orig_user_reward = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address).reward_account_balance

        cluster.wait_for_new_epoch(3)

        with cluster_manager.restart_on_failure():
            # check that NO new rewards were received by those delegating to the pool
            assert (orig_user_reward == cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance
                    ), "Received unexpected rewards"

            # check that pool owner is also NOT receiving rewards
            assert (orig_owner_reward == cluster.get_stake_addr_info(
                pool_rec["reward"].address).reward_account_balance
                    ), "Pool owner received unexpected rewards"

            # Return the pool to the original state - restore pledge funds.

            # fund user address so it has enough funds for fees etc.
            clusterlib_utils.fund_from_faucet(
                delegation_out.pool_user,
                cluster_obj=cluster,
                faucet_data=cluster_manager.cache.addrs_data["user1"],
                amount=900_000_000,
                force=True,
            )

            # return pledge
            destinations = [
                clusterlib.TxOut(address=pool_owner.payment.address,
                                 amount=pledge_amount + 100_000_000)
            ]
            tx_files = clusterlib.TxFiles(
                signing_key_files=[delegation_out.pool_user.payment.skey_file])
            cluster.send_funds(
                src_address=delegation_out.pool_user.payment.address,
                destinations=destinations,
                tx_name=f"{temp_template}_return_pledge",
                tx_files=tx_files,
            )

            assert (
                cluster.get_address_balance(
                    pool_owner.payment.address) >= loaded_data.pool_pledge
            ), (f"Funds still low - pledge: {loaded_data.pool_pledge}, "
                f"funds: {cluster.get_address_balance(pool_owner.payment.address)}"
                )

            cluster.wait_for_new_epoch(5, padding_seconds=30)

            # check that new rewards were received by those delegating to the pool
            assert (orig_user_reward < cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance
                    ), "New reward was not received by stake address"

            # check that pool owner is also receiving rewards
            assert (orig_owner_reward < cluster.get_stake_addr_info(
                pool_rec["reward"].address).reward_account_balance
                    ), "New reward was not received by pool reward address"
Example #18
    def test_delegate_deregister(
        self,
        cluster_lock_42stake: Tuple[clusterlib.ClusterLib, str],
        pool_user: delegation.PoolUserScript,
    ):
        """Delegate and deregister Plutus script stake address.

        * submit registration certificate and delegate stake address to pool
        * check that the stake address was delegated
        * withdraw rewards to payment address and deregister stake address
        * check that the key deposit was returned and rewards withdrawn
        * check that the stake address is no longer delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_lock_42stake
        temp_template = common.get_test_id(cluster)

        collateral_fund_deleg = 1_500_000_000
        collateral_fund_withdraw = 1_500_000_000
        collateral_fund_dereg = 1_500_000_000
        deleg_fund = 1_500_000_000
        dereg_fund = 1_500_000_000

        if cluster.get_stake_addr_info(pool_user.stake.address):
            pytest.skip(
                f"The Plutus script stake address '{pool_user.stake.address}' is already "
                "registered, cannot continue.")

        # Step 1: create Tx inputs for step 2 and step 3
        txouts_step1 = [
            # for collateral
            clusterlib.TxOut(address=pool_user.payment.address,
                             amount=collateral_fund_deleg),
            clusterlib.TxOut(address=pool_user.payment.address,
                             amount=collateral_fund_withdraw),
            clusterlib.TxOut(address=pool_user.payment.address,
                             amount=collateral_fund_dereg),
            clusterlib.TxOut(address=pool_user.payment.address,
                             amount=collateral_fund_dereg),
            # for delegation
            clusterlib.TxOut(address=pool_user.payment.address,
                             amount=deleg_fund),
            # for deregistration
            clusterlib.TxOut(address=pool_user.payment.address,
                             amount=dereg_fund),
        ]

        tx_files_step1 = clusterlib.TxFiles(
            signing_key_files=[pool_user.payment.skey_file], )
        tx_output_step1 = cluster.build_tx(
            src_address=pool_user.payment.address,
            tx_name=f"{temp_template}_step1",
            tx_files=tx_files_step1,
            txouts=txouts_step1,
            fee_buffer=2_000_000,
            # don't join 'change' and 'collateral' txouts, we need separate UTxOs
            join_txouts=False,
        )
        tx_signed_step1 = cluster.sign_tx(
            tx_body_file=tx_output_step1.out_file,
            signing_key_files=tx_files_step1.signing_key_files,
            tx_name=f"{temp_template}_step1",
        )
        cluster.submit_tx(tx_file=tx_signed_step1, txins=tx_output_step1.txins)

        txid_step1 = cluster.get_txid(tx_body_file=tx_output_step1.out_file)
        collateral_deleg = cluster.get_utxo(txin=f"{txid_step1}#1")
        collateral_withdraw = cluster.get_utxo(txin=f"{txid_step1}#2")
        collateral_dereg = cluster.get_utxo(txin=f"{txid_step1}#3")
        deleg_utxos = cluster.get_utxo(txin=f"{txid_step1}#4")
        dereg_utxos = cluster.get_utxo(txin=f"{txid_step1}#5")

        # Step 2: register and delegate

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        tx_raw_delegation_out, plutus_cost_deleg = delegate_stake_addr(
            cluster_obj=cluster,
            temp_template=temp_template,
            txins=deleg_utxos,
            collaterals=collateral_deleg,
            pool_user=pool_user,
            pool_id=pool_id,
            redeemer_file=plutus_common.REDEEMER_42,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        tx_db_record = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=tx_raw_delegation_out)
        delegation.db_check_delegation(
            pool_user=pool_user,
            db_record=tx_db_record,
            deleg_epoch=init_epoch,
            pool_id=pool_id,
        )

        # Step 3: withdraw rewards and deregister

        reward_error = ""

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                pool_user.stake.address).reward_account_balance:
            reward_error = f"User of pool '{pool_id}' hasn't received any rewards."

        # make sure we have enough time to finish deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # submit deregistration certificate and withdraw rewards
        tx_raw_deregister_out = deregister_stake_addr(
            cluster_obj=cluster,
            temp_template=temp_template,
            txins=dereg_utxos,
            collaterals=[*collateral_withdraw, *collateral_dereg],
            pool_user=pool_user,
            redeemer_file=plutus_common.REDEEMER_42,
        )

        if reward_error:
            raise AssertionError(reward_error)

        # check tx_view of step 2 and step 3
        tx_view.check_tx_view(cluster_obj=cluster,
                              tx_raw_output=tx_raw_delegation_out)
        tx_view.check_tx_view(cluster_obj=cluster,
                              tx_raw_output=tx_raw_deregister_out)

        # compare cost of Plutus script with data from db-sync
        if tx_db_record:
            dbsync_utils.check_plutus_cost(
                redeemer_record=tx_db_record.redeemers[0],
                cost_record=plutus_cost_deleg[0])
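
For context, the script stake address in `pool_user` is assumed to be built from the Plutus script itself rather than a stake key, so delegation and deregistration have to be authorized by the script (here via the `REDEEMER_42` redeemer carrying the expected value 42). A hedged sketch of generating such an address with clusterlib; the script file name is hypothetical:

# illustrative only; the fixture in the test suite builds the address its own way
stake_script_addr = cluster.gen_stake_addr(
    addr_name=f"{temp_template}_script",
    stake_script_file="guess42.plutus",  # hypothetical script file name
)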