Example no. 1
    def test_slot_length(self, cluster_slot_length: clusterlib.ClusterLib):
        """Test the *slotLength* configuration."""
        cluster = cluster_slot_length
        common.get_test_id(cluster)

        assert cluster.slot_length == 0.3
        assert cluster.epoch_length == 1_000
        check_epoch_length(cluster)
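
check_epoch_length is a helper defined elsewhere in the suite. A minimal sketch of what such a check could look like (a hypothetical implementation, assuming only the clusterlib attributes and helpers already used in these examples):

import time

from cardano_clusterlib import clusterlib


def check_epoch_length(cluster_obj: clusterlib.ClusterLib) -> None:
    """Check that one full epoch takes ~ epoch_length * slot_length seconds.

    A sketch only; the real helper in the test suite may differ.
    """
    expected_sec = cluster_obj.epoch_length * cluster_obj.slot_length

    cluster_obj.wait_for_new_epoch()  # align to an epoch boundary
    start = time.time()
    cluster_obj.wait_for_new_epoch()  # measure one whole epoch
    elapsed = time.time() - start

    # tolerate small scheduling jitter
    assert abs(elapsed - expected_sec) <= expected_sec * 0.05, (
        f"epoch took {elapsed:.1f}s, expected ~{expected_sec:.1f}s"
    )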
Example no. 2
    def test_whole_utxo(self, cluster: clusterlib.ClusterLib):
        """Check that it is possible to return the whole UTxO on local cluster."""
        if cluster.protocol != clusterlib.Protocols.CARDANO:
            pytest.skip("runs on cluster in full cardano mode")

        common.get_test_id(cluster)

        cluster.cli(
            [
                "query",
                "utxo",
                "--whole-utxo",
                *cluster.magic_args,
            ]
        )
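
cluster.magic_args carries the network selector for cardano-cli. On a local testnet it typically expands to something like the following (the magic number itself is illustrative):

# illustrative value of cluster.magic_args on a local testnet
magic_args = ["--testnet-magic", "42"]
# the cluster.cli() call above then runs, roughly:
#   cardano-cli query utxo --whole-utxo --testnet-magic 42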
Example no. 3
def pool_user(
    cluster_manager: cluster_management.ClusterManager,
    cluster_lock_42stake: Tuple[clusterlib.ClusterLib, str],
) -> delegation.PoolUserScript:
    """Create pool user."""
    cluster, *__ = cluster_lock_42stake
    test_id = common.get_test_id(cluster)

    script_stake_address = cluster.gen_stake_addr(
        addr_name=f"{test_id}_pool_user",
        stake_script_file=plutus_common.STAKE_GUESS_42_PLUTUS_V1,
    )
    payment_addr_rec = cluster.gen_payment_addr_and_keys(
        name=f"{test_id}_pool_user",
        stake_script_file=plutus_common.STAKE_GUESS_42_PLUTUS_V1,
    )
    pool_user = delegation.PoolUserScript(
        payment=payment_addr_rec,
        stake=delegation.AddressRecordScript(
            address=script_stake_address,
            script_file=plutus_common.STAKE_GUESS_42_PLUTUS_V1,
        ),
    )

    # fund source addresses
    clusterlib_utils.fund_from_faucet(
        payment_addr_rec,
        cluster_obj=cluster,
        faucet_data=cluster_manager.cache.addrs_data["user1"],
        amount=10_000_000_000,
    )

    return pool_user
Example no. 4
    def test_lock_tx_invalid_datum(
        self,
        cluster: clusterlib.ClusterLib,
        payment_addrs: List[clusterlib.AddressRecord],
        datum_value: str,
    ):
        """Test locking a Tx output with an invalid datum.

        Expect failure.
        """
        temp_template = common.get_test_id(cluster)

        datum_file = f"{temp_template}.datum"
        with open(datum_file, "w", encoding="utf-8") as outfile:
            json.dump(f'{{"{datum_value}"}}', outfile)

        plutus_op = plutus_common.PlutusOp(
            script_file=plutus_common.ALWAYS_SUCCEEDS_PLUTUS_V2,
            datum_file=Path(datum_file),
            redeemer_cbor_file=plutus_common.REDEEMER_42_CBOR,
            execution_cost=plutus_common.ALWAYS_SUCCEEDS_COST,
        )

        # create a Tx output with an inline datum at the script address

        with pytest.raises(clusterlib.CLIError) as excinfo:
            _build_fund_script(
                temp_template=temp_template,
                cluster=cluster,
                payment_addr=payment_addrs[0],
                dst_addr=payment_addrs[1],
                plutus_op=plutus_op,
            )
        err_str = str(excinfo.value)
        assert "JSON object expected. Unexpected value" in err_str, err_str
Example no. 5
    def test_lock_tx_big_datum(
        self,
        cluster: clusterlib.ClusterLib,
        payment_addrs: List[clusterlib.AddressRecord],
        datum_content: str,
    ):
        """Test locking a Tx output with a datum bigger than the allowed size.

        Expect failure.
        """
        hypothesis.assume(datum_content)
        temp_template = common.get_test_id(cluster)

        plutus_op = plutus_common.PlutusOp(
            script_file=plutus_common.ALWAYS_SUCCEEDS_PLUTUS_V2,
            datum_value=f'"{datum_content}"',
            redeemer_cbor_file=plutus_common.REDEEMER_42_CBOR,
            execution_cost=plutus_common.ALWAYS_SUCCEEDS_COST,
        )

        # create a Tx output with an inline datum at the script address

        with pytest.raises(clusterlib.CLIError) as excinfo:
            _build_fund_script(
                temp_template=temp_template,
                cluster=cluster,
                payment_addr=payment_addrs[0],
                dst_addr=payment_addrs[1],
                plutus_op=plutus_op,
            )
        err_str = str(excinfo.value)
        assert "Byte strings in script data must consist of at most 64 bytes" in err_str, err_str
Example no. 6
    def test_register_addr_with_wrong_key(
        self,
        cluster: clusterlib.ClusterLib,
        pool_users: List[clusterlib.PoolUser],
        pool_users_disposable: List[clusterlib.PoolUser],
    ):
        """Try to register stake address using wrong payment skey.

        Expect failure.
        """
        temp_template = common.get_test_id(cluster)

        user_registered = pool_users_disposable[0]
        user_payment = pool_users[0].payment

        # create stake address registration cert
        stake_addr_reg_cert_file = cluster.gen_stake_addr_registration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=user_registered.stake.vkey_file)

        # register stake address, use wrong payment skey
        tx_files = clusterlib.TxFiles(
            certificate_files=[stake_addr_reg_cert_file],
            signing_key_files=[pool_users[1].payment.skey_file],
        )

        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.send_tx(src_address=user_payment.address,
                            tx_name=temp_template,
                            tx_files=tx_files)
        assert "MissingVKeyWitnessesUTXOW" in str(excinfo.value)
Example no. 7
    def test_address_info_script(self, cluster: clusterlib.ClusterLib):
        """Check script address info."""
        temp_template = common.get_test_id(cluster)

        # create payment address
        payment_rec = cluster.gen_payment_addr_and_keys(
            name=temp_template,
        )

        # create multisig script
        multisig_script = cluster.build_multisig_script(
            script_name=temp_template,
            script_type_arg=clusterlib.MultiSigTypeArgs.ALL,
            payment_vkey_files=[payment_rec.vkey_file],
            slot=100,
            slot_type_arg=clusterlib.MultiSlotTypeArgs.AFTER,
        )

        # create script address
        address = cluster.gen_payment_addr(
            addr_name=temp_template, payment_script_file=multisig_script
        )

        addr_info = cluster.get_address_info(address=address)

        assert addr_info.address == address
        assert addr_info.era == "shelley"
        assert addr_info.encoding == "bech32"
        assert addr_info.type == "payment"
Example no. 8
    def test_protocol_state_keys(self, cluster: clusterlib.ClusterLib):
        """Check output of `query protocol-state`."""
        common.get_test_id(cluster)

        # TODO: the query is currently broken
        query_currently_broken = False
        try:
            protocol_state = cluster.get_protocol_state()
        except clusterlib.CLIError as err:
            if "currentlyBroken" not in str(err):
                raise
            query_currently_broken = True
        if query_currently_broken:
            pytest.xfail("`query protocol-state` is currently broken")

        assert tuple(sorted(protocol_state)) == PROTOCOL_STATE_KEYS
Example no. 9
    def past_horizon_funds(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
        payment_addrs: List[clusterlib.AddressRecord],
    ) -> Tuple[List[clusterlib.UTXOData], List[clusterlib.UTXOData],
               clusterlib.TxRawOutput]:
        """Create UTxOs for `test_past_horizon`."""
        with cluster_manager.cache_fixture() as fixture_cache:
            if fixture_cache.value:
                return fixture_cache.value  # type: ignore

            temp_template = common.get_test_id(cluster)
            payment_addr = payment_addrs[0]
            issuer_addr = payment_addrs[1]

            script_fund = 200_000_000

            minting_cost = plutus_common.compute_cost(
                execution_cost=plutus_common.MINTING_WITNESS_REDEEMER_COST,
                protocol_params=cluster.get_protocol_params(),
            )
            mint_utxos, collateral_utxos, tx_raw_output = _fund_issuer(
                cluster_obj=cluster,
                temp_template=temp_template,
                payment_addr=payment_addr,
                issuer_addr=issuer_addr,
                minting_cost=minting_cost,
                amount=script_fund,
            )

            retval = mint_utxos, collateral_utxos, tx_raw_output
            fixture_cache.value = retval

        return retval
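
The cache_fixture context manager above implements a check-or-compute pattern: reuse the cached value if an earlier test already built the UTxOs, otherwise build them and store the result before leaving the with block. Reduced to a skeleton (expensive_setup is a hypothetical stand-in for the funding calls above):

with cluster_manager.cache_fixture() as fixture_cache:
    if fixture_cache.value:          # an earlier test already paid the setup cost
        return fixture_cache.value   # type: ignore

    retval = expensive_setup()       # hypothetical stand-in for _fund_issuer etc.
    fixture_cache.value = retval     # persist for subsequent tests

return retval                        # the first caller returns the fresh value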
Example no. 10
    def test_protocol_mode(self, cluster: clusterlib.ClusterLib):
        """Check the default protocol mode - command works even without specifying protocol mode."""
        if cluster.protocol != clusterlib.Protocols.CARDANO:
            pytest.skip("runs on cluster in full cardano mode")

        common.get_test_id(cluster)

        cluster.cli(
            [
                "query",
                "utxo",
                "--address",
                "addr_test1vpst87uzwafqkxumyf446zr2jsyn44cfpu9fe8yqanyuh6glj2hkl",
                *cluster.magic_args,
            ]
        )
Example no. 11
    def test_no_kes_period_arg(
        self,
        cluster: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Try to generate new operational certificate without specifying the `--kes-period`.

        Expect failure.
        """
        pool_name = cluster_management.Resources.POOL2
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        temp_template = common.get_test_id(cluster)
        out_file = Path(f"{temp_template}_shouldnt_exist.opcert")

        # try to generate new operational certificate without specifying the `--kes-period`
        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.cli(
                [
                    "node",
                    "issue-op-cert",
                    "--kes-verification-key-file",
                    str(pool_rec["kes_key_pair"].vkey_file),
                    "--cold-signing-key-file",
                    str(pool_rec["cold_key_pair"].skey_file),
                    "--operational-certificate-issue-counter",
                    str(pool_rec["cold_key_pair"].counter_file),
                    "--out-file",
                    str(out_file),
                ]
            )
        assert "Missing: --kes-period NATURAL" in str(excinfo.value)
        assert not out_file.exists(), "New operational certificate was generated"
Example no. 12
    def test_tx_view(self, cluster: clusterlib.ClusterLib):
        """Check that the output of `transaction view` is as expected."""
        common.get_test_id(cluster)

        tx_body = cluster.view_tx(tx_body_file=self.TX_BODY_FILE)
        tx = cluster.view_tx(tx_file=self.TX_FILE)

        if "payment credential key hash" in tx_body:
            with open(self.TX_BODY_OUT, encoding="utf-8") as infile:
                tx_body_view_out = infile.read()
            assert tx_body == tx_body_view_out.strip()

        if "witnesses:" in tx:
            with open(self.TX_OUT, encoding="utf-8") as infile:
                tx_view_out = infile.read()
            assert tx == tx_view_out.strip()
        else:
            assert tx == tx_body
Example no. 13
def pool_users_disposable(
    cluster: clusterlib.ClusterLib,
) -> List[clusterlib.PoolUser]:
    """Create function-scoped pool users."""
    test_id = common.get_test_id(cluster)
    pool_users = clusterlib_utils.create_pool_users(
        cluster_obj=cluster,
        name_template=f"{test_id}_pool_user",
        no_of_addr=2,
    )
    return pool_users
Example no. 14
    def test_unstable_stake_distribution(
        self,
        skip_leadership_schedule: None,
        cluster_manager: cluster_management.ClusterManager,
        cluster: clusterlib.ClusterLib,
    ):
        """Try to query leadership schedule for next epoch when stake distribution is unstable.

        Expect failure.
        """
        # pylint: disable=unused-argument
        common.get_test_id(cluster)

        pool_name = cluster_management.Resources.POOL3
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        # wait for an epoch interval where the stake distribution for the next epoch
        # is unstable, i.e. any time before the last 300 slots of the current epoch
        clusterlib_utils.wait_for_epoch_interval(
            cluster_obj=cluster,
            start=5,
            stop=-int(300 * cluster.slot_length + 5),
        )

        # it should NOT be possible to query leadership schedule
        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.get_leadership_schedule(
                vrf_skey_file=pool_rec["vrf_key_pair"].skey_file,
                cold_vkey_file=pool_rec["cold_key_pair"].vkey_file,
                for_next=True,
            )
        err_str = str(excinfo.value)

        # TODO: the query is currently broken
        if "currently broken" in err_str:
            pytest.xfail("`query leadership-schedule` is currently broken")
        if "PastHorizon" in err_str:
            pytest.xfail(
                "`query leadership-schedule` is affected by cardano-node issue 4002"
            )

        assert "current stake distribution is currently unstable" in err_str, err_str
Example no. 15
    def test_registration_cert_with_wrong_key(
        self,
        cluster: clusterlib.ClusterLib,
        pool_users: List[clusterlib.PoolUser],
    ):
        """Try to generate stake address registration certificate using wrong stake vkey.

        Expect failure.
        """
        temp_template = common.get_test_id(cluster)

        # create stake address registration cert, use wrong stake vkey
        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.gen_stake_addr_registration_cert(
                addr_name=f"{temp_template}_addr0",
                stake_vkey_file=pool_users[0].payment.vkey_file)
        assert "Expected: StakeVerificationKeyShelley" in str(excinfo.value)
Example no. 16
    def test_non_extended_key_error(self, cluster: clusterlib.ClusterLib):
        """Try to get a non-extended verification key with a signing key file.

        Expect failure. Only extended verification key files should be accepted.
        """
        temp_template = common.get_test_id(cluster)

        # get an extended key
        payment_keys = cluster.gen_payment_key_pair(
            key_name=f"{temp_template}_extended", extended=True
        )

        # try to get a non-extended verification key using the extended signing key
        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.gen_non_extended_verification_key(
                key_name=temp_template, extended_verification_key_file=payment_keys.skey_file
            )

        assert "TextEnvelope type error:  Expected one of:" in str(excinfo.value)
Example no. 17
    def test_shelley_cddl(self, cluster: clusterlib.ClusterLib,
                          payment_addrs: List[clusterlib.AddressRecord]):
        """Check expected failure when Shelley Tx is used with CDDL format."""
        temp_template = common.get_test_id(cluster)

        src_address = payment_addrs[0].address
        dst_address = payment_addrs[1].address

        # amount value -1 means all available funds
        destinations = [clusterlib.TxOut(address=dst_address, amount=-1)]
        tx_files = clusterlib.TxFiles(
            signing_key_files=[payment_addrs[1].skey_file])

        fee = cluster.calculate_tx_fee(
            src_address=src_address,
            tx_name=temp_template,
            txouts=destinations,
            tx_files=tx_files,
        )

        orig_cddl_value = cluster.use_cddl
        try:
            cluster.use_cddl = True
            tx_raw_output = cluster.build_raw_tx(
                src_address=src_address,
                tx_name=temp_template,
                txouts=destinations,
                tx_files=tx_files,
                fee=fee,
            )
        finally:
            cluster.use_cddl = orig_cddl_value

        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.sign_tx(
                tx_body_file=tx_raw_output.out_file,
                signing_key_files=tx_files.signing_key_files,
                tx_name=temp_template,
            )
        if "TextEnvelope error" in str(excinfo.value):
            pytest.xfail("TextEnvelope error")
        else:
            pytest.fail(f"Unexpected error:\n{excinfo.value}")
Example no. 18
    def test_address_info_payment(self, cluster: clusterlib.ClusterLib, addr_gen: str):
        """Check payment address info."""
        temp_template = common.get_test_id(cluster)

        if addr_gen == "static":
            address = "addr_test1vzp4kj0rmnl5q5046e2yy697fndej56tm35jekemj6ew2gczp74wk"
        else:
            payment_rec = cluster.gen_payment_addr_and_keys(
                name=temp_template,
            )
            address = payment_rec.address

        addr_info = cluster.get_address_info(address=address)

        assert addr_info.address == address
        assert addr_info.era == "shelley"
        assert addr_info.encoding == "bech32"
        assert addr_info.type == "payment"
        if addr_gen == "static":
            assert addr_info.base16 == "60835b49e3dcff4051f5d6544268be4cdb99534bdc692cdb3b96b2e523"
Example no. 19
def payment_addrs(
    cluster_manager: cluster_management.ClusterManager,
    cluster: clusterlib.ClusterLib,
) -> List[clusterlib.AddressRecord]:
    """Create new payment addresses."""
    test_id = common.get_test_id(cluster)
    addrs = clusterlib_utils.create_payment_addr_records(
        *[f"{test_id}_payment_addr_{i}" for i in range(2)],
        cluster_obj=cluster,
    )

    # fund source address
    clusterlib_utils.fund_from_faucet(
        addrs[0],
        cluster_obj=cluster,
        faucet_data=cluster_manager.cache.addrs_data["user1"],
        amount=3_000_000_000,
    )

    return addrs
Example no. 20
    def test_address_info_stake(self, cluster: clusterlib.ClusterLib, addr_gen: str):
        """Check stake address info."""
        temp_template = common.get_test_id(cluster)

        if addr_gen == "static":
            address = "stake_test1uz5mstpskyhpcvaw2enlfk8fa5k335cpd0lfz6chd5c2xpck3nld4"
        else:
            stake_rec = cluster.gen_stake_addr_and_keys(
                name=temp_template,
            )
            address = stake_rec.address

        addr_info = cluster.get_address_info(address=address)

        assert addr_info.address == address
        assert addr_info.era == "shelley"
        assert addr_info.encoding == "bech32"
        assert addr_info.type == "stake"
        if addr_gen == "static":
            assert addr_info.base16 == "e0a9b82c30b12e1c33ae5667f4d8e9ed2d18d3016bfe916b176d30a307"
Example no. 21
    def test_lock_tx_big_datum(
        self,
        cluster: clusterlib.ClusterLib,
        payment_addrs: List[clusterlib.AddressRecord],
        datum_content: str,
    ):
        """Test locking a Tx output with a datum bigger than the allowed size.

        Expect failure.
        """
        hypothesis.assume(datum_content)
        temp_template = common.get_test_id(cluster)
        amount = 2_000_000

        plutus_op = plutus_common.PlutusOp(
            script_file=plutus_common.ALWAYS_SUCCEEDS_PLUTUS_V2,
            datum_value=f'"{datum_content}"',
            redeemer_cbor_file=plutus_common.REDEEMER_42_CBOR,
            execution_cost=plutus_common.ALWAYS_SUCCEEDS_COST,
        )

        # for mypy
        assert plutus_op.execution_cost

        redeem_cost = plutus_common.compute_cost(
            execution_cost=plutus_op.execution_cost, protocol_params=cluster.get_protocol_params()
        )

        with pytest.raises(clusterlib.CLIError) as excinfo:
            _fund_script(
                temp_template=temp_template,
                cluster=cluster,
                payment_addr=payment_addrs[0],
                dst_addr=payment_addrs[1],
                plutus_op=plutus_op,
                amount=amount,
                redeem_cost=redeem_cost,
                use_inline_datum=True,
            )
        err_str = str(excinfo.value)
        assert "Byte strings in script data must consist of at most 64 bytes" in err_str, err_str
Example no. 22
    def test_non_extended_key_valid(self, cluster: clusterlib.ClusterLib):
        """Check that the non-extended verification key is according the verification key."""
        temp_template = common.get_test_id(cluster)

        # get an extended verification key
        payment_keys = cluster.gen_payment_key_pair(
            key_name=f"{temp_template}_extended", extended=True
        )

        with open(payment_keys.vkey_file, encoding="utf-8") as in_file:
            # ignore the first 4 chars (the CBOR byte-string header)
            extended_vkey = json.loads(in_file.read().strip()).get("cborHex", "")[4:]

        # get a non-extended verification key using the extended key
        non_extended_key_file = cluster.gen_non_extended_verification_key(
            key_name=temp_template, extended_verification_key_file=payment_keys.vkey_file
        )

        with open(non_extended_key_file, encoding="utf-8") as in_file:
            # ignore the first 4 chars (the CBOR byte-string header)
            non_extended_vkey = json.loads(in_file.read().strip()).get("cborHex", "")[4:]

        assert extended_vkey.startswith(non_extended_vkey)
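
The [4:] slices strip the CBOR byte-string header from cborHex: 5840 introduces a 64-byte string (the extended key) and 5820 a 32-byte string (the non-extended key), so once the headers are stripped the shorter key is a prefix of the longer one. A toy illustration with a dummy payload:

extended_hex = "5840" + "ab" * 64      # CBOR header for 64 bytes + dummy payload
non_extended_hex = "5820" + "ab" * 32  # CBOR header for 32 bytes + dummy payload
assert extended_hex[4:].startswith(non_extended_hex[4:])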
Example no. 23
    def test_update_valid_opcert(
        self,
        cluster_lock_pool2: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Update a valid operational certificate with another valid operational certificate.

        * generate new operational certificate with valid `--kes-period`
        * copy new operational certificate to the node
        * stop the node so the corresponding pool is not minting new blocks
        * check `kes-period-info` while the pool is not minting blocks
        * start the node with the new operational certificate
        * check that the pool is minting blocks again
        * check that metrics reported by `kes-period-info` got updated once the pool started
          minting blocks again
        * check `kes-period-info` with the old (replaced) operational certificate
        """
        # pylint: disable=too-many-statements
        pool_name = cluster_management.Resources.POOL2
        node_name = "pool2"
        cluster = cluster_lock_pool2

        temp_template = common.get_test_id(cluster)
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        node_cold = pool_rec["cold_key_pair"]
        stake_pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)
        stake_pool_id_dec = helpers.decode_bech32(stake_pool_id)

        opcert_file = pool_rec["pool_operational_cert"]
        opcert_file_old = shutil.copy(opcert_file, f"{opcert_file}_old")

        with cluster_manager.restart_on_failure():
            # generate new operational certificate with valid `--kes-period`
            new_opcert_file = cluster.gen_node_operational_cert(
                node_name=f"{node_name}_new_opcert_file",
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=pool_rec["cold_key_pair"].counter_file,
                kes_period=cluster.get_kes_period(),
            )

            # copy new operational certificate to the node
            logfiles.add_ignore_rule(
                files_glob="*.stdout",
                regex="MuxBearerClosed",
                ignore_file_id=cluster_manager.worker_id,
            )
            shutil.copy(new_opcert_file, opcert_file)

            # stop the node so the corresponding pool is not minting new blocks
            cluster_nodes.stop_nodes([node_name])

            time.sleep(10)

            # check kes-period-info while the pool is not minting blocks
            # TODO: the query is currently broken
            kes_query_currently_broken = False
            try:
                kes_period_info_new = cluster.get_kes_period_info(opcert_file)
            except clusterlib.CLIError as err:
                if "currentlyBroken" not in str(err):
                    raise
                kes_query_currently_broken = True

            if not kes_query_currently_broken:
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info_new, expected_scenario=kes.KesScenarios.ALL_VALID
                )
                kes_period_info_old = cluster.get_kes_period_info(opcert_file_old)
                kes.check_kes_period_info_result(
                    kes_output=kes_period_info_old, expected_scenario=kes.KesScenarios.ALL_VALID
                )
                assert (
                    kes_period_info_new["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                    == kes_period_info_old["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                )

            # start the node with the new operational certificate
            cluster_nodes.start_nodes([node_name])

            # make sure we are not at the very end of an epoch so we still have time for
            # the first block production check
            clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-18)

            LOGGER.info("Checking blocks production for 5 epochs.")
            blocks_made_db = []
            this_epoch = -1
            updated_epoch = cluster.get_epoch()
            for __ in range(5):
                # wait for next epoch
                if cluster.get_epoch() == this_epoch:
                    cluster.wait_for_new_epoch()

                # wait for the end of the epoch
                clusterlib_utils.wait_for_epoch_interval(
                    cluster_obj=cluster, start=-19, stop=-15, force_epoch=True
                )
                this_epoch = cluster.get_epoch()

                ledger_state = clusterlib_utils.get_ledger_state(cluster_obj=cluster)

                # save ledger state
                clusterlib_utils.save_ledger_state(
                    cluster_obj=cluster,
                    state_name=f"{temp_template}_{this_epoch}",
                    ledger_state=ledger_state,
                )

                # check that the pool is minting blocks
                blocks_made = ledger_state["blocksCurrent"]
                blocks_made_db.append(stake_pool_id_dec in blocks_made)

            assert any(
                blocks_made_db
            ), f"The pool '{pool_name}' has not minted any blocks since epoch {updated_epoch}"

        if kes_query_currently_broken:
            pytest.xfail("`query kes-period-info` is currently broken")
        else:
            # check that metrics reported by kes-period-info got updated once the pool started
            # minting blocks again
            kes_period_info_updated = cluster.get_kes_period_info(opcert_file)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info_updated, expected_scenario=kes.KesScenarios.ALL_VALID
            )
            assert (
                kes_period_info_updated["metrics"]["qKesNodeStateOperationalCertificateNumber"]
                != kes_period_info_old["metrics"]["qKesNodeStateOperationalCertificateNumber"]
            )

            # check kes-period-info with operational certificate with a wrong counter
            kes_period_info_invalid = cluster.get_kes_period_info(opcert_file_old)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info_invalid,
                expected_scenario=kes.KesScenarios.INVALID_COUNTERS,
            )
Example no. 24
    def test_undelegate(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
    ):
        """Undelegate stake address.

        * submit registration certificate and delegate to pool
        * wait for first reward
        * undelegate stake address:

           - withdraw rewards to payment address
           - deregister stake address
           - re-register stake address

        * check that the key deposit was not returned
        * check that rewards were withdrawn
        * check that the stake address is still registered
        * check that the stake address is no longer delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = common.get_test_id(cluster)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        # check records in db-sync
        tx_db_deleg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_deleg,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )

        src_address = delegation_out.pool_user.payment.address

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_id}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # files for deregistering / re-registering stake address
        stake_addr_dereg_cert_file = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_undeleg_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        stake_addr_reg_cert_file = cluster.gen_stake_addr_registration_cert(
            addr_name=f"{temp_template}_undeleg_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        tx_files_undeleg = clusterlib.TxFiles(
            certificate_files=[
                stake_addr_dereg_cert_file, stake_addr_reg_cert_file
            ],
            signing_key_files=[
                delegation_out.pool_user.payment.skey_file,
                delegation_out.pool_user.stake.skey_file,
            ],
        )

        src_payment_balance = cluster.get_address_balance(src_address)
        reward_balance = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address).reward_account_balance

        # withdraw rewards to payment address; deregister and re-register stake address
        tx_raw_undeleg = cluster.send_tx(
            src_address=src_address,
            tx_name=f"{temp_template}_undeleg_withdraw",
            tx_files=tx_files_undeleg,
            withdrawals=[
                clusterlib.TxOut(
                    address=delegation_out.pool_user.stake.address, amount=-1)
            ],
        )

        # check that the key deposit was NOT returned and rewards were withdrawn
        assert (
            cluster.get_address_balance(src_address) == src_payment_balance -
            tx_raw_undeleg.fee + reward_balance
        ), f"Incorrect balance for source address `{src_address}`"

        # check that the stake address is still registered but no longer delegated
        stake_addr_info = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address)
        assert stake_addr_info.address, f"Reward address is not registered: {stake_addr_info}"
        assert (not stake_addr_info.delegation
                ), f"Stake address is still delegated: {stake_addr_info}"

        this_epoch = cluster.wait_for_new_epoch(padding_seconds=20)
        assert cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address
        ).reward_account_balance, "No reward was received next epoch after undelegation"

        # check `transaction view` command
        tx_view.check_tx_view(cluster_obj=cluster,
                              tx_raw_output=tx_raw_undeleg)

        # check records in db-sync
        tx_db_undeleg = dbsync_utils.check_tx(cluster_obj=cluster,
                                              tx_raw_output=tx_raw_undeleg)
        if tx_db_undeleg:
            assert delegation_out.pool_user.stake.address in tx_db_undeleg.stake_deregistration
            assert delegation_out.pool_user.stake.address in tx_db_undeleg.stake_registration

            db_rewards = dbsync_utils.check_address_reward(
                address=delegation_out.pool_user.stake.address,
                epoch_from=init_epoch)
            assert db_rewards
            db_reward_epochs = sorted(r.spendable_epoch
                                      for r in db_rewards.rewards)
            assert db_reward_epochs[0] == init_epoch + 4
            assert this_epoch in db_reward_epochs
Example no. 25
    def test_deregister(
        self,
        cluster_manager: cluster_management.ClusterManager,
        cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
    ):
        """Deregister stake address.

        * create two payment addresses that share single stake address
        * register and delegate the stake address to pool
        * attempt to deregister the stake address - deregistration is expected to fail
          because there are rewards in the stake address
        * withdraw rewards to payment address and deregister stake address
        * check that the key deposit was returned and rewards withdrawn
        * check that the stake address is no longer delegated
        * (optional) check records in db-sync
        """
        cluster, pool_id = cluster_and_pool
        temp_template = common.get_test_id(cluster)

        # create two payment addresses that share single stake address (just to test that
        # delegation works as expected even under such circumstances)
        stake_addr_rec = clusterlib_utils.create_stake_addr_records(
            f"{temp_template}_addr0", cluster_obj=cluster)[0]
        payment_addr_recs = clusterlib_utils.create_payment_addr_records(
            f"{temp_template}_addr0",
            f"{temp_template}_addr1",
            cluster_obj=cluster,
            stake_vkey_file=stake_addr_rec.vkey_file,
        )

        # fund payment address
        clusterlib_utils.fund_from_faucet(
            *payment_addr_recs,
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
        )

        pool_user = clusterlib.PoolUser(payment=payment_addr_recs[1],
                                        stake=stake_addr_rec)

        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-20)
        init_epoch = cluster.get_epoch()

        # submit registration certificate and delegate to pool
        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=temp_template,
            pool_user=pool_user,
            pool_id=pool_id,
        )

        assert (
            cluster.get_epoch() == init_epoch
        ), "Delegation took longer than expected and would affect other checks"

        tx_db_deleg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=delegation_out.tx_raw_output)
        if tx_db_deleg:
            # check in db-sync that both payment addresses share single stake address
            assert (dbsync_utils.get_utxo(
                address=payment_addr_recs[0].address).stake_address ==
                    stake_addr_rec.address)
            assert (dbsync_utils.get_utxo(
                address=payment_addr_recs[1].address).stake_address ==
                    stake_addr_rec.address)

        delegation.db_check_delegation(
            pool_user=delegation_out.pool_user,
            db_record=tx_db_deleg,
            deleg_epoch=init_epoch,
            pool_id=delegation_out.pool_id,
        )

        src_address = delegation_out.pool_user.payment.address

        LOGGER.info("Waiting 4 epochs for first reward.")
        cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
        if not cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address).reward_account_balance:
            pytest.skip(
                f"User of pool '{pool_id}' hasn't received any rewards, cannot continue."
            )

        # make sure we have enough time to finish deregistration in one epoch
        clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster,
                                                 start=5,
                                                 stop=-40)

        # files for deregistering stake address
        stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
            addr_name=f"{temp_template}_addr0",
            stake_vkey_file=delegation_out.pool_user.stake.vkey_file,
        )
        tx_files_deregister = clusterlib.TxFiles(
            certificate_files=[stake_addr_dereg_cert],
            signing_key_files=[
                delegation_out.pool_user.payment.skey_file,
                delegation_out.pool_user.stake.skey_file,
            ],
        )

        # attempt to deregister the stake address - deregistration is expected to fail
        # because there are rewards in the stake address
        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.send_tx(
                src_address=src_address,
                tx_name=f"{temp_template}_dereg_fail",
                tx_files=tx_files_deregister,
            )
        assert "StakeKeyNonZeroAccountBalanceDELEG" in str(excinfo.value)

        src_payment_balance = cluster.get_address_balance(src_address)
        reward_balance = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address).reward_account_balance

        # withdraw rewards to payment address, deregister stake address
        tx_raw_deregister_output = cluster.send_tx(
            src_address=src_address,
            tx_name=f"{temp_template}_dereg_withdraw",
            tx_files=tx_files_deregister,
            withdrawals=[
                clusterlib.TxOut(
                    address=delegation_out.pool_user.stake.address, amount=-1)
            ],
        )

        # check that the key deposit was returned and rewards withdrawn
        assert (
            cluster.get_address_balance(src_address) == src_payment_balance -
            tx_raw_deregister_output.fee + reward_balance +
            cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{src_address}`"

        # check that the stake address is no longer delegated
        stake_addr_info = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address)
        assert (not stake_addr_info.delegation
                ), f"Stake address is still delegated: {stake_addr_info}"

        tx_db_dereg = dbsync_utils.check_tx(
            cluster_obj=cluster, tx_raw_output=tx_raw_deregister_output)
        if tx_db_dereg:
            assert delegation_out.pool_user.stake.address in tx_db_dereg.stake_deregistration
            assert (
                cluster.get_address_balance(src_address) ==
                dbsync_utils.get_utxo(address=src_address).amount_sum
            ), f"Unexpected balance for source address `{src_address}` in db-sync"
Example no. 26
    def test_lobster_name(self, cluster: clusterlib.ClusterLib,
                          payment_addrs: List[clusterlib.AddressRecord]):
        """Test the Lobster Challenge.

        Uses `cardano-cli transaction build` command for building the transactions.

        * fund token issuer and create a UTxO for collateral
        * mint the LobsterNFT token
        * deploy the LobsterNFT token to address of lobster spending script
        * generate random votes and determine the expected final value
        * perform voting and check that the final value matches the expected value
        * (optional) check transactions in db-sync
        """
        # pylint: disable=too-many-locals,too-many-statements
        temp_template = common.get_test_id(cluster)
        payment_addr = payment_addrs[0]
        issuer_addr = payment_addrs[1]

        votes_num = 50
        names_num = 1219
        io_random_seed = 42

        issuer_fund = 200_000_000
        lovelace_setup_amount = 1_724_100
        lovelace_vote_amount = 2_034_438
        collateral_amount = 20_000_000
        nft_amount = 1

        # Step 1: fund the token issuer and create UTXO for collaterals

        mint_utxos, collateral_utxos, tx_output_step1 = _fund_issuer(
            cluster_obj=cluster,
            temp_template=temp_template,
            payment_addr=payment_addr,
            issuer_addr=issuer_addr,
            amount=issuer_fund,
            collateral_amount=collateral_amount,
        )

        # Step 2: mint the LobsterNFT token

        lobster_nft_token, token_utxos_step2, tx_output_step2 = _mint_lobster_nft(
            cluster_obj=cluster,
            temp_template=temp_template,
            issuer_addr=issuer_addr,
            mint_utxos=mint_utxos,
            collateral_utxos=collateral_utxos,
            nft_amount=nft_amount,
            lovelace_amount=lovelace_setup_amount,
        )

        # Step 3: deploy the LobsterNFT token to script address

        script_address, token_utxos_step3, tx_output_step3 = _deploy_lobster_nft(
            cluster_obj=cluster,
            temp_template=temp_template,
            issuer_addr=issuer_addr,
            token_utxos=token_utxos_step2,
            lobster_nft_token=lobster_nft_token,
            nft_amount=nft_amount,
            lovelace_amount=lovelace_setup_amount,
        )

        tx_outputs_all = [tx_output_step1, tx_output_step2, tx_output_step3]

        # Step 4: prepare for voting

        # there are 50 votes, each vote an int between 1 and 100
        votes = [random.randint(1, 100) for __ in range(votes_num)]
        _votes_sum = sum(votes)
        # Add "random" seed to the sum of all votes. Taking the remainder after
        # division by the number of potential names (`names_num`) gives us the
        # final counter value.
        # The final counter value is used as an index. Looking into the list of
        # names, we can see the name the index points to. We don't need to do
        # that in automated test, we will just check that the final counter
        # value matches the expected counter value.
        expected_counter_val = (io_random_seed + _votes_sum) % names_num
        votes.append(expected_counter_val)
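        # Worked example with made-up numbers: if the 50 votes sum to 2_500,
        # the final counter value is (42 + 2_500) % 1_219 = 104.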

        # Step 5: vote

        other_policyid = cluster.get_policyid(OTHER_MINT_PLUTUS)
        asset_name_counter = b"LobsterCounter".hex()
        asset_name_votes = b"LobsterVotes".hex()
        counter_token = f"{other_policyid}.{asset_name_counter}"
        votes_token = f"{other_policyid}.{asset_name_votes}"

        vote_utxos = token_utxos_step3
        vote_counter = 0
        utxo_votes_token: Optional[clusterlib.UTXOData] = None
        utxo_counter_token: Optional[clusterlib.UTXOData] = None
        for vote_num, vote_val in enumerate(votes, start=1):
            # normal votes
            if vote_num <= votes_num:
                vote_counter += vote_val
                mint_val = vote_val
            # final IO vote
            else:
                # set new counter value to `(seed + counter value) % number of names`
                # and burn excessive LobsterCounter tokens
                mint_val = vote_val - vote_counter
                vote_counter = vote_val

            txouts = [
                # Lovelace amount
                clusterlib.TxOut(
                    address=script_address,
                    amount=lovelace_vote_amount,
                    datum_hash=LOBSTER_DATUM_HASH,
                ),
                # LobsterNFT token
                clusterlib.TxOut(
                    address=script_address,
                    amount=nft_amount,
                    coin=lobster_nft_token,
                    datum_hash=LOBSTER_DATUM_HASH,
                ),
                # LobsterCounter token
                clusterlib.TxOut(
                    address=script_address,
                    amount=vote_counter,
                    coin=counter_token,
                    datum_hash=LOBSTER_DATUM_HASH,
                ),
                # LobsterVotes token
                clusterlib.TxOut(
                    address=script_address,
                    amount=vote_num,
                    coin=votes_token,
                    datum_hash=LOBSTER_DATUM_HASH,
                ),
            ]

            mint_txouts = [
                # mint new LobsterCounter tokens
                clusterlib.TxOut(
                    address=script_address,
                    amount=mint_val,
                    coin=counter_token,
                    datum_hash=LOBSTER_DATUM_HASH,
                ),
                # mint 1 new LobsterVotes token
                clusterlib.TxOut(
                    address=script_address,
                    amount=1,
                    coin=votes_token,
                    datum_hash=LOBSTER_DATUM_HASH,
                ),
            ]
            mint_script_data = [
                clusterlib.Mint(
                    txouts=mint_txouts,
                    script_file=OTHER_MINT_PLUTUS,
                    redeemer_value="[]",
                )
            ]

            txin_script_data = [
                clusterlib.ScriptTxIn(
                    txins=vote_utxos,
                    script_file=LOBSTER_PLUTUS,
                    collaterals=collateral_utxos,
                    datum_value="[]",
                    redeemer_value="[]",
                )
            ]

            tx_files = clusterlib.TxFiles(signing_key_files=[
                payment_addr.skey_file, issuer_addr.skey_file
            ], )
            funds_txin = cluster.get_utxo_with_highest_amount(
                address=payment_addr.address)
            tx_output_vote = cluster.build_tx(
                src_address=payment_addr.address,
                tx_name=f"{temp_template}_voting_{vote_num}",
                txins=[funds_txin],
                tx_files=tx_files,
                txouts=txouts,
                script_txins=txin_script_data,
                mint=mint_script_data,
            )
            tx_signed = cluster.sign_tx(
                tx_body_file=tx_output_vote.out_file,
                signing_key_files=tx_files.signing_key_files,
                tx_name=f"{temp_template}_voting_{vote_num}",
            )
            cluster.submit_tx(tx_file=tx_signed, txins=vote_utxos)

            tx_outputs_all.append(tx_output_vote)

            txid_vote = cluster.get_txid(tx_body_file=tx_output_vote.out_file)
            vote_utxos = cluster.get_utxo(txin=f"{txid_vote}#1")

            # check expected balances
            utxos_lovelace = [
                u for u in vote_utxos if u.coin == clusterlib.DEFAULT_COIN
            ][0]
            assert (
                utxos_lovelace.amount == lovelace_vote_amount
            ), f"Incorrect Lovelace balance for script address `{script_address}`"

            utxo_votes_token = [
                u for u in vote_utxos if u.coin == votes_token
            ][0]
            assert (
                utxo_votes_token.amount == vote_num
            ), f"Incorrect LobsterVotes token balance for script address `{script_address}`"

            utxo_counter_token = [
                u for u in vote_utxos if u.coin == counter_token
            ][0]
            assert (
                utxo_counter_token.amount == vote_counter
            ), f"Incorrect LobsterCounter token balance for script address `{script_address}`"

        assert (
            utxo_counter_token
            and utxo_counter_token.amount == expected_counter_val
        ), "Final balance of LobsterCounter token doesn't match the expected balance"

        # check transactions in db-sync
        for tx_out_rec in tx_outputs_all:
            dbsync_utils.check_tx(cluster_obj=cluster,
                                  tx_raw_output=tx_out_rec)
Example no. 27
    def test_lock_tx_datum_as_witness(
        self, cluster: clusterlib.ClusterLib, payment_addrs: List[clusterlib.AddressRecord]
    ):
        """Test unlock a Tx output with a datum as witness.

        Expect failure.
        """
        __: Any  # mypy workaround
        temp_template = common.get_test_id(cluster)
        amount = 2_000_000

        plutus_op = PLUTUS_OP_ALWAYS_SUCCEEDS

        # for mypy
        assert plutus_op.execution_cost
        assert plutus_op.datum_file
        assert plutus_op.redeemer_cbor_file

        redeem_cost = plutus_common.compute_cost(
            execution_cost=plutus_op.execution_cost, protocol_params=cluster.get_protocol_params()
        )

        script_utxos, collateral_utxos, __, __ = _fund_script(
            temp_template=temp_template,
            cluster=cluster,
            payment_addr=payment_addrs[0],
            dst_addr=payment_addrs[1],
            plutus_op=plutus_op,
            amount=amount,
            redeem_cost=redeem_cost,
            use_inline_datum=True,
        )

        plutus_txins = [
            clusterlib.ScriptTxIn(
                txins=script_utxos,
                script_file=plutus_op.script_file,
                collaterals=collateral_utxos,
                execution_units=(
                    plutus_op.execution_cost.per_time,
                    plutus_op.execution_cost.per_space,
                ),
                redeemer_cbor_file=plutus_op.redeemer_cbor_file,
                datum_file=plutus_op.datum_file,
            )
        ]

        tx_files_redeem = clusterlib.TxFiles(
            signing_key_files=[payment_addrs[1].skey_file],
        )
        txouts_redeem = [
            clusterlib.TxOut(address=payment_addrs[1].address, amount=amount),
        ]

        tx_output_redeem = cluster.build_raw_tx_bare(
            out_file=f"{temp_template}_step2_tx.body",
            txouts=txouts_redeem,
            tx_files=tx_files_redeem,
            fee=redeem_cost.fee + FEE_REDEEM_TXSIZE,
            script_txins=plutus_txins,
        )

        tx_signed_redeem = cluster.sign_tx(
            tx_body_file=tx_output_redeem.out_file,
            signing_key_files=tx_files_redeem.signing_key_files,
            tx_name=f"{temp_template}_step2",
        )

        with pytest.raises(clusterlib.CLIError) as excinfo:
            cluster.submit_tx(
                tx_file=tx_signed_redeem,
                txins=[t.txins[0] for t in tx_output_redeem.script_txins if t.txins],
            )
        err_str = str(excinfo.value)
        assert "NonOutputSupplimentaryDatums" in err_str, err_str
Example no. 28
    def test_expired_kes(
        self,
        cluster_kes: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
        worker_id: str,
    ):
        """Test expired KES.

        * start local cluster instance configured with short KES period and low number of key
          evolutions, so KES expires soon on all pools
        * refresh opcert on 2 of the 3 pools, so KES doesn't expire on those 2 pools and
          the pools keep minting blocks
        * wait for KES expiration on the selected pool
        * check that the pool with expired KES didn't mint blocks in an epoch that followed after
          KES expiration
        * check KES period info command with an operational certificate with an expired KES
        * check KES period info command with operational certificates with a valid KES
        """
        cluster = cluster_kes
        temp_template = common.get_test_id(cluster)

        expire_timeout = 200
        expire_node_name = "pool1"
        expire_pool_name = f"node-{expire_node_name}"
        expire_pool_rec = cluster_manager.cache.addrs_data[expire_pool_name]
        expire_pool_id = cluster.get_stake_pool_id(expire_pool_rec["cold_key_pair"].vkey_file)
        expire_pool_id_dec = helpers.decode_bech32(expire_pool_id)

        # refresh opcert on 2 of the 3 pools, so KES doesn't expire on those 2 pools and
        # the pools keep minting blocks
        refreshed_nodes = ["pool2", "pool3"]

        def _refresh_opcerts():
            for n in refreshed_nodes:
                refreshed_pool_rec = cluster_manager.cache.addrs_data[f"node-{n}"]
                refreshed_opcert_file = cluster.gen_node_operational_cert(
                    node_name=f"{n}_refreshed_opcert",
                    kes_vkey_file=refreshed_pool_rec["kes_key_pair"].vkey_file,
                    cold_skey_file=refreshed_pool_rec["cold_key_pair"].skey_file,
                    cold_counter_file=refreshed_pool_rec["cold_key_pair"].counter_file,
                    kes_period=cluster.get_kes_period(),
                )
                shutil.copy(refreshed_opcert_file, refreshed_pool_rec["pool_operational_cert"])
            cluster_nodes.restart_nodes(refreshed_nodes)

        _refresh_opcerts()

        expected_err_regexes = ["KESKeyAlreadyPoisoned", "KESCouldNotEvolve"]
        # ignore expected errors in bft1 node log file, as bft1 opcert will not get refreshed
        logfiles.add_ignore_rule(
            files_glob="bft1.stdout",
            regex="|".join(expected_err_regexes),
            ignore_file_id=worker_id,
        )
        # search for expected errors only in log file corresponding to pool with expired KES
        expected_errors = [(f"{expire_node_name}.stdout", err) for err in expected_err_regexes]

        this_epoch = -1
        with logfiles.expect_errors(expected_errors, ignore_file_id=worker_id):
            LOGGER.info(
                f"{datetime.datetime.now()}: Waiting for {expire_timeout} sec for KES expiration."
            )
            time.sleep(expire_timeout)

            _wait_epoch_chores(
                cluster_obj=cluster, temp_template=temp_template, this_epoch=this_epoch
            )
            this_epoch = cluster.get_epoch()

            # check that the pool is not producing any blocks
            blocks_made = clusterlib_utils.get_ledger_state(cluster_obj=cluster)["blocksCurrent"]
            if blocks_made:
                assert (
                    expire_pool_id_dec not in blocks_made
                ), f"The pool '{expire_pool_name}' has minted blocks in epoch {this_epoch}"

            # refresh opcerts one more time
            _refresh_opcerts()

            LOGGER.info(
                f"{datetime.datetime.now()}: Waiting 120 secs to make sure the expected errors "
                "make it to log files."
            )
            time.sleep(120)

        # check kes-period-info with an operational certificate with KES expired
        # TODO: the query is currently broken
        kes_query_currently_broken = False
        try:
            kes_info_expired = cluster.get_kes_period_info(
                opcert_file=expire_pool_rec["pool_operational_cert"]
            )
        except clusterlib.CLIError as err:
            if "currentlyBroken" not in str(err):
                raise
            kes_query_currently_broken = True

        if kes_query_currently_broken:
            pytest.xfail("`query kes-period-info` is currently broken")
        else:
            kes.check_kes_period_info_result(
                kes_output=kes_info_expired, expected_scenario=kes.KesScenarios.INVALID_KES_PERIOD
            )

            # check kes-period-info with valid operational certificates
            for n in refreshed_nodes:
                refreshed_pool_rec = cluster_manager.cache.addrs_data[f"node-{n}"]
                kes_info_valid = cluster.get_kes_period_info(
                    opcert_file=refreshed_pool_rec["pool_operational_cert"]
                )
                kes.check_kes_period_info_result(
                    kes_output=kes_info_valid, expected_scenario=kes.KesScenarios.ALL_VALID
                )
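
A minimal sketch of the KES arithmetic the test above relies on. The function and parameter names (current_slot, slots_per_kes_period, max_kes_evolutions) are illustrative assumptions, not values taken from the test:

def current_kes_period(current_slot: int, slots_per_kes_period: int) -> int:
    """Return the KES period that contains the given slot."""
    return current_slot // slots_per_kes_period


def opcert_expired(issued_period: int, current_period: int, max_kes_evolutions: int) -> bool:
    """An opcert becomes unusable once its KES key cannot evolve to the current period."""
    return current_period >= issued_period + max_kes_evolutions
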
    def test_update_proposal(
        self,
        cluster_update_proposal: clusterlib.ClusterLib,
        payment_addr: clusterlib.AddressRecord,
    ):
        """Test changing protocol parameters using update proposal.

        * if era >= Alonzo, update Alonzo-specific parameters and:

           - wait for next epoch
           - check that parameters were updated

        * submit update proposal
        * in the same epoch, submit another update proposal
        * wait for next epoch
        * check that parameters were updated with the values submitted in the second
          update proposal, i.e. the second update proposal overwrites the first one
        """
        cluster = cluster_update_proposal
        temp_template = common.get_test_id(cluster)

        max_tx_execution_units = 11_000_000_000
        max_block_execution_units = 110_000_000_000
        price_execution_steps = "12/10"
        price_execution_memory = "1.3"
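        # the CLI accepts the execution prices as fractions ("12/10") or decimals
        # ("1.3"); the ledger reports them back as decimals, as asserted below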

        this_epoch = cluster.wait_for_new_epoch()

        protocol_params = cluster.get_protocol_params()
        with open(f"{temp_template}_pparams_ep{this_epoch}.json",
                  "w",
                  encoding="utf-8") as fp_out:
            json.dump(protocol_params, fp_out, indent=4)

        # update Alonzo-specific parameters in a separate update proposal
        if VERSIONS.cluster_era >= VERSIONS.ALONZO:
            if VERSIONS.cluster_era >= VERSIONS.BABBAGE:
                utxo_cost = clusterlib_utils.UpdateProposal(
                    arg="--utxo-cost-per-word",
                    value=8001,
                    name="",  # needs custom check
                )
            else:
                utxo_cost = clusterlib_utils.UpdateProposal(
                    arg="--utxo-cost-per-word",
                    value=8001,
                    name="utxoCostPerWord",
                )

            update_proposals_alonzo = [
                utxo_cost,
                clusterlib_utils.UpdateProposal(
                    arg="--max-value-size",
                    value=5000,
                    name="maxValueSize",
                ),
                clusterlib_utils.UpdateProposal(
                    arg="--collateral-percent",
                    value=90,
                    name="collateralPercentage",
                ),
                clusterlib_utils.UpdateProposal(
                    arg="--max-collateral-inputs",
                    value=4,
                    name="maxCollateralInputs",
                ),
                clusterlib_utils.UpdateProposal(
                    arg="--max-tx-execution-units",
                    value=f"({max_tx_execution_units},{max_tx_execution_units})",
                    name="",  # needs custom check
                ),
                clusterlib_utils.UpdateProposal(
                    arg="--max-block-execution-units",
                    value=f"({max_block_execution_units},{max_block_execution_units})",
                    name="",  # needs custom check
                ),
                clusterlib_utils.UpdateProposal(
                    arg="--price-execution-steps",
                    value=price_execution_steps,
                    name="",  # needs custom check
                ),
                clusterlib_utils.UpdateProposal(
                    arg="--price-execution-memory",
                    value=price_execution_memory,
                    name="",  # needs custom check
                ),
            ]

            clusterlib_utils.update_params_build(
                cluster_obj=cluster,
                src_addr_record=payment_addr,
                update_proposals=update_proposals_alonzo,
            )

            this_epoch = cluster.wait_for_new_epoch()

            protocol_params = cluster.get_protocol_params()
            with open(f"{temp_template}_pparams_ep{this_epoch}.json",
                      "w",
                      encoding="utf-8") as fp_out:
                json.dump(protocol_params, fp_out, indent=4)

            clusterlib_utils.check_updated_params(
                update_proposals=update_proposals_alonzo, protocol_params=protocol_params
            )
            assert protocol_params["maxTxExecutionUnits"][
                "memory"] == max_tx_execution_units
            assert protocol_params["maxTxExecutionUnits"][
                "steps"] == max_tx_execution_units
            assert protocol_params["maxBlockExecutionUnits"][
                "memory"] == max_block_execution_units
            assert protocol_params["maxBlockExecutionUnits"][
                "steps"] == max_block_execution_units
            assert protocol_params["executionUnitPrices"]["priceSteps"] == 1.2
            assert protocol_params["executionUnitPrices"]["priceMemory"] == 1.3

            if VERSIONS.cluster_era >= VERSIONS.BABBAGE:
                # the resulting number will be a multiple of 8, i.e. 8000
                # (the cost is effectively stored per byte, 1 word = 8 bytes,
                # so floor(8001 / 8) * 8 == 8000)
                assert protocol_params["utxoCostPerWord"] == math.floor(utxo_cost.value / 8) * 8
            else:
                assert protocol_params["utxoCostPerWord"] == utxo_cost.value

        # Check that only one update proposal can be applied each epoch and that the last
        # update proposal cancels the previous one. The following parameter values will be
        # overwritten by the next update proposal.
        update_proposal_canceled = [
            clusterlib_utils.UpdateProposal(
                arg="--min-fee-linear",
                value=47,
                name="txFeePerByte",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--pool-reg-deposit",
                value=410_000_000,
                name="stakePoolDeposit",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--decentralization-parameter",
                value=0.2,
                name="decentralization",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--pool-retirement-epoch-boundary",
                value=18,
                name="poolRetireMaxEpoch",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--number-of-pools",
                value=10,
                name="stakePoolTargetNum",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--max-block-body-size",
                value=65_555,
                name="maxBlockBodySize",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--max-tx-size",
                value=16_400,
                name="maxTxSize",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--min-pool-cost",
                value=2,
                name="minPoolCost",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--max-block-header-size",
                value=1_400,
                name="maxBlockHeaderSize",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--min-fee-constant",
                value=155_390,
                name="txFeeFixed",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--key-reg-deposit-amt",
                value=300_050,
                name="stakeAddressDeposit",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--pool-influence",
                value=0.5,
                name="poolPledgeInfluence",
            ),
        ]

        clusterlib_utils.update_params(
            cluster_obj=cluster,
            src_addr_record=payment_addr,
            update_proposals=update_proposal_canceled,
        )
        time.sleep(2)
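        # short fixed delay (assumption: gives the first proposal time to be
        # submitted before the second one is sent within the same epoch)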

        # the final update proposal
        decentralization = clusterlib_utils.UpdateProposal(
            arg="--decentralization-parameter",
            value=0.1,
            name="",  # needs custom check
        )
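        # needs a custom check: in Babbage the parameter is removed from the ledger
        # and reported as None (see the assertions at the end of this test)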
        update_proposals = [
            decentralization,
            clusterlib_utils.UpdateProposal(
                arg="--min-fee-linear",
                value=45,
                name="txFeePerByte",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--pool-reg-deposit",
                value=400_000_000,
                name="stakePoolDeposit",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--pool-retirement-epoch-boundary",
                value=19,
                name="poolRetireMaxEpoch",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--number-of-pools",
                value=9,
                name="stakePoolTargetNum",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--max-block-body-size",
                value=65_544,
                name="maxBlockBodySize",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--max-tx-size",
                value=16_392,
                name="maxTxSize",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--min-pool-cost",
                value=1,
                name="minPoolCost",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--max-block-header-size",
                value=1_200,
                name="maxBlockHeaderSize",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--min-fee-constant",
                value=155_380,
                name="txFeeFixed",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--key-reg-deposit-amt",
                value=300_000,
                name="stakeAddressDeposit",
            ),
            clusterlib_utils.UpdateProposal(
                arg="--pool-influence",
                value=0.4,
                name="poolPledgeInfluence",
            ),
        ]
        if VERSIONS.cluster_era < VERSIONS.ALONZO:
            update_proposals.append(
                clusterlib_utils.UpdateProposal(
                    arg="--min-utxo-value",
                    value=2,
                    name="minUTxOValue",
                )
            )

        clusterlib_utils.update_params(
            cluster_obj=cluster,
            src_addr_record=payment_addr,
            update_proposals=update_proposals,
        )

        this_epoch = cluster.wait_for_new_epoch()

        protocol_params = cluster.get_protocol_params()
        with open(f"{temp_template}_pparams_ep{this_epoch}.json",
                  "w",
                  encoding="utf-8") as fp_out:
            json.dump(protocol_params, fp_out, indent=4)

        clusterlib_utils.check_updated_params(
            update_proposals=update_proposals, protocol_params=protocol_params
        )

        if VERSIONS.cluster_era >= VERSIONS.BABBAGE:
            assert protocol_params["decentralization"] is None
        else:
            assert protocol_params["decentralization"] == decentralization.value
Example n. 30
    def test_opcert_future_kes_period(  # noqa: C901
        self,
        cluster_lock_pool2: clusterlib.ClusterLib,
        cluster_manager: cluster_management.ClusterManager,
    ):
        """Start a stake pool with an operational certificate created with invalid `--kes-period`.

        * generate new operational certificate with `--kes-period` in the future
        * restart the node with the new operational certificate
        * check that the pool is not producing any blocks
        * if network era > Alonzo

            - generate new operational certificate with valid `--kes-period`, but counter value +2
              from the last used operational certificate
            - restart the node
            - check that the pool is not producing any blocks

        * generate new operational certificate with valid `--kes-period` and restart the node
        * check that the pool is producing blocks again
        """
        # pylint: disable=too-many-statements,too-many-branches
        pool_name = cluster_management.Resources.POOL2
        node_name = "pool2"
        cluster = cluster_lock_pool2

        temp_template = common.get_test_id(cluster)
        pool_rec = cluster_manager.cache.addrs_data[pool_name]

        node_cold = pool_rec["cold_key_pair"]
        stake_pool_id = cluster.get_stake_pool_id(node_cold.vkey_file)
        stake_pool_id_dec = helpers.decode_bech32(stake_pool_id)

        opcert_file: Path = pool_rec["pool_operational_cert"]
        cold_counter_file: Path = pool_rec["cold_key_pair"].counter_file

        expected_errors = [
            (f"{node_name}.stdout", "PraosCannotForgeKeyNotUsableYet"),
        ]

        if VERSIONS.cluster_era > VERSIONS.ALONZO:
            expected_errors.append((f"{node_name}.stdout", "CounterOverIncrementedOCERT"))
            # In Babbage we get `CounterOverIncrementedOCERT` error if counter for new opcert
            # is not exactly +1 from the last used opcert. We'll back up the original counter
            # file so we can use it for issuing the next valid opcert.
            cold_counter_file_orig = Path(
                f"{cold_counter_file.stem}_orig{cold_counter_file.suffix}"
            ).resolve()
            shutil.copy(cold_counter_file, cold_counter_file_orig)

        logfiles.add_ignore_rule(
            files_glob="*.stdout",
            regex="MuxBearerClosed|CounterOverIncrementedOCERT",
            ignore_file_id=cluster_manager.worker_id,
        )

        # generate new operational certificate with `--kes-period` in the future
        invalid_opcert_file = cluster.gen_node_operational_cert(
            node_name=f"{node_name}_invalid_opcert_file",
            kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
            cold_skey_file=pool_rec["cold_key_pair"].skey_file,
            cold_counter_file=cold_counter_file,
            kes_period=cluster.get_kes_period() + 100,
        )

        kes_query_currently_broken = False

        with cluster_manager.restart_on_failure():
            with logfiles.expect_errors(expected_errors, ignore_file_id=cluster_manager.worker_id):
                # restart the node with the new operational certificate
                shutil.copy(invalid_opcert_file, opcert_file)
                cluster_nodes.restart_nodes([node_name])
                cluster.wait_for_new_epoch()

                LOGGER.info("Checking blocks production for 4 epochs.")
                this_epoch = -1
                for invalid_opcert_epoch in range(4):
                    _wait_epoch_chores(
                        cluster_obj=cluster, temp_template=temp_template, this_epoch=this_epoch
                    )
                    this_epoch = cluster.get_epoch()

                    # check that the pool is not producing any blocks
                    blocks_made = clusterlib_utils.get_ledger_state(cluster_obj=cluster)[
                        "blocksCurrent"
                    ]
                    if blocks_made:
                        assert (
                            stake_pool_id_dec not in blocks_made
                        ), f"The pool '{pool_name}' has produced blocks in epoch {this_epoch}"

                    if invalid_opcert_epoch == 1:
                        # check kes-period-info with operational certificate with
                        # invalid `--kes-period`
                        # TODO: the query is currently broken
                        try:
                            kes_period_info = cluster.get_kes_period_info(invalid_opcert_file)
                        except clusterlib.CLIError as err:
                            if "currentlyBroken" not in str(err):
                                raise
                            kes_query_currently_broken = True

                        if not kes_query_currently_broken:
                            kes.check_kes_period_info_result(
                                kes_output=kes_period_info,
                                expected_scenario=kes.KesScenarios.INVALID_KES_PERIOD,
                            )

                    # test the `CounterOverIncrementedOCERT` error - the counter will now be +2 from
                    # last used opcert counter value
                    if invalid_opcert_epoch == 2 and VERSIONS.cluster_era > VERSIONS.ALONZO:
                        overincrement_opcert_file = cluster.gen_node_operational_cert(
                            node_name=f"{node_name}_overincrement_opcert_file",
                            kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                            cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                            cold_counter_file=cold_counter_file,
                            kes_period=cluster.get_kes_period(),
                        )
                        # copy the new certificate and restart the node
                        shutil.copy(overincrement_opcert_file, opcert_file)
                        cluster_nodes.restart_nodes([node_name])

                    if invalid_opcert_epoch == 3:
                        # check kes-period-info with operational certificate with
                        # invalid counter
                        # TODO: the query is currently broken, implement once it is fixed
                        pass

            # in Babbage we'll use the original counter for issuing a new valid opcert so the
            # counter value of the new valid opcert equals the counter value of the original
            # opcert +1
            if VERSIONS.cluster_era > VERSIONS.ALONZO:
                shutil.copy(cold_counter_file_orig, cold_counter_file)

            # generate new operational certificate with valid `--kes-period`
            valid_opcert_file = cluster.gen_node_operational_cert(
                node_name=f"{node_name}_valid_opcert_file",
                kes_vkey_file=pool_rec["kes_key_pair"].vkey_file,
                cold_skey_file=pool_rec["cold_key_pair"].skey_file,
                cold_counter_file=cold_counter_file,
                kes_period=cluster.get_kes_period(),
            )
            # copy the new certificate and restart the node
            shutil.copy(valid_opcert_file, opcert_file)
            cluster_nodes.restart_nodes([node_name])
            this_epoch = cluster.wait_for_new_epoch()

            LOGGER.info("Checking blocks production for another 2 epochs.")
            blocks_made_db = []
            active_again_epoch = this_epoch
            for __ in range(2):
                _wait_epoch_chores(
                    cluster_obj=cluster, temp_template=temp_template, this_epoch=this_epoch
                )
                this_epoch = cluster.get_epoch()

                # check that the pool is producing blocks
                blocks_made = clusterlib_utils.get_ledger_state(cluster_obj=cluster)[
                    "blocksCurrent"
                ]
                blocks_made_db.append(stake_pool_id_dec in blocks_made)

            assert any(blocks_made_db), (
                f"The pool '{pool_name}' has not produced any blocks "
                f"since epoch {active_again_epoch}"
            )

        if kes_query_currently_broken:
            pytest.xfail("`query kes-period-info` is currently broken")
        else:
            # check kes-period-info with valid operational certificate
            kes_period_info = cluster.get_kes_period_info(valid_opcert_file)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info, expected_scenario=kes.KesScenarios.ALL_VALID
            )

            # check kes-period-info with invalid operational certificate, wrong counter and period
            kes_period_info = cluster.get_kes_period_info(invalid_opcert_file)
            kes.check_kes_period_info_result(
                kes_output=kes_period_info,
                expected_scenario=kes.KesScenarios.INVALID_KES_PERIOD
                if VERSIONS.cluster_era > VERSIONS.ALONZO
                else kes.KesScenarios.ALL_INVALID,
            )
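
A minimal sketch of the Babbage opcert counter rule exercised by the `CounterOverIncrementedOCERT` branch above, under the assumption stated in the test's comments (a new counter must be at most +1 ahead of the last used value):

def opcert_counter_acceptable(last_used_counter: int, new_counter: int) -> bool:
    """Reject counters that jump more than +1 ahead (lower-bound checks elided)."""
    return new_counter <= last_used_counter + 1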