def cluster_lock_42stake(
    cluster_manager: cluster_management.ClusterManager,
) -> Tuple[clusterlib.ClusterLib, str]:
    """Make sure just one staking Plutus test run at a time.

    Plutus script always has the same address. When one script is used in multiple tests that
    are running in parallel, the balances etc. don't add up.
    """
    # The "guess 42" staking script is the exclusive resource; POOL3 is only used,
    # not locked, so other non-conflicting tests may still share it.
    pool_name = cluster_management.Resources.POOL3
    script_lock = str(plutus_common.STAKE_GUESS_42_PLUTUS_V1.stem)

    cluster = cluster_manager.get(
        lock_resources=[script_lock],
        use_resources=[pool_name],
    )
    pool3_id = delegation.get_pool_id(
        cluster_obj=cluster,
        addrs_data=cluster_manager.cache.addrs_data,
        pool_name=pool_name,
    )
    return cluster, pool3_id
def test_oversaturated(  # noqa: C901
    self,
    cluster_manager: cluster_management.ClusterManager,
    cluster_lock_pools: clusterlib.ClusterLib,
):
    """Check diminished rewards when stake pool is oversaturated.

    The stake pool continues to operate normally and those who delegate to that pool
    receive rewards, but the rewards are proportionally lower than those received from
    stake pool that is not oversaturated.

    * register and delegate stake address in "init epoch", for all available pools
    * in "init epoch" + 2, saturate all available pools (block distribution remains
      balanced among pools)
    * in "init epoch" + 3, oversaturate one pool
    * in "init epoch" + 5, for all available pools, withdraw rewards and transfer funds
      from delegated addresses so pools are no longer (over)saturated
    * while doing the steps above, collect rewards data for 9 epochs
    * compare proportionality of rewards in epochs where pools were non-saturated,
      saturated and oversaturated
    """
    # pylint: disable=too-many-statements,too-many-locals,too-many-branches
    # Epoch offsets (relative to `init_epoch`) at which each phase starts.
    epoch_saturate = 2
    epoch_oversaturate = 4
    epoch_withdrawal = 6

    cluster = cluster_lock_pools
    temp_template = common.get_test_id(cluster)
    initial_balance = 1_000_000_000

    faucet_rec = cluster_manager.cache.addrs_data["byron000"]
    pool_records: Dict[int, PoolRecord] = {}

    # make sure we have enough time to finish the delegation in one epoch
    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-40)
    init_epoch = cluster.get_epoch()

    # submit registration certificates and delegate to pools
    for idx, res in enumerate(
        [
            cluster_management.Resources.POOL1,
            cluster_management.Resources.POOL2,
            cluster_management.Resources.POOL3,
        ],
        start=1,
    ):
        pool_addrs_data = cluster_manager.cache.addrs_data[res]
        reward_addr = clusterlib.PoolUser(
            payment=pool_addrs_data["payment"], stake=pool_addrs_data["reward"]
        )
        pool_id = delegation.get_pool_id(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=res,
        )
        pool_id_dec = helpers.decode_bech32(bech32=pool_id)

        delegation_out = delegation.delegate_stake_addr(
            cluster_obj=cluster,
            addrs_data=cluster_manager.cache.addrs_data,
            temp_template=f"{temp_template}_pool{idx}",
            pool_id=pool_id,
            amount=initial_balance,
        )

        pool_records[idx] = PoolRecord(
            name=res,
            id=pool_id,
            id_dec=pool_id_dec,
            reward_addr=reward_addr,
            delegation_out=delegation_out,
            user_rewards=[],
            owner_rewards=[],
            blocks_minted={},
            saturation_amounts={},
        )

    # record initial reward balance for each pool
    for pool_rec in pool_records.values():
        user_payment_balance = cluster.get_address_balance(
            pool_rec.delegation_out.pool_user.payment.address
        )
        owner_payment_balance = cluster.get_address_balance(
            pool_rec.reward_addr.payment.address
        )
        pool_rec.user_rewards.append(
            RewardRecord(
                epoch_no=init_epoch,
                reward_total=0,
                reward_per_epoch=0,
                stake_total=user_payment_balance,
            )
        )
        pool_rec.owner_rewards.append(
            RewardRecord(
                epoch_no=init_epoch,
                reward_total=cluster.get_stake_addr_info(
                    pool_rec.reward_addr.stake.address
                ).reward_account_balance,
                reward_per_epoch=0,
                stake_total=owner_payment_balance,
            )
        )

    assert (
        cluster.get_epoch() == init_epoch
    ), "Delegation took longer than expected and would affect other checks"

    LOGGER.info("Checking rewards for 10 epochs.")
    for __ in range(10):
        # wait for new epoch
        if cluster.get_epoch() == pool_records[2].owner_rewards[-1].epoch_no:
            cluster.wait_for_new_epoch()

        # sleep till the end of epoch
        clusterlib_utils.wait_for_epoch_interval(
            cluster_obj=cluster, start=-50, stop=-40, force_epoch=True
        )
        this_epoch = cluster.get_epoch()

        ledger_state = clusterlib_utils.get_ledger_state(cluster_obj=cluster)
        clusterlib_utils.save_ledger_state(
            cluster_obj=cluster,
            state_name=f"{temp_template}_{this_epoch}",
            ledger_state=ledger_state,
        )

        for pool_rec in pool_records.values():
            # reward balance in previous epoch
            prev_user_reward = pool_rec.user_rewards[-1].reward_total
            prev_owner_reward = pool_rec.owner_rewards[-1].reward_total

            pool_rec.blocks_minted[this_epoch - 1] = (
                ledger_state["blocksBefore"].get(pool_rec.id_dec) or 0
            )

            # current reward balance
            user_reward = cluster.get_stake_addr_info(
                pool_rec.delegation_out.pool_user.stake.address
            ).reward_account_balance
            owner_reward = cluster.get_stake_addr_info(
                pool_rec.reward_addr.stake.address
            ).reward_account_balance

            # total reward amounts received this epoch
            owner_reward_epoch = owner_reward - prev_owner_reward
            # We cannot compare with previous rewards in epochs where
            # `this_epoch >= init_epoch + epoch_withdrawal`.
            # There's a withdrawal of rewards at the end of these epochs.
            if this_epoch > init_epoch + epoch_withdrawal:
                user_reward_epoch = user_reward
            else:
                user_reward_epoch = user_reward - prev_user_reward

            # store collected rewards info
            user_payment_balance = cluster.get_address_balance(
                pool_rec.delegation_out.pool_user.payment.address
            )
            owner_payment_balance = cluster.get_address_balance(
                pool_rec.reward_addr.payment.address
            )
            pool_rec.user_rewards.append(
                RewardRecord(
                    epoch_no=this_epoch,
                    reward_total=user_reward,
                    reward_per_epoch=user_reward_epoch,
                    stake_total=user_payment_balance + user_reward,
                )
            )
            pool_rec.owner_rewards.append(
                RewardRecord(
                    epoch_no=this_epoch,
                    reward_total=owner_reward,
                    reward_per_epoch=owner_reward_epoch,
                    stake_total=owner_payment_balance,
                )
            )
            pool_rec.saturation_amounts[this_epoch] = _get_saturation_threshold(
                cluster_obj=cluster, ledger_state=ledger_state, pool_id=pool_rec.id
            )

        # fund the delegated addresses - saturate all pools
        if this_epoch == init_epoch + epoch_saturate:
            clusterlib_utils.fund_from_faucet(
                *[p.delegation_out.pool_user.payment for p in pool_records.values()],
                cluster_obj=cluster,
                faucet_data=faucet_rec,
                amount=[
                    p.saturation_amounts[this_epoch] - 100_000_000_000
                    for p in pool_records.values()
                ],
                tx_name=f"{temp_template}_saturate_pools_ep{this_epoch}",
                force=True,
            )

        with cluster_manager.restart_on_failure():
            # Fund the address delegated to "pool2" to oversaturate the pool.
            # New stake amount will be current (saturated) stake * 2.
            if this_epoch == init_epoch + epoch_oversaturate:
                assert (
                    pool_records[2].saturation_amounts[this_epoch] > 0
                ), "Pool is already saturated"
                current_stake = int(
                    cluster.get_stake_snapshot(pool_records[2].id)["poolStakeMark"]
                )
                # fix: local was misspelled `overstaturate_amount`
                oversaturate_amount = current_stake * 2
                saturation_threshold = pool_records[2].saturation_amounts[this_epoch]
                assert oversaturate_amount > saturation_threshold, (
                    f"{oversaturate_amount} Lovelace is not enough to oversaturate the pool "
                    f"({saturation_threshold} is needed)"
                )
                clusterlib_utils.fund_from_faucet(
                    pool_records[2].delegation_out.pool_user.payment,
                    cluster_obj=cluster,
                    faucet_data=faucet_rec,
                    amount=oversaturate_amount,
                    tx_name=f"{temp_template}_oversaturate_pool2",
                    force=True,
                )

            # wait 4 epochs for first rewards
            if this_epoch >= init_epoch + 4:
                assert (
                    owner_reward > prev_owner_reward
                ), "New reward was not received by pool owner"

            # transfer funds back to faucet so the pools are no longer (over)saturated
            # and staked amount is +- same as the `initial_balance`
            if this_epoch >= init_epoch + epoch_withdrawal:
                _withdraw_rewards(
                    *[p.delegation_out.pool_user for p in pool_records.values()],
                    cluster_obj=cluster,
                    tx_name=f"{temp_template}_ep{this_epoch}",
                )

                return_to_addrs = []
                return_amounts = []
                # fix: was `for idx, pool_rec in pool_records.items()` with `idx` unused
                # (it also shadowed the outer delegation loop variable)
                for pool_rec in pool_records.values():
                    deleg_payment_balance = cluster.get_address_balance(
                        pool_rec.delegation_out.pool_user.payment.address
                    )
                    if deleg_payment_balance > initial_balance + 10_000_000:
                        return_to_addrs.append(
                            pool_rec.delegation_out.pool_user.payment
                        )
                        return_amounts.append(deleg_payment_balance - initial_balance)

                clusterlib_utils.return_funds_to_faucet(
                    *return_to_addrs,
                    cluster_obj=cluster,
                    faucet_addr=faucet_rec["payment"].address,
                    amount=return_amounts,
                    tx_name=f"{temp_template}_ep{this_epoch}",
                )

                for return_addr in return_to_addrs:
                    deleg_payment_balance = cluster.get_address_balance(
                        return_addr.address
                    )
                    # fix: message was a plain string, so `{return_addr}` was never
                    # interpolated - made it an f-string
                    assert (
                        deleg_payment_balance <= initial_balance
                    ), f"Unexpected funds in payment address '{return_addr}'"

            assert (
                cluster.get_epoch() == this_epoch
            ), "Failed to finish actions in single epoch, it would affect other checks"

    # rewards per block per stake, for comparing saturation phases
    pool1_user_rewards_per_block = _get_reward_per_block(pool_records[1])
    pool2_user_rewards_per_block = _get_reward_per_block(pool_records[2])
    pool3_user_rewards_per_block = _get_reward_per_block(pool_records[3])

    pool1_owner_rewards_per_block = _get_reward_per_block(
        pool_records[1], owner_rewards=True
    )
    pool2_owner_rewards_per_block = _get_reward_per_block(
        pool_records[2], owner_rewards=True
    )
    pool3_owner_rewards_per_block = _get_reward_per_block(
        pool_records[3], owner_rewards=True
    )

    # a negative saturation amount means the pool was over the saturation threshold
    oversaturated_epoch = max(
        e for e, r in pool_records[2].saturation_amounts.items() if r < 0
    )
    saturated_epoch = oversaturated_epoch - 2
    nonsaturated_epoch = oversaturated_epoch - 4

    try:
        # check that rewards per block per stake for "pool2" in the epoch where the pool is
        # oversaturated is lower than in epochs where pools are not oversaturated
        assert (
            pool1_user_rewards_per_block[nonsaturated_epoch]
            > pool2_user_rewards_per_block[oversaturated_epoch]
        )
        assert (
            pool2_user_rewards_per_block[nonsaturated_epoch]
            > pool2_user_rewards_per_block[oversaturated_epoch]
        )
        assert (
            pool3_user_rewards_per_block[nonsaturated_epoch]
            > pool2_user_rewards_per_block[oversaturated_epoch]
        )

        assert (
            pool1_user_rewards_per_block[saturated_epoch]
            > pool2_user_rewards_per_block[oversaturated_epoch]
        )
        assert (
            pool2_user_rewards_per_block[saturated_epoch]
            > pool2_user_rewards_per_block[oversaturated_epoch]
        )
        assert (
            pool3_user_rewards_per_block[saturated_epoch]
            > pool2_user_rewards_per_block[oversaturated_epoch]
        )

        # check that oversaturated pool doesn't lead to increased rewards for pool owner
        # when compared to saturated pool, i.e. total pool margin amount is not increased
        pool1_rew_fraction_sat = pool1_owner_rewards_per_block[saturated_epoch]
        pool2_rew_fraction_sat = pool2_owner_rewards_per_block[saturated_epoch]
        pool3_rew_fraction_sat = pool3_owner_rewards_per_block[saturated_epoch]

        pool2_rew_fraction_over = pool2_owner_rewards_per_block[oversaturated_epoch]

        assert pool2_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
            pool2_rew_fraction_sat,
            pool2_rew_fraction_over,
            frac=0.4,
        )
        assert pool1_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
            pool1_rew_fraction_sat,
            pool2_rew_fraction_over,
            frac=0.4,
        )
        assert pool3_rew_fraction_sat > pool2_rew_fraction_over or helpers.is_in_interval(
            pool3_rew_fraction_sat,
            pool2_rew_fraction_over,
            frac=0.4,
        )

        # Compare rewards in last (non-saturated) epoch to rewards in next-to-last
        # (saturated / over-saturated) epoch.
        # This way check that staked amount for each pool was restored to `initial_balance`
        # and that rewards correspond to the restored amounts.
        for pool_rec in pool_records.values():
            assert (
                pool_rec.user_rewards[-1].reward_per_epoch * 100
                < pool_rec.user_rewards[-2].reward_per_epoch
            )
    except Exception:
        # save debugging data in case of test failure
        with open(f"{temp_template}_pool_records.pickle", "wb") as out_data:
            pickle.dump(pool_records, out_data)
        raise
def test_no_reward_deregistered_reward_addr(
    self,
    cluster_manager: cluster_management.ClusterManager,
    cluster_lock_pool2: clusterlib.ClusterLib,
):
    """Check that the reward address is not receiving rewards when deregistered.

    The stake pool continues to operate normally and those who delegate to that pool receive
    rewards.

    * delegate stake address
    * wait for first reward
    * withdraw pool rewards to payment address
    * deregister the pool reward address
    * check that the key deposit was returned
    * check that pool owner is NOT receiving rewards
    * check that new rewards are received by those delegating to the pool
    * return the pool to the original state - reregister reward address
    * check that pool owner is receiving rewards
    """
    pool_name = cluster_management.Resources.POOL2
    cluster = cluster_lock_pool2

    pool_rec = cluster_manager.cache.addrs_data[pool_name]
    # the pool's reward stake address, paired with the pool's payment address
    pool_reward = clusterlib.PoolUser(payment=pool_rec["payment"], stake=pool_rec["reward"])
    temp_template = common.get_test_id(cluster)

    pool_id = delegation.get_pool_id(
        cluster_obj=cluster,
        addrs_data=cluster_manager.cache.addrs_data,
        pool_name=pool_name,
    )

    # NOTE(review): `start`/`stop` appear to be seconds relative to epoch
    # start/end (negative = from the end) - confirm with clusterlib_utils
    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-20)
    init_epoch = cluster.get_epoch()

    # submit registration certificate and delegate to pool
    delegation_out = delegation.delegate_stake_addr(
        cluster_obj=cluster,
        addrs_data=cluster_manager.cache.addrs_data,
        temp_template=temp_template,
        pool_id=pool_id,
    )

    assert (
        cluster.get_epoch() == init_epoch
    ), "Delegation took longer than expected and would affect other checks"

    LOGGER.info("Waiting 4 epochs for first reward.")
    cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
    if not cluster.get_stake_addr_info(
        delegation_out.pool_user.stake.address
    ).reward_account_balance:
        pytest.skip(
            f"User of pool '{pool_name}' hasn't received any rewards, cannot continue."
        )

    # make sure we have enough time to finish deregistration in one epoch
    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-40)

    # withdraw pool rewards to payment address
    # use `transaction build` if possible
    if (
        VERSIONS.transaction_era >= VERSIONS.ALONZO
        and VERSIONS.transaction_era == VERSIONS.cluster_era
    ):
        clusterlib_utils.withdraw_reward_w_build(
            cluster_obj=cluster,
            stake_addr_record=pool_reward.stake,
            dst_addr_record=pool_reward.payment,
            tx_name=temp_template,
        )
    else:
        cluster.withdraw_reward(
            stake_addr_record=pool_reward.stake,
            dst_addr_record=pool_reward.payment,
            tx_name=temp_template,
        )

    # deregister the pool reward address
    stake_addr_dereg_cert = cluster.gen_stake_addr_deregistration_cert(
        addr_name=f"{temp_template}_addr0",
        stake_vkey_file=pool_reward.stake.vkey_file,
    )
    tx_files_deregister = clusterlib.TxFiles(
        certificate_files=[stake_addr_dereg_cert],
        signing_key_files=[pool_reward.payment.skey_file, pool_reward.stake.skey_file],
    )

    src_init_balance = cluster.get_address_balance(pool_reward.payment.address)

    tx_raw_deregister_output = cluster.send_tx(
        src_address=pool_reward.payment.address,
        tx_name=f"{temp_template}_dereg_reward",
        tx_files=tx_files_deregister,
    )

    # everything below alters shared pool state, so restart the cluster on failure
    with cluster_manager.restart_on_failure():
        # check that the key deposit was returned
        # (deregistration refunds the stake key deposit, hence `+ get_address_deposit()`)
        assert (
            cluster.get_address_balance(pool_reward.payment.address)
            == src_init_balance
            - tx_raw_deregister_output.fee
            + cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{pool_reward.payment.address}`"

        # check that the reward address is no longer delegated
        assert not cluster.get_stake_addr_info(
            pool_reward.stake.address
        ), "Stake address still delegated"

        orig_user_reward = cluster.get_stake_addr_info(
            delegation_out.pool_user.stake.address
        ).reward_account_balance

        cluster.wait_for_new_epoch(3)

        # check that pool owner is NOT receiving rewards
        assert (
            cluster.get_stake_addr_info(pool_reward.stake.address).reward_account_balance
            == 0
        ), "Pool owner received unexpected rewards"

        # check that new rewards are received by those delegating to the pool
        assert (
            orig_user_reward
            < cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address
            ).reward_account_balance
        ), "New reward was not received by stake address"

        # Return the pool to the original state - reregister reward address.

        # fund pool owner's addresses so balance keeps higher than pool pledge after fees etc.
        # are deducted
        clusterlib_utils.fund_from_faucet(
            pool_reward,
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
            amount=900_000_000,
            force=True,
        )

        src_updated_balance = cluster.get_address_balance(pool_reward.payment.address)

        # reregister reward address
        tx_files = clusterlib.TxFiles(
            certificate_files=[
                pool_rec["reward_addr_registration_cert"],
            ],
            signing_key_files=[pool_reward.payment.skey_file, pool_reward.stake.skey_file],
        )
        tx_raw_output = cluster.send_tx(
            src_address=pool_reward.payment.address,
            tx_name=f"{temp_template}_rereg_deleg",
            tx_files=tx_files,
        )

        # check that the balance for source address was correctly updated
        # (re-registration pays the deposit again, hence `- get_address_deposit()`)
        assert (
            cluster.get_address_balance(pool_reward.payment.address)
            == src_updated_balance - tx_raw_output.fee - cluster.get_address_deposit()
        ), f"Incorrect balance for source address `{pool_reward.payment.address}`"

        cluster.wait_for_new_epoch(4, padding_seconds=30)

        # check that new rewards were received by those delegating to the pool
        assert (
            orig_user_reward
            < cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address
            ).reward_account_balance
        ), "New reward was not received by stake address"

        # check that pool owner is also receiving rewards
        assert (
            cluster.get_stake_addr_info(pool_reward.stake.address).reward_account_balance
            > 0
        ), "New reward was not received by pool reward address"

        # check that pledge is still met after the owner address was used to pay for Txs
        pool_data = clusterlib_utils.load_registered_pool_data(
            cluster_obj=cluster, pool_name=pool_name, pool_id=pool_id
        )
        owner_payment_balance = cluster.get_address_balance(pool_reward.payment.address)
        assert (
            owner_payment_balance >= pool_data.pool_pledge
        ), f"Pledge is not met for pool '{pool_name}'!"
def test_no_reward_unmet_pledge1(
    self,
    cluster_manager: cluster_management.ClusterManager,
    cluster_lock_pool2: clusterlib.ClusterLib,
):
    """Check that the stake pool is not receiving rewards when pledge is not met.

    When the pledge is higher than available funds, neither pool owners nor those who
    delegate to that pool receive rewards.

    * delegate stake address
    * wait for first reward
    * increase the needed pledge amount - update the pool parameters by resubmitting the pool
      registration certificate - the funds are now lower than what is needed by the stake pool
    * check that NO new rewards were received by those delegating to the pool
    * check that pool owner is also NOT receiving rewards
    * return the pool to the original state - restore pledge settings
    * check that new rewards were received by those delegating to the pool
    * check that pool owner is also receiving rewards
    """
    pool_name = cluster_management.Resources.POOL2
    cluster = cluster_lock_pool2

    pool_rec = cluster_manager.cache.addrs_data[pool_name]
    # pool owner = the pool's payment address paired with its (owner) stake address
    pool_owner = clusterlib.PoolUser(payment=pool_rec["payment"], stake=pool_rec["stake"])
    temp_template = common.get_test_id(cluster)

    pool_id = delegation.get_pool_id(
        cluster_obj=cluster,
        addrs_data=cluster_manager.cache.addrs_data,
        pool_name=pool_name,
    )

    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-20)
    init_epoch = cluster.get_epoch()

    # submit registration certificate and delegate to pool
    delegation_out = delegation.delegate_stake_addr(
        cluster_obj=cluster,
        addrs_data=cluster_manager.cache.addrs_data,
        temp_template=temp_template,
        pool_id=pool_id,
    )

    assert (
        cluster.get_epoch() == init_epoch
    ), "Delegation took longer than expected and would affect other checks"

    LOGGER.info("Waiting 4 epochs for first reward.")
    cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
    if not cluster.get_stake_addr_info(
        delegation_out.pool_user.stake.address
    ).reward_account_balance:
        pytest.skip(
            f"User of pool '{pool_name}' hasn't received any rewards, cannot continue."
        )

    # make sure we have enough time to finish the pool update in one epoch
    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-40)

    # load and update original pool data
    loaded_data = clusterlib_utils.load_registered_pool_data(
        cluster_obj=cluster, pool_name=f"changed_{pool_name}", pool_id=pool_id
    )
    # 9x the current pledge puts it far above the owner's available funds
    pool_data_updated = loaded_data._replace(pool_pledge=loaded_data.pool_pledge * 9)

    # increase the needed pledge amount - update the pool parameters by resubmitting the pool
    # registration certificate
    cluster.register_stake_pool(
        pool_data=pool_data_updated,
        pool_owners=[pool_owner],
        vrf_vkey_file=pool_rec["vrf_key_pair"].vkey_file,
        cold_key_pair=pool_rec["cold_key_pair"],
        tx_name=f"{temp_template}_update_param",
        reward_account_vkey_file=pool_rec["reward"].vkey_file,
        deposit=0,  # no additional deposit, the pool is already registered
    )

    cluster.wait_for_new_epoch(4, padding_seconds=30)

    # snapshot reward balances before checking that they stop growing
    orig_owner_reward = cluster.get_stake_addr_info(
        pool_rec["reward"].address
    ).reward_account_balance
    orig_user_reward = cluster.get_stake_addr_info(
        delegation_out.pool_user.stake.address
    ).reward_account_balance

    cluster.wait_for_new_epoch(3)

    # everything below alters shared pool state, so restart the cluster on failure
    with cluster_manager.restart_on_failure():
        # check that NO new rewards were received by those delegating to the pool
        assert (
            orig_user_reward
            == cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address
            ).reward_account_balance
        ), "Received unexpected rewards"

        # check that pool owner is also NOT receiving rewards
        assert (
            orig_owner_reward
            == cluster.get_stake_addr_info(
                pool_rec["reward"].address
            ).reward_account_balance
        ), "Pool owner received unexpected rewards"

        # Return the pool to the original state - restore pledge settings.

        # fund pool owner's addresses so balance keeps higher than pool pledge after fees etc.
        # are deducted
        clusterlib_utils.fund_from_faucet(
            pool_owner,
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
            amount=900_000_000,
            force=True,
        )

        # update the pool to original parameters by resubmitting
        # the pool registration certificate
        cluster.register_stake_pool(
            pool_data=loaded_data,
            pool_owners=[pool_owner],
            vrf_vkey_file=pool_rec["vrf_key_pair"].vkey_file,
            cold_key_pair=pool_rec["cold_key_pair"],
            tx_name=f"{temp_template}_update_to_orig",
            reward_account_vkey_file=pool_rec["reward"].vkey_file,
            deposit=0,  # no additional deposit, the pool is already registered
        )

        cluster.wait_for_new_epoch(5, padding_seconds=30)

        # check that new rewards were received by those delegating to the pool
        assert (
            orig_user_reward
            < cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address
            ).reward_account_balance
        ), "New reward was not received by stake address"

        # check that pool owner is also receiving rewards
        assert (
            orig_owner_reward
            < cluster.get_stake_addr_info(
                pool_rec["reward"].address
            ).reward_account_balance
        ), "New reward was not received by pool reward address"

        # check that pledge is still met after the owner address was used to pay for Txs
        pool_data = clusterlib_utils.load_registered_pool_data(
            cluster_obj=cluster, pool_name=pool_name, pool_id=pool_id
        )
        owner_payment_balance = cluster.get_address_balance(pool_owner.payment.address)
        assert (
            owner_payment_balance >= pool_data.pool_pledge
        ), f"Pledge is not met for pool '{pool_name}'!"
def test_no_reward_unmet_pledge2(
    self,
    cluster_manager: cluster_management.ClusterManager,
    cluster_lock_pool2: clusterlib.ClusterLib,
):
    """Check that the stake pool is not receiving rewards when pledge is not met.

    When the pledge is higher than available funds, neither pool owners nor those who
    delegate to that pool receive rewards.

    * delegate stake address
    * wait for first reward
    * withdraw part of the pledge - the funds are lower than what is needed by the stake pool
    * check that NO new rewards were received by those delegating to the pool
    * check that pool owner is also NOT receiving rewards
    * return the pool to the original state - restore pledge funds
    * check that new rewards were received by those delegating to the pool
    * check that pool owner is also receiving rewards
    """
    pool_name = cluster_management.Resources.POOL2
    cluster = cluster_lock_pool2

    pool_rec = cluster_manager.cache.addrs_data[pool_name]
    # pool owner = the pool's payment address paired with its (owner) stake address
    pool_owner = clusterlib.PoolUser(payment=pool_rec["payment"], stake=pool_rec["stake"])
    temp_template = common.get_test_id(cluster)

    pool_id = delegation.get_pool_id(
        cluster_obj=cluster,
        addrs_data=cluster_manager.cache.addrs_data,
        pool_name=pool_name,
    )

    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-20)
    init_epoch = cluster.get_epoch()

    # submit registration certificate and delegate to pool
    delegation_out = delegation.delegate_stake_addr(
        cluster_obj=cluster,
        addrs_data=cluster_manager.cache.addrs_data,
        temp_template=temp_template,
        pool_id=pool_id,
    )

    assert (
        cluster.get_epoch() == init_epoch
    ), "Delegation took longer than expected and would affect other checks"

    LOGGER.info("Waiting 4 epochs for first reward.")
    cluster.wait_for_new_epoch(new_epochs=4, padding_seconds=10)
    if not cluster.get_stake_addr_info(
        delegation_out.pool_user.stake.address
    ).reward_account_balance:
        pytest.skip(
            f"User of pool '{pool_name}' hasn't received any rewards, cannot continue."
        )

    # make sure we have enough time to withdraw the pledge in one epoch
    clusterlib_utils.wait_for_epoch_interval(cluster_obj=cluster, start=5, stop=-40)

    # load pool data
    loaded_data = clusterlib_utils.load_registered_pool_data(
        cluster_obj=cluster, pool_name=f"changed_{pool_name}", pool_id=pool_id
    )

    # moving half of the pledge away drops the owner's funds below the pledge
    pledge_amount = loaded_data.pool_pledge // 2

    # withdraw part of the pledge
    destinations = [
        clusterlib.TxOut(
            address=delegation_out.pool_user.payment.address, amount=pledge_amount
        )
    ]
    tx_files = clusterlib.TxFiles(signing_key_files=[pool_owner.payment.skey_file])
    cluster.send_funds(
        src_address=pool_owner.payment.address,
        destinations=destinations,
        tx_name=f"{temp_template}_withdraw_pledge",
        tx_files=tx_files,
    )

    assert cluster.get_address_balance(
        pool_owner.payment.address
    ) < loaded_data.pool_pledge, (
        f"Pledge still high - pledge: {loaded_data.pool_pledge}, "
        f"funds: {cluster.get_address_balance(pool_owner.payment.address)}"
    )

    cluster.wait_for_new_epoch(4, padding_seconds=30)

    # snapshot reward balances before checking that they stop growing
    orig_owner_reward = cluster.get_stake_addr_info(
        pool_rec["reward"].address
    ).reward_account_balance
    orig_user_reward = cluster.get_stake_addr_info(
        delegation_out.pool_user.stake.address
    ).reward_account_balance

    cluster.wait_for_new_epoch(3)

    # everything below alters shared pool state, so restart the cluster on failure
    with cluster_manager.restart_on_failure():
        # check that NO new rewards were received by those delegating to the pool
        assert (
            orig_user_reward
            == cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address
            ).reward_account_balance
        ), "Received unexpected rewards"

        # check that pool owner is also NOT receiving rewards
        assert (
            orig_owner_reward
            == cluster.get_stake_addr_info(
                pool_rec["reward"].address
            ).reward_account_balance
        ), "Pool owner received unexpected rewards"

        # Return the pool to the original state - restore pledge funds.

        # fund user address so it has enough funds for fees etc.
        clusterlib_utils.fund_from_faucet(
            delegation_out.pool_user,
            cluster_obj=cluster,
            faucet_data=cluster_manager.cache.addrs_data["user1"],
            amount=900_000_000,
            force=True,
        )

        # return pledge (extra 100 ADA covers the fees paid along the way)
        destinations = [
            clusterlib.TxOut(
                address=pool_owner.payment.address, amount=pledge_amount + 100_000_000
            )
        ]
        tx_files = clusterlib.TxFiles(
            signing_key_files=[delegation_out.pool_user.payment.skey_file]
        )
        cluster.send_funds(
            src_address=delegation_out.pool_user.payment.address,
            destinations=destinations,
            tx_name=f"{temp_template}_return_pledge",
            tx_files=tx_files,
        )

        assert (
            cluster.get_address_balance(pool_owner.payment.address)
            >= loaded_data.pool_pledge
        ), (
            f"Funds still low - pledge: {loaded_data.pool_pledge}, "
            f"funds: {cluster.get_address_balance(pool_owner.payment.address)}"
        )

        cluster.wait_for_new_epoch(5, padding_seconds=30)

        # check that new rewards were received by those delegating to the pool
        assert (
            orig_user_reward
            < cluster.get_stake_addr_info(
                delegation_out.pool_user.stake.address
            ).reward_account_balance
        ), "New reward was not received by stake address"

        # check that pool owner is also receiving rewards
        assert (
            orig_owner_reward
            < cluster.get_stake_addr_info(
                pool_rec["reward"].address
            ).reward_account_balance
        ), "New reward was not received by pool reward address"