def cluster_and_pool(
    cluster_manager: cluster_management.ClusterManager,
) -> Tuple[clusterlib.ClusterLib, str]:
    """Return instance of `clusterlib.ClusterLib`, and pool id to delegate to.

    We need to mark the pool as "in use" when requesting a local cluster
    instance, that's why the cluster instance and pool id are tied together
    in a single fixture.
    """
    cluster_type = cluster_nodes.get_cluster_type()

    if cluster_type.type == cluster_nodes.ClusterType.TESTNET_NOPOOLS:
        cluster_obj: clusterlib.ClusterLib = cluster_manager.get()

        # getting ledger state on official testnet is too expensive,
        # use one of hardcoded pool IDs if possible
        if cluster_type.testnet_type == cluster_nodes.Testnets.testnet:  # type: ignore
            stake_pools = cluster_obj.get_stake_pools()
            for pool_id in configuration.TESTNET_POOL_IDS:
                if pool_id in stake_pools:
                    return cluster_obj, pool_id

        blocks_before = clusterlib_utils.get_blocks_before(cluster_obj)
        # sort pools by how many blocks they produce
        pool_ids_s = sorted(blocks_before, key=blocks_before.get, reverse=True)  # type: ignore
        # select a pool with reasonable margin
        for pool_id in pool_ids_s:
            pool_params = cluster_obj.get_pool_params(pool_id)
            if pool_params.pool_params["margin"] <= 0.5 and not pool_params.retiring:
                break
        else:
            pytest.skip("Cannot find any usable pool.")
    elif cluster_type.type == cluster_nodes.ClusterType.TESTNET:
        # the "testnet" cluster has just a single pool, "node-pool1"
        cluster_obj = cluster_manager.get(
            use_resources=[cluster_management.Resources.POOL1]
        )
        pool_id = get_pool_id(
            cluster_obj=cluster_obj,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=cluster_management.Resources.POOL1,
        )
    else:
        cluster_obj = cluster_manager.get(
            use_resources=[cluster_management.Resources.POOL3]
        )
        pool_id = get_pool_id(
            cluster_obj=cluster_obj,
            addrs_data=cluster_manager.cache.addrs_data,
            pool_name=cluster_management.Resources.POOL3,
        )

    return cluster_obj, pool_id
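# A minimal sketch of how a test might consume the `cluster_and_pool` fixture.
# The test name and the assertion are illustrative only (not taken from the
# test suite); the check only relies on `get_stake_pools`, which is already
# used in the fixture above.
def test_pool_is_registered_example(  # hypothetical test, for illustration
    cluster_and_pool: Tuple[clusterlib.ClusterLib, str],
):
    cluster_obj, pool_id = cluster_and_pool
    # the pool id returned by the fixture is expected to be registered
    assert pool_id in cluster_obj.get_stake_pools()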
def cluster_update_proposal(
    self,
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    """Lock the whole cluster instance and request cleanup afterwards."""
    return cluster_manager.get(
        lock_resources=[cluster_management.Resources.CLUSTER], cleanup=True
    )
def cluster_kes(
    cluster_manager: cluster_management.ClusterManager, short_kes_start_cluster: Path
) -> clusterlib.ClusterLib:
    """Lock the whole cluster and start it with the short-KES start command."""
    return cluster_manager.get(
        lock_resources=[cluster_management.Resources.CLUSTER],
        cleanup=True,
        start_cmd=str(short_kes_start_cluster),
    )
def cluster_lock_pools(
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    """Lock all three pools so no other test can use them in parallel."""
    return cluster_manager.get(
        lock_resources=[
            cluster_management.Resources.POOL1,
            cluster_management.Resources.POOL2,
            cluster_management.Resources.POOL3,
        ]
    )
def cluster_pots(
    self,
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    """Lock the reserves and treasury pots."""
    return cluster_manager.get(
        lock_resources=[
            cluster_management.Resources.RESERVES,
            cluster_management.Resources.TREASURY,
        ]
    )
def cluster_epoch_length(
    cluster_manager: cluster_management.ClusterManager, epoch_length_start_cluster: Path
) -> clusterlib.ClusterLib:
    """Lock the whole cluster and start it with the custom epoch length start command."""
    return cluster_manager.get(
        lock_resources=[cluster_management.Resources.CLUSTER],
        cleanup=True,
        start_cmd=str(epoch_length_start_cluster),
    )
def cluster_lock_42stake(
    cluster_manager: cluster_management.ClusterManager,
) -> Tuple[clusterlib.ClusterLib, str]:
    """Make sure just one staking Plutus test runs at a time.

    A Plutus script always has the same address. When one script is used in
    multiple tests that run in parallel, the balances etc. don't add up.
    """
    cluster_obj = cluster_manager.get(
        lock_resources=[str(plutus_common.STAKE_GUESS_42_PLUTUS_V1.stem)],
        use_resources=[cluster_management.Resources.POOL3],
    )
    pool_id = delegation.get_pool_id(
        cluster_obj=cluster_obj,
        addrs_data=cluster_manager.cache.addrs_data,
        pool_name=cluster_management.Resources.POOL3,
    )
    return cluster_obj, pool_id
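# A minimal, hypothetical sketch showing how a staking Plutus test might use
# the `cluster_lock_42stake` fixture. The test body is illustrative only; the
# point is that unpacking mirrors the fixture's return value and that the
# Plutus stake script resource stays locked while the test runs, so no other
# test can spend from the same script address in parallel.
def test_stake_script_example(  # hypothetical test, for illustration
    cluster_lock_42stake: Tuple[clusterlib.ClusterLib, str],
):
    cluster_obj, pool_id = cluster_lock_42stake
    assert pool_id in cluster_obj.get_stake_pools()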
def cluster_kes(
    cluster_manager: cluster_management.ClusterManager, short_kes_start_cluster: Path
) -> clusterlib.ClusterLib:
    return cluster_manager.get(
        singleton=True, cleanup=True, start_cmd=str(short_kes_start_cluster)
    )
def cluster_lock_pool2(
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    """Lock the "node-pool2" resource so no other test can use the pool in parallel."""
    return cluster_manager.get(lock_resources=["node-pool2"])
def cluster_epoch_length(
    cluster_manager: cluster_management.ClusterManager, epoch_length_start_cluster: Path
) -> clusterlib.ClusterLib:
    return cluster_manager.get(
        singleton=True, cleanup=True, start_cmd=str(epoch_length_start_cluster)
    )
def cluster_use_pool1(
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    """Mark `POOL1` as in use while the test is running."""
    return cluster_manager.get(use_resources=[cluster_management.Resources.POOL1])
def cluster(
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    """Return instance of `clusterlib.ClusterLib`."""
    return cluster_manager.get()
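# A minimal, hypothetical sketch of a test that only needs a shared cluster
# instance with no locked or used resources. `get_epoch` is a `clusterlib`
# query; the test itself is illustrative and not taken from the test suite.
def test_cluster_example(cluster: clusterlib.ClusterLib):  # hypothetical test
    # the shared cluster instance can answer simple queries right away
    assert cluster.get_epoch() >= 0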
def cluster_update_proposal(
    self,
    cluster_manager: cluster_management.ClusterManager,
) -> clusterlib.ClusterLib:
    return cluster_manager.get(singleton=True, cleanup=True)