def test_get_pool_name_list(mock_listdir, mock_get_cluster_config_directory):
    """get_pool_name_list keeps only files whose suffix matches the scheduler.

    Files with other extensions (``.xml``) or no extension at all are ignored,
    and the helper consults the cluster config directory exactly once.
    """
    config_dir = '/tmp/somedir/cluster-A'
    mock_get_cluster_config_directory.return_value = config_dir
    mock_listdir.return_value = [
        'pool-A.mesos',
        'pool-B.xml',
        'pool-C.mesos',
        'pool-D',
        'pool-F.kubernetes',
    ]

    mesos_pools = get_pool_name_list('cluster-A', 'mesos')
    kube_pools = get_pool_name_list('cluster-A', 'kubernetes')

    assert set(mesos_pools) == {'pool-A', 'pool-C'}
    assert set(kube_pools) == {'pool-F'}
    assert mock_get_cluster_config_directory.call_args == mock.call('cluster-A')
    assert mock_listdir.call_args == mock.call(config_dir)
def main(args: argparse.Namespace) -> None:
    """Load pool configs for every supported scheduler, then process queues.

    Mesos pools are loaded first, then Kubernetes pools, matching the
    configuration load order expected by ``process_queues``.
    """
    setup_config(args)
    for scheduler in ('mesos', 'kubernetes'):
        for pool in get_pool_name_list(args.cluster, scheduler):
            load_cluster_pool_config(args.cluster, pool, scheduler, None)
    process_queues(args.cluster)
def list_pools(args):  # pragma: no cover
    """Print the cluster's pools per scheduler, as JSON or readable text.

    With ``--json`` the output is a single object mapping scheduler name to
    its list of pools; otherwise a headed, newline-separated list is printed
    for each scheduler.
    """
    schedulers = ['mesos', 'kubernetes']
    if args.json:
        payload = {}
        for scheduler in schedulers:
            payload[scheduler] = list(get_pool_name_list(args.cluster, scheduler))
        print(json.dumps(payload))
    else:
        for scheduler in schedulers:
            print(f'\n{scheduler.capitalize()} pools\n--------------------')
            print('\n'.join(get_pool_name_list(args.cluster, scheduler)))
def process_warning_queue(self) -> None:
    """Handle one spot-termination warning from the warning queue.

    Pops a warned host (if any), collects the instance ids of every spot
    fleet resource group configured across this cluster's Mesos pools, and
    submits the host for draining when its ``group_id`` is among them.
    Warnings from groups we don't manage are acknowledged by deleting the
    message so they are not reprocessed.
    """
    host_to_process = self.get_warned_host()
    if host_to_process:
        logger.info(
            f'Processing spot warning for {host_to_process.hostname}')
        spot_fleet_resource_groups = []
        # Enumerate every SFR id across all Mesos pools in this cluster.
        for pool in get_pool_name_list(
                self.cluster, 'mesos'):  # draining only supported for Mesos clusters
            pool_config = staticconf.NamespaceReaders(
                POOL_NAMESPACE.format(pool=pool, scheduler='mesos'))
            for resource_group_conf in pool_config.read_list(
                    'resource_groups'):
                # Each entry is a single-key mapping: {type: config}; the
                # loaded groups are keyed by resource-group id.
                spot_fleet_resource_groups.extend(
                    list(
                        SpotFleetResourceGroup.load(
                            cluster=self.cluster,
                            pool=pool,
                            config=list(resource_group_conf.values())[0],
                        ).keys()))

        # we should definitely ignore termination warnings that aren't from this
        # cluster or maybe not even paasta instances...
        if host_to_process.group_id in spot_fleet_resource_groups:
            logger.info(
                f'Sending spot warned host to drain: {host_to_process.hostname}'
            )
            self.submit_host_for_draining(host_to_process)
        else:
            # Not one of ours: delete the message so the queue doesn't
            # redeliver a warning we will never act on.
            logger.info(
                f'Ignoring spot warned host because not in our SFRs: {host_to_process.hostname}'
            )
            self.delete_warning_messages([host_to_process])
def configure_initial(self) -> None:
    """One-time batch setup: cluster config, pool watchers, metrics client."""
    # setup_config must run first so the cluster config directory is loaded
    # before we enumerate the pool entries stored inside it.
    setup_config(self.options)
    self.pools: MutableMapping[str, List[str]] = {
        scheduler: get_pool_name_list(self.options.cluster, scheduler)
        for scheduler in ('mesos', 'kubernetes')
    }
    # Register a config-file watcher per pool and load its configuration.
    for scheduler, pools in self.pools.items():
        for pool in pools:
            watcher = {
                f'{pool}.{scheduler}': get_pool_config_path(self.options.cluster, pool, scheduler),
            }
            self.config.watchers.append(watcher)
            load_cluster_pool_config(self.options.cluster, pool, scheduler, None)
    self.region = staticconf.read_string('aws.region')
    self.run_interval = staticconf.read_int(
        'batches.cluster_metrics.run_interval_seconds')
    self.logger = logger
    self.metrics_client = ClustermanMetricsBotoClient(
        region_name=self.region)
def list_pools(args):  # pragma: no cover
    """Print a headed, newline-separated pool list for each scheduler."""
    for scheduler in ('mesos', 'kubernetes'):
        print(f'\n{scheduler.capitalize()} pools\n--------------------')
        pool_names = get_pool_name_list(args.cluster, scheduler)
        print('\n'.join(pool_names))
def main(args: argparse.Namespace) -> None:
    """Load Mesos pool configs, then run the drain queue processor."""
    setup_config(args)
    # drainer only supported for mesos
    mesos_pools = get_pool_name_list(args.cluster, 'mesos')
    for pool in mesos_pools:
        load_cluster_pool_config(args.cluster, pool, 'mesos', None)
    process_queues(args.cluster)