def configure(self) -> None:
    """Load cluster configuration and record the signal fetch/run process counts."""
    setup_config(self.options)
    self.logger = logger
    # setup_signals_environment returns (fetch_count, run_count) for this pool.
    proc_counts = setup_signals_environment(self.options.pool, self.options.scheduler)
    self.fetch_proc_count, self.run_proc_count = proc_counts
def main(args: argparse.Namespace) -> None:
    """Load every pool's config for this cluster, then process the drain queues."""
    setup_config(args)
    # Identical bootstrap for both schedulers: discover pools, load each pool config.
    for scheduler in ('mesos', 'kubernetes'):
        for pool in get_pool_name_list(args.cluster, scheduler):
            load_cluster_pool_config(args.cluster, pool, scheduler, None)
    process_queues(args.cluster)
def test_setup_config_cluster(cluster, pool, scheduler, tag, mock_config_files):
    """setup_config loads module configs, and pool config only when a pool is given."""
    namespace_args = argparse.Namespace(
        env_config_path='/nail/etc/config.yaml',
        cluster=cluster,
        pool=pool,
        scheduler=scheduler,
        signals_branch_or_tag=tag,
    )
    with mock.patch('clusterman.config.load_cluster_pool_config', autospec=True) as pool_load_patch:
        with mock.patch('clusterman.config._load_module_configs') as module_configs_patch:
            config.setup_config(namespace_args)
            assert module_configs_patch.call_args == mock.call('/nail/etc/config.yaml')
            assert staticconf.read_string('aws.region') == 'us-test-3'
            if pool:
                assert pool_load_patch.call_args == mock.call(cluster, pool, scheduler, tag)
            else:
                assert pool_load_patch.call_count == 0
            if tag:
                assert staticconf.read_string('autoscale_signal.branch_or_tag') == tag
def configure_initial(self) -> None:
    """Load cluster config, then discover, watch, and load every pool's config.

    For each scheduler type the cluster's pools are discovered, a config-file
    watcher is registered per pool, and the pool config is loaded into
    staticconf; batch settings and the metrics client are then initialized.
    """
    setup_config(self.options)
    # Since we want to collect metrics for all the pools, we need to call setup_config
    # first to load the cluster config path, and then read all the entries in that directory
    self.pools: MutableMapping[str, List[str]] = {}
    # Iterate a tuple, not a set: set iteration order is arbitrary, which made
    # the watcher-registration order nondeterministic between runs.
    for scheduler in ('mesos', 'kubernetes'):
        self.pools[scheduler] = get_pool_name_list(self.options.cluster, scheduler)
    for scheduler, pools in self.pools.items():
        for pool in pools:
            self.config.watchers.append({
                f'{pool}.{scheduler}': get_pool_config_path(self.options.cluster, pool, scheduler),
            })
            load_cluster_pool_config(self.options.cluster, pool, scheduler, None)
    self.region = staticconf.read_string('aws.region')
    self.run_interval = staticconf.read_int('batches.cluster_metrics.run_interval_seconds')
    self.logger = logger
    self.metrics_client = ClustermanMetricsBotoClient(region_name=self.region)
def configure_initial(self):
    """Load config and construct the autoscaler for this pool."""
    setup_config(self.options)
    self.autoscaler = None
    self.logger = logger
    # TODO (CLUSTERMAN-126) someday these should not be the same thing
    self.apps = [self.options.pool]
    manager = PoolManager(self.options.cluster, self.options.pool, self.options.scheduler)
    self.autoscaler = Autoscaler(
        self.options.cluster,
        self.options.pool,
        self.options.scheduler,
        self.apps,
        monitoring_enabled=(not self.options.dry_run),
        pool_manager=manager,
    )
    # We don't want to watch anything here because the autoscaler bootstrap script takes care of that for us
    self.config.watchers.clear()
def main(argv=None):
    """CLI entry point: parse args, set up logging and config, dispatch the subcommand."""
    argv = sys.argv[1:] if argv is None else argv
    parsed = parse_args(argv, 'Cluster scaling and management for Mesos and Kubernetes')
    setup_logging(parsed.log_level)
    setup_config(parsed)
    parsed.entrypoint(parsed)
def test_setup_config_region(mock_load_module_configs, mock_config_files):
    """setup_config honors an explicit aws_region override."""
    namespace_args = argparse.Namespace(
        env_config_path='/nail/etc/config.yaml',
        aws_region='fake-region-A',
    )
    config.setup_config(namespace_args)
    assert mock_load_module_configs.call_args == mock.call('/nail/etc/config.yaml')
    assert staticconf.read_string('aws.region') == 'fake-region-A'
def test_setup_config_region_and_cluster():
    """Supplying both a cluster and an explicit AWS region must raise ArgumentError."""
    namespace_args = argparse.Namespace(
        env_config_path='/nail/etc/config.yaml',
        cluster='foo',
        aws_region='bar',
    )
    with mock.patch('clusterman.config._load_module_configs'):
        with pytest.raises(argparse.ArgumentError):
            config.setup_config(namespace_args)
def configure_initial(self):
    """Read spot-price batch settings from config and build the metrics client."""
    # Any keys in the env_config will override defaults in config.yaml.
    setup_config(self.options)
    self.logger = logger
    self.region = staticconf.read_string('aws.region')
    self.last_time_called = self.options.start_time
    batch_ns = 'batches.spot_prices'
    self.run_interval = staticconf.read_int(f'{batch_ns}.run_interval_seconds')
    self.dedupe_interval = staticconf.read_int(f'{batch_ns}.dedupe_interval_seconds')
    self.metrics_client = ClustermanMetricsBotoClient(region_name=self.region)
def configure_initial(self) -> None:
    """Load config, size the signal processes, and watch this pool's config file."""
    setup_config(self.options)
    self.logger = logger
    # setup_signals_environment returns (fetch_count, run_count) for this pool.
    proc_counts = setup_signals_environment(self.options.pool, self.options.scheduler)
    self.fetch_proc_count, self.run_proc_count = proc_counts
    pool_config_path = get_pool_config_path(
        self.options.cluster,
        self.options.pool,
        self.options.scheduler,
    )
    self.config.watchers.append({self.options.pool: pool_config_path})
def configure(self) -> None:
    """Load config and build the autoscaler for this pool."""
    setup_config(self.options)
    self.autoscaler = None
    self.logger = logger
    # TODO (CLUSTERMAN-126) someday these should not be the same thing
    self.apps = [self.options.pool]
    manager = PoolManager(self.options.cluster, self.options.pool, self.options.scheduler)
    self.autoscaler = Autoscaler(
        self.options.cluster,
        self.options.pool,
        self.options.scheduler,
        self.apps,
        monitoring_enabled=(not self.options.dry_run),
        pool_manager=manager,
    )
def main(argv=None):
    """CLI entry point: parse args, set up logging and config, run the subcommand.

    Any uncaught exception from the entrypoint is reported to the user and the
    process exits with status 1 instead of dumping a traceback.
    """
    if argv is None:
        argv = sys.argv[1:]
    args = parse_args(argv, 'Cluster scaling and management for Mesos and Kubernetes')
    setup_logging(args.log_level)
    setup_config(args)
    try:
        args.entrypoint(args)
    except Exception as e:
        # Fixed typo ("occured"); iterating empty e.args is a no-op so no guard needed.
        print(f'Exception of type {e.__class__.__name__} occurred')
        for arg in e.args:
            print(arg)
        # sys.exit instead of the site-module exit() builtin, which is only
        # guaranteed to exist in interactive sessions.
        sys.exit(1)
# NOTE(review): this fragment belongs to an enclosing function/loop whose start is
# outside this view ('response' and 'request_items' come from earlier code); the
# nesting below is a best-guess reconstruction — confirm against the full file.
request_items = response.get('UnprocessedItems', {})
if request_items:
    # Back off before retrying the unprocessed DynamoDB items (presumably) — TODO confirm
    time.sleep(5)
print('\n')


def parse_args():
    """Build and parse the CLI arguments for the metric-rename script.

    Returns:
        argparse.Namespace with env_config_path, region, metric_type, and
        mapping_file attributes.
    """
    parser = argparse.ArgumentParser()
    add_env_config_path_arg(parser)
    add_region_arg(parser, required=True)
    parser.add_argument(
        '--metric-type',
        choices=list(METRIC_TYPES),
        required=True,
        help='The type of metric to rename',
    )
    parser.add_argument(
        '--mapping-file',
        required=True,
        # NOTE(review): "from -> two mappings" looks like a typo for "from -> to
        # mappings", but the help text is runtime output so it is left unchanged here.
        help=
        'A file containing a list of from -> two mappings to rename, one per line, separated by white space'
    )
    return parser.parse_args()


# Script entry point: load config from the parsed args, then run the rename.
if __name__ == '__main__':
    args = parse_args()
    setup_config(args)
    main(args)
def main(args: argparse.Namespace) -> None:
    """Load the cluster's mesos pool configs, then process the drain queues."""
    setup_config(args)
    for pool_name in get_pool_name_list(args.cluster, 'mesos'):  # drainer only supported for mesos
        load_cluster_pool_config(args.cluster, pool_name, 'mesos', None)
    process_queues(args.cluster)
def main():
    """CLI entry point for Mesos cluster scaling and management."""
    parsed = parse_args('Mesos cluster scaling and management')
    setup_logging(parsed.log_level)
    setup_config(parsed)
    parsed.entrypoint(parsed)