def config(_, args, config_path):
    """Gets or sets the value for a given configuration key.

    The key is specified using a dot-separated path, e.g.:

        $ cs config metrics.disabled false

    In the example above, we will set the disabled key in the metrics
    map to false.

    :param _: unused (cluster list positional slot shared with other actions)
    :param args: parsed argument dict with 'get', 'key', and 'value' entries
    :param config_path: path to the configuration file, or None to search
    :return: the config value (get mode) or the result of set_config_value
    :raises Exception: if more than one key is given, or if no config file
        can be located in set mode
    """
    get = args.get('get')
    key = args.get('key')
    value = args.get('value')

    if len(key) > 1:
        # argparse should prevent this, but we'll be defensive anyway
        raise Exception('You can only provide a single key.')

    keys = key[0].split('.')
    config_path, config_map = load_config_with_defaults(config_path)
    if get:
        return get_config_value(config_map, keys)
    else:
        # Setting requires a concrete file to write back to.
        if not config_path:
            raise Exception('Unable to locate configuration file.')
        return set_config_value(config_map, keys, value, config_path)
def run(args):
    """Main entrypoint to the cook scheduler CLI.

    Loads configuration files, processes global command line arguments,
    and calls other command line sub-commands (actions) if necessary.

    :param args: list of raw command-line arguments (as in sys.argv[1:])
    :return: 0 for --version, the action's result, or None if no action
    """
    args = vars(parser.parse_args(args))

    if args.pop('version'):
        print(f'cs version {version.VERSION}')
        return 0

    util.silent = args.pop('silent')
    verbose = args.pop('verbose') and not util.silent

    log_format = '%(asctime)s [%(levelname)s] [%(name)s] %(message)s'
    if verbose:
        # Reset any handlers installed earlier so basicConfig takes effect.
        logging.getLogger('').handlers = []
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    else:
        logging.disable(logging.FATAL)

    # Lazy %-args: the message is only formatted when DEBUG is enabled.
    logging.debug('args: %s', args)

    action = args.pop('action')
    config_path = args.pop('config')
    cluster = args.pop('cluster')
    url = args.pop('url')

    if action is None:
        parser.print_help()
    else:
        # load_config_with_defaults returns (config_path, config_map) — the
        # sibling run() and config() both unpack a 2-tuple here, while this
        # variant assigned the whole tuple to config_map; unpack and discard
        # the path for consistency. (If this file version's helper returned
        # only the map, revert this unpacking.)
        _, config_map = configuration.load_config_with_defaults(config_path)
        try:
            metrics.initialize(config_map)
            metrics.inc('command.%s.runs' % action)
            clusters = load_target_clusters(config_map, url, cluster)
            http.configure(config_map)
            # Drop unset options so action defaults from config can apply.
            args = {k: v for k, v in args.items() if v is not None}
            defaults = config_map.get('defaults')
            action_defaults = (defaults.get(action) if defaults else None) or {}
            result = actions[action](clusters, deep_merge(action_defaults, args), config_path)
            logging.debug('result: %s', result)
            return result
        finally:
            # Always flush metrics, even if the action raised.
            metrics.close()

    return None
def copy_limits(args, config_path):
    """Copies limits (share and quota) for a particular user from one cluster to another cluster.

    For each pool that exists on the source cluster and is not inactive on
    the destination cluster, shows the current limits on both clusters and
    interactively asks whether to copy the source pool's share and quota.

    :param args: parsed argument dict with 'user', 'from'/'from_url', and
        'to'/'to_url' entries
    :param config_path: path to the configuration file, or None to search
    :raises Exception: if neither a cluster name nor a URL is given for
        either side
    """
    user = args.get('user')

    # Exactly one of --from / --from-url must identify the source cluster.
    from_cluster = args.get('from')
    from_url = args.get('from_url')
    if not from_cluster and not from_url:
        copy_limits_parser.print_help()
        print()
        raise Exception(f'You must provide either a from-cluster name (--from) or URL (--from-url).')

    # Likewise for the destination cluster.
    to_cluster = args.get('to')
    to_url = args.get('to_url')
    if not to_cluster and not to_url:
        copy_limits_parser.print_help()
        print()
        raise Exception(f'You must provide either a to-cluster name (--to) or URL (--to-url).')

    _, config_map = configuration.load_config_with_defaults(config_path)
    from_clusters = load_target_clusters(config_map, from_url, from_cluster)
    to_clusters = load_target_clusters(config_map, to_url, to_cluster)
    # NOTE(review): assert is stripped under `python -O`; consider raising
    # an explicit Exception here instead — confirm before changing, since
    # callers may rely on AssertionError.
    assert len(from_clusters) == 1, 'Only a single from-cluster is supported.'
    assert len(to_clusters) == 1, 'Only a single to-cluster is supported.'
    from_cluster = from_clusters[0]
    to_cluster = to_clusters[0]
    from_cluster_name = from_cluster['name']
    to_cluster_name = to_cluster['name']
    print(f'Copying limits for {terminal.bold(user)} user '
          f'from {terminal.bold(from_cluster_name)} '
          f'to {terminal.bold(to_cluster_name)}:')
    # Fetch the pool lists from both clusters and index them by name.
    from_pools = http.make_data_request(from_cluster, lambda: http.get(from_cluster, 'pools', params={}))
    to_pools = http.make_data_request(to_cluster, lambda: http.get(to_cluster, 'pools', params={}))
    from_pools_dict = {pool['name']: pool for pool in from_pools}
    to_pools_dict = {pool['name']: pool for pool in to_pools}
    for pool_name, from_pool in from_pools_dict.items():
        # Only offer pools that exist on the destination and are active there.
        if pool_name in to_pools_dict and to_pools_dict[pool_name]['state'] != 'inactive':
            print(f'\n=== Pool: {pool_name} ===')
            # Show the user's current limits on both clusters for this pool
            # so the operator can make an informed decision.
            query_result = query([from_cluster, to_cluster], user)
            query_result = filter_query_result_by_pools(query_result, [pool_name])
            print_formatted(query_result)
            answer = input(f'Copy limits for {terminal.bold(pool_name)} pool '
                           f'from {terminal.bold(from_cluster_name)} '
                           f'to {terminal.bold(to_cluster_name)}? ')
            should_copy = str2bool(answer)
            if should_copy:
                from_dict = query_result['clusters'][from_cluster_name]['pools'][pool_name]
                reason = f'Copying limits for {user} user from {from_cluster_name} to {to_cluster_name}'
                # Copy the share; the API signals success with HTTP 201.
                from_share = from_dict['share']
                resp = http.post(to_cluster, 'share', {'pool': pool_name, 'user': user, 'reason': reason, 'share': from_share})
                if resp.status_code != 201:
                    print_error(f'Setting share for {pool_name} on {to_cluster_name} '
                                f'failed with status code {resp.status_code}: {resp.text}')
                else:
                    print(terminal.success(f'Copied share for {pool_name} pool '
                                           f'from {from_cluster_name} '
                                           f'to {to_cluster_name}.'))
                # Copy the quota; attempted even if the share copy failed.
                from_quota = from_dict['quota']
                resp = http.post(to_cluster, 'quota', {'pool': pool_name, 'user': user, 'reason': reason, 'quota': from_quota})
                if resp.status_code != 201:
                    print_error(f'Setting quota for {pool_name} on {to_cluster_name} '
                                f'failed with status code {resp.status_code}: {resp.text}')
                else:
                    print(terminal.success(f'Copied quota for {pool_name} pool '
                                           f'from {from_cluster_name} '
                                           f'to {to_cluster_name}.'))
def run(args, plugins):
    """Main entrypoint to the cook scheduler CLI.

    Loads configuration files, processes global command line arguments,
    and calls other command line sub-commands (actions) if necessary.

    :param args: list of raw command-line arguments (as in sys.argv[1:])
    :param plugins: map from plugin-name -> function or Class.SubCommandPlugin
    :return: the action's result, or None if no action was given
    """
    # This has to happen before we parse the args, otherwise we might
    # get subcommand not found.
    for name, instance in plugins.items():
        if isinstance(instance, SubCommandPlugin):
            logging.debug('Adding SubCommandPlugin %s', name)
            try:
                instance.register(subparsers.add_parser, configuration.add_defaults)
                logging.debug('Done adding SubCommandPlugin %s', name)
                # NOTE(review): rebinding `name` here means a failure after
                # this point reports the plugin's self-declared name, not the
                # registry key — confirm this is intended.
                name = instance.name()
                if name in actions:
                    raise Exception('SubCommandPlugin %s clashes with an existing subcommand.' % name)
                actions[name] = instance.run
            except Exception as e:
                # Best-effort: a broken plugin must not take down the CLI.
                print('Failed to load SubCommandPlugin %s: %s' % (name, e), file=sys.stderr)

    args = vars(parser.parse_args(args))

    util.silent = args.pop('silent')
    verbose = args.pop('verbose') and not util.silent

    log_format = '%(asctime)s [%(levelname)s] [%(name)s] %(message)s'
    if verbose:
        # Reset any handlers installed earlier so basicConfig takes effect.
        logging.getLogger('').handlers = []
        logging.basicConfig(format=log_format, level=logging.DEBUG)
    else:
        logging.disable(logging.FATAL)

    logging.debug('args: %s', args)

    action = args.pop('action')
    config_path = args.pop('config')
    cluster = args.pop('cluster')
    url = args.pop('url')

    if action is None:
        parser.print_help()
    else:
        _, config_map = configuration.load_config_with_defaults(config_path)
        try:
            metrics.initialize(config_map)
            metrics.inc('command.%s.runs' % action)
            clusters = load_target_clusters(config_map, url, cluster)
            http.configure(config_map, plugins)
            cook.plugins.configure(plugins)
            # Drop unset options so action defaults from config can apply.
            args = {k: v for k, v in args.items() if v is not None}
            defaults = config_map.get('defaults')
            action_defaults = (defaults.get(action) if defaults else None) or {}
            # Bug fix: the original used 'going to execute % action' % action,
            # whose '% a' conversion never rendered the action name as intended.
            logging.debug('going to execute %s action', action)
            result = actions[action](clusters, deep_merge(action_defaults, args), config_path)
            logging.debug('result: %s', result)
            return result
        finally:
            # Always flush metrics, even if the action raised.
            metrics.close()

    return None