def parse_pod_list(self, cli_obj, config):
    self.pod_list = []
    args = util.coerce_to_iter(config.config.pop('pods', []), set)
    if 'example' in args or 'examples' in args:
        self.pod_list = [pod for pod in config.pods
            if pod.startswith('example')]
    elif 'all' in args:
        self.pod_list = [pod for pod in config.pods
            if not pod.startswith('example')]
    else:
        # specify pods by realm
        realms = args.intersection(set(config.all_realms))
        args = args.difference(set(config.all_realms)) # remainder
        for key in config.pod_realms:
            if util.coerce_to_iter(key, set).issubset(realms):
                self.pod_list.extend(config.pod_realms[key])
        # specify pods by name
        pods = args.intersection(set(config.pods))
        self.pod_list.extend(list(pods))
        for arg in args.difference(set(config.pods)): # remainder
            print("WARNING: Didn't recognize POD {}, ignoring".format(arg))
        # exclude examples
        self.pod_list = [pod for pod in self.pod_list
            if not pod.startswith('example')]
    if not self.pod_list:
        print((
            "WARNING: no PODs selected to be run. Do `./mdtf info pods`"
            " for a list of available PODs, and check your -p/--pods argument."
        ))
        print('Received --pods = {}'.format(list(args)))
        exit()
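# Illustrative sketch of the selection logic above. Hedged: the attribute
# names on `config` are the ones used in parse_pod_list, but the POD and
# realm values here are invented for the example.
# With config.pods = ['example_pod', 'EOF_500hPa', 'MJO_suite'] and
# config.all_realms = ['atmos', 'ocean']:
#   --pods all             -> ['EOF_500hPa', 'MJO_suite']  (examples excluded)
#   --pods example         -> ['example_pod']
#   --pods atmos           -> every POD whose realm key is a subset of {'atmos'}
#   --pods EOF_500hPa foo  -> ['EOF_500hPa'], plus a warning that 'foo' was ignored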
def make_parser(self, d):
    args = util.coerce_to_iter(d.pop('arguments', None))
    arg_groups = util.coerce_to_iter(d.pop('argument_groups', None))
    d['formatter_class'] = CustomHelpFormatter
    p_kwargs = util.filter_kwargs(d, argparse.ArgumentParser.__init__)
    p = argparse.ArgumentParser(**p_kwargs)
    for arg in args:
        # add arguments not in any group
        self.add_parser_argument(arg, p, 'parser')
    for group in arg_groups:
        # add groups and arguments therein
        self.add_parser_group(group, p)
    return p
def framework_test(code_root, output_dir, cli_config):
    print("Starting framework test run")
    abs_out_dir = util.resolve_path(
        output_dir, root_path=code_root, env=os.environ)
    try:
        log_str = shell_command_wrapper(
            './mdtf -f {input_file}'.format(
                input_file=os.path.join(code_root, cli_config['config_out'])),
            cwd=code_root)
        log_str = util.coerce_to_iter(log_str)
        # write to most recent directory in output_dir
        runs = [
            d for d in glob.glob(os.path.join(abs_out_dir, '*'))
            if os.path.isdir(d)
        ]
        if not runs:
            raise IOError(
                "Can't find framework output in {}".format(abs_out_dir))
        run_output = max(runs, key=os.path.getmtime)
        with io.open(os.path.join(run_output, 'mdtf_test.log'),
            'w', encoding='utf-8') as f:
            f.write('\n'.join(log_str))
    except Exception as exc:
        fatal_exception_handler(exc, "ERROR: framework test run failed.")
    print("Finished framework test run at {}".format(run_output))
    return run_output
def spawn_subprocess(self, cmd_list, env_name,
        env=None, cwd=None, stdout=None, stderr=None):
    # leave stdout=None so the child inherits our stdout; subprocess.STDOUT
    # is only a valid value for stderr, where it merges stderr into stdout.
    if stderr is None:
        stderr = subprocess.STDOUT
    run_cmds = util.coerce_to_iter(cmd_list, list)
    if self.test_mode:
        run_cmds = ['echo "TEST MODE: call {}"'.format('; '.join(run_cmds))]
    commands = (self.activate_env_commands(env_name)
        + run_cmds
        + self.deactivate_env_commands(env_name))
    # '&&' so we abort if any command in the sequence fails.
    if self.test_mode:
        for cmd in commands:
            print('TEST MODE: call {}'.format(cmd))
    else:
        print("Calling : {}".format(run_cmds[-1]))
    commands = ' && '.join([s for s in commands if s])
    # Need to run bash explicitly because 'conda activate' sources
    # env vars (can't do that in posix sh). tcsh could also work.
    return subprocess.Popen(
        ['bash', '-c', commands],
        env=env, cwd=cwd, stdout=stdout, stderr=stderr
    )
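# Sketch of the command string handed to bash above, assuming a conda-based
# environment manager whose activate/deactivate helpers each return a
# one-element list. The method names are the ones called above; the command
# text and environment name are illustrative:
#   activate_env_commands('_MDTF_base')   -> ['conda activate _MDTF_base']
#   run_cmds                              -> ['python driver.py']
#   deactivate_env_commands('_MDTF_base') -> ['conda deactivate']
# joins to:
#   'conda activate _MDTF_base && python driver.py && conda deactivate'
# so a nonzero exit from any step short-circuits the rest of the chain.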
def bump_version(path, new_v=None, extra_dirs=None):
    # return a filename that doesn't conflict with existing files.
    # if extra_dirs supplied, make sure path doesn't conflict with pre-existing
    # files at those locations either.
    def _split_version(file_):
        match = re.match(r"""
            ^(?P<file_base>.*?)   # arbitrary characters (lazy match)
            (\.v(?P<version>\d+)) # literal '.v' followed by digits
            ?                     # previous group may occur 0 or 1 times
            $                     # end of string
            """, file_, re.VERBOSE)
        if match:
            return (match.group('file_base'), match.group('version'))
        else:
            return (file_, '')

    def _reassemble(dir_, file_, version, ext_, final_sep):
        if version:
            file_ = ''.join([file_, '.v', str(version), ext_])
        else:
            # get here for version == 0, '' or None
            file_ = ''.join([file_, ext_])
        return os.path.join(dir_, file_) + final_sep

    def _path_exists(dir_list, file_, new_v, ext_, sep):
        new_paths = [_reassemble(d, file_, new_v, ext_, sep) for d in dir_list]
        return any([os.path.exists(p) for p in new_paths])

    if path.endswith(os.sep):
        # remove any terminating slash on directory
        path = path.rstrip(os.sep)
        final_sep = os.sep
    else:
        final_sep = ''
    dir_, file_ = os.path.split(path)
    if not extra_dirs:
        dir_list = []
    else:
        dir_list = util.coerce_to_iter(extra_dirs)
    dir_list.append(dir_)
    file_, old_v = _split_version(file_)
    if not old_v:
        # maybe it has an extension and then a version number
        file_, ext_ = os.path.splitext(file_)
        file_, old_v = _split_version(file_)
    else:
        ext_ = ''
    if new_v is not None:
        # removes version if new_v == 0
        new_path = _reassemble(dir_, file_, new_v, ext_, final_sep)
    else:
        if not old_v:
            new_v = 0
        else:
            new_v = int(old_v)
        while _path_exists(dir_list, file_, new_v, ext_, final_sep):
            new_v = new_v + 1
        new_path = _reassemble(dir_, file_, new_v, ext_, final_sep)
    return (new_path, new_v)
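# Worked examples for bump_version. Hedged: these assume none of the probed
# files exist on disk yet, since the result depends on what's already there:
#   bump_version('/tmp/out.nc')          -> ('/tmp/out.nc', 0)     # no .v suffix needed
#   bump_version('/tmp/out.v3.nc')       -> ('/tmp/out.v3.nc', 3)  # keeps existing version
#   bump_version('/tmp/out.nc', new_v=0) -> ('/tmp/out.nc', 0)     # new_v=0 strips version
#   bump_version('/tmp/out.nc', new_v=5) -> ('/tmp/out.v5.nc', 5)
# If /tmp/out.v3.nc already exists, bump_version('/tmp/out.v3.nc') probes
# .v3, .v4, ... until it finds a free name.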
def __init__(self, unittest=False, verbose=0):
    if unittest:
        # value not used; when testing, the read_json call below is mocked
        # out with the actual translation table to use for the test
        config_files = ['dummy_filename']
    else:
        config = ConfigManager()
        glob_pattern = os.path.join(
            config.paths.CODE_ROOT, 'src', 'fieldlist_*.jsonc'
        )
        config_files = glob.glob(glob_pattern)
    # always have CF-compliant option, which does no translation
    self.axes = {
        'CF': {
            "lon": {"axis": "X", "MDTF_envvar": "lon_coord"},
            "lat": {"axis": "Y", "MDTF_envvar": "lat_coord"},
            "lev": {"axis": "Z", "MDTF_envvar": "lev_coord"},
            "time": {"axis": "T", "MDTF_envvar": "time_coord"}
        }
    }
    self.variables = {'CF': dict()}
    self.units = {'CF': dict()}
    for filename in config_files:
        d = util.read_json(filename)
        for conv in util.coerce_to_iter(d['convention_name']):
            if verbose > 0:
                print('Found convention {} in {}'.format(conv, filename))
            if conv in self.variables:
                print("ERROR: convention {} defined in {} already exists".format(
                    conv, filename))
                raise ConventionError
            self.axes[conv] = d.get('axes', dict())
            self.variables[conv] = util.MultiMap(d.get('var_names', dict()))
            self.units[conv] = util.MultiMap(d.get('units', dict()))
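# Hedged sketch of the fieldlist_*.jsonc structure this constructor consumes.
# The top-level keys ('convention_name', 'axes', 'var_names', 'units') are the
# ones read above; the example values are hypothetical:
# {
#   "convention_name": "MY_MODEL",
#   "axes": {"lon": {"axis": "X", "MDTF_envvar": "lon_coord"}},
#   "var_names": {"pr_var": "precip"},    // fed to util.MultiMap
#   "units": {"pr_var": "kg m-2 s-1"}
# }
# Note 'convention_name' may be a single string or a list; coerce_to_iter
# lets one file register several convention aliases.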
def _parse_pod_varlist(self, varlist, verbose=0):
    """Private method called by :meth:`~shared_diagnostic.Diagnostic.__init__`.

    Args:
        varlist (:py:obj:`list` of :py:obj:`dict`): Contents of the varlist
            portion of the POD's settings.json file.
        verbose (:py:obj:`int`, optional): Logging verbosity level. Default 0.

    Returns:
        varlist
    """
    default_file_required = True
    for i, var in enumerate(varlist):
        if 'requirement' in var:
            varlist[i]['required'] = (var['requirement'].lower() == 'required')
        elif 'required' not in varlist[i]:
            varlist[i]['required'] = default_file_required
        if 'alternates' not in var:
            varlist[i]['alternates'] = []
        else:
            varlist[i]['alternates'] = util.coerce_to_iter(var['alternates'])
    if verbose > 0:
        print(self.name + " varlist: ")
        print(varlist)
    return varlist
def get_lookup(self, source, dest):
    """Find the appropriate lookup table to convert values in *source* (keys)
    to values in *dest* (values), generating it if necessary.

    Args:
        source (str): the CV category to use for the keys.
        dest (str): the CV category to use for the values.

    Returns:
        :class:`util.MultiMap` providing a dict-like lookup interface, ie
        dest_value = d[source_key].
    """
    if (source, dest) in self._lookups:
        return self._lookups[(source, dest)]
    elif (dest, source) in self._lookups:
        return self._lookups[(dest, source)].inverse()
    elif source in self._contents:
        k = list(self._contents[source])[0]
        if dest not in self._contents[source][k]:
            raise KeyError("Can't find {} in attributes for {}.".format(
                dest, source))
        mm = util.MultiMap()
        for k in self._contents[source]:
            mm[k].update(
                util.coerce_to_iter(self._contents[source][k][dest], set))
        self._lookups[(source, dest)] = mm
        return mm
    elif dest in self._contents:
        # build the reverse table first, then invert it; indexing
        # self._lookups directly here would raise KeyError, since we already
        # know (dest, source) hasn't been generated yet.
        return self.get_lookup(dest, source).inverse()
    else:
        raise KeyError('Neither {} nor {} in CV table list.'.format(
            source, dest))
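# Usage sketch (hedged: the category names are illustrative CMIP6 CV
# categories, and `cv` stands for an instance of the class this method
# belongs to):
#   table = cv.get_lookup('experiment_id', 'activity_id')
#   table['historical']   # -> set of activity_id values for that experiment
# The MultiMap is cached under (source, dest); a later request for the
# reverse direction reuses the cached table via .inverse().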
def parse_cli(self, args=None):
    # call preparse_cli if child class hasn't done so already
    if not self.config:
        self.preparse_cli(args)
    # if no additional defaults were set, that's sufficient; otherwise need
    # to take into account their intermediate priority
    if isinstance(self.partial_defaults, dict):
        # not handled correctly by coerce_to_iter
        self.partial_defaults = [self.partial_defaults]
    self.partial_defaults = util.coerce_to_iter(self.partial_defaults)
    partial_defaults = []
    for d in self.partial_defaults:
        # drop empty strings
        partial_defaults.append({k: v for k, v in d.items() if v != ""})
    # self.config was populated by preparse_cli()
    # Options explicitly set by user on CLI; is_default = None if no default
    cli_opts = {k: v for k, v in self.config.items()
        if not self.is_default.get(k, True)}
    # full set of defaults from cli.jsonc, from running parser on empty input
    defaults = vars(self.parser.parse_args([]))
    chained_dict_list = [cli_opts] + partial_defaults + [defaults]
    # CLI opts override options set from file, which override defaults
    self.config = dict(ChainMap(*chained_dict_list))
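# Minimal stdlib demo of the precedence logic above (runnable as-is in a
# REPL; the option names are invented for the example):
#   >>> from collections import ChainMap
#   >>> cli_opts = {'pods': 'all'}                        # set by user on the CLI
#   >>> partial  = {'pods': 'example', 'dry_run': True}   # from an input file
#   >>> defaults = {'pods': '', 'dry_run': False, 'verbose': 1}
#   >>> dict(ChainMap(cli_opts, partial, defaults))
#   {'pods': 'all', 'dry_run': True, 'verbose': 1}
# Earlier maps win, so CLI flags beat file settings, which beat cli.jsonc
# defaults.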
def makedirs(self, path_keys, delete_existing):
    path_keys = util.coerce_to_iter(path_keys)
    for key in path_keys:
        path = self.config[key]
        if not path:
            continue
        if delete_existing and os.path.isdir(path):
            shutil.rmtree(path) # overwrite everything
        if not os.path.isdir(path):
            os.makedirs(path) # recursive mkdir if needed
def iteritems_cli(self, group_nm=None):
    if not group_nm:
        _groups = self.parser_groups
    else:
        _groups = util.coerce_to_iter(group_nm)
    for group in _groups:
        for action in self.parser_args_from_group[group]:
            key = action.dest
            yield (key, self.config[key])
def _make_cv(self):
    """Populate the *cv* attribute of :class:`CMIP6_CVs` with the tables
    read in during __init__(). Do this on-demand rather than in __init__,
    in case this information isn't needed for this run of the framework.
    """
    if self.cv:
        return
    for k in self._contents:
        self.cv[k] = util.coerce_to_iter(self._contents[k])
def add_parser_group(self, d, target_obj):
    gp_nm = d.pop('name')
    _ = d.setdefault('title', gp_nm)
    args = util.coerce_to_iter(d.pop('arguments', None))
    if args:
        # only add group if it has > 0 arguments
        gp_kwargs = util.filter_kwargs(d, argparse._ArgumentGroup.__init__)
        gp_obj = target_obj.add_argument_group(**gp_kwargs)
        self.parser_groups[gp_nm] = gp_obj
        for arg in args:
            self.add_parser_argument(arg, gp_obj, gp_nm)
def parse_case_list(self, cli_obj, config):
    case_list_in = util.coerce_to_iter(cli_obj.case_list)
    cli_d = self._populate_from_cli(cli_obj, 'MODEL')
    if 'CASE_ROOT_DIR' not in cli_d and cli_obj.config.get('root_dir', None):
        # CASE_ROOT was set positionally
        cli_d['CASE_ROOT_DIR'] = cli_obj.config['root_dir']
    if not case_list_in:
        case_list_in = [cli_d]
    case_list = []
    for case_tup in enumerate(case_list_in):
        case_list.append(self.parse_case(case_tup, cli_d, cli_obj, config))
    self.case_list = [case for case in case_list if case is not None]
def add_parser_argument(self, d, target_obj, target_name):
    # set flags:
    if 'name' not in d:
        raise ValueError("No argument name found in {}".format(d))
    arg_nm = self.canonical_arg_name(d.pop('name'))
    arg_flags = [arg_nm]
    if d.pop('is_positional', False):
        # code to handle positional arguments
        pass
    else:
        # argument is a command-line flag (default)
        if 'dest' not in d:
            d['dest'] = arg_nm
        if '_' in arg_nm:
            # recognize both --hyphen_opt and --hyphen-opt (GNU CLI convention)
            arg_flags = [arg_nm.replace('_', '-'), arg_nm]
        arg_flags = ['--' + s for s in arg_flags]
        if 'short_name' in d:
            # recognize both --option and -O, if short_name defined
            arg_flags.append('-' + d.pop('short_name'))
    # type conversion of default value
    if 'type' in d:
        d['type'] = eval(d['type'])
        if 'default' in d:
            d['default'] = d['type'](d['default'])
    if d.get('action', '') == 'count' and 'default' in d:
        d['default'] = int(d['default'])
    if d.get('parse_type', None):
        # make list of args requiring custom post-parsing later
        self.custom_types[d.pop('parse_type')].append(d['dest'])
        # TODO: what if following require env vars, etc??
    if d.get('eval', None):
        for attr in util.coerce_to_iter(d.pop('eval')):
            if attr in d:
                d[attr] = eval(d[attr])
    _ = d.setdefault('action', RecordDefaultsAction)
    # change help string based on default value
    if d.pop('hidden', False):
        # do not list argument in "mdtf --help", but recognize it
        d['help'] = argparse.SUPPRESS
    self.parser_args_from_group[target_name].append(
        target_obj.add_argument(*arg_flags, **d))
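# Hedged example of a cli.jsonc argument entry this method accepts. The key
# names match the d.pop()/d.get() calls above; the values are hypothetical,
# and we assume canonical_arg_name() normalizes hyphens to underscores:
# {
#   "name": "netcdf-format",   // dest becomes 'netcdf_format'
#   "short_name": "n",         // also registered as -n
#   "type": "str",             // eval()'d to the str builtin
#   "default": "NETCDF4",
#   "help": "Output file format."
# }
# would register flags ['--netcdf-format', '--netcdf_format', '-n'].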
def _list_filtered_subdirs(self, dirs_in, subdir_filter=None):
    subdir_filter = util.coerce_to_iter(subdir_filter)
    found_dirs = []
    for dir_ in dirs_in:
        found_subdirs = {
            d for d in self._listdir(os.path.join(self.root_dir, dir_))
            if not (d.startswith('.') or d.endswith('.nc'))
        }
        if subdir_filter:
            found_subdirs = found_subdirs.intersection(subdir_filter)
        if not found_subdirs:
            print("\tCouldn't find subdirs (in {}) at {}, skipping".format(
                subdir_filter, os.path.join(self.root_dir, dir_)))
            continue
        found_dirs.extend([
            os.path.join(dir_, subdir_) for subdir_ in found_subdirs
            if os.path.isdir(os.path.join(self.root_dir, dir_, subdir_))
        ])
    return found_dirs
def _parse_pod_settings(self, settings, verbose=0):
    """Private method called by :meth:`~shared_diagnostic.Diagnostic.__init__`.

    Args:
        settings (:py:obj:`dict`): Contents of the settings portion of the
            POD's settings.json file.
        verbose (:py:obj:`int`, optional): Logging verbosity level. Default 0.

    Returns:
        Dict of parsed settings.
    """
    d = {}
    d['pod_name'] = self.name # redundant
    # define empty defaults to avoid having to test existence of attrs
    for str_attr in ['long_name', 'description', 'env', 'convention']:
        d[str_attr] = ''
    for list_attr in ['varlist']:
        d[list_attr] = []
    for dict_attr in ['runtime_requirements']:
        d[dict_attr] = dict()
    for obj_attr in ['process_obj', 'logfile_obj']:
        d[obj_attr] = None
    # overwrite with contents of settings.json file
    d.update(settings)
    if 'variable_convention' in d:
        d['convention'] = d['variable_convention']
        del d['variable_convention']
    elif not d.get('convention', None):
        d['convention'] = 'CF'
    for key, val in d['runtime_requirements'].items():
        d['runtime_requirements'][key] = util.coerce_to_iter(val)
    if verbose > 0:
        print(self.name + " settings: ")
        print(d)
    return d
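# Hedged sketch of the settings block this method parses. The keys correspond
# to the defaults defined above; the values are hypothetical:
# "settings": {
#   "long_name": "Example diagnostic",
#   "description": "Computes an example statistic.",
#   "variable_convention": "CMIP",   // renamed to 'convention' above
#   "runtime_requirements": {"python": ["matplotlib", "netCDF4"]}
# }
# Note runtime_requirements values are coerced to lists, so a bare string
# like {"python": "matplotlib"} is also accepted.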
def _print_pod_info(self, pod, verbose):
    ds = self.pods[pod]['settings']
    dv = self.pods[pod]['varlist']
    if verbose == 1:
        print(' {}: {}.'.format(pod, ds['long_name']))
    elif verbose == 2:
        print(' {}: {}.'.format(pod, ds['long_name']))
        print(' {}'.format(ds['description']))
        print(' Variables: {}'.format(', '.join(
            [v['var_name'].replace('_var', '') for v in dv])))
    elif verbose == 3:
        print('{}: {}.'.format(pod, ds['long_name']))
        print(' Realm: {}.'.format(' and '.join(
            util.coerce_to_iter(ds['realm']))))
        print(' {}'.format(ds['description']))
        print(' Variables:')
        for var in dv:
            var_str = ' {} ({}) @ {} frequency'.format(
                var['var_name'].replace('_var', ''),
                var.get('requirement', ''),
                var['freq'])
            if 'alternates' in var:
                var_str = var_str + '; alternates: {}'.format(', '.join(
                    [s.replace('_var', '') for s in var['alternates']]))
            print(var_str)
def _add_topic_handler(keywords, function):
    # keep cmd_list ordered
    keywords = util.coerce_to_iter(keywords)
    self.cmd_list.extend(keywords)
    for k in keywords:
        self.cmds[k] = function
def load_pod_settings(code_root, pod=None, pod_list=None):
    """Wrapper to load POD settings files, used by ConfigManager and
    CLIInfoHandler.
    """
    # the only other place this could live without creating circular imports
    # would be util.py
    _pod_dir = 'diagnostics'
    _pod_settings = 'settings.jsonc'

    def _load_one_json(pod):
        d = dict()
        try:
            d = util.read_json(
                os.path.join(code_root, _pod_dir, pod, _pod_settings))
            assert 'settings' in d
        except Exception:
            pass # better error handling?
        return d

    # get list of pods
    if not pod_list:
        pod_list = os.listdir(os.path.join(code_root, _pod_dir))
        pod_list = [s for s in pod_list if not s.startswith(('_', '.'))]
        pod_list.sort(key=six.text_type.lower)
    if pod == 'list':
        return pod_list
    # load JSON files
    if not pod:
        # load all of them
        pods = dict()
        realm_list = set()
        bad_pods = []
        realms = collections.defaultdict(list)
        for p in pod_list:
            d = _load_one_json(p)
            if not d:
                bad_pods.append(p)
                continue
            pods[p] = d
            # PODs requiring data from multiple realms get stored in the dict
            # under a tuple of those realms; realms are stored individually
            # in realm_list
            _realm = util.coerce_to_iter(d['settings'].get('realm', None), tuple)
            if len(_realm) == 0:
                continue
            elif len(_realm) == 1:
                _realm = _realm[0]
                realm_list.add(_realm)
            else:
                realm_list.update(_realm)
            realms[_realm].append(p)
        for p in bad_pods:
            pod_list.remove(p)
        return PodDataTuple(
            pod_data=pods, realm_data=realms,
            sorted_lists={
                "pods": pod_list,
                "realms": sorted(list(realm_list), key=six.text_type.lower)
            })
    else:
        if pod not in pod_list:
            print("Couldn't recognize POD {} out of the following diagnostics:".format(pod))
            print(', '.join(pod_list))
            return dict()
        return _load_one_json(pod)
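# Sketch of the realm bookkeeping above (hedged: the POD and realm names are
# invented). A single-realm POD is keyed by the bare string; a multi-realm
# POD is keyed by the tuple of its realms:
#   realms['atmos']            -> ['EOF_500hPa', ...]
#   realms[('atmos', 'ocean')] -> ['coupled_pod', ...]
# while realm_list flattens everything: {'atmos', 'ocean', ...}. This is what
# lets `--pods atmos` match both kinds via the subset test in parse_pod_list.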