            # --- excerpt begins inside the probabilistic branch of an outer if ---
            # build the threshold string for the probabilistic field
            comparison = util.get_comparison_from_threshold(v_thresh)
            number = util.get_number_from_threshold(v_thresh)
            if comparison in ["gt", "ge", ">", ">="]:
                thresh_str += "thresh_lo=" + str(number) + ";"
            elif comparison in ["lt", "le", "<", "<="]:
                thresh_str += "thresh_hi=" + str(number) + ";"

            # NetCDF/GEMPAK probability data is a regular field with prob=TRUE;
            # otherwise wrap the field in a prob={...} dictionary
            if self.c_dict[d_type + '_INPUT_DATATYPE'] in ("NETCDF", "GEMPAK"):
                field = "{ name=\"" + v_name + "\"; level=\"" + \
                        level + "\"; prob=TRUE; "
            else:
                field = "{ name=\"PROB\"; level=\"" + level_type + \
                        level.zfill(2) + "\"; prob={ name=\"" + \
                        v_name + "\"; " + thresh_str + "} "
        else:
            if self.config.getbool('config', d_type + '_PCP_COMBINE_RUN', False):
                field = "{ name=\"" + v_name + "_" + level + \
                        "\"; level=\"(*,*)\"; "
            else:
                field = "{ name=\"" + v_name + \
                        "\"; level=\"" + v_level + "\"; "

        field += v_extra + " }"
        return field


if __name__ == "__main__":
    util.run_stand_alone("mode_wrapper", "Mode")
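# ---------------------------------------------------------------------------
# Illustrative sketch (assumed inputs, not taken from the wrapper): tracing
# the probabilistic GRIB branch above by hand. With v_name='APCP',
# level_type='A', level='6', v_thresh='>12.7' and v_extra='', the threshold
# string becomes 'thresh_lo=12.7;', level.zfill(2) gives '06', and the
# returned field entry is roughly
#
#   { name="PROB"; level="A06"; prob={ name="APCP"; thresh_lo=12.7;} }
#
# i.e. the field dictionary entry handed to the MODE config file.
# ---------------------------------------------------------------------------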
        # set field info
        if data_src == "FCST":
            self.field_level = var_info['fcst_level']
            self.field_name = var_info['fcst_name']
            self.field_extra = var_info['fcst_extra']
        else:
            self.field_level = var_info['obs_level']
            self.field_name = var_info['obs_name']
            self.field_extra = var_info['obs_extra']

        in_dir, in_template = self.get_dir_and_template(data_src, 'INPUT')
        out_dir, out_template = self.get_dir_and_template(data_src, 'OUTPUT')

        # get files
        lookback = self.c_dict[data_src + '_DERIVE_LOOKBACK']
        if not self.get_accumulation(time_info, lookback, data_src, is_forecast):
            return None

        # set output
        self.outdir = out_dir
        # convert the lookback from hours to seconds for the level token
        time_info['level'] = int(lookback) * 3600
        psts = sts.StringSub(self.logger, out_template, **time_info)
        pcp_out = psts.do_string_sub()
        self.outfile = pcp_out
        return self.get_command()


if __name__ == "__main__":
    util.run_stand_alone("pcp_combine_wrapper", "PcpCombine")
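# ---------------------------------------------------------------------------
# Illustrative note (assumed values): with a 6-hour DERIVE_LOOKBACK,
# time_info['level'] is set to 6 * 3600 = 21600 seconds before substitution.
# For a hypothetical output template such as
#     '{valid?fmt=%Y%m%d%H}_precip.nc'
# and a valid time of 2019-02-01 12Z, do_string_sub() would return
#     '2019020112_precip.nc'
# which becomes self.outfile.
# ---------------------------------------------------------------------------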
if verif_type in ("pres", "anom", "sfc"): run_make_plots = True else: run_make_plots = False self.logger.error( verif_type + " is not an accepted VERIF_TYPE option for VERIF_CASE = grid2grid" ) elif verif_case == "grid2obs": if verif_type in ("upper_air", "conus_sfc"): run_make_plots = True else: run_make_plots = False self.logger.error( verif_type + " is not an accepted VERIF_TYPE option for VERIF_CASE = grid2obs" ) elif verif_case == "precip": run_make_plots = True else: self.logger.error(verif_case + " is not an accepted VERIF_CASE option") if run_make_plots: self.create_plots(verif_case, verif_type) else: exit(1) if __name__ == "__main__": util.run_stand_alone("make_plots_wrapper_precip", "MakePlots")
tmp_filename = "filter_" + cur_init + "_" + cur_storm full_tmp_filename = os.path.join(tmp_dir, tmp_filename) storm_match_list = util.grep(cur_storm, filter_name) with open(full_tmp_filename, "a+") as tmp_file: # copy over header information tmp_file.write(header) for storm_match in storm_match_list: tmp_file.write(storm_match) # Perform regridding of the forecast and analysis files # to an n X n degree tile centered on the storm (dimensions # are indicated in the config/param file). feature_util.retrieve_and_regrid(full_tmp_filename, cur_init, cur_storm, self.filtered_out_dir, self.config) # end of for cur_storm # Remove any empty files and directories in the extract_tiles output # directory util.prune_empty(self.filtered_out_dir, self.logger) # Clean up the tmp directory if it exists if os.path.isdir(tmp_dir): util.rmtree(tmp_dir) if __name__ == "__main__": util.run_stand_alone("extract_tiles_wrapper", "ExtractTiles")
        # (excerpt begins mid-assignment: this getraw call is the right-hand
        # side of the climatology input template entry)
            self.config.getraw('filename_templates',
                               'CLIMO_GRID_STAT_INPUT_TEMPLATE')
        c_dict['OUTPUT_DIR'] = self.config.getdir(
            'GRID_STAT_OUTPUT_DIR', self.config.getdir('OUTPUT_BASE'))
        c_dict['ONCE_PER_FIELD'] = self.config.getbool(
            'config', 'GRID_STAT_ONCE_PER_FIELD', False)
        c_dict['FCST_PROB_THRESH'] = self.config.getstr(
            'config', 'FCST_GRID_STAT_PROB_THRESH', '==0.1')
        c_dict['OBS_PROB_THRESH'] = self.config.getstr(
            'config', 'OBS_GRID_STAT_PROB_THRESH', '==0.1')
        c_dict['ALLOW_MULTIPLE_FILES'] = False
        c_dict['NEIGHBORHOOD_WIDTH'] = self.config.getstr(
            'config', 'GRID_STAT_NEIGHBORHOOD_WIDTH', '')
        c_dict['NEIGHBORHOOD_SHAPE'] = self.config.getstr(
            'config', 'GRID_STAT_NEIGHBORHOOD_SHAPE', '')
        c_dict['VERIFICATION_MASK_TEMPLATE'] = \
            self.config.getraw('filename_templates',
                               'GRID_STAT_VERIFICATION_MASK_TEMPLATE')
        c_dict['VERIFICATION_MASK'] = ''

        # handle window variables [FCST/OBS]_[FILE_]WINDOW_[BEGIN/END]
        self.handle_window_variables(c_dict, 'grid_stat')

        return c_dict


if __name__ == "__main__":
    util.run_stand_alone("grid_stat_wrapper", "GridStat")
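# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical values): a METplus config excerpt that
# would populate the c_dict entries read above.
#
#   [config]
#   GRID_STAT_ONCE_PER_FIELD = False
#   FCST_GRID_STAT_PROB_THRESH = ==0.1
#   OBS_GRID_STAT_PROB_THRESH = ==0.1
#   GRID_STAT_NEIGHBORHOOD_WIDTH = 3
#   GRID_STAT_NEIGHBORHOOD_SHAPE = SQUARE
#
#   [dir]
#   GRID_STAT_OUTPUT_DIR = {OUTPUT_BASE}/grid_stat
#
#   [filename_templates]
#   GRID_STAT_VERIFICATION_MASK_TEMPLATE =
# ---------------------------------------------------------------------------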
        # This is for extract_tiles to call without a config file
        tc_cmd_list = [self.tc_exe,
                       " -job filter ",
                       " -lookin ", tc_input_list,
                       " -match_points true ",
                       " -init_inc ", cur_init,
                       " -dump_row ", filter_name,
                       " ", filter_opts]

        tc_cmd_str = ''.join(tc_cmd_list)

        # Since this wrapper is not using the CommandBuilder to build the cmd,
        # we need to add the MET verbosity level to the MET cmd created
        # before we run the command.
        tc_cmd_str = self.cmdrunner.insert_metverbosity_opt(tc_cmd_str)

        # Run tc_stat
        try:
            (ret, cmd) = \
                self.cmdrunner.run_cmd(tc_cmd_str, self.env,
                                       app_name=self.app_name)
            if ret != 0:
                raise ExitStatusException(
                    '%s: non-zero exit status' % (repr(cmd),), ret)
        except ExitStatusException as ese:
            self.logger.error(ese)


if __name__ == "__main__":
    util.run_stand_alone("tc_stat_wrapper", "TcStat")
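# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical paths, and assuming
# insert_metverbosity_opt places a '-v <level>' flag just after the
# executable): the assembled tc_cmd_str looks roughly like
#
#   /path/to/met/bin/tc_stat -v 2  -job filter  -lookin /d/tc_pairs \
#       -match_points true  -init_inc 20141201_00 \
#       -dump_row /d/tmp/filter_20141201_00.tcst
#
# Note the doubled spaces: the literal spaces are embedded in the
# tc_cmd_list items themselves before the join.
# ---------------------------------------------------------------------------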
if self.outfile == "": self.logger.error("No output filename specified") return None if self.outdir == "": self.logger.error("No output directory specified") return None out_path = os.path.join(self.outdir, self.outfile) # create outdir (including subdir in outfile) if it doesn't exist if not os.path.exists(os.path.dirname(out_path)): os.makedirs(os.path.dirname(out_path)) cmd += " " + out_path if self.param != "": cmd += ' ' + self.param if len(self.infiles) > 1: for f in self.infiles[1:]: cmd += ' -pbfile' + f return cmd if __name__ == "__main__": util.run_stand_alone("pb2nc_wrapper", "PB2NC")
        for a in self.args:
            cmd += a + " "

        if self.c_dict['SINGLE_RUN']:
            if self.fcst_file is None:
                self.logger.error("No file path specified")
                return None
            cmd += '-single ' + self.fcst_file + ' '
        else:
            if self.fcst_file is None:
                self.logger.error("No forecast file path specified")
                return None
            if self.obs_file is None:
                self.logger.error("No observation file path specified")
                return None
            cmd += '-fcst ' + self.fcst_file + ' '
            cmd += '-obs ' + self.obs_file + ' '

        cmd += '-config ' + self.param + ' '

        if self.outdir != "":
            cmd += '-outdir {}'.format(self.outdir)

        return cmd


if __name__ == "__main__":
    util.run_stand_alone("mtd_wrapper", "MTD")
            # Replace the second column (storm number) with
            # the month followed by the storm number,
            # e.g. replace 0006 with 010006.
            # This is done because this data has many storms per month
            # and we need to know which storm we are processing if running
            # over multiple months.
            row[1] = " " + storm_month + (row[1]).strip()

            # Build the modified row, deleting or modifying columns as we go
            row_list = []
            for index, item in enumerate(row):
                # Delete the third column (index 2); comparing by position
                # rather than by value avoids dropping other columns that
                # happen to hold the same value
                if index == 2:
                    continue
                # Replace the missing-value placeholder missing_values[0]
                # with missing_values[1]
                if item.strip() == missing_values[0]:
                    item = " " + missing_values[1]
                # Add the item to the new row to write
                row_list.append(item)

            # Write the modified row
            writer.writerow(row_list)

        csvfile.close()
        out_file.close()


if __name__ == "__main__":
    util.run_stand_alone("tc_pairs_wrapper", "TcPairs")
        for arg in self.args:
            cmd += arg + " "

        if not self.infiles:
            self.logger.error(self.app_name + ": No input filenames specified")
            return None

        for infile in self.infiles:
            cmd += infile + " "

        if self.param != "":
            cmd += self.param + " "

        for obs_file in self.point_obs_files:
            cmd += "-point_obs " + obs_file + " "

        for obs_file in self.grid_obs_files:
            cmd += "-grid_obs " + obs_file + " "

        if self.outdir == "":
            self.logger.error(self.app_name + ": No output directory specified")
            return None

        cmd += '-outdir {}'.format(self.outdir)
        return cmd


if __name__ == "__main__":
    util.run_stand_alone("ensemble_stat_wrapper", "EnsembleStat")
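# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical files; the executable itself is prepended
# to cmd before this excerpt): a fully assembled ensemble_stat command with
# two ensemble members, a config file, and one point plus one gridded
# observation file might look roughly like
#
#   ensemble_stat mem1.nc mem2.nc EnsembleStatConfig \
#       -point_obs obs_points.nc -grid_obs obs_grid.nc -outdir /d/out
#
# with any self.args entries appearing before the input files.
# ---------------------------------------------------------------------------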
        if self.save:
            options_dict['-save'] = ''

        return options_dict

    def get_command(self):
        """! Override CommandBuilder's get_command. Unlike the other MET tool
             wrappers, tcmpr_plotter_wrapper handles input files differently
             because it wraps an R script, plot_tcmpr.R, rather than a
             typical MET tool. Build the command to run from the
             arguments."""
        if self.app_path is None:
            self.logger.error("No app path specified. You must use a subclass")
            return None

        return self.cmd

    def build(self):
        """! Override CommandBuilder's build() since the plot_tcmpr.R plot
             is set up differently from the other MET tools."""
        cmd = self.get_command()
        if cmd is None:
            return

        self.logger.info("RUNNING: " + cmd)
        process = subprocess.Popen(cmd, env=self.env, shell=True)
        process.wait()


if __name__ == "__main__":
    util.run_stand_alone("tcmpr_plotter_wrapper", "TCMPRPlotter")
        # Sort the files in the fcst_anly_grid_files list.
        sorted_fcst_anly_grid_files = sorted(fcst_anly_grid_files)
        tmp_param = ''
        for cur_fcst_anly in sorted_fcst_anly_grid_files:
            # Write out the files that pertain to this storm; skip any
            # file that is already in tmp_param.
            if cur_fcst_anly not in tmp_param and cur_storm in cur_fcst_anly:
                tmp_param += cur_fcst_anly
                tmp_param += '\n'

        # Now create the fcst or analysis ASCII file
        try:
            with open(fcst_anly_ascii, 'a') as filehandle:
                filehandle.write(tmp_param)
        except IOError:
            msg = ("Could not create requested ASCII file: " +
                   fcst_anly_ascii)
            self.logger.error(msg)

        if os.stat(fcst_anly_ascii).st_size == 0:
            # If any empty fcst or anly ASCII files remain at this point,
            # explicitly remove them (and any resulting empty directories)
            # so they don't cause problems with further processing steps.
            util.prune_empty(fcst_anly_ascii_dir, self.logger)


if __name__ == "__main__":
    util.run_stand_alone("series_by_init_wrapper", "SeriesByInit")
Usage:
Parameters: None
Input Files:
Output Files:
Condition codes: 0 for success, 1 for failure
"""

from __future__ import (print_function, division)

import os

import met_util as util
from compare_gridded_wrapper import CompareGriddedWrapper

'''!@namespace WaveletStatWrapper
@brief Wraps the MET tool wavelet_stat to compare gridded datasets
'''


class WaveletStatWrapper(CompareGriddedWrapper):
    """!Wraps the MET tool wavelet_stat to compare gridded datasets"""
    def __init__(self, p, logger):
        super(WaveletStatWrapper, self).__init__(p, logger)
        self.app_path = os.path.join(self.config.getdir('MET_INSTALL_DIR'),
                                     'bin/wavelet_stat')
        self.app_name = os.path.basename(self.app_path)


if __name__ == "__main__":
    util.run_stand_alone("wavelet_stat_wrapper", "WaveletStat")
        self.add_env_var('OBS_WINDOW_BEGIN', str(self.c_dict['OBS_WINDOW_BEGIN']))
        self.add_env_var('OBS_WINDOW_END', str(self.c_dict['OBS_WINDOW_END']))

        # add additional env vars if they are specified
        if self.c_dict['VERIFICATION_MASK'] != '':
            self.add_env_var('VERIF_MASK',
                             self.c_dict['VERIFICATION_MASK'])
            print_list.append('VERIF_MASK')

        if self.c_dict['NEIGHBORHOOD_WIDTH'] != '':
            self.add_env_var('NEIGHBORHOOD_WIDTH',
                             self.c_dict['NEIGHBORHOOD_WIDTH'])
            print_list.append('NEIGHBORHOOD_WIDTH')

        if self.c_dict['NEIGHBORHOOD_SHAPE'] != '':
            self.add_env_var('NEIGHBORHOOD_SHAPE',
                             self.c_dict['NEIGHBORHOOD_SHAPE'])
            print_list.append('NEIGHBORHOOD_SHAPE')

        # send environment variables to logger
        self.logger.debug("ENVIRONMENT FOR NEXT COMMAND: ")
        self.print_user_env_items()
        for item in print_list:
            self.print_env_item(item)

        self.logger.debug("COPYABLE ENVIRONMENT FOR NEXT COMMAND: ")
        self.print_env_copy(print_list)


if __name__ == "__main__":
    util.run_stand_alone("point_stat_wrapper", "PointStat")
            # (excerpt begins mid-statement, inside the log message emitted
            # when the output file already exists and skipping is enabled)
                              'REGRID_DATA_PLANE_SKIP_IF_OUTPUT_EXISTS to '
                              'False to process'.format(outpath))
            return True

        # build the -field argument: NetCDF (or unset) input uses a
        # name_level field name with a (*,*) level; other datatypes use the
        # variable name and level directly
        if self.config.getstr('config',
                              dtype + '_REGRID_DATA_PLANE_INPUT_DATATYPE',
                              '') in ['', 'NETCDF']:
            field_name = "{:s}_{:s}".format(compare_var, str(level).zfill(2))
            self.args.append(
                "-field 'name=\"{:s}\"; level=\"(*,*)\";'".format(field_name))
        else:
            field_name = "{:s}".format(compare_var)
            self.args.append("-field 'name=\"{:s}\"; level=\"{:s}\";'".format(
                field_name, level))

        if self.c_dict['METHOD'] != '':
            self.args.append("-method {}".format(self.c_dict['METHOD']))

        self.args.append("-width {}".format(self.c_dict['WIDTH']))
        self.args.append("-name " + field_name)

        cmd = self.get_command()
        if cmd is None:
            self.logger.error("Could not generate command")
            return

        self.build()


if __name__ == "__main__":
    util.run_stand_alone("regrid_data_plane_wrapper", "RegridDataPlane")
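# ---------------------------------------------------------------------------
# Illustrative sketch (assumed values): with compare_var='APCP' and level='6',
# the NetCDF branch above produces field_name='APCP_06' and appends
#
#   -field 'name="APCP_06"; level="(*,*)";'
#
# while the non-NetCDF (e.g. GRIB) branch with level='A06' would append
#
#   -field 'name="APCP"; level="A06";'
# ---------------------------------------------------------------------------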
        self.logger.info('Input template is {}'.format(input_template))

        # get forecast leads to loop over
        lead_seq = util.get_lead_sequence(self.config, input_dict)
        for lead in lead_seq:
            # set forecast lead time in hours
            time_info['lead_hours'] = lead

            # recalculate time info items
            time_info = time_util.ti_calculate(time_info)

            # log init, valid, and forecast lead times for current loop iteration
            self.logger.info('Processing forecast lead {} initialized at {} '
                             'and valid at {}'
                             .format(lead,
                                     time_info['init'].strftime('%Y-%m-%d %HZ'),
                                     time_info['valid'].strftime('%Y-%m-%d %HZ')))

            # perform string substitution to find the filename based on the
            # template and the current run time:
            # pass in the logger, then the template, then the items to use to
            # fill in the template; time_info is passed with ** in front to
            # expand each dictionary item into a keyword argument,
            # i.e. time_info['init'] becomes init=init_value
            filename = StringSub(self.logger,
                                 input_template,
                                 **time_info).do_string_sub()
            self.logger.info('Looking in input directory for file: {}'
                             .format(filename))

        return True


if __name__ == "__main__":
    util.run_stand_alone("example_wrapper", "Example")
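# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical template and times): with
#   input_template = '{init?fmt=%Y%m%d%H}/fcst_{valid?fmt=%Y%m%d%H}.nc'
# an init time of 2019-02-01 00Z and a 6-hour lead (valid 2019-02-01 06Z),
# do_string_sub() returns
#   '2019020100/fcst_2019020106.nc'
# which is the relative path the wrapper logs and looks for under the
# input directory.
# ---------------------------------------------------------------------------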