def test_file_exists(self):
    print("\nRunning unittest: file_exists")

    # Start with a "clean" directory and non-existent file;
    # expect the file_exists function to return False.
    test_dir_base = self.p.getdir('TEST_DIR')
    test_file = self.p.getstr('config', 'TEST_FILENAME')
    full_test_file = os.path.join(test_dir_base, test_file)

    # Don't do this test if the TEST_DIR exists.
    # We do not want to remove a directory unless this test created it.
    if os.path.exists(test_dir_base):
        print("Remove your TEST_DIR: %s" % test_dir_base)
        self.assertTrue(False)
    else:
        # Create a file and expect the file_exists function to
        # return True.
        util.mkdir_p(test_dir_base)
        touch_cmd = ' '.join(["/usr/bin/touch", full_test_file])
        # print("full_test_file: %s" % full_test_file)
        os.system(touch_cmd)
        self.assertTrue(util.file_exists(full_test_file))

        # clean up
        util.rmtree(test_dir_base)
def test_mkdir_rmtree(self):
    print("\nRunning unittest: mkdir_rmtree")

    # Make sure the test directory doesn't exist before starting,
    # and remove it when testing is complete.

    # Gather test parameters
    test_dir_base = self.p.getdir('TEST_DIR')

    # Don't do this test if the TEST_DIR exists.
    # We do not want to remove a directory unless this test created it.
    # Make sure we have a clean directory
    # before trying to create the directory.
    if os.path.exists(test_dir_base):
        print("Remove your TEST_DIR: %s" % test_dir_base)
        self.assertTrue(False)
    else:
        full_test_dir = os.path.join(test_dir_base, 'extract_tiles_test')
        util.mkdir_p(full_test_dir)
        self.assertTrue(os.path.exists(full_test_dir))

        # clean up
        util.rmtree(test_dir_base)
        self.assertFalse(os.path.exists(test_dir_base))
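# For reference, a minimal sketch of the directory helpers these tests
# exercise. This assumes util.mkdir_p behaves like "mkdir -p" and
# util.rmtree like "rm -rf"; the real METplus helpers may add logging or
# extra error handling, and the *_sketch names are hypothetical.
import errno
import os
import shutil


def mkdir_p_sketch(path):
    """Create path and any missing parents; do nothing if it exists."""
    try:
        os.makedirs(path)
    except OSError as err:
        if err.errno != errno.EEXIST:
            raise


def rmtree_sketch(path):
    """Remove the directory tree rooted at path, if it exists."""
    if os.path.exists(path):
        shutil.rmtree(path)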
def run_at_time(self, cur_init):
    """!Get TC-pairs data, then regrid tiles centered on the storm.

       Get TC-pairs track data and GFS model data, do any necessary
       processing, then regrid the forecast and analysis files to a
       30 x 30 degree tile centered on the storm.

       Args:

       Returns:
           None: invokes regrid_data_plane to create a netCDF file
               from two extratropical storm track files.
    """
    # pylint:disable=protected-access
    # Need to call sys._getframe() to get the filename and method/func
    # name for logging information.

    # Used in logging
    cur_filename = sys._getframe().f_code.co_filename
    cur_function = sys._getframe().f_code.co_name

    # Get the process id, used to identify the output
    # amongst different users and runs.
    cur_pid = str(os.getpid())
    tmp_dir = os.path.join(self.config.getdir('TMP_DIR'), cur_pid)
    msg = ("INFO|[" + cur_filename + ":" + cur_function + "]"
           "|Begin extract tiles")
    self.logger.info(msg)

    # Check that there are tc_pairs data, which are used as input
    if util.is_dir_empty(self.tc_pairs_dir):
        msg = ("ERROR|[" + cur_filename + ":" + cur_function + "]"
               "|No tc pairs data found at " + self.tc_pairs_dir +
               ". Exiting...")
        self.logger.error(msg)
        sys.exit(1)

    # Process TC pairs by initialization time.
    # Begin processing for initialization time, cur_init.
    year_month = util.extract_year_month(cur_init, self.logger)

    # Create the name of the filter file we need to find. If
    # the file doesn't exist, then run TC_STAT.
    filter_filename = "filter_" + cur_init + ".tcst"
    filter_name = os.path.join(self.filtered_out_dir, cur_init,
                               filter_filename)

    if util.file_exists(filter_name) and not self.overwrite_flag:
        msg = ("DEBUG|[" + cur_filename + ":" + cur_function +
               "]|Filter file exists, using Track data file: " +
               filter_name)
        self.logger.debug(msg)
    else:
        # Create the storm track by applying the
        # filter options defined in the config/param file.
        tile_dir_parts = [self.tc_pairs_dir, "/", year_month]
        tile_dir = ''.join(tile_dir_parts)

        # Use TcStatWrapper to build up the tc_stat command and invoke
        # the MET tool tc_stat to perform the filtering.
        tcs = TcStatWrapper(self.config)
        tcs.build_tc_stat(self.filtered_out_dir, cur_init,
                          tile_dir, self.addl_filter_opts)

        # Remove any empty files and directories that can occur
        # from filtering.
        util.prune_empty(filter_name, self.logger)

    # Now get unique storm ids from the filter file,
    # filter_yyyymmdd_hh.tcst.
    sorted_storm_ids = util.get_storm_ids(filter_name, self.logger)

    # Check for an empty sorted_storm_ids list; if empty,
    # continue to the next time.
    if not sorted_storm_ids:
        # No storms found for init time, cur_init
        msg = ("DEBUG|[" + cur_filename + ":" + cur_function + "]|" +
               "No storms were found for " + cur_init +
               "...continue to next in list")
        self.logger.debug(msg)
        return

    # Process each storm in the sorted_storm_ids list.
    # Iterate over each filter file in the output directory and
    # search for the presence of the storm id. Store the
    # corresponding rows of data in a temporary file in the
    # /tmp/<pid> directory.
    for cur_storm in sorted_storm_ids:
        storm_output_dir = os.path.join(self.filtered_out_dir,
                                        cur_init, cur_storm)
        with open(filter_name, "r") as filter_file:
            header = filter_file.readline()
        util.mkdir_p(storm_output_dir)
        util.mkdir_p(tmp_dir)
        tmp_filename = "filter_" + cur_init + "_" + cur_storm
        full_tmp_filename = os.path.join(tmp_dir, tmp_filename)

        storm_match_list = util.grep(cur_storm, filter_name)
        with open(full_tmp_filename, "a+") as tmp_file:
            # copy over header information
            tmp_file.write(header)
            for storm_match in storm_match_list:
                tmp_file.write(storm_match)

        # Perform regridding of the forecast and analysis files
        # to an n x n degree tile centered on the storm (dimensions
        # are indicated in the config/param file).
        util.retrieve_and_regrid(full_tmp_filename, cur_init,
                                 cur_storm, self.filtered_out_dir,
                                 self.logger, self.config)
    # end of for cur_storm

    # Remove any empty files and directories in the extract_tiles
    # output directory.
    util.prune_empty(self.filtered_out_dir, self.logger)

    # Clean up the tmp directory if it exists.
    if os.path.isdir(tmp_dir):
        util.rmtree(tmp_dir)

    msg = ("INFO|[" + cur_filename + ":" + cur_function + "]"
           "|Finished extract tiles")
    self.logger.info(msg)
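# For reference, a minimal sketch of the line-matching helper the loop
# above relies on. This assumes util.grep(pattern, file) returns every
# line of the file that contains the pattern, analogous to the grep
# command; the real METplus helper may differ (e.g. regex matching),
# and the grep_sketch name is hypothetical.
def grep_sketch(pattern, file_path):
    """Return a list of all lines in file_path containing pattern."""
    with open(file_path, 'r') as file_handle:
        return [line for line in file_handle if pattern in line]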
def create_plots(self, verif_case, verif_type):
    """! Read in metplus_final.conf variables and call the function
         for the specific verification plots to run

         Args:
             verif_case - string of the verification case to make
                          plots for
             verif_type - string of the verification type to make
                          plots for

         Returns:
    """
    self.logger.info("Running plots for VERIF_CASE = " + verif_case +
                     ", VERIF_TYPE = " + verif_type)

    # read config
    plot_time = self.config.getstr('config', 'PLOT_TIME')
    valid_beg_YYYYmmdd = self.config.getstr('config', 'VALID_BEG', "")
    valid_end_YYYYmmdd = self.config.getstr('config', 'VALID_END', "")
    valid_hour_method = self.config.getstr('config', 'VALID_HOUR_METHOD')
    valid_hour_beg = self.config.getstr('config', 'VALID_HOUR_BEG')
    valid_hour_end = self.config.getstr('config', 'VALID_HOUR_END')
    valid_hour_increment = self.config.getstr('config',
                                              'VALID_HOUR_INCREMENT')
    init_beg_YYYYmmdd = self.config.getstr('config', 'INIT_BEG', "")
    init_end_YYYYmmdd = self.config.getstr('config', 'INIT_END', "")
    init_hour_method = self.config.getstr('config', 'INIT_HOUR_METHOD')
    init_hour_beg = self.config.getstr('config', 'INIT_HOUR_BEG')
    init_hour_end = self.config.getstr('config', 'INIT_HOUR_END')
    init_hour_increment = self.config.getstr('config',
                                             'INIT_HOUR_INCREMENT')
    stat_files_input_dir = self.config.getdir('STAT_FILES_INPUT_DIR')
    plotting_out_dir = self.config.getdir('PLOTTING_OUTPUT_DIR')
    plotting_scripts_dir = self.config.getdir('PLOTTING_SCRIPTS_DIR')
    plot_stats_list = self.config.getstr('config', 'PLOT_STATS_LIST')
    ci_method = self.config.getstr('config', 'CI_METHOD')
    verif_grid = self.config.getstr('config', 'VERIF_GRID')
    event_equalization = self.config.getstr('config',
                                            'EVENT_EQUALIZATION', "True")
    var_list = self.parse_vars_with_level_thresh_list()
    fourier_decom_list = self.parse_var_fourier_decomp()
    region_list = util.getlist(self.config.getstr('config', 'REGION_LIST'))
    lead_list = util.getlist(self.config.getstr('config', 'LEAD_LIST'))
    model_name_str_list, model_plot_name_str_list = self.parse_model_list()
    logging_filename = self.config.getstr('config', 'LOG_METPLUS')
    logging_level = self.config.getstr('config', 'LOG_LEVEL')
    met_base = self.config.getstr('dir', 'MET_BASE')

    # set environment variables based on config
    self.add_env_var('PLOT_TIME', plot_time)
    if plot_time == 'valid':
        self.add_env_var('START_DATE_YYYYmmdd', valid_beg_YYYYmmdd)
        self.add_env_var('END_DATE_YYYYmmdd', valid_end_YYYYmmdd)
    elif plot_time == 'init':
        self.add_env_var('START_DATE_YYYYmmdd', init_beg_YYYYmmdd)
        self.add_env_var('END_DATE_YYYYmmdd', init_end_YYYYmmdd)
    else:
        self.logger.error(
            "Invalid entry for PLOT_TIME, use 'valid' or 'init'")
        exit(1)
    self.add_env_var('STAT_FILES_INPUT_DIR', stat_files_input_dir)
    self.add_env_var('PLOTTING_OUT_DIR', plotting_out_dir)
    self.add_env_var('PLOT_STATS_LIST', plot_stats_list)
    self.add_env_var('MODEL_NAME_LIST', model_name_str_list)
    self.add_env_var('MODEL_PLOT_NAME_LIST', model_plot_name_str_list)
    self.add_env_var('CI_METHOD', ci_method)
    self.add_env_var('VERIF_GRID', verif_grid)
    self.add_env_var('EVENT_EQUALIZATION', event_equalization)
    self.add_env_var('LOGGING_FILENAME', logging_filename)
    self.add_env_var('LOGGING_LEVEL', logging_level)

    plotting_out_dir_full = os.path.join(plotting_out_dir,
                                         verif_case, verif_type)
    if os.path.exists(plotting_out_dir_full):
        self.logger.info(plotting_out_dir_full + " exists, removing")
        util.rmtree(plotting_out_dir_full)
    util.mkdir_p(os.path.join(plotting_out_dir_full, "imgs"))
    util.mkdir_p(os.path.join(plotting_out_dir_full, "data"))
    self.add_env_var('PLOTTING_OUT_DIR_FULL', plotting_out_dir_full)

    with open(met_base + '/version.txt') as met_version_txt:
        met_version_line = met_version_txt.readline()
        met_version = float(
            met_version_line.strip('\n').partition('/met-')[2].partition(
                '_')[0])
    self.add_env_var('MET_VERSION', str(met_version))
    if met_version < 6.0:
        self.logger.error("Please run with MET version >= 6.0")
        exit(1)

    # build valid and init hour information
    valid_beg_HHMMSS = calendar.timegm(
        time.strptime(valid_hour_beg, "%H%M"))
    valid_end_HHMMSS = calendar.timegm(
        time.strptime(valid_hour_end, "%H%M"))
    init_beg_HHMMSS = calendar.timegm(time.strptime(init_hour_beg, "%H%M"))
    init_end_HHMMSS = calendar.timegm(time.strptime(init_hour_end, "%H%M"))
    valid_hour_list = self.create_hour_group_list(
        valid_beg_HHMMSS, valid_end_HHMMSS, int(valid_hour_increment))
    init_hour_list = self.create_hour_group_list(init_beg_HHMMSS,
                                                 init_end_HHMMSS,
                                                 int(init_hour_increment))
    valid_init_time_pairs = self.pair_valid_init_times(valid_hour_list,
                                                       valid_hour_method,
                                                       init_hour_list,
                                                       init_hour_method)

    # loop through time information
    for valid_init_time_pair in valid_init_time_pairs:
        self.add_env_var('VALID_TIME_INFO', valid_init_time_pair.valid)
        self.add_env_var('INIT_TIME_INFO', valid_init_time_pair.init)
        # loop through variable information
        for var_info in var_list:
            self.add_env_var('FCST_VAR_NAME', var_info.fcst_name)
            self.add_env_var('OBS_VAR_NAME', var_info.obs_name)
            fcst_var_level_list = var_info.fcst_level
            obs_var_level_list = var_info.obs_level
            if len(var_info.fcst_extra) == 0:
                self.add_env_var('FCST_VAR_EXTRA', "None")
            else:
                self.add_env_var('FCST_VAR_EXTRA', var_info.fcst_extra)
            if len(var_info.obs_extra) == 0:
                self.add_env_var('OBS_VAR_EXTRA', "None")
            else:
                self.add_env_var('OBS_VAR_EXTRA', var_info.obs_extra)
            if len(var_info.fcst_thresh) == 0 or len(
                    var_info.obs_thresh) == 0:
                fcst_var_thresh_list = ["None"]
                obs_var_thresh_list = ["None"]
            else:
                fcst_var_thresh_list = var_info.fcst_thresh
                obs_var_thresh_list = var_info.obs_thresh
            # check for Fourier decomposition for the variable and
            # add it to the interpolation list
            interp_list = util.getlist(
                self.config.getstr('config', 'INTERP', ""))
            var_fourier_decomp_info = fourier_decom_list[var_list.index(
                var_info)]
            if var_fourier_decomp_info.run_fourier:
                for pair in var_fourier_decomp_info.wave_num_pairings:
                    interp_list.append("WV1_" + pair)
            # loop through interpolation information
            for interp in interp_list:
                self.add_env_var('INTERP', interp)
                # loop through region information
                for region in region_list:
                    self.add_env_var('REGION', region)
                    # call specific plot definitions to make plots
                    if verif_case == "precip":
                        self.create_plots_precip(fcst_var_level_list,
                                                 obs_var_level_list,
                                                 fcst_var_thresh_list,
                                                 obs_var_thresh_list,
                                                 lead_list,
                                                 plotting_scripts_dir)
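# Worked example of the MET version parsing above, using a hypothetical
# version.txt line (real file contents may differ):
#   met_version_line = "/usr/local/met-6.1_bugfix\n"
#   met_version_line.strip('\n').partition('/met-')[2]  -> "6.1_bugfix"
#   "6.1_bugfix".partition('_')[0]                      -> "6.1"
#   float("6.1")                                        -> 6.1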
def run_at_time(self, input_dict):
    """!Get TC-pairs data, then regrid tiles centered on the storm.

       Get TC-pairs track data and GFS model data, do any necessary
       processing, then regrid the forecast and analysis files to a
       30 x 30 degree tile centered on the storm.

       Args:
           input_dict: Time dictionary

       Returns:
           None: invokes regrid_data_plane to create a netCDF file
               from two extratropical storm track files.
    """
    time_info = time_util.ti_calculate(input_dict)
    init_time = time_info['init_fmt']

    # Get the process id, used to identify the output
    # amongst different users and runs.
    cur_pid = str(os.getpid())
    tmp_dir = os.path.join(self.config.getdir('TMP_DIR'), cur_pid)
    self.logger.info("Begin extract tiles")

    cur_init = init_time[0:8] + "_" + init_time[8:10]

    # Check that there are tc_pairs data, which are used as input
    if util.is_dir_empty(self.tc_pairs_dir):
        self.logger.error("No tc pairs data found at {}"
                          .format(self.tc_pairs_dir))
        sys.exit(1)

    # Create the name of the filter file we need to find. If
    # the file doesn't exist, then run TC_STAT.
    filter_filename = "filter_" + cur_init + ".tcst"
    filter_name = os.path.join(self.filtered_out_dir, cur_init,
                               filter_filename)

    if util.file_exists(filter_name) and not self.overwrite_flag:
        self.logger.debug("Filter file exists, using Track data file: {}"
                          .format(filter_name))
    else:
        # Create the storm track by applying the
        # filter options defined in the config/param file.
        # Use TcStatWrapper to build up the tc_stat command and invoke
        # the MET tool tc_stat to perform the filtering.
        tiles_list = util.get_files(self.tc_pairs_dir, ".*tcst",
                                    self.logger)
        tiles_list_str = ' '.join(tiles_list)
        tcs = TcStatWrapper(self.config, self.logger)
        tcs.build_tc_stat(self.filtered_out_dir, cur_init,
                          tiles_list_str, self.addl_filter_opts)

        # Remove any empty files and directories that can occur
        # from filtering.
        util.prune_empty(filter_name, self.logger)

    # Now get unique storm ids from the filter file,
    # filter_yyyymmdd_hh.tcst.
    sorted_storm_ids = util.get_storm_ids(filter_name, self.logger)

    # Check for an empty sorted_storm_ids list; if empty,
    # continue to the next time.
    if not sorted_storm_ids:
        # No storms found for init time, cur_init
        msg = "No storms were found for {} ...continue to next in list"\
              .format(cur_init)
        self.logger.debug(msg)
        return

    # Process each storm in the sorted_storm_ids list.
    # Iterate over each filter file in the output directory and
    # search for the presence of the storm id. Store the
    # corresponding rows of data in a temporary file in the
    # /tmp/<pid> directory.
    for cur_storm in sorted_storm_ids:
        storm_output_dir = os.path.join(self.filtered_out_dir,
                                        cur_init, cur_storm)
        with open(filter_name, "r") as filter_file:
            header = filter_file.readline()
        util.mkdir_p(storm_output_dir)
        util.mkdir_p(tmp_dir)
        tmp_filename = "filter_" + cur_init + "_" + cur_storm
        full_tmp_filename = os.path.join(tmp_dir, tmp_filename)

        storm_match_list = util.grep(cur_storm, filter_name)
        with open(full_tmp_filename, "a+") as tmp_file:
            # copy over header information
            tmp_file.write(header)
            for storm_match in storm_match_list:
                tmp_file.write(storm_match)

        # Perform regridding of the forecast and analysis files
        # to an n x n degree tile centered on the storm (dimensions
        # are indicated in the config/param file).
        feature_util.retrieve_and_regrid(full_tmp_filename, cur_init,
                                         cur_storm, self.filtered_out_dir,
                                         self.config)
    # end of for cur_storm

    # Remove any empty files and directories in the extract_tiles
    # output directory.
    util.prune_empty(self.filtered_out_dir, self.logger)

    # Clean up the tmp directory if it exists.
    if os.path.isdir(tmp_dir):
        util.rmtree(tmp_dir)
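# For reference, a minimal sketch of the cleanup helper used above. This
# assumes util.prune_empty removes zero-length files and then any
# directories left empty under the given root; the real METplus helper
# may differ, and the prune_empty_sketch name is hypothetical.
import os


def prune_empty_sketch(output_dir):
    """Walk output_dir bottom-up, deleting empty files and then any
       directories that are left empty."""
    for root, dirs, files in os.walk(output_dir, topdown=False):
        for name in files:
            path = os.path.join(root, name)
            if os.path.getsize(path) == 0:
                os.remove(path)
        if not os.listdir(root):
            os.rmdir(root)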
def grid2grid_pres_plots(self):
    logging_filename = self.logger.handlers[0].baseFilename
    self.add_env_var("LOGGING_FILENAME", logging_filename)
    plotting_scripts_dir = self.p.getdir('PLOTTING_SCRIPTS_DIR')

    # read config
    use_init = self.p.getbool('config', 'LOOP_BY_INIT', True)
    if use_init:
        start_t = self.p.getstr('config', 'INIT_BEG')
        end_t = self.p.getstr('config', 'INIT_END')
        loop_beg_hour = self.p.getint('config', 'INIT_BEG_HOUR')
        loop_end_hour = self.p.getint('config', 'INIT_END_HOUR')
        loop_inc = self.p.getint('config', 'INIT_INC')
        date_filter_method = "Initialization"
    else:
        start_t = self.p.getstr('config', 'VALID_BEG')
        end_t = self.p.getstr('config', 'VALID_END')
        loop_beg_hour = self.p.getint('config', 'VALID_BEG_HOUR')
        loop_end_hour = self.p.getint('config', 'VALID_END_HOUR')
        loop_inc = self.p.getint('config', 'VALID_INC')
        date_filter_method = "Valid"
    self.add_env_var("START_T", start_t)
    self.add_env_var("END_T", end_t)
    self.add_env_var("DATE_FILTER_METHOD", date_filter_method)

    stat_files_input_dir = self.p.getdir('STAT_FILES_INPUT_DIR')
    plotting_out_dir = self.p.getdir('PLOTTING_OUT_DIR')
    if os.path.exists(plotting_out_dir):
        self.logger.info(plotting_out_dir + " exists, removing")
        util.rmtree(plotting_out_dir)

    region_list = util.getlist(self.p.getstr('config', 'REGION_LIST'))
    lead_list = util.getlistint(self.p.getstr('config', 'LEAD_LIST'))
    model_list = self.p.getstr('config', 'MODEL_LIST')
    plot_stats_list = self.p.getstr('config', 'PLOT_STATS_LIST')
    self.add_env_var("STAT_FILES_INPUT_DIR", stat_files_input_dir)
    self.add_env_var("PLOTTING_OUT_DIR", plotting_out_dir)
    self.add_env_var("MODEL_LIST", model_list)
    self.add_env_var("PLOT_STATS_LIST", plot_stats_list)

    # Need to grab var info in a special way that differs from
    # util.parse_var_list: we need variables with a corresponding list
    # of levels. The logic is derived from util.parse_var_list.
    var_info_list = []

    # find all FCST_VARn_NAME keys in the conf files
    all_conf = self.p.keys('config')
    fcst_indices = []
    regex = re.compile(r"FCST_VAR(\d+)_NAME")
    for conf in all_conf:
        result = regex.match(conf)
        if result is not None:
            fcst_indices.append(result.group(1))

    # loop over all possible variables and add them to the list
    for n in fcst_indices:
        # get fcst var info if available
        if self.p.has_option('config', "FCST_VAR" + n + "_NAME"):
            fcst_name = self.p.getstr('config', "FCST_VAR" + n + "_NAME")

            fcst_extra = ""
            if self.p.has_option('config', "FCST_VAR" + n + "_OPTIONS"):
                fcst_extra = self.p.getraw('config',
                                           "FCST_VAR" + n + "_OPTIONS")

            fcst_levels = util.getlist(
                self.p.getstr('config', "FCST_VAR" + n + "_LEVELS"))

            # if OBS_VARn_X does not exist, use FCST_VARn_X
            if self.p.has_option('config', "OBS_VAR" + n + "_NAME"):
                obs_name = self.p.getstr('config',
                                         "OBS_VAR" + n + "_NAME")
            else:
                obs_name = fcst_name

            obs_extra = ""
            if self.p.has_option('config', "OBS_VAR" + n + "_OPTIONS"):
                obs_extra = self.p.getraw('config',
                                          "OBS_VAR" + n + "_OPTIONS")

            if self.p.has_option('config', "OBS_VAR" + n + "_LEVELS"):
                obs_levels = util.getlist(
                    self.p.getstr('config', "OBS_VAR" + n + "_LEVELS"))
            else:
                obs_levels = fcst_levels

            if len(fcst_levels) != len(obs_levels):
                print("ERROR: FCST_VAR" + n + "_LEVELS and OBS_VAR" + n +
                      "_LEVELS do not have the same number of elements")
                exit(1)

            fo = util.FieldObj()
            fo.fcst_name = fcst_name
            fo.obs_name = obs_name
            fo.fcst_extra = fcst_extra
            fo.obs_extra = obs_extra
            fo.fcst_level = fcst_levels
            fo.obs_level = obs_levels
            var_info_list.append(fo)

    loop_hour = loop_beg_hour
    while loop_hour <= loop_end_hour:
        loop_hour_str = str(loop_hour).zfill(2)
        self.add_env_var('CYCLE', loop_hour_str)
        for v in var_info_list:
            fcst_var_levels_list = v.fcst_level
            self.add_env_var('FCST_VAR_NAME', v.fcst_name)
            self.add_env_var(
                'FCST_VAR_LEVELS_LIST',
                ''.join(fcst_var_levels_list).replace(
                    "P", " P").lstrip().replace(" P", ", P"))
            obs_var_levels_list = v.obs_level
            self.add_env_var('OBS_VAR_NAME', v.obs_name)
            self.add_env_var(
                'OBS_VAR_LEVELS_LIST',
                ''.join(obs_var_levels_list).replace(
                    "P", " P").lstrip().replace(" P", ", P"))
            for region in region_list:
                self.add_env_var('REGION', region)
                for lead in lead_list:
                    lead_string = str(lead).zfill(2)
                    self.add_env_var('LEAD', lead_string)
                    for vl in range(len(fcst_var_levels_list)):
                        self.add_env_var('FCST_VAR_LEVEL',
                                         fcst_var_levels_list[vl])
                        self.add_env_var('OBS_VAR_LEVEL',
                                         obs_var_levels_list[vl])
                        py_cmd = "python " + os.path.join(
                            plotting_scripts_dir,
                            "plot_grid2grid_pres_ts.py")
                        process = subprocess.Popen(py_cmd, env=self.env,
                                                   shell=True)
                        process.wait()
                        print("")
                    py_cmd = "python " + os.path.join(
                        plotting_scripts_dir, "plot_grid2grid_pres_tp.py")
                    process = subprocess.Popen(py_cmd, env=self.env,
                                               shell=True)
                    process.wait()
                    print("")
                self.add_env_var("LEAD_LIST",
                                 self.p.getstr('config', 'LEAD_LIST'))
                py_cmd = "python " + os.path.join(
                    plotting_scripts_dir, "plot_grid2grid_pres_tsmean.py")
                process = subprocess.Popen(py_cmd, env=self.env,
                                           shell=True)
                process.wait()
                print("")
                py_cmd = "python " + os.path.join(
                    plotting_scripts_dir, "plot_grid2grid_pres_tpmean.py")
                process = subprocess.Popen(py_cmd, env=self.env,
                                           shell=True)
                process.wait()
                print("")
        loop_hour += loop_inc
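# Worked example of the level-list formatting above, using hypothetical
# pressure levels:
#   ''.join(['P1000', 'P500'])         -> "P1000P500"
#   "P1000P500".replace("P", " P")     -> " P1000 P500"
#   " P1000 P500".lstrip()             -> "P1000 P500"
#   "P1000 P500".replace(" P", ", P")  -> "P1000, P500"
# so FCST_VAR_LEVELS_LIST is exported as "P1000, P500".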
def grid2grid_sfc_plots(self):
    logging_filename = self.logger.handlers[0].baseFilename
    self.add_env_var("LOGGING_FILENAME", logging_filename)
    plotting_scripts_dir = self.p.getdir('PLOTTING_SCRIPTS_DIR')

    # read config
    use_init = self.p.getbool('config', 'LOOP_BY_INIT', True)
    if use_init:
        start_t = self.p.getstr('config', 'INIT_BEG')
        end_t = self.p.getstr('config', 'INIT_END')
        loop_beg_hour = self.p.getint('config', 'INIT_BEG_HOUR')
        loop_end_hour = self.p.getint('config', 'INIT_END_HOUR')
        loop_inc = self.p.getint('config', 'INIT_INC')
        date_filter_method = "Initialization"
    else:
        start_t = self.p.getstr('config', 'VALID_BEG')
        end_t = self.p.getstr('config', 'VALID_END')
        loop_beg_hour = self.p.getint('config', 'VALID_BEG_HOUR')
        loop_end_hour = self.p.getint('config', 'VALID_END_HOUR')
        loop_inc = self.p.getint('config', 'VALID_INC')
        date_filter_method = "Valid"
    self.add_env_var("START_T", start_t)
    self.add_env_var("END_T", end_t)
    self.add_env_var("DATE_FILTER_METHOD", date_filter_method)

    stat_files_input_dir = self.p.getdir('STAT_FILES_INPUT_DIR')
    plotting_out_dir = self.p.getdir('PLOTTING_OUT_DIR')
    if os.path.exists(plotting_out_dir):
        self.logger.info(plotting_out_dir + " exists, removing")
        util.rmtree(plotting_out_dir)

    region_list = util.getlist(self.p.getstr('config', 'REGION_LIST'))
    lead_list = util.getlistint(self.p.getstr('config', 'LEAD_LIST'))
    model_list = self.p.getstr('config', 'MODEL_LIST')
    plot_stats_list = self.p.getstr('config', 'PLOT_STATS_LIST')
    self.add_env_var("STAT_FILES_INPUT_DIR", stat_files_input_dir)
    self.add_env_var("PLOTTING_OUT_DIR", plotting_out_dir)
    self.add_env_var("MODEL_LIST", model_list)
    self.add_env_var("PLOT_STATS_LIST", plot_stats_list)

    var_list = util.parse_var_list(self.p)

    loop_hour = loop_beg_hour
    while loop_hour <= loop_end_hour:
        loop_hour_str = str(loop_hour).zfill(2)
        self.add_env_var('CYCLE', loop_hour_str)
        for var_info in var_list:
            fcst_var_name = var_info.fcst_name
            fcst_var_level = var_info.fcst_level
            obs_var_name = var_info.obs_name
            obs_var_level = var_info.obs_level
            self.add_env_var('FCST_VAR_NAME', fcst_var_name)
            self.add_env_var('FCST_VAR_LEVEL', fcst_var_level)
            self.add_env_var('OBS_VAR_NAME', obs_var_name)
            self.add_env_var('OBS_VAR_LEVEL', obs_var_level)
            for region in region_list:
                self.add_env_var('REGION', region)
                for lead in lead_list:
                    lead_string = str(lead).zfill(2)
                    self.add_env_var('LEAD', lead_string)
                    py_cmd = "python " + os.path.join(
                        plotting_scripts_dir, "plot_grid2grid_sfc_ts.py")
                    process = subprocess.Popen(py_cmd, env=self.env,
                                               shell=True)
                    process.wait()
                    print("")
                self.add_env_var("LEAD_LIST",
                                 self.p.getstr('config', 'LEAD_LIST'))
                py_cmd = "python " + os.path.join(
                    plotting_scripts_dir, "plot_grid2grid_sfc_tsmean.py")
                process = subprocess.Popen(py_cmd, env=self.env,
                                           shell=True)
                process.wait()
                print("")
        loop_hour += loop_inc
    SBI.run_all_times()
    produtil.log.postmsg('SeriesByInitWrapper.run_all_times completed')

    # Remove all conf file command line arguments from sys.argv,
    # except sys.argv[0]. Removing conf args allows unittest.main()
    # to run, else it will fail.
    for arg in range(1, len(sys.argv)):
        sys.argv.pop()

    # Workaround - to pass command line args in to unittest.main(),
    # you must code them in here...
    # For example, uncomment the next line and you will see the
    # available options.
    # sys.argv.append('-h')

    # Setting exit=False allows unittest to return and NOT sys.exit,
    # which allows commands after unittest.main() to execute.
    unittest.main(exit=False)

    # Caveat to exit=False:
    # It seems that if you pass in an argument, then unittest will
    # sys.exit and these lines do not get executed,
    # at least for the '-h' argument... i.e. sys.argv.append('-h')
    util.rmtree(SBI.series_out_dir)
    util.rmtree(SBI.series_filtered_out_dir)

except Exception as exc:
    produtil.log.jlogger.critical('test_series_by_init failed: %s' %
                                  (str(exc), ), exc_info=True)
    sys.exit(2)
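# Note: the argv-clearing loop above can be written more directly as
#   del sys.argv[1:]
# which likewise leaves only sys.argv[0] in place.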
def apply_series_filters(self, tile_dir, init_times, series_output_dir,
                         filter_opts, temporary_dir):
    """! Apply filter options, as specified in the param/config file.

         Args:
            @param tile_dir:  Directory where input data files reside,
                              e.g. the data to which we will apply our
                              filter criteria.
            @param init_times:  List of init times that define the
                                input data.
            @param series_output_dir:  The directory where the filter
                                       results will be stored.
            @param filter_opts:  The filter options to apply.
            @param temporary_dir:  The temporary directory where
                                   intermediate files are saved.
         Returns:
            None
    """
    # pylint: disable=too-many-arguments
    # Seven input arguments are needed to perform filtering.

    # pylint:disable=protected-access
    # Need to call sys._getframe() to get the filename and method/func
    # name for logging information.

    # Useful for logging
    cur_filename = sys._getframe().f_code.co_filename
    cur_function = sys._getframe().f_code.co_name

    # Create the temporary directory where intermediate files
    # are saved.
    cur_pid = str(os.getpid())
    tmp_dir = os.path.join(temporary_dir, cur_pid)
    self.logger.debug("creating tmp dir: " + tmp_dir)

    for cur_init in init_times:
        # Call the tc_stat wrapper to build up the command and invoke
        # the MET tool tc_stat.
        filter_file = "filter_" + cur_init + ".tcst"
        filter_filename = os.path.join(series_output_dir, cur_init,
                                       filter_file)
        tcs = TcStatWrapper(self.config, self.logger)
        tcs.build_tc_stat(series_output_dir, cur_init, tile_dir,
                          filter_opts)

        # Check that the filter.tcst file isn't empty. If it is,
        # then use the files from extract_tiles as input
        # (tile_dir = extract_out_dir).
        if not util.file_exists(filter_filename):
            msg = ("Non-existent filter file; filter was never created "
                   "by the MET tool tc_stat.")
            self.logger.debug(msg)
            continue
        elif os.stat(filter_filename).st_size == 0:
            msg = "Empty filter file; filter options yield nothing."
            self.logger.debug(msg)
            continue
        else:
            # Now retrieve the files corresponding to the storm ids
            # that resulted from filtering.
            sorted_storm_ids = util.get_storm_ids(filter_filename,
                                                  self.logger)

            # Retrieve the header from filter_filename, to be used in
            # creating the temporary files.
            with open(filter_filename, 'r') as filter_file:
                header = filter_file.readline()

            for cur_storm in sorted_storm_ids:
                msg = ("Processing storm: " + cur_storm + " for file: " +
                       filter_filename)
                self.logger.debug(msg)
                storm_output_dir = os.path.join(series_output_dir,
                                                cur_init, cur_storm)
                util.mkdir_p(storm_output_dir)
                util.mkdir_p(tmp_dir)
                tmp_file_base = "filter_" + cur_init + "_" + cur_storm
                tmp_filename = os.path.join(tmp_dir, tmp_file_base)
                storm_match_list = util.grep(cur_storm, filter_filename)
                with open(tmp_filename, "a+") as tmp_file:
                    tmp_file.write(header)
                    for storm_match in storm_match_list:
                        tmp_file.write(storm_match)

                # Create the analysis and forecast files based on the
                # storms (defined in the tmp_filename created above).
                # Store the analysis and forecast files in the
                # series_output_dir.
                feature_util.retrieve_and_regrid(tmp_filename, cur_init,
                                                 cur_storm,
                                                 series_output_dir,
                                                 self.config)

    # Check for any empty files and directories and remove them to
    # avoid any errors or performance degradation when performing
    # series analysis.
    util.prune_empty(series_output_dir, self.logger)

    # Clean up the tmp dir
    util.rmtree(tmp_dir)
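# Hypothetical usage sketch for apply_series_filters; the paths, init
# times, and filter options below are illustrative assumptions, not
# values from the source:
#   wrapper.apply_series_filters(
#       tile_dir='/d1/extract_tiles',
#       init_times=['20141201_00', '20141201_06'],
#       series_output_dir='/d1/series_init_filtered',
#       filter_opts='-basin ML',
#       temporary_dir='/tmp')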