def standalone_main(): # Load command line options parser = OptionParser(usage="usage: %prog [options] [summary_files]...") parser.add_option( "-r", "--run_dir_file", dest="run_dir_file", metavar="FILE", help="file to read list of run directories from") parser.add_option( "-o", "--output_dir", dest="output_dir", metavar="DIR", help="directory where to write plot files") parser.add_option( "-f", "--filter", dest="filter_names", metavar="REGEXP", type="string", action="append", help="filter matching filenames according to the supplied regular expression", ) parser.add_option( "-i", "--indexes", dest="indexes", metavar="NUM", type="string", action="append", help="which indexes that would generate multiple pages of plots to use", ) parser.add_option( "--log", dest="make_logs", default=False, action="store_true", help="make debugging log files per plot file") # Parse command line arguments (options, args) = parser.parse_args() # Initialize logging L2_Log_Util.init_logging() # Gather list of run directories to gather information from run_dirs = [] for arg_dir in args: run_dirs.append(arg_dir) if options.indexes != None: indexes = [] for idx_arg in options.indexes: indexes += OCO_TextUtils.index_range_list(idx_arg) else: indexes = None if options.run_dir_file != None: if not os.path.exists(options.run_dir_file): parser.error("Run directory file '%s' does not exist" % options.run_dir_file) with open(options.run_dir_file, 'r') as run_dir_fh: run_dirs += [ file_dir.strip() for file_dir in run_dir_fh.readlines() ] plot_run_comparisons(run_dirs, output_directory=options.output_dir, filename_filters=options.filter_names, page_indexes=indexes, debugging_logs=options.make_logs)
def standalone_main(): # Load command line options parser = OptionParser(usage="usage: %prog [options] [run_dir_file]") parser.add_option( "-t", "--timings_file", dest="timings_file", metavar="FILE", default="timings.dat", help="filename to output timing information other than default") parser.add_option("-a", "--additional_info", dest="additional_info", default=False, action="store_true", help="output additional run information columns") parser.add_option("-p", "--parallel_info", dest="parallel_info", default=False, action="store_true", help="output parallel statistics information columns") parser.add_option("-v", "--verbose", dest="verbose", default=False, action="store_true", help="Output more information to screen on processing") # Parse command line arguments (options, args) = parser.parse_args() # Initialize logging if options.verbose: L2_Log_Util.init_logging(logging.DEBUG) else: L2_Log_Util.init_logging(logging.INFO) if (len(args) < 1): parser.error('A file containing a list of run directories is needed') run_dirs_file = args[0] output_data_file = options.timings_file test_data_locs_f = open(run_dirs_file, 'r') run_dirs = test_data_locs_f.readlines() test_data_locs_f.close() compute_runtimes(run_dirs, output_data_file, additional_cols=options.additional_info, parallel_cols=options.parallel_info)
def standalone_main():
    if len(sys.argv) < 5:
        print "usage:\n\t", os.path.basename(sys.argv[0]), "<input_matrix_file> <output_covariance_file> <scaling> <column_def>\n"
        sys.exit(1)

    input_matrix_file  = sys.argv[1]
    output_matrix_file = sys.argv[2]
    scaling            = sys.argv[3]
    column_def         = sys.argv[4]

    L2_Log_Util.init_logging()
    create_simple_cov(input_matrix_file, output_matrix_file, scaling, column_def)
def standalone_main():
    if len(sys.argv) < 3:
        print "usage:\n\t", os.path.basename(sys.argv[0]), "<input_spectra_file> <output_spectra_file> [row_range_spec]\n"
        sys.exit(1)

    input_radiance_file = sys.argv[1]
    output_radiance_file = sys.argv[2]

    if len(sys.argv) > 3:
        row_range_spec = sys.argv[3]
    else:
        row_range_spec = None

    L2_Log_Util.init_logging()
    noisify_spectra_file(input_radiance_file, output_radiance_file, row_range_spec)
def standalone_main(): # Load command line options parser = OptionParser(usage="usage: %prog [options] [run_dir_file]") parser.add_option( "-t", "--timings_file", dest="timings_file", metavar="FILE", default="timings.dat", help="filename to output timing information other than default") parser.add_option( "-a", "--additional_info", dest="additional_info", default=False, action="store_true", help="output additional run information columns") parser.add_option( "-p", "--parallel_info", dest="parallel_info", default=False, action="store_true", help="output parallel statistics information columns" ) parser.add_option( "-v", "--verbose", dest="verbose", default=False, action="store_true", help="Output more information to screen on processing" ) # Parse command line arguments (options, args) = parser.parse_args() # Initialize logging if options.verbose: L2_Log_Util.init_logging(logging.DEBUG) else: L2_Log_Util.init_logging(logging.INFO) if (len(args) < 1): parser.error('A file containing a list of run directories is needed') run_dirs_file = args[0] output_data_file = options.timings_file test_data_locs_f = open(run_dirs_file, 'r') run_dirs = test_data_locs_f.readlines() test_data_locs_f.close() compute_runtimes(run_dirs, output_data_file, additional_cols=options.additional_info, parallel_cols=options.parallel_info)
def standalone_main(): # Load command line options parser = OptionParser(usage="usage: %prog [options] [run_dir] [run_dir]...") parser.add_option( "-r", "--run_dir_file", dest="run_dir_file", metavar="FILE", help="file to read list of run directories from") parser.add_option( "--stat_file", dest="stat_filename", metavar="FILE", default="./stats.dat", help="name of stats table file other than default") parser.add_option( "--summ_file", dest="summ_filename", metavar="FILE", default="./summary.dat", help="name of summary table file other than default") parser.add_option( "--overwrite", dest="overwrite", default=False, action="store_true", help="Overwrite existing summary and stats file instead of adding to existing contents" ) parser.add_option( "-v", "--verbose", dest="verbose", default=False, action="store_true", help="Output more information to screen on processing" ) # Parse command line arguments (options, args) = parser.parse_args() # Initialize logging if options.verbose: L2_Log_Util.init_logging(logging.DEBUG) else: L2_Log_Util.init_logging(logging.INFO) # Gather list of run directories to gather information from run_dirs = [] if (len(args) > 0): for arg_dir in args: run_dirs.append(arg_dir) if options.run_dir_file != None: if not os.path.exists(options.run_dir_file): parser.error("Run directory file '%s' does not exist" % options.run_dir_file) run_dir_fh = open(options.run_dir_file, 'r') for file_dir in run_dir_fh.readlines(): run_dirs.append(file_dir.strip()) run_dir_fh.close() # Sort items from run dir list run_dirs.sort() if len(run_dirs) == 0: parser.error('at least one run directory must be specified to be plotted') ts_summ = testset_summary() ts_summ.make_summary_output(run_dirs, options.summ_filename, options.stat_filename, overwrite=options.overwrite)
metavar="KEY=VALUE", type="string", action="append", help="define a constant keyword substitution and value" ) parser.add_option( "-o", "--overwrite", dest="overwrite", default=False, action="store_true", help="Overwrite existing case directories" ) # Parse command line arguments (cmd_options, control_files) = parser.parse_args() # Initialize logging L2_Log_Util.init_logging() if len(control_files) == 0: parser.error('at least one testcase input file must be specified') # Create list substitution dictionary from command line args constants_dict = Parse_KeyVal_Str_List(cmd_options.constants) if cmd_options.max_cases != None: max_num_cases = cmd_options.max_cases else: max_num_cases = None run_generator(control_files, constants_dict, max_num_cases, cmd_options.overwrite)
type="string", action="append", help="define a constant keyword substitution and value") parser.add_option("-o", "--overwrite", dest="overwrite", default=False, action="store_true", help="Overwrite existing case directories") # Parse command line arguments (cmd_options, control_files) = parser.parse_args() # Initialize logging L2_Log_Util.init_logging() if len(control_files) == 0: parser.error('at least one testcase input file must be specified') # Create list substitution dictionary from command line args constants_dict = Parse_KeyVal_Str_List(cmd_options.constants) if cmd_options.max_cases != None: max_num_cases = cmd_options.max_cases else: max_num_cases = None run_generator(control_files, constants_dict, max_num_cases, cmd_options.overwrite)
def standalone_main(): parser = OptionParser(usage="usage: %prog [options] [run_dir_base]...") parser.add_option("-o", "--overwrite", dest="overwrite", default=False, action="store_true", help="overwrite any existing files instead of skipping") parser.add_option( "-d", "--output_dir", dest="output_dir", metavar="DIR", type="string", help="directory for outputting plots and summary files", ) parser.add_option("-r", "--recurse", dest="recurse", default=False, action="store_true", help="recurse from base dir looking for set directories") parser.add_option("-f", "--fail_on_error", dest="fail_on_error", default=False, action="store_true", help="fail if any sub-programs fail") parser.add_option("-p", "--make_plots", dest="make_plots", default=False, action="store_true", help="make plots for run directories") parser.add_option("-X", "--use_xvfb", dest="use_xvfb", default=False, action="store_true", help="use X frame buffer in case no X server available") parser.add_option( "--use_idl", dest="use_idl", default=False, action="store_true", help="use idl based routines, requires pyIDL Python module") parser.add_option( "--opt", dest="pass_option", metavar="KEY=VALUE", type="string", action="append", help="additional arguments to pass along to modules", ) # Initialize logging L2_Log_Util.init_logging() # Parse command line arguments (options, args) = parser.parse_args() if len(args) >= 1: base_dir_list = args else: base_dir_list = ['./'] addl_args = {} if options.pass_option != None: for arg_string in options.pass_option: if arg_string.find('=') < 0: parser.error('pass_option must be formated KEY=VALUE') (key, value) = arg_string.split('=') addl_args[key.strip()] = eval(value.strip()) # Need a place to draw for IDL commands if options.use_xvfb: display_index = launch_xvfb() if display_index == None: raise Exception('Was unable to initialize or use xvfb') else: os.environ['DISPLAY'] = ':%d' % display_index summ_obj = testset_summary(**addl_args) if options.make_plots: import l2_vis vis_opts = dict(idl_recompile=DO_RECOMPILE, do_sv_profiles=options.use_idl, **addl_args) else: vis_opts = None if options.make_plots and options.use_idl: from standard_testplots import standard_testplots tstplot_obj = standard_testplots(recompile=DO_RECOMPILE, **addl_args) hist_obj = None # Disabled for now #hist_obj = sza_histogram(recompile=DO_RECOMPILE, **addl_args) else: tstplot_obj = None hist_obj = None for base_dir in base_dir_list: if options.output_dir == None: if options.recurse: output_dir = '%s/' + RESULTS_DIR_BASENAME else: output_dir = '%s/%s' % (base_dir.rstrip('/'), RESULTS_DIR_BASENAME) else: output_dir = options.output_dir if options.recurse: summarize_recursively(base_dir, output_dir, summ_obj=summ_obj, tstplot_obj=tstplot_obj, vist_opts=vis_opts, hist_obj=hist_obj, overwrite=options.overwrite, fail_on_error=options.fail_on_error) else: summarize_set_dir(base_dir, output_dir, summ_obj=summ_obj, tstplot_obj=tstplot_obj, vis_opts=vis_opts, hist_obj=hist_obj, overwrite=options.overwrite, fail_on_error=options.fail_on_error)
def summarize_set_dir(set_dir, results_dir, summ_obj=None, tstplot_obj=None, vis_opts=None, hist_obj=None, overwrite=False, fail_on_error=False, verbose=False):
    logger = logging.getLogger(LOGGER_NAME)

    # Remove trailing slashes
    set_dir = set_dir.rstrip('/')
    results_dir = results_dir.rstrip('/')

    if not os.path.exists(results_dir):
        os.makedirs(results_dir)

    # Open log file for set dir
    log_handler = L2_Log_Util.open_log_file( os.path.join(results_dir, LOG_FILE_BASE) )

    # Create necessary objects if not passed in
    if summ_obj == None:
        summ_obj = testset_summary()

    # Create status files
    run_lists = stats.gather_status(set_dir, results_dir)

    for (run_type, run_directories) in run_lists.items():
        if run_type not in SUMMARY_TYPES:
            continue

        input_files_only = run_type == 'forward_model' or run_type == 'jacobian_only'

        summ_file = SUMM_FILE_FMT % (results_dir, run_type)
        stat_file = STAT_FILE_FMT % (results_dir, run_type)
        plot_file = PLOT_FILE_FMT % (results_dir, run_type)
        time_file = TIME_FILE_FMT % (results_dir, run_type)
        plots_dir = PLOTS_DIR_FMT % (results_dir, run_type)

        processing_msg = 'generating summary files for run type: %s for set dir: %s' % (run_type, set_dir)
        with error_trapping(fail_on_error, processing_msg):
            summ_obj.make_summary_output(run_directories,
                                         stat_filename=stat_file,
                                         summ_filename=summ_file,
                                         overwrite=overwrite)

        ####
        if tstplot_obj != None:
            processing_msg = 'generating summary plots for run type: %s for set dir: %s' % (run_type, set_dir)
            with error_trapping(fail_on_error, processing_msg):
                tstplot_obj.make_summary_output(stat_filename=stat_file,
                                                summ_filename=summ_file,
                                                plot_filename=plot_file,
                                                overwrite=overwrite)

        ####
        if vis_opts != None and not input_files_only:
            import l2_vis

            if not os.path.exists(plots_dir):
                logger.info('Creating plots dir: %s' % plots_dir)
                os.makedirs(plots_dir)

            processing_msg = 'creating per run directory plots for run type: %s for set dir: %s' % (run_type, set_dir)
            with error_trapping(fail_on_error, processing_msg):
                l2_vis.make_run_plots(run_directories, plots_dir, overwrite=overwrite, **vis_opts)

        ####
        # Commented out as the new L2 code does not have the output that this routine reads anymore
        # processing_msg = 'generating summary timings info file for run type: %s for set dir: %s' % (run_type, set_dir)
        # with error_trapping(fail_on_error, processing_msg):
        #     make_time_file = True
        #     if not overwrite and os.path.exists(time_file):
        #         time_obj = OCO_Matrix()
        #         time_obj.read(time_file, read_data=False)
        #         if time_obj.dims[0] == len(run_directories):
        #             if verbose:
        #                 logger.info('Skipping recreating timings file: %s' % time_file)
        #             make_time_file = False
        #     if make_time_file:
        #         compute_runtimes(run_directories, time_file)

        ####
        if hist_obj != None:
            processing_msg = 'creating solar zenith angle histogram plot for set dir: %s' % set_dir
            with error_trapping(fail_on_error, processing_msg):
                hist_obj.plot(set_dir, results_dir, verbose)

    L2_Log_Util.close_log_file(log_handler)
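# Illustrative sketch only: error_trapping() is a context manager defined
# elsewhere in this package.  From its use above it is assumed to re-raise
# exceptions when fail_on_error is True, and otherwise log the supplied
# processing message along with the error and continue.  The function below is
# a hypothetical stand-in under those assumptions, not the package's actual
# implementation.
import contextlib
import traceback

@contextlib.contextmanager
def _error_trapping_sketch(fail_on_error, processing_msg):
    try:
        yield
    except Exception:
        if fail_on_error:
            raise
        logger = logging.getLogger(LOGGER_NAME)
        logger.error('Error while %s' % processing_msg)
        logger.error(traceback.format_exc())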
def standalone_main(): parser = OptionParser(usage="usage: %prog [options] [run_dir_base]...") parser.add_option( "-o", "--overwrite", dest="overwrite", default=False, action="store_true", help="overwrite any existing files instead of skipping") parser.add_option( "-d", "--output_dir", dest="output_dir", metavar="DIR", type="string", help="directory for outputting plots and summary files", ) parser.add_option( "-r", "--recurse", dest="recurse", default=False, action="store_true", help="recurse from base dir looking for set directories") parser.add_option( "-f", "--fail_on_error", dest="fail_on_error", default=False, action="store_true", help="fail if any sub-programs fail") parser.add_option( "-p", "--make_plots", dest="make_plots", default=False, action="store_true", help="make plots for run directories") parser.add_option( "-X", "--use_xvfb", dest="use_xvfb", default=False, action="store_true", help="use X frame buffer in case no X server available") parser.add_option( "--use_idl", dest="use_idl", default=False, action="store_true", help="use idl based routines, requires pyIDL Python module") parser.add_option( "--opt", dest="pass_option", metavar="KEY=VALUE", type="string", action="append", help="additional arguments to pass along to modules", ) # Initialize logging L2_Log_Util.init_logging() # Parse command line arguments (options, args) = parser.parse_args() if len(args) >= 1: base_dir_list = args else: base_dir_list = [ './' ] addl_args = {} if options.pass_option != None: for arg_string in options.pass_option: if arg_string.find('=') < 0: parser.error('pass_option must be formated KEY=VALUE') (key, value) = arg_string.split('=') addl_args[key.strip()] = eval(value.strip()) # Need a place to draw for IDL commands if options.use_xvfb: display_index = launch_xvfb() if display_index == None: raise Exception('Was unable to initialize or use xvfb') else: os.environ['DISPLAY'] = ':%d' % display_index summ_obj = testset_summary(**addl_args) if options.make_plots: import l2_vis vis_opts = dict(idl_recompile=DO_RECOMPILE, do_sv_profiles=options.use_idl, **addl_args) else: vis_opts = None if options.make_plots and options.use_idl: from standard_testplots import standard_testplots tstplot_obj = standard_testplots(recompile=DO_RECOMPILE, **addl_args) hist_obj = None # Disabled for now #hist_obj = sza_histogram(recompile=DO_RECOMPILE, **addl_args) else: tstplot_obj = None hist_obj = None for base_dir in base_dir_list: if options.output_dir == None: if options.recurse: output_dir = '%s/' + RESULTS_DIR_BASENAME else: output_dir = '%s/%s' % (base_dir.rstrip('/'), RESULTS_DIR_BASENAME) else: output_dir = options.output_dir if options.recurse: summarize_recursively(base_dir, output_dir, summ_obj=summ_obj, tstplot_obj=tstplot_obj, vist_opts=vis_opts, hist_obj=hist_obj, overwrite=options.overwrite, fail_on_error=options.fail_on_error) else: summarize_set_dir(base_dir, output_dir, summ_obj=summ_obj, tstplot_obj=tstplot_obj, vis_opts=vis_opts, hist_obj=hist_obj, overwrite=options.overwrite, fail_on_error=options.fail_on_error)
def plot_group(run_directories, output_directory=None, filename_filters=None, overwrite=False, debugging_logs=False, **kwargs):
    """Plot files for a group of supposedly similar run directories"""

    logger = logging.getLogger()

    # Find common name among basenames of run directories
    dir_group_name = os.path.commonprefix( [ os.path.basename(curr_dir.strip('/')) for curr_dir in run_directories ] )

    logger.info('Plotting run group: ' + dir_group_name)

    for curr_glob, curr_routine in PLOT_ROUTINE_GLOBS.items():
        # For each type of plotting glob find matching files
        plot_files = []
        for curr_dir in run_directories:
            # glob may return multiple
            glob_results = glob.glob( os.path.join(curr_dir, curr_glob) )

            if filename_filters == None:
                plot_files += glob_results
            else:
                for curr_filter in filename_filters:
                    for curr_result in glob_results:
                        if re.search(curr_filter, curr_result):
                            plot_files.append(curr_result)

        # Group together globbed files according to basename
        # Safe way to only plot files that appear in all grouped dirs
        file_groups = get_path_groups(plot_files)

        for curr_group_files in file_groups:
            group_basename = os.path.splitext(os.path.basename(curr_group_files[0]))[0]
            group_title = '%s %s' % (dir_group_name, group_basename)

            output_filename = '%s_%s.%s' % (dir_group_name, group_basename, OUTPUT_FORMAT)
            if output_directory != None:
                if not os.path.exists(output_directory):
                    os.makedirs(output_directory)
                output_filename = os.path.join(output_directory, output_filename)

            if debugging_logs:
                log_filename = '%s.%s' % (os.path.splitext(output_filename)[0], 'log')
                logger.info('Writing log file: %s' % log_filename)
                log_handler = L2_Log_Util.open_log_file(log_filename)

            logger.info('Plotting file group: ' + group_basename)
            logger.debug('Group files:')
            for curr_file in curr_group_files:
                logger.debug(os.path.realpath(curr_file))

            # Remove common parts from end of filenames
            group_file_labels = [ os.path.dirname(re.sub(dir_group_name + '.*', '', curr_file)) for curr_file in curr_group_files ]

            # Remove common directory part from beginning of labels
            common_dir_prefix = os.path.commonprefix([ os.path.dirname(curr_lbl) for curr_lbl in group_file_labels ])
            if len(common_dir_prefix) > 0:
                group_file_labels = [ curr_lbl.replace(common_dir_prefix, '') for curr_lbl in group_file_labels ]

            if os.path.exists(output_filename):
                logger.info('Skipping existing plot file: %s' % output_filename)
            else:
                logger.info('Creating plot file: %s' % output_filename)

                # Plot files using plotting routine associated with file glob
                try:
                    plotted_filenames = curr_routine(curr_group_files, group_title, group_file_labels, **kwargs)
                    combine_pdfs(plotted_filenames, output_filename, remove_source_files=True)
                    logger.debug('Combined %d plot files' % len(plotted_filenames))
                except:
                    logger.error('Failed to create plot file: %s' % output_filename)
                    logger.error('Error running routine %s for files %s:' % (curr_routine, curr_group_files))
                    logger.error(''.join(traceback.format_exception(*sys.exc_info(), limit=2)))

            if debugging_logs:
                L2_Log_Util.close_log_file(log_handler)
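# Illustrative sketch only: get_path_groups() is defined elsewhere in this
# package.  From its use above it is assumed to group a flat list of file
# paths by basename, so that matching files from different run directories end
# up in the same group.  The function below is a hypothetical stand-in under
# that assumption, not the package's actual implementation.
def _get_path_groups_sketch(plot_files):
    """Group ['a/x.dat', 'b/x.dat', 'a/y.dat'] into [['a/x.dat', 'b/x.dat'], ['a/y.dat']]."""
    groups = {}
    for curr_file in plot_files:
        groups.setdefault(os.path.basename(curr_file), []).append(curr_file)
    return list(groups.values())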