def standalone_main():
    """Plot per-band spectral fits for soundings in an L2 file to a multi-page PDF.

    Sounding ids come from -i/--sounding_id (repeatable), a file given via
    -l/--sounding_id_list, or default to every id in the analysis environment.

    Raises:
        IOError: if the given L2 filename does not exist.
    """
    parser = ArgumentParser()

    parser.add_argument("l2_filename",
            help="L2 file from which to plot soundings")
    parser.add_argument("-i", "--sounding_id", metavar="SOUNDING_ID", action="append",
            help="Plot the single specified sounding id")
    parser.add_argument("-l", "--sounding_id_list", metavar="FILE",
            help="A file with a list of sounding ids to plot")
    parser.add_argument("-o", "--output_file", metavar="FILE",
            help="Name of output file to be used instead of default")
    parser.add_argument("-v", "--verbose", action="store_true",
            help="Output additional verbose information")

    # Parse command line arguments
    args = parser.parse_args()

    init_logging(logging.DEBUG if args.verbose else logging.INFO)

    if not os.path.exists(args.l2_filename):
        raise IOError("L2 filename specified does not exist: %s" % args.l2_filename)

    analysis = l2_analysis.AnalysisEnvironment([args.l2_filename])
    analysis.add_routines(l2_analysis.FrankenburgPlotRoutines(analysis_env=analysis))

    plot_sounding_ids = []
    if args.sounding_id or args.sounding_id_list:
        # Coerce ids supplied as strings to the same scalar type used by
        # comparison_ids so later comparisons/lookups match.
        id_type = analysis.comparison_ids[0].dtype.type
        if args.sounding_id:
            plot_sounding_ids += [id_type(snd_id) for snd_id in args.sounding_id]
        if args.sounding_id_list:
            with open(args.sounding_id_list) as id_file:
                plot_sounding_ids += [id_type(line.strip()) for line in id_file]
    else:
        plot_sounding_ids = analysis.comparison_ids

    # Default output name encodes the (range of) plotted sounding ids
    if args.output_file:
        output_file = args.output_file
    elif len(plot_sounding_ids) > 1:
        output_file = "spectral_fit_%s-%s.pdf" % (plot_sounding_ids[0], plot_sounding_ids[-1])
    else:
        output_file = "spectral_fit_%s.pdf" % (plot_sounding_ids[0])

    logging.info("Writing plots to: %s" % output_file)

    num_soundings = len(plot_sounding_ids)

    # Use PdfPages as a context manager so the PDF file is finalized even
    # when a plotting routine raises part way through; the original code
    # leaked the open file handle on error.
    with PdfPages(output_file) as pdf:
        fig_sets = analysis.call_analysis_routine("plot_spectral_fits",
                ids=(plot_sounding_ids,), all_soundings=True)
        for idx, (snd_id, figs) in enumerate(zip(plot_sounding_ids, fig_sets)):
            logging.info("[%d / %d] %s" % (idx + 1, num_soundings, snd_id))
            for band_fig in figs:
                pdf.savefig(band_fig)
def standalone_main():
    """Parse command-line options and splice input HDF5 products for a set
    of sounding ids into a single output file via process_files().

    Exits through parser.error() when neither positional filenames nor an
    input-files list are supplied.
    """
    parser = ArgumentParser(description="Splices together input HDF5 products for a given set of sounding ids")

    parser.add_argument( "filenames", metavar="FILE", nargs='*',
                         help="files to splice, may be left blank if using the -i --input-files-list option" )

    parser.add_argument( "-i", "--input-files-list", dest="input_files_list",
                         metavar="FILE",
                         help="text file with input filenames to splice")

    parser.add_argument( "-s", "--sounding-id-file", dest="sounding_id_file",
                         metavar="FILE",
                         help="file containing list of soundings for destination file")

    parser.add_argument( "-o", "--output-file", dest="output_filename",
                         metavar="FILE", default=DEFAULT_OUTPUT_FILENAME,
                         help="output filename of splice data, default: %s" % DEFAULT_OUTPUT_FILENAME)

    parser.add_argument( "-d", "--datasets-list-file", dest="datasets_list_file",
                         metavar="FILE",
                         help="file containing list of only datasets to consider for copying. If rename_mapping is enabled then the names are matched on their destination dataset name")

    parser.add_argument( "-r", "--rename-mapping", dest="rename_mapping",
                         action="store_true",
                         default=False,
                         help="rename datasets into output file according to internal mapping table as they would appear in the L2Agg PGE")

    parser.add_argument( "-w", "--workers", dest="workers", type=int, default=1,
                         help="Number of workers to use when parallelizing splicing" )

    parser.add_argument( "--temp", dest="temp_dir", default=os.curdir,
                         help="Directory where temporary files are saved when number of parallel workers is greater than 1" )

    parser.add_argument( "-l", "--log_file", dest="log_file", 
                         help="Save verbose information to log file" )

    parser.add_argument( "--agg-names-filter", dest="agg_names_filter",
                         action="store_true",
                         default=False,
                         help="include only dataset names that would appear in the L2Agg PGE. Its only makes sense to use this option with --rename_mapping")

    parser.add_argument( "--splice-all", dest="splice_all",
                         action="store_true",
                         default=False,
                         help="splice all datasets, including those which do not have a sounding dimension. Note that datasets without an explicit handler and no sounding dimension are simply copied from the first file.")

    # These two options share one tri-state dest: None means auto-detect,
    # True/False skip the source-type determination stage.
    parser.add_argument( "--multiple-file-types", dest="multi_source_types", action="store_true", default=None,
                         help="indicates that multiple file type sources are being spliced. Speeds up multiple source type determination stage by being specified." )

    parser.add_argument( "--single-file-type", dest="multi_source_types", action="store_false", default=None,
                         help="indicates that a single type of file is being spliced. Speeds up multiple source type determination stage by being specified." )

    parser.add_argument( "-v", "--verbose", dest="verbose",
                         action="store_true",
                         default=False,
                         help="enable verbose informational reporting")

    # Parse command line arguments
    args = parser.parse_args()

    if len(args.filenames) == 0 and args.input_files_list is None:
        parser.error("Input list file must be specified")

    # Set up logging
    if args.verbose:
        # Include HDF5 errors in output
        h5py._errors.unsilence_errors()

        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    main_logger = log_util.init_logging(log_level=log_level, format="%(message)s")

    # Optionally duplicate log output into a file with timestamped records
    if args.log_file:
        log_file = log_util.open_log_file(args.log_file, logger=main_logger)
        log_file.setFormatter( logging.Formatter("%(asctime)s: %(name)8s - %(levelname)7s - %(message)s") )
    else:
        log_file = None

    source_files = load_source_files(args.filenames, args.input_files_list)

    if args.sounding_id_file is not None:
        # Context manager closes the id file promptly; the original
        # open(...).readlines() left the handle to the garbage collector.
        with open(args.sounding_id_file) as sid_file:
            sounding_ids = [ sid.strip() for sid in sid_file ]
    else:
        main_logger.debug("No sounding ids file supplied, aggregating all ids from all files")
        sounding_ids = None

    copy_datasets_list = None
    if args.datasets_list_file is not None:
        with open(args.datasets_list_file) as ds_file:
            copy_datasets_list = [ ds.strip() for ds in ds_file ]

    if args.agg_names_filter:
        if copy_datasets_list is None:
            # NOTE(review): aliases the module-level list rather than copying
            # it; appears intentional since it is never mutated afterwards.
            copy_datasets_list = aggregator_dataset_dest_names
        else:
            copy_datasets_list += aggregator_dataset_dest_names

    process_files(source_files, args.output_filename, sounding_ids, splice_all=args.splice_all, desired_datasets_list=copy_datasets_list, rename_mapping=args.rename_mapping, multi_source_types=args.multi_source_types, workers=args.workers, temp_dir=args.temp_dir, main_logger=main_logger, log_file=log_file)
  -q, --quiet           
      Do not output details of program operation to console

  -l, --log_file=FILE
      Log file where populator operations are stored instead of printed 
      to stdout
'''

# Parse command-line options described by the docstring-style `usage` text.
args = docopt_simple(usage, version=version)

# Logger for file operations
logger = logging.getLogger(os.path.basename(__file__))

# Console verbosity: --quiet suppresses everything below ERROR
if args.quiet:
    log_util.init_logging(logging.ERROR)
else:
    log_util.init_logging(logging.INFO)

# Initialize logging
if args.log_file:
    # Also mirror log records into the named file, with timestamps
    log_obj = log_util.open_log_file(args.log_file)
    log_obj.setFormatter(
        logging.Formatter(
            "%(asctime)s: %(name)25s - %(levelname)7s - %(message)s"))
else:
    log_obj = None
# Keyword arguments forwarded to the populator; only set when supplied
populate_options = {}
if (args.binary):
    populate_options["l2_binary_filename"] = args.binary
if (args.l2_config):
        sounding_id = create_sounding_id(obs_id, l1b_fts)

        logger.debug('Obs ID = %s, Sounding ID = %s' % (obs_id, sounding_id))
        logger.debug('Spectrum A = %s' % spec_a)
        logger.debug('Spectrum B = %s' % spec_b)

        store_l1b_data(sounding_id, idx, l1b_fts, output_file, l1b_file)

if __name__ == "__main__":
    # Command-line entry point: convolve FTS spectra and write them out
    # in OCO format via convert_fts_data().
    arg_parser = ArgumentParser(description='Converts FTS data into ')

    arg_parser.add_argument('fts_dir', metavar='FILENAME')

    arg_parser.add_argument('--output_file', '-o', metavar='FILENAME', required=True,
        help='File to write convolved FTS data in OCO format')

    arg_parser.add_argument('--l1b_file', '-l', metavar='FILENAME', default=REFERENCE_L1B_FILE,
        help='L1B file to get ILS information from')

    arg_parser.add_argument('--verbose', '-v', default=False, action='store_true',
        help='Output verbose debugging information')

    opts = arg_parser.parse_args()

    # Verbose flag raises console logging from INFO to DEBUG
    log_util.init_logging(logging.DEBUG if opts.verbose else logging.INFO)

    convert_fts_data(opts.fts_dir, opts.output_file, opts.l1b_file)
Exemple #5
0
def standalone_main():
    """Parse command-line options and splice input HDF5 products for a set
    of sounding ids into a single output file via process_files().

    Exits through parser.error() when neither positional filenames nor an
    input-files list are supplied.
    """
    parser = ArgumentParser(
        description=
        "Splices together input HDF5 products for a given set of sounding ids")

    parser.add_argument(
        "filenames",
        metavar="FILE",
        nargs='*',
        help=
        "files to splice, may be left blank if using the -i --input-files-list option"
    )

    parser.add_argument("-i",
                        "--input-files-list",
                        dest="input_files_list",
                        metavar="FILE",
                        help="text file with input filenames to splice")

    parser.add_argument(
        "-s",
        "--sounding-id-file",
        dest="sounding_id_file",
        metavar="FILE",
        help="file containing list of soundings for destination file")

    parser.add_argument("-o",
                        "--output-file",
                        dest="output_filename",
                        metavar="FILE",
                        default=DEFAULT_OUTPUT_FILENAME,
                        help="output filename of splice data, default: %s" %
                        DEFAULT_OUTPUT_FILENAME)

    parser.add_argument(
        "-d",
        "--datasets-list-file",
        dest="datasets_list_file",
        metavar="FILE",
        help=
        "file containing list of only datasets to consider for copying. If rename_mapping is enabled then the names are matched on their destination dataset name"
    )

    parser.add_argument(
        "-r",
        "--rename-mapping",
        dest="rename_mapping",
        action="store_true",
        default=False,
        help=
        "rename datasets into output file according to internal mapping table as they would appear in the L2Agg PGE"
    )

    parser.add_argument(
        "-w",
        "--workers",
        dest="workers",
        type=int,
        default=1,
        help="Number of workers to use when parallelizing splicing")

    parser.add_argument(
        "--temp",
        dest="temp_dir",
        default=os.curdir,
        help=
        "Directory where temporary files are saved when number of parallel workers is greater than 1"
    )

    parser.add_argument("-l",
                        "--log_file",
                        dest="log_file",
                        help="Save verbose information to log file")

    parser.add_argument(
        "--agg-names-filter",
        dest="agg_names_filter",
        action="store_true",
        default=False,
        help=
        "include only dataset names that would appear in the L2Agg PGE. Its only makes sense to use this option with --rename_mapping"
    )

    parser.add_argument(
        "--splice-all",
        dest="splice_all",
        action="store_true",
        default=False,
        help=
        "splice all datasets, including those which do not have a sounding dimension. Note that datasets without an explicit handler and no sounding dimension are simply copied from the first file."
    )

    # These two options share one tri-state dest: None means auto-detect,
    # True/False skip the source-type determination stage.
    parser.add_argument(
        "--multiple-file-types",
        dest="multi_source_types",
        action="store_true",
        default=None,
        help=
        "indicates that multiple file type sources are being spliced. Speeds up multiple source type determination stage by being specified."
    )

    parser.add_argument(
        "--single-file-type",
        dest="multi_source_types",
        action="store_false",
        default=None,
        help=
        "indicates that a single type of file is being spliced. Speeds up multiple source type determination stage by being specified."
    )

    parser.add_argument("-v",
                        "--verbose",
                        dest="verbose",
                        action="store_true",
                        default=False,
                        help="enable verbose informational reporting")

    # Parse command line arguments
    args = parser.parse_args()

    if len(args.filenames) == 0 and args.input_files_list is None:
        parser.error("Input list file must be specified")

    # Set up logging
    if args.verbose:
        # Include HDF5 errors in output
        h5py._errors.unsilence_errors()

        log_level = logging.DEBUG
    else:
        log_level = logging.INFO
    main_logger = log_util.init_logging(log_level=log_level,
                                        format="%(message)s")

    # Optionally duplicate log output into a file with timestamped records
    if args.log_file:
        log_file = log_util.open_log_file(args.log_file, logger=main_logger)
        log_file.setFormatter(
            logging.Formatter(
                "%(asctime)s: %(name)8s - %(levelname)7s - %(message)s"))
    else:
        log_file = None

    source_files = load_source_files(args.filenames, args.input_files_list)

    if args.sounding_id_file is not None:
        # Context manager closes the id file promptly; the original
        # open(...).readlines() left the handle to the garbage collector.
        with open(args.sounding_id_file) as sid_file:
            sounding_ids = [sid.strip() for sid in sid_file]
    else:
        main_logger.debug(
            "No sounding ids file supplied, aggregating all ids from all files"
        )
        sounding_ids = None

    copy_datasets_list = None
    if args.datasets_list_file is not None:
        with open(args.datasets_list_file) as ds_file:
            copy_datasets_list = [ds.strip() for ds in ds_file]

    if args.agg_names_filter:
        if copy_datasets_list is None:
            # NOTE(review): aliases the module-level list rather than copying
            # it; appears intentional since it is never mutated afterwards.
            copy_datasets_list = aggregator_dataset_dest_names
        else:
            copy_datasets_list += aggregator_dataset_dest_names

    process_files(source_files,
                  args.output_filename,
                  sounding_ids,
                  splice_all=args.splice_all,
                  desired_datasets_list=copy_datasets_list,
                  rename_mapping=args.rename_mapping,
                  multi_source_types=args.multi_source_types,
                  workers=args.workers,
                  temp_dir=args.temp_dir,
                  main_logger=main_logger,
                  log_file=log_file)
Exemple #6
0
        help='static input file to get window information from')

    parser.add_argument(
        '--sounding_pos',
        '-s',
        metavar='INT',
        default=DEFAULT_SOUNDING,
        type=int,
        choices=range(8),
        help='specify which footprint ils to use (zero-based index)')

    parser.add_argument('--verbose',
                        '-v',
                        default=False,
                        action='store_true',
                        help='Output verbose debugging information')

    args = parser.parse_args()

    if args.verbose:
        log_util.init_logging(logging.DEBUG)
    else:
        log_util.init_logging(logging.INFO)

    convert_fts_data(args.fts_dir,
                     args.output_file,
                     args.l1b_file,
                     args.config_file,
                     args.window_file,
                     sounding_pos=args.sounding_pos)
Exemple #7
0
def standalone_main():
    """Plot per-band spectral fits for soundings in an L2 file to a multi-page PDF.

    Sounding ids come from -i/--sounding_id (repeatable), a file given via
    -l/--sounding_id_list, or default to every id in the analysis environment.

    Raises:
        IOError: if the given L2 filename does not exist.
    """
    parser = ArgumentParser()

    parser.add_argument("l2_filename",
                        help="L2 file from which to plot soundings")
    parser.add_argument("-i",
                        "--sounding_id",
                        metavar="SOUNDING_ID",
                        action="append",
                        help="Plot the single specified sounding id")
    parser.add_argument("-l",
                        "--sounding_id_list",
                        metavar="FILE",
                        help="A file with a list of sounding ids to plot")
    parser.add_argument(
        "-o",
        "--output_file",
        metavar="FILE",
        help="Name of output file to be used instead of default")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Output additional verbose information")

    # Parse command line arguments
    args = parser.parse_args()

    init_logging(logging.DEBUG if args.verbose else logging.INFO)

    if not os.path.exists(args.l2_filename):
        raise IOError("L2 filename specified does not exist: %s" %
                      args.l2_filename)

    analysis = l2_analysis.AnalysisEnvironment([args.l2_filename])
    analysis.add_routines(
        l2_analysis.FrankenburgPlotRoutines(analysis_env=analysis))

    plot_sounding_ids = []
    if args.sounding_id or args.sounding_id_list:
        # Coerce ids supplied as strings to the same scalar type used by
        # comparison_ids so later comparisons/lookups match.
        id_type = analysis.comparison_ids[0].dtype.type
        if args.sounding_id:
            plot_sounding_ids += [
                id_type(snd_id) for snd_id in args.sounding_id
            ]
        if args.sounding_id_list:
            with open(args.sounding_id_list) as id_file:
                plot_sounding_ids += [
                    id_type(line.strip()) for line in id_file
                ]
    else:
        plot_sounding_ids = analysis.comparison_ids

    # Default output name encodes the (range of) plotted sounding ids
    if args.output_file:
        output_file = args.output_file
    elif len(plot_sounding_ids) > 1:
        output_file = "spectral_fit_%s-%s.pdf" % (plot_sounding_ids[0],
                                                  plot_sounding_ids[-1])
    else:
        output_file = "spectral_fit_%s.pdf" % (plot_sounding_ids[0])

    logging.info("Writing plots to: %s" % output_file)

    num_soundings = len(plot_sounding_ids)

    # Use PdfPages as a context manager so the PDF file is finalized even
    # when a plotting routine raises part way through; the original code
    # leaked the open file handle on error.
    with PdfPages(output_file) as pdf:
        fig_sets = analysis.call_analysis_routine("plot_spectral_fits",
                                                  ids=(plot_sounding_ids, ),
                                                  all_soundings=True)
        for idx, (snd_id, figs) in enumerate(zip(plot_sounding_ids, fig_sets)):
            logging.info("[%d / %d] %s" % (idx + 1, num_soundings, snd_id))
            for band_fig in figs:
                pdf.savefig(band_fig)
  -q, --quiet           
      Do not output details of program operation to console

  -l, --log_file=FILE
      Log file where populator operations are stored instead of printed 
      to stdout
"""

# Parse command-line options described by the docstring-style `usage` text.
args = docopt_simple(usage, version=version)

# Logger for file operations
logger = logging.getLogger(os.path.basename(__file__))

# Console verbosity: --quiet suppresses everything below ERROR
if args.quiet:
    log_util.init_logging(logging.ERROR)
else:
    log_util.init_logging(logging.INFO)

# Initialize logging
if args.log_file:
    # Also mirror log records into the named file, with timestamps
    log_obj = log_util.open_log_file(args.log_file)
    log_obj.setFormatter(logging.Formatter("%(asctime)s: %(name)25s - %(levelname)7s - %(message)s"))
else:
    log_obj = None
# Keyword arguments forwarded to the populator; only set when supplied
populate_options = {}
if args.binary:
    populate_options["l2_binary_filename"] = args.binary
if args.l2_config:
    populate_options["l2_config_filename"] = args.l2_config
populate_options["aggregate"] = args.aggregate