Example #1
# Imports assumed by this snippet; 'log' is presumed to be a module-level logger
# (drizzlepac modules typically build one with stsci.tools' logutil), and
# run_catalog_utils() is presumed to be defined elsewhere in the same module.
import argparse
import datetime
import os

from drizzlepac.haputils import config_utils, poller_utils


def main():
    """Super simple testing interface for the catalog_utils code."""
    parser = argparse.ArgumentParser(description='test interface for sourcelist_generation')
    parser.add_argument('input_file', help="input filename (ends with '.out')")
    parser.add_argument('-d', '--debug', required=False, choices=['True', 'False'], default='False', help='debug mode on? (generate region files?)')
    parser.add_argument('-m', '--phot_mode', required=False, choices=['aperture', 'segment', 'both'], default='both', help="which photometry mode should be run? 'aperture' for aperture only; 'segment' for segment only; and 'both' for both aperture and segment photometry.")
    args = parser.parse_args()
    if args.debug == "True":
        args.debug = True
    else:
        args.debug = False

    log.info("python {} {} -d {} -m {}".format(os.path.realpath(__file__), args.input_file, args.debug, args.phot_mode))

    obs_info_dict, total_list = poller_utils.interpret_obset_input(args.input_file)
    out_pars_file = 'pars.json'
    for total_item in total_list:
        total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file, use_defaults=True)
        for filter_item in total_item.fdp_list:
            filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file, use_defaults=True)
        for expo_item in total_item.edp_list:
            expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file, use_defaults=True)

    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))

    product_list = run_catalog_utils(total_list, args.debug, args.phot_mode)

    log.info('Total processing time: {} sec\a'.format((datetime.datetime.now() - starting_dt).total_seconds()))

    for item in product_list:
        print(item)
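
For a quick smoke test, the parser above can be driven programmatically by faking sys.argv; the module name in the comment and the poller filename are hypothetical:

# Hypothetical smoke test: 'j92c01.out' is a made-up poller filename that
# would need to exist in the working directory.
import sys

sys.argv = ["catalog_test.py", "j92c01.out", "-d", "True", "-m", "aperture"]
main()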
Example #2
def run_hap_processing(input_filename, diagnostic_mode=False, use_defaults_configs=True,
                       input_custom_pars_file=None, output_custom_pars_file=None, phot_mode="both",
                       log_level=logutil.logging.INFO):
    """
    Run the HST Advanced Products (HAP) generation code.  This routine is the sequencer or
    controller which invokes the high-level functionality to process the single visit data.

    Parameters
    ----------
    input_filename : str
        The 'poller file' where each line contains information regarding an exposure taken
        during a single visit.

    diagnostic_mode : bool, optional
        Allows printing of additional diagnostic information to the log.  Also, can turn on
        creation and use of pickled information.

    use_defaults_configs : bool, optional
        If True, use the configuration parameters in the 'default' portion of the configuration
        JSON files.  If False, use the configuration parameters in the "parameters" portion of
        the file.  The default is True.

    input_custom_pars_file : str, optional
        Represents a fully specified input filename of a configuration JSON file which has been
        customized for specialized processing.  This file should contain ALL the input parameters
        necessary for processing.  If there is a filename present for this parameter, the
        'use_defaults_configs' parameter is ignored. The default is None.

    output_custom_pars_file : str, optional
        Fully specified output filename which contains all the configuration parameters
        available during the processing session.  The default is None.

    phot_mode : str, optional
        Which algorithm should be used to generate the sourcelists? 'aperture' for aperture photometry;
        'segment' for segment map photometry; 'both' for both 'segment' and 'aperture'. Default value is 'both'.

    log_level : int, optional
        The desired level of verbosity in the log statements displayed on the screen and written to the .log file.
        Default value is 20, or 'info'.

    Returns
    -------
    return_value : int
        A return exit code used by the calling Condor/OWL workflow code: 0 (zero) for success, 1 for error.
    """
    # This routine needs to return an exit code, return_value, for use by the calling
    # Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
    return_value = 0
    log.setLevel(log_level)
    # Define trailer file (log file) that will contain the log entries for all processing
    if isinstance(input_filename, str):  # input file is a poller file -- easy case
        logname = input_filename.replace('.out', '.log')
    else:
        logname = 'svm_process.log'
    # Initialize total trailer filename as temp logname
    logging.basicConfig(filename=logname, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)
    # start processing
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))
    total_obj_list = []
    product_list = []
    try:
        # Parse the poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects
        # A poller file contains visit data for a single instrument.  The TotalProduct discriminant
        # is the detector.  A TotalProduct object is comprised of FilterProducts and ExposureProducts
        # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct
        # is the atomic exposure data.
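        # A sketch of that hierarchy (one TotalProduct per detector):
        #
        #   TotalProduct
        #     ├── FilterProduct   (one per filter in use; .fdp_list)
        #     └── ExposureProduct (one per atomic exposure; .edp_list)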
        log.info("Parse the poller and determine what exposures need to be combined into separate products.\n")
        obs_info_dict, total_obj_list = poller_utils.interpret_obset_input(input_filename, log_level)

        # Generate the name for the manifest file which is for the entire visit.  It is fine
        # to use only one of the Total Products to generate the manifest name as the name is not
        # dependent on the detector.
        # Example: instrument_programID_obsetID_manifest.txt (e.g., wfc3_b46_06_manifest.txt)
        manifest_name = total_obj_list[0].manifest_name
        log.info("\nGenerate the manifest name for this visit.")
        log.info("The manifest will contain the names of all the output products.")

        # The product_list is a list of all the output products which will be put into the manifest file
        product_list = []

        # Update all of the product objects with their associated configuration information.
        for total_item in total_obj_list:
            log.info("Preparing configuration parameter values for total product {}".format(total_item.drizzle_filename))
            total_item.configobj_pars = config_utils.HapConfig(total_item,
                                                               log_level=log_level,
                                                               use_defaults=use_defaults_configs,
                                                               input_custom_pars_file=input_custom_pars_file,
                                                               output_custom_pars_file=output_custom_pars_file)
            for filter_item in total_item.fdp_list:
                log.info("Preparing configuration parameter values for filter product {}".format(filter_item.drizzle_filename))
                filter_item.configobj_pars = config_utils.HapConfig(filter_item,
                                                                    log_level=log_level,
                                                                    use_defaults=use_defaults_configs,
                                                                    input_custom_pars_file=input_custom_pars_file,
                                                                    output_custom_pars_file=output_custom_pars_file)
            for expo_item in total_item.edp_list:
                log.info("Preparing configuration parameter values for exposure product {}".format(expo_item.drizzle_filename))
                expo_item.configobj_pars = config_utils.HapConfig(expo_item,
                                                                  log_level=log_level,
                                                                  use_defaults=use_defaults_configs,
                                                                  input_custom_pars_file=input_custom_pars_file,
                                                                  output_custom_pars_file=output_custom_pars_file)
                expo_item = poller_utils.add_primary_fits_header_as_attr(expo_item, log_level)

            log.info("The configuration parameters have been read and applied to the drizzle objects.")

            reference_catalog = run_align_to_gaia(total_item, log_level=log_level, diagnostic_mode=diagnostic_mode)
            if reference_catalog:
                product_list += reference_catalog

        # Run AstroDrizzle to produce drizzle-combined products
        log.info("\n{}: Create drizzled imagery products.".format(str(datetime.datetime.now())))
        driz_list = create_drizzle_products(total_obj_list)
        product_list += driz_list

        # Create source catalogs from newly defined products (HLA-204)
        log.info("{}: Create source catalog from newly defined product.\n".format(str(datetime.datetime.now())))
        if "total detection product 00" in obs_info_dict.keys():
            catalog_list = create_catalog_products(total_obj_list, log_level,
                                                   diagnostic_mode=diagnostic_mode,
                                                   phot_mode=phot_mode)
            product_list += catalog_list
        else:
            log.warning("No total detection product has been produced. The sourcelist generation step has been skipped")

        # Store total_obj_list to a pickle file to speed up development
        if log_level <= logutil.logging.DEBUG:
            pickle_filename = "total_obj_list_full.pickle"
            if os.path.exists(pickle_filename):
                os.remove(pickle_filename)
            with open(pickle_filename, "wb") as pickle_out:
                pickle.dump(total_obj_list, pickle_out)
            log.info("Successfully wrote total_obj_list to pickle file {}!".format(pickle_filename))

        # Quality assurance portion of the processing - done only if the environment
        # variable, SVM_QUALITY_TESTING, is set to 'on', 'yes', or 'true'.
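        # e.g. satisfied in the shell with:  export SVM_QUALITY_TESTING=on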
        qa_switch = _get_envvar_switch(envvar_qa_svm)

        # If requested, generate quality assessment statistics for the SVM products
        if qa_switch:
            log.info("SVM Quality Assurance statistics have been requested for this dataset, {}.".format(input_filename))
            svm_qa.run_quality_analysis(total_obj_list, log_level=log_level)

        # Write out manifest file listing all products generated during processing
        log.info("Creating manifest file {}.".format(manifest_name))
        log.info("  The manifest contains the names of products generated during processing.")
        with open(manifest_name, mode='w') as catfile:
            for name in product_list:
                catfile.write("{}\n".format(name))
        # Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
        return_value = 0
    except Exception:
        return_value = 1
        print("\a\a\a")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        logging.exception("message")

    finally:
        end_dt = datetime.datetime.now()
        log.info('Processing completed at {}'.format(str(end_dt)))
        log.info('Total processing time: {} sec'.format((end_dt - starting_dt).total_seconds()))
        log.info("Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success, 1 for error ")
        log.info("Return condition {}".format(return_value))
        logging.shutdown()
        # Append total trailer file (from astrodrizzle) to all total log files
        if total_obj_list:
            for tot_obj in total_obj_list:
                proc_utils.append_trl_file(tot_obj.trl_filename, logname, clean=False)
        # Now remove single temp log file
        if os.path.exists(logname):
            os.remove(logname)
        else:
            print("Master log file not found.  Please check logs to locate processing messages.")
        return return_value
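
For reference, a minimal driver sketch for this sequencer; the drizzlepac.hapsequencer module path follows the commented-out call at the bottom of this page, and the poller filename is hypothetical:

# Minimal driver sketch; 'wfc3_b46_06.out' is a hypothetical poller filename.
from drizzlepac import hapsequencer

rv = hapsequencer.run_hap_processing("wfc3_b46_06.out",
                                     diagnostic_mode=True,
                                     phot_mode="aperture")
if rv != 0:
    raise SystemExit("SVM processing failed; check the trailer (.log) files for details.")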
Example #3
def make_svm_input_file(input_filename,
                        output_custom_pars_file='custom_svm_params.json',
                        clobber=False,
                        log_level=logutil.logging.INFO):
    """
    Create a custom SVM processing pipeline parameter file based on the observations present in the
    current working directory, using config_utils.HapConfig() and, optionally, update_ci_values() to
    adjust the CI upper and lower limits for filter products.

    Parameters
    ----------
    input_filename : str
        The 'poller file' where each line contains information regarding an exposure taken
        during a single visit.

    output_custom_pars_file : str, optional
        Fully specified output filename which contains all the configuration parameters
        available during the processing session. Default is 'custom_svm_params.json'.

    clobber : bool, optional
        If True, an existing file with the same name as *output_custom_pars_file* (the output custom
        SVM parameter file created by this script) will be overwritten. Default value is False.

    log_level : int, optional
        The desired level of verbosity in the log statements displayed on the screen and written to the
        .log file. Default value is 20, or 'info'.

    Returns
    -------
    Nothing.
    """
    log.setLevel(log_level)
    if not clobber and os.path.exists(output_custom_pars_file):
        msg = "A file named '{}' already exists. Please choose a unique name for the custom SVM parameter file.".format(output_custom_pars_file)
        log.critical(msg)
        sys.exit(1)
    # Define trailer file (log file) that will contain the log entries for all processing
    if isinstance(input_filename, str):  # input file is a poller file -- easy case
        logname = input_filename.replace('.out', '_svm_param_gen.log')
    else:
        logname = 'svm_param_gen.log'

    # Initialize total trailer filename as temp logname
    logging.basicConfig(filename=logname,
                        format=SPLUNK_MSG_FORMAT,
                        datefmt=MSG_DATEFMT)
    # start processing
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))

    try:
        # Parse the poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects
        # A poller file contains visit data for a single instrument.  The TotalProduct discriminant
        # is the detector.  A TotalProduct object is comprised of FilterProducts and ExposureProducts
        # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct
        # is the atomic exposure data.
        log.info(
            "Parse the poller and determine what exposures need to be combined into separate products.\n"
        )
        obs_info_dict, total_obj_list = poller_utils.interpret_obset_input(
            input_filename, log_level)

        # Update all of the product objects with their associated configuration information.
        for total_item in total_obj_list:
            log.info(
                "Preparing configuration parameter values for total product {}"
                .format(total_item.drizzle_filename))
            total_item.configobj_pars = config_utils.HapConfig(
                total_item,
                log_level=log_level,
                output_custom_pars_file=output_custom_pars_file)
            for filter_item in total_item.fdp_list:
                log.info(
                    "Preparing configuration parameter values for filter product {}"
                    .format(filter_item.drizzle_filename))
                filter_item.configobj_pars = config_utils.HapConfig(
                    filter_item,
                    log_level=log_level,
                    output_custom_pars_file=output_custom_pars_file)
                update_ci_values(filter_item, output_custom_pars_file,
                                 log_level)

            for expo_item in total_item.edp_list:
                log.info(
                    "Preparing configuration parameter values for exposure product {}"
                    .format(expo_item.drizzle_filename))
                expo_item.configobj_pars = config_utils.HapConfig(
                    expo_item,
                    log_level=log_level,
                    output_custom_pars_file=output_custom_pars_file)
                # Housekeeping: remove those pesky renamed copies of the input flc.fits/flt.fits files
                # generated by drizzlepac.haputils.product()
                file_to_remove = None
                if expo_item.drizzle_filename.endswith("_drc.fits"):
                    file_to_remove = expo_item.drizzle_filename.replace("_drc.fits", "_flc.fits")
                elif expo_item.drizzle_filename.endswith("_drz.fits"):
                    file_to_remove = expo_item.drizzle_filename.replace("_drz.fits", "_flt.fits")
                if file_to_remove and os.path.exists(file_to_remove):
                    os.remove(file_to_remove)
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        err_msg = "Something went wrong!"
        log.error(err_msg)
        raise Exception(err_msg)
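
A minimal invocation sketch for this helper (the poller filename is hypothetical):

# Write a fresh custom parameter file for the visit described by the hypothetical
# poller file 'wfc3_b46_06.out', overwriting any previous copy of the output file.
make_svm_input_file("wfc3_b46_06.out",
                    output_custom_pars_file="custom_svm_params.json",
                    clobber=True)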
Example #4
#!/usr/bin/env python
import sys


from drizzlepac.haputils import config_utils
from drizzlepac.haputils import poller_utils



input_filename = sys.argv[1]

obs_info_dict, total_list = poller_utils.interpret_obset_input(input_filename)
out_pars_file = None
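# With out_pars_file=None, HapConfig presumably skips writing a custom
# parameter file and only loads the default configuration values.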
for total_item in total_list:

    total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file,
                                                       use_defaults=True)
    for filter_item in total_item.fdp_list:
        filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file,
                                                            use_defaults=True)
    for expo_item in total_item.edp_list:
        expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file,
                                                          use_defaults=True)

# Uncomment to inspect, e.g., the alignment parameters of the last exposure product:
# print(expo_item.configobj_pars.get_pars("alignment"))

# ...or to run the full SVM pipeline on the same poller file:
# rv = hapsequencer.run_hap_processing(input_filename)