Code Example #1
File: hapcatalog.py  Project: rendinam/drizzlepac
import argparse
import datetime
import os
import sys

from drizzlepac.haputils import config_utils, poller_utils
from stsci.tools import logutil

# Module-level setup assumed by this excerpt (not shown in the original); the
# exact logger configuration in the source file may differ.
log = logutil.create_logger(__name__, level=logutil.logging.INFO, stream=sys.stdout)


def main():
    """Super simple testing interface for the catalog_utils code."""
    parser = argparse.ArgumentParser(description='test interface for sourcelist_generation')
    parser.add_argument('input_file', help="input filename (ends with '.out')")
    parser.add_argument('-d', '--debug', required=False, choices=['True', 'False'], default='False', help='debug mode on? (generate region files?)')
    parser.add_argument('-m', '--phot_mode', required=False, choices=['aperture', 'segment', 'both'], default='both', help="which photometry mode should be run? 'aperture' for aperture only; 'segment' for segment only, and 'both' for both aperture and segment photometry.")
    args = parser.parse_args()
    # argparse returns the --debug choice as a string; convert it to a real boolean
    args.debug = (args.debug == "True")

    log.info("python {} {} -d {} -m {}".format(os.path.realpath(__file__), args.input_file, args.debug, args.phot_mode))

    obs_info_dict, total_list = poller_utils.interpret_obset_input(args.input_file)
    out_pars_file = 'pars.json'
    for total_item in total_list:
        total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file, use_defaults=True)
        for filter_item in total_item.fdp_list:
            filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file, use_defaults=True)
        for expo_item in total_item.edp_list:
            expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file, use_defaults=True)

    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))

    product_list = run_catalog_utils(total_list, args.debug, args.phot_mode)

    log.info('Total processing time: {} sec\a'.format((datetime.datetime.now() - starting_dt).total_seconds()))

    for item in product_list:
        print(item)
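A minimal usage sketch for the test interface above; the poller filename is an illustrative assumption, not a value from the source:

# Hypothetical invocation (argparse reads the patched sys.argv):
import sys
sys.argv = ["hapcatalog.py", "j92c01_input.out", "-m", "aperture"]
main()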
Code Example #2
def run_mvm_processing(input_filename,
                       skip_gaia_alignment=True,
                       diagnostic_mode=False,
                       use_defaults_configs=True,
                       input_custom_pars_file=None,
                       output_custom_pars_file=None,
                       phot_mode="both",
                       custom_limits=None,
                       output_file_prefix=None,
                       log_level=logutil.logging.INFO):
    """Run the HST Advanced Products (HAP) generation code.  This routine is the sequencer or
    controller which invokes the high-level functionality to process the multi-visit data.

    Parameters
    ----------
    input_filename: string
        The 'poller file' where each line contains information regarding an exposure considered
        part of the multi-visit.

    skip_gaia_alignment : bool, optional
        Skip alignment of all input images to known Gaia/HSC sources in the input image footprint? If set to
        'True', the existing input image alignment solution will be used instead. The default is True.

    diagnostic_mode : bool, optional
        Allows printing of additional diagnostic information to the log. It can also turn on
        creation and use of pickled information.

    use_defaults_configs: bool, optional
        If True, use the configuration parameters in the 'default' portion of the configuration
        JSON files.  If False, use the configuration parameters in the "parameters" portion of
        the file.  The default is True.

    input_custom_pars_file: string, optional
        Represents a fully specified input filename of a configuration JSON file which has been
        customized for specialized processing.  This file should contain ALL the input parameters
        necessary for processing.  If there is a filename present for this parameter, the
        'use_defaults_configs' parameter is ignored. The default is None.

    output_custom_pars_file: string, optional
        Fully specified output filename which contains all the configuration parameters
        available during the processing session.  The default is None.

    phot_mode : str, optional
        Which algorithm should be used to generate the sourcelists? 'aperture' for aperture (point)
        photometry; 'segment' for isophotal photometry; 'both' for both 'segment' and 'aperture'. Default
        value is 'both'.

    custom_limits : list, optional
        4-element list containing the mosaic bounding rectangle X min and max and Y min and max values for
        custom mosaics. The default is None.

    output_file_prefix : str, optional
        Text string that will be used as the filename prefix for all files created by hapmultisequencer.py
        during the MVM custom mosaic generation process. If not explicitly specified, all output files will
        start with the following formatted text string:
        "hst-skycell-p<pppp>-ra<##>d<####>-dec<n|s><##>d<####>", where p<pppp> is the projection cell ID,
        ra<##>d<####> are the whole-number and decimal portions of the right ascension, respectively, and
        dec<n|s><##>d<####> are the whole-number and decimal portions of the declination, respectively. Note
        that the "<n|s>" denotes if the declination is north (positive) or south (negative). Example: For
        skycell = 1974, ra = 201.9512, and dec = +26.0012, the filename prefix would be
        "hst-skycell-p1974-ra201d9512-decn26d0012".

    log_level : int, optional
        The desired level of verboseness in the log statements displayed on the screen and written to the
        .log file. Default value is 20, or 'info'.


    Returns
    -------
    return_value: integer
        A return exit code used by the calling Condor/OWL workflow code: 0 (zero) for success, 1 for error
    """
    # This routine needs to return an exit code, return_value, for use by the calling
    # Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
    return_value = 0
    log.setLevel(log_level)
    # Define trailer file (log file) that will contain the log entries for all processing
    logname = proc_utils.build_logname(input_filename, process_type='mvm')

    # Initialize total trailer filename as temp logname
    logging.basicConfig(filename=logname,
                        format=SPLUNK_MSG_FORMAT,
                        datefmt=MSG_DATEFMT,
                        force=True)

    # Start by reading in any environment variable related to catalog generation that has been set
    cat_switches = {
        sw: _get_envvar_switch(sw, default=envvar_cat_mvm[sw])
        for sw in envvar_cat_mvm
    }

    # start processing
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))
    total_obj_list = []
    product_list = []
    manifest_name = ""
    try:
        # Parse the MVM poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects
        # A poller file contains visit data for a single instrument.  The TotalProduct discriminant
        # is the detector.  A TotalProduct object is comprised of FilterProducts and ExposureProducts
        # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct
        # is the atomic exposure data.
        log.info(
            "Parse the poller and determine what exposures need to be combined into separate products.\n"
        )
        obs_info_dict, total_obj_list = poller_utils.interpret_mvm_input(
            input_filename,
            log_level,
            layer_method='all',
            include_small=cat_switches['MVM_INCLUDE_SMALL'],
            only_cte=cat_switches['MVM_ONLY_CTE'])
        # The product_list is a list of all the output products which will be put into the manifest file
        product_list = []

        # Generate the name for the manifest file which is for the entire multi-visit.  It is fine
        # to use only one of the SkyCellProducts to generate the manifest name as the name
        # is only dependent on the sky cell.
        # Example: skycell-p<PPPP>x<XX>y<YY>_manifest.txt (e.g., skycell-p0797x12y05_manifest.txt)
        manifest_defined = (hasattr(total_obj_list[0], "manifest_name")
                            and total_obj_list[0].manifest_name not in ["", None])
        manifest_name = total_obj_list[0].manifest_name if manifest_defined else DEFAULT_MANIFEST_NAME
        log.info(
            "\nGenerate the manifest name for this multi-visit: {}.".format(
                manifest_name))
        log.info(
            "The manifest will contain the names of all the output products.")

        # Update the SkyCellProduct objects with their associated configuration information.
        for filter_item in total_obj_list:
            _ = filter_item.generate_metawcs(custom_limits=custom_limits)
            # Compute mask keywords early in processing for use in determining what
            # parameters need to be used for processing.
            filter_item.generate_footprint_mask(save_mask=False)
            if not filter_item.valid_product:
                log.warning(
                    f"Ignoring {filter_item.info} as no input exposures overlap that layer."
                )
                continue
            # Optionally rename output products
            if output_file_prefix or custom_limits:
                filter_item = rename_output_products(
                    filter_item, output_file_prefix=output_file_prefix)

            log.info(
                "Preparing configuration parameter values for filter product {}"
                .format(filter_item.drizzle_filename))
            filter_item.configobj_pars = config_utils.HapConfig(
                filter_item,
                hap_pipeline_name='mvm',
                log_level=log_level,
                use_defaults=use_defaults_configs,
                input_custom_pars_file=input_custom_pars_file,
                output_custom_pars_file=output_custom_pars_file)

            for edp in filter_item.edp_list:
                edp.configobj_pars = config_utils.HapConfig(
                    edp,
                    hap_pipeline_name='mvm',
                    log_level=log_level,
                    use_defaults=use_defaults_configs,
                    input_custom_pars_file=input_custom_pars_file,
                    output_custom_pars_file=output_custom_pars_file)
        log.info(
            "The configuration parameters have been read and applied to the drizzle objects."
        )

        # TODO: This is the place where updated WCS info is migrated from drizzlepac params to filter objects
        if skip_gaia_alignment:
            log.info(
                "Gaia alignment step skipped. Existing input image alignment solution will be used instead."
            )
        else:
            reference_catalog = run_align_to_gaia(
                total_obj_list,
                custom_limits=custom_limits,
                log_level=log_level,
                diagnostic_mode=diagnostic_mode)
            if reference_catalog:
                product_list += [reference_catalog]

        # Run AstroDrizzle to produce drizzle-combined products
        log.info("\n{}: Create drizzled imagery products.".format(
            str(datetime.datetime.now())))
        driz_list = create_drizzle_products(total_obj_list,
                                            custom_limits=custom_limits)
        product_list += driz_list

        # Store total_obj_list to a pickle file to speed up development
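        # NOTE: the block below is disabled by its 'if False' guard in the source; flip the
        # guard to True (or key it off log_level, as run_hap_processing does) to write the pickle.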
        if False:
            pickle_filename = "total_obj_list_full.pickle"
            if os.path.exists(pickle_filename):
                os.remove(pickle_filename)
            with open(pickle_filename, "wb") as pickle_out:
                pickle.dump(total_obj_list, pickle_out)
            log.info(
                "Successfully wrote total_obj_list to pickle file {}!".format(
                    pickle_filename))

        # Quality assurance portion of the processing - done only if the environment
        # variable, MVM_QUALITY_TESTING, is set to 'on', 'yes', or 'true'.
        qa_switch = _get_envvar_switch(envvar_qa_mvm)
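        # e.g., set in the shell before launching processing:  export MVM_QUALITY_TESTING=on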

        # If requested, generate quality assessment statistics for the MVM products
        if qa_switch:
            log.info(
                "MVM Quality Assurance statistics have been requested for this dataset, {}."
                .format(input_filename))

            # Get WCSNAMEs of all input exposures for each MVM product
            mvm_qa.run_quality_analysis(total_obj_list, log_level=log_level)

        # 9: Compare results to HLA classic counterparts (if possible)
        # if diagnostic_mode:
        # run_sourcelist_comparison(total_obj_list, diagnostic_mode=diagnostic_mode, log_level=log_level)

        # If we are running in diagnostic_mode, we want to see all inputs
        del_files = []
        # for each total product...
        for tot_obj in total_obj_list:
            # get the list of unmodified files and delete those files from disk
            del_files.extend(tot_obj.verify_members(clean=not diagnostic_mode))

        # Now remove those files from the manifest file
        for f in del_files:
            # Just in case something unexpected happened, check that
            # unmodified file filename is still in product_list
            if f in product_list:
                # Remove filename from manifest file input
                product_list.remove(f)

        # Ensure the manifest file does not contain duplicate entries
        # Note: numpy.unique returns the unique entries in sorted order; the original
        # ordering of the product list is not preserved
        product_list = np.unique(product_list).tolist()
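        # (If input ordering mattered, list(dict.fromkeys(product_list)) would deduplicate
        # while preserving order; the source relies on np.unique instead.)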
        # Write out manifest file listing all products generated during processing
        log.info("Creating manifest file {}.".format(manifest_name))
        log.info(
            "  The manifest contains the names of products generated during processing."
        )
        with open(manifest_name, mode='w') as catfile:
            for name in product_list:
                catfile.write("{}\n".format(name))
        # 10: Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
        return_value = 0
    except Exception:
        return_value = 1
        print("\a\a\a")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        logging.exception("message")
    # This except clause handles sys.exit(), which raises SystemExit; SystemExit inherits
    # from BaseException, not Exception, so it is not caught by the handler above.
    except BaseException:
        exc_type, exc_value, exc_tb = sys.exc_info()
        formatted_lines = traceback.format_exc().splitlines()
        log.info(formatted_lines[-1])
        return_value = exc_value

        # If an exception were raised in the poller_utils, it is possible the manifest_name
        # has not been defined.  Create a manifest_name now to create the expected file which
        # will be empty.
        if manifest_name == "":
            try:
                # If the input filename is a string, it could be a poller file or it could
                # be a file containing filenames.  If it is a poller file, the necessary skycell
                # information is in column 8 (1-based), and only the first entry is needed.
                if isinstance(input_filename, str):
                    output_skycell = ascii.read(input_filename,
                                                format='no_header')["col8"][0]
                    manifest_name = output_skycell.lower() + "_manifest.txt"

                # Maybe the input filename was actually a Python list
                elif isinstance(input_filename, list):
                    skycell_dict = cell_utils.get_sky_cells(
                        [input_filename[0]])
                    output_skycell = next(iter(skycell_dict.keys()))
                    manifest_name = output_skycell.lower() + "_manifest.txt"

                # Problem case - try to use the name of the input file
                else:
                    if re.search(MATCH_STRING, input_filename.lower()):
                        manifest_name = input_filename.lower().replace(
                            "input.out", "manifest.txt")
                    else:
                        manifest_name = DEFAULT_MANIFEST_NAME
                # Bigger problem case - try to use the name of the input file
            except Exception:
                if re.search(MATCH_STRING, input_filename.lower()):
                    manifest_name = input_filename.lower().replace(
                        "input.out", "manifest.txt")
                else:
                    manifest_name = DEFAULT_MANIFEST_NAME

        log.info("Writing empty manifest file: {}".format(manifest_name))

        with open(manifest_name, mode="a"):
            pass

    finally:
        end_dt = datetime.datetime.now()
        log.info('Processing completed at {}'.format(str(end_dt)))
        log.info('Total processing time: {} sec'.format(
            (end_dt - starting_dt).total_seconds()))
        log.info(
            "Return code for use by calling Condor/OWL workflow code: 0 (zero) for success, non-zero for error or exit. "
        )
        log.info("Return condition {}".format(return_value))
        logging.shutdown()
        # Append total trailer file (from astrodrizzle) to all total log files
        if total_obj_list:
            for tot_obj in total_obj_list:
                proc_utils.append_trl_file(tot_obj.trl_filename,
                                           logname,
                                           clean=False)
        # Now remove single temp log file
        if os.path.exists(logname):
            os.remove(logname)
        else:
            print(
                "Master log file not found.  Please check logs to locate processing messages."
            )
        return return_value
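A hedged usage sketch for the sequencer above; the poller filename is an assumption patterned on the skycell manifest example in the comments, not a value from the source:

# Hypothetical driver (assumed poller filename):
if __name__ == "__main__":
    rv = run_mvm_processing("skycell-p0797x12y05_input.out",
                            skip_gaia_alignment=True,
                            phot_mode="both")
    sys.exit(rv)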
Code Example #3
File: hapsequencer.py  Project: rendinam/drizzlepac
def run_hap_processing(input_filename, diagnostic_mode=False, use_defaults_configs=True,
                       input_custom_pars_file=None, output_custom_pars_file=None, phot_mode="both",
                       log_level=logutil.logging.INFO):
    """
    Run the HST Advanced Products (HAP) generation code.  This routine is the sequencer or
    controller which invokes the high-level functionality to process the single visit data.

    Parameters
    ----------
    input_filename: string
        The 'poller file' where each line contains information regarding an exposure taken
        during a single visit.

    diagnostic_mode : bool, optional
        Allows printing of additional diagnostic information to the log. It can also turn on
        creation and use of pickled information.

    use_defaults_configs: bool, optional
        If True, use the configuration parameters in the 'default' portion of the configuration
        JSON files.  If False, use the configuration parameters in the "parameters" portion of
        the file.  The default is True.

    input_custom_pars_file: string, optional
        Represents a fully specified input filename of a configuration JSON file which has been
        customized for specialized processing.  This file should contain ALL the input parameters
        necessary for processing.  If there is a filename present for this parameter, the
        'use_defaults_configs' parameter is ignored. The default is None.

    output_custom_pars_file: string, optional
        Fully specified output filename which contains all the configuration parameters
        available during the processing session.  The default is None.

    phot_mode : str, optional
        Which algorithm should be used to generate the sourcelists? 'aperture' for aperture photometry;
        'segment' for segment map photometry; 'both' for both 'segment' and 'aperture'. Default value is 'both'.

    log_level : int, optional
        The desired level of verboseness in the log statements displayed on the screen and written to the .log file.
        Default value is 20, or 'info'.


    Returns
    -------
    return_value: integer
        A return exit code used by the calling Condor/OWL workflow code: 0 (zero) for success, 1 for error
    """
    # This routine needs to return an exit code, return_value, for use by the calling
    # Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
    return_value = 0
    log.setLevel(log_level)
    # Define trailer file (log file) that will contain the log entries for all processing
    if isinstance(input_filename, str):  # input file is a poller file -- easy case
        logname = input_filename.replace('.out', '.log')
    else:
        logname = 'svm_process.log'
    # Initialize total trailer filename as temp logname
    logging.basicConfig(filename=logname, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)
    # start processing
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))
    total_obj_list = []
    product_list = []
    try:
        # Parse the poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects
        # A poller file contains visit data for a single instrument.  The TotalProduct discriminant
        # is the detector.  A TotalProduct object is comprised of FilterProducts and ExposureProducts
        # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct
        # is the atomic exposure data.
        log.info("Parse the poller and determine what exposures need to be combined into separate products.\n")
        obs_info_dict, total_obj_list = poller_utils.interpret_obset_input(input_filename, log_level)

        # Generate the name for the manifest file which is for the entire visit.  It is fine
        # to use only one of the Total Products to generate the manifest name as the name is not
        # dependent on the detector.
        # Example: instrument_programID_obsetID_manifest.txt (e.g., wfc3_b46_06_manifest.txt)
        manifest_name = total_obj_list[0].manifest_name
        log.info("\nGenerate the manifest name for this visit.")
        log.info("The manifest will contain the names of all the output products.")

        # The product_list is a list of all the output products which will be put into the manifest file
        product_list = []

        # Update all of the product objects with their associated configuration information.
        for total_item in total_obj_list:
            log.info("Preparing configuration parameter values for total product {}".format(total_item.drizzle_filename))
            total_item.configobj_pars = config_utils.HapConfig(total_item,
                                                               log_level=log_level,
                                                               use_defaults=use_defaults_configs,
                                                               input_custom_pars_file=input_custom_pars_file,
                                                               output_custom_pars_file=output_custom_pars_file)
            for filter_item in total_item.fdp_list:
                log.info("Preparing configuration parameter values for filter product {}".format(filter_item.drizzle_filename))
                filter_item.configobj_pars = config_utils.HapConfig(filter_item,
                                                                    log_level=log_level,
                                                                    use_defaults=use_defaults_configs,
                                                                    input_custom_pars_file=input_custom_pars_file,
                                                                    output_custom_pars_file=output_custom_pars_file)
            for expo_item in total_item.edp_list:
                log.info("Preparing configuration parameter values for exposure product {}".format(expo_item.drizzle_filename))
                expo_item.configobj_pars = config_utils.HapConfig(expo_item,
                                                                  log_level=log_level,
                                                                  use_defaults=use_defaults_configs,
                                                                  input_custom_pars_file=input_custom_pars_file,
                                                                  output_custom_pars_file=output_custom_pars_file)
                expo_item = poller_utils.add_primary_fits_header_as_attr(expo_item, log_level)

            log.info("The configuration parameters have been read and applied to the drizzle objects.")

            reference_catalog = run_align_to_gaia(total_item, log_level=log_level, diagnostic_mode=diagnostic_mode)
            if reference_catalog:
                product_list += reference_catalog

        # Run AstroDrizzle to produce drizzle-combined products
        log.info("\n{}: Create drizzled imagery products.".format(str(datetime.datetime.now())))
        driz_list = create_drizzle_products(total_obj_list)
        product_list += driz_list

        # Create source catalogs from newly defined products (HLA-204)
        log.info("{}: Create source catalog from newly defined product.\n".format(str(datetime.datetime.now())))
        if "total detection product 00" in obs_info_dict.keys():
            catalog_list = create_catalog_products(total_obj_list, log_level,
                                                   diagnostic_mode=diagnostic_mode,
                                                   phot_mode=phot_mode)
            product_list += catalog_list
        else:
            log.warning("No total detection product has been produced. The sourcelist generation step has been skipped")

        # Store total_obj_list to a pickle file to speed up development
        if log_level <= logutil.logging.DEBUG:
            pickle_filename = "total_obj_list_full.pickle"
            if os.path.exists(pickle_filename):
                os.remove(pickle_filename)
            with open(pickle_filename, "wb") as pickle_out:
                pickle.dump(total_obj_list, pickle_out)
            log.info("Successfully wrote total_obj_list to pickle file {}!".format(pickle_filename))

        # Quality assurance portion of the processing - done only if the environment
        # variable, SVM_QUALITY_TESTING, is set to 'on', 'yes', or 'true'.
        qa_switch = _get_envvar_switch(envvar_qa_svm)
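        # e.g., set in the shell before launching processing:  export SVM_QUALITY_TESTING=on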

        # If requested, generate quality assessment statistics for the SVM products
        if qa_switch:
            log.info("SVM Quality Assurance statistics have been requested for this dataset, {}.".format(input_filename))
            svm_qa.run_quality_analysis(total_obj_list, log_level=log_level)

        # Write out manifest file listing all products generated during processing
        log.info("Creating manifest file {}.".format(manifest_name))
        log.info("  The manifest contains the names of products generated during processing.")
        with open(manifest_name, mode='w') as catfile:
            for name in product_list:
                catfile.write("{}\n".format(name))
        # 10: Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
        return_value = 0
    except Exception:
        return_value = 1
        print("\a\a\a")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        logging.exception("message")

    finally:
        end_dt = datetime.datetime.now()
        log.info('Processing completed at {}'.format(str(end_dt)))
        log.info('Total processing time: {} sec'.format((end_dt - starting_dt).total_seconds()))
        log.info("Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success, 1 for error ")
        log.info("Return condition {}".format(return_value))
        logging.shutdown()
        # Append total trailer file (from astrodrizzle) to all total log files
        if total_obj_list:
            for tot_obj in total_obj_list:
                proc_utils.append_trl_file(tot_obj.trl_filename, logname, clean=False)
        # Now remove single temp log file
        if os.path.exists(logname):
            os.remove(logname)
        else:
            print("Master log file not found.  Please check logs to locate processing messages.")
        return return_value
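A hedged usage sketch for the single-visit sequencer above; the poller filename is a hypothetical patterned on the manifest example in the comments:

# Hypothetical driver (assumed poller filename):
if __name__ == "__main__":
    rv = run_hap_processing("wfc3_b46_06_input.out", phot_mode="both")
    sys.exit(rv)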
Code Example #4
def make_svm_input_file(input_filename,
                        output_custom_pars_file='custom_svm_params.json',
                        clobber=False,
                        log_level=logutil.logging.INFO):
    """
    Create a custom SVM processing pipeline parameter file, based on the observations present in the
    current working directory, using config_utils.HapConfig() and, optionally, update_ci_values() to
    adjust the CI upper and lower limits for filter products.

    Parameters
    ----------
    input_filename: str
        The 'poller file' where each line contains information regarding an exposure taken
        during a single visit.

    output_custom_pars_file: str, optional
        Fully specified output filename which contains all the configuration parameters
        available during the processing session. Default is 'custom_svm_params.json'.

    clobber : bool, optional
        If True, an existing file with the same name as *output_custom_pars_file* (the output custom
        SVM parameter file created by this script) will be overwritten. Default value is False.

    log_level : int, optional
        The desired level of verboseness in the log statements displayed on the screen and written to the
        .log file. Default value is 20, or 'info'.

    Returns
    -------
    Nothing.
    """
    log.setLevel(log_level)
    if not clobber:
        if os.path.exists(output_custom_pars_file):
            msg = "A file named '{}' already exists. Please choose a unique name for the custom SVM parameter file.".format(
                output_custom_pars_file)
            log.critical(msg)
            sys.exit()
    # Define trailer file (log file) that will contain the log entries for all processing
    if isinstance(input_filename, str):  # input file is a poller file -- easy case
        logname = input_filename.replace('.out', '_svm_param_gen.log')
    else:
        logname = 'svm_param_gen.log'

    # Initialize total trailer filename as temp logname
    logging.basicConfig(filename=logname,
                        format=SPLUNK_MSG_FORMAT,
                        datefmt=MSG_DATEFMT)
    # start processing
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))

    try:
        # Parse the poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects
        # A poller file contains visit data for a single instrument.  The TotalProduct discriminant
        # is the detector.  A TotalProduct object is comprised of FilterProducts and ExposureProducts
        # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct
        # is the atomic exposure data.
        log.info(
            "Parse the poller and determine what exposures need to be combined into separate products.\n"
        )
        obs_info_dict, total_obj_list = poller_utils.interpret_obset_input(
            input_filename, log_level)

        # Update all of the product objects with their associated configuration information.
        for total_item in total_obj_list:
            log.info(
                "Preparing configuration parameter values for total product {}"
                .format(total_item.drizzle_filename))
            total_item.configobj_pars = config_utils.HapConfig(
                total_item,
                log_level=log_level,
                output_custom_pars_file=output_custom_pars_file)
            for filter_item in total_item.fdp_list:
                log.info(
                    "Preparing configuration parameter values for filter product {}"
                    .format(filter_item.drizzle_filename))
                filter_item.configobj_pars = config_utils.HapConfig(
                    filter_item,
                    log_level=log_level,
                    output_custom_pars_file=output_custom_pars_file)
                update_ci_values(filter_item, output_custom_pars_file,
                                 log_level)

            for expo_item in total_item.edp_list:
                log.info(
                    "Preparing configuration parameter values for exposure product {}"
                    .format(expo_item.drizzle_filename))
                expo_item.configobj_pars = config_utils.HapConfig(
                    expo_item,
                    log_level=log_level,
                    output_custom_pars_file=output_custom_pars_file)
                # Housekeeping: remove those pesky renamed copies of the input flc.fits/flt.fits files
                # generated by drizzlepac.haputils.product()
                file_to_remove = None  # guard against drizzle filenames with neither suffix
                if expo_item.drizzle_filename.endswith("_drc.fits"):
                    file_to_remove = expo_item.drizzle_filename.replace("_drc.fits", "_flc.fits")
                elif expo_item.drizzle_filename.endswith("_drz.fits"):
                    file_to_remove = expo_item.drizzle_filename.replace("_drz.fits", "_flt.fits")
                if file_to_remove and os.path.exists(file_to_remove):
                    os.remove(file_to_remove)
    except Exception:
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        err_msg = "Something went wrong!"
        log.error(err_msg)
        raise Exception(err_msg)
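A hedged usage sketch for the parameter-file generator above; both filenames are illustrative assumptions:

# Hypothetical invocation (assumed poller filename):
make_svm_input_file("wfc3_b46_06_input.out",
                    output_custom_pars_file="custom_svm_params.json",
                    clobber=True)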
Code Example #5
import pdb
import sys

from drizzlepac.haputils import config_utils
from drizzlepac.haputils import poller_utils


input_filename = sys.argv[1]

obs_info_dict, total_list = poller_utils.interpret_obset_input(input_filename)
out_pars_file = None
for total_item in total_list:

    total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file,
                                                       use_defaults=True)
    for filter_item in total_item.fdp_list:
        filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file,
                                                            use_defaults=True)
    for expo_item in total_item.edp_list:
        expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file,
                                                          use_defaults=True)

# #
# print(" ")
# print(expo_list[0].configobj_pars.get_pars("alignment"))
#

# rv = hapsequencer.run_hap_processing(input_filename)
pdb.set_trace()