def main(): """Super simple testing interface for the catalog_utils code.""" parser = argparse.ArgumentParser(description='test interface for sourcelist_generation') parser.add_argument('input_file', help="input filename (ends with '.out'") parser.add_argument('-d', '--debug', required=False, choices=['True', 'False'], default='False', help='debug mode on? (generate region files?)') parser.add_argument('-m', '--phot_mode', required=False, choices=['aperture', 'segment', 'both'], default='both', help="which photometry mode should be run? 'aperture' for aperture only; 'seg' for segment only, and 'both' for both aperture and segment photometry.") args = parser.parse_args() if args.debug == "True": args.debug = True else: args.debug = False log.info("python {} {} -d {} -m {}".format(os.path.realpath(__file__), args.input_file, args.debug, args.phot_mode)) obs_info_dict, total_list = poller_utils.interpret_obset_input(args.input_file) out_pars_file = 'pars.json' for total_item in total_list: total_item.configobj_pars = config_utils.HapConfig(total_item, output_custom_pars_file=out_pars_file,use_defaults=True) for filter_item in total_item.fdp_list: filter_item.configobj_pars = config_utils.HapConfig(filter_item, output_custom_pars_file=out_pars_file,use_defaults=True) for expo_item in total_item.edp_list: expo_item.configobj_pars = config_utils.HapConfig(expo_item, output_custom_pars_file=out_pars_file,use_defaults=True) starting_dt = datetime.datetime.now() log.info("Run start time: {}".format(str(starting_dt))) product_list = run_catalog_utils(total_list, args.debug, args.phot_mode) log.info('Total processing time: {} sec\a'.format((datetime.datetime.now() - starting_dt).total_seconds())) for item in product_list: print(item)
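# Illustrative command-line usage of the test interface above, assuming this
# module is saved as, e.g., catalog_utils_test.py (an assumed name) and a poller
# file named 'j92c01.out' sits in the working directory:
#
#     python catalog_utils_test.py j92c01.out -d True -m aperture
#
# runs aperture photometry only, with debug region files enabled.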
def run_hap_processing(input_filename, diagnostic_mode=False, use_defaults_configs=True,
                       input_custom_pars_file=None, output_custom_pars_file=None, phot_mode="both",
                       log_level=logutil.logging.INFO):
    """
    Run the HST Advanced Products (HAP) generation code.  This routine is the sequencer or
    controller which invokes the high-level functionality to process the single visit data.

    Parameters
    ----------
    input_filename : string
        The 'poller file' where each line contains information regarding an exposure taken
        during a single visit.

    diagnostic_mode : bool, optional
        Allows printing of additional diagnostic information to the log.  Also, can turn on
        creation and use of pickled information.

    use_defaults_configs : bool, optional
        If True, use the configuration parameters in the 'default' portion of the configuration
        JSON files.  If False, use the configuration parameters in the "parameters" portion of
        the file.  The default is True.

    input_custom_pars_file : string, optional
        Represents a fully specified input filename of a configuration JSON file which has been
        customized for specialized processing.  This file should contain ALL the input parameters
        necessary for processing.  If there is a filename present for this parameter, the
        'use_defaults_configs' parameter is ignored.  The default is None.

    output_custom_pars_file : string, optional
        Fully specified output filename which contains all the configuration parameters
        available during the processing session.  The default is None.

    phot_mode : str, optional
        Which algorithm should be used to generate the sourcelists?  'aperture' for aperture
        photometry; 'segment' for segment map photometry; 'both' for both 'segment' and
        'aperture'.  Default value is 'both'.

    log_level : int, optional
        The desired level of verboseness in the log statements displayed on the screen and
        written to the .log file.  Default value is 20, or 'info'.

    Returns
    -------
    return_value : integer
        A return exit code used by the calling Condor/OWL workflow code:
        0 (zero) for success, 1 for error
    """
    # This routine needs to return an exit code, return_value, for use by the calling
    # Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
    return_value = 0
    log.setLevel(log_level)

    # Define trailer file (log file) that will contain the log entries for all processing
    if isinstance(input_filename, str):  # input file is a poller file -- easy case
        logname = input_filename.replace('.out', '.log')
    else:
        logname = 'svm_process.log'

    # Initialize total trailer filename as temp logname
    logging.basicConfig(filename=logname, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)

    # Start processing
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))
    total_list = []
    product_list = []
    try:
        # Parse the poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects.
        # A poller file contains visit data for a single instrument.  The TotalProduct discriminant
        # is the detector.  A TotalProduct object is comprised of FilterProducts and ExposureProducts
        # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct
        # is the atomic exposure data.
        log.info("Parse the poller and determine what exposures need to be combined into separate products.\n")
        obs_info_dict, total_list = poller_utils.interpret_obset_input(input_filename, log_level)

        # Generate the name for the manifest file which is for the entire visit.  It is fine
        # to use only one of the Total Products to generate the manifest name as the name is
        # not dependent on the detector.
        # Example: instrument_programID_obsetID_manifest.txt (e.g., wfc3_b46_06_manifest.txt)
        manifest_name = total_list[0].manifest_name
        log.info("\nGenerate the manifest name for this visit.")
        log.info("The manifest will contain the names of all the output products.")

        # The product_list is a list of all the output products which will be put into the manifest file
        product_list = []

        # Update all of the product objects with their associated configuration information.
        for total_item in total_list:
            log.info("Preparing configuration parameter values for total product {}".format(
                total_item.drizzle_filename))
            total_item.configobj_pars = config_utils.HapConfig(total_item,
                                                               log_level=log_level,
                                                               use_defaults=use_defaults_configs,
                                                               input_custom_pars_file=input_custom_pars_file,
                                                               output_custom_pars_file=output_custom_pars_file)
            for filter_item in total_item.fdp_list:
                log.info("Preparing configuration parameter values for filter product {}".format(
                    filter_item.drizzle_filename))
                filter_item.configobj_pars = config_utils.HapConfig(filter_item,
                                                                    log_level=log_level,
                                                                    use_defaults=use_defaults_configs,
                                                                    input_custom_pars_file=input_custom_pars_file,
                                                                    output_custom_pars_file=output_custom_pars_file)
            for expo_item in total_item.edp_list:
                log.info("Preparing configuration parameter values for exposure product {}".format(
                    expo_item.drizzle_filename))
                expo_item.configobj_pars = config_utils.HapConfig(expo_item,
                                                                  log_level=log_level,
                                                                  use_defaults=use_defaults_configs,
                                                                  input_custom_pars_file=input_custom_pars_file,
                                                                  output_custom_pars_file=output_custom_pars_file)
        log.info("The configuration parameters have been read and applied to the drizzle objects.")

        # Run align.py on images on a filter-by-filter basis.
        # Process each filter object which contains a list of exposure objects/products.
        log.info("\n{}: Align the images on a filter-by-filter basis.".format(str(datetime.datetime.now())))
        for tot_obj in total_list:
            for filt_obj in tot_obj.fdp_list:
                align_table, filt_exposures = filt_obj.align_to_gaia(output=diagnostic_mode)

                # Report results and track the output files
                if align_table:
                    log.info("ALIGN_TABLE: {}".format(align_table.filtered_table))
                    for row in align_table.filtered_table:
                        log.info(row['status'])
                        if row['status'] == 0:
                            log.info("Successfully aligned {} to {} astrometric frame\n".format(
                                row['imageName'], row['catalog']))
                        # If alignment did not work for an image, the image still has a WCS,
                        # so continue processing.
                        else:
                            log.info("Could not align {} to absolute astrometric frame\n".format(
                                row['imageName']))

                    hdrlet_list = align_table.filtered_table['headerletFile'].tolist()
                    product_list += hdrlet_list
                    product_list += filt_exposures

                    # Remove reference catalogs created for alignment of each filter product
                    for catalog_name in align_table.reference_catalogs:
                        log.info("Looking to clean up reference catalog: {}".format(catalog_name))
                        if os.path.exists(catalog_name):
                            os.remove(catalog_name)
                else:
                    log.warning("Step to align the images has failed. No alignment table has been generated.")

        # Run AstroDrizzle to produce drizzle-combined products
        log.info("\n{}: Create drizzled imagery products.".format(str(datetime.datetime.now())))
        driz_list = create_drizzle_products(total_list)
        product_list += driz_list

        # Create source catalogs from newly defined products (HLA-204)
        log.info("{}: Create source catalog from newly defined product.\n".format(str(datetime.datetime.now())))
        if "total detection product 00" in obs_info_dict.keys():
            catalog_list = create_catalog_products(total_list, log_level,
                                                   diagnostic_mode=diagnostic_mode,
                                                   phot_mode=phot_mode)
            product_list += catalog_list
        else:
            log.warning("No total detection product has been produced. "
                        "The sourcelist generation step has been skipped.")
        """
        # 8: (OPTIONAL) Determine whether there are any problems with alignment or photometry of product
        log.info("8: (TODO) (OPTIONAL) Determine whether there are any problems with alignment or photometry "
                 "of product")
        # TODO: QUALITY CONTROL SUBROUTINE CALL GOES HERE.
        """

        # 9: Compare results to HLA classic counterparts (if possible)
        if diagnostic_mode:
            run_sourcelist_comparision(total_list, log_level=log_level)

        # Write out manifest file listing all products generated during processing
        log.info("Creating manifest file {}.".format(manifest_name))
        log.info("  The manifest contains the names of products generated during processing.")
        with open(manifest_name, mode='w') as catfile:
            [catfile.write("{}\n".format(name)) for name in product_list]

        # 10: Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success,
        # 1 for error condition
        return_value = 0
    except Exception:
        return_value = 1
        print("\a\a\a")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        logging.exception("message")
    finally:
        end_dt = datetime.datetime.now()
        log.info('Processing completed at {}'.format(str(end_dt)))
        log.info('Total processing time: {} sec'.format((end_dt - starting_dt).total_seconds()))
        log.info("Return exit code for use by calling Condor/OWL workflow code: "
                 "0 (zero) for success, 1 for error")
        log.info("Return condition {}".format(return_value))
        logging.shutdown()

        # Append total trailer file (from astrodrizzle) to all total log files
        if total_list:
            for tot_obj in total_list:
                proc_utils.append_trl_file(tot_obj.trl_filename, logname, clean=False)

        # Now remove single temp log file
        if os.path.exists(logname):
            os.remove(logname)
        else:
            print("Master log file not found. Please check logs to locate processing messages.")
        return return_value
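# Illustrative sketch (hypothetical helper, not part of the module's API): driving
# the sequencer above directly from Python on a single poller file and propagating
# its Condor/OWL-style exit code.  'j92c01.out' is an assumed example input.
def _example_run_hap(poller_file='j92c01.out'):
    rv = run_hap_processing(poller_file, diagnostic_mode=False, phot_mode='both')
    if rv != 0:
        print("HAP processing reported an error for {}".format(poller_file))
    return rv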
def run_hap_processing(input_filename, diagnostic_mode=False, use_defaults_configs=True,
                       input_custom_pars_file=None, output_custom_pars_file=None, phot_mode="both",
                       log_level=logutil.logging.INFO):
    """
    Run the HST Advanced Products (HAP) generation code.  This routine is the sequencer or
    controller which invokes the high-level functionality to process the single visit data.

    Parameters
    ----------
    input_filename : string
        The 'poller file' where each line contains information regarding an exposure taken
        during a single visit.

    diagnostic_mode : bool, optional
        Allows printing of additional diagnostic information to the log.  Also, can turn on
        creation and use of pickled information.

    use_defaults_configs : bool, optional
        If True, use the configuration parameters in the 'default' portion of the configuration
        JSON files.  If False, use the configuration parameters in the "parameters" portion of
        the file.  The default is True.

    input_custom_pars_file : string, optional
        Represents a fully specified input filename of a configuration JSON file which has been
        customized for specialized processing.  This file should contain ALL the input parameters
        necessary for processing.  If there is a filename present for this parameter, the
        'use_defaults_configs' parameter is ignored.  The default is None.

    output_custom_pars_file : string, optional
        Fully specified output filename which contains all the configuration parameters
        available during the processing session.  The default is None.

    phot_mode : str, optional
        Which algorithm should be used to generate the sourcelists?  'aperture' for aperture
        photometry; 'segment' for segment map photometry; 'both' for both 'segment' and
        'aperture'.  Default value is 'both'.

    log_level : int, optional
        The desired level of verboseness in the log statements displayed on the screen and
        written to the .log file.  Default value is 20, or 'info'.

    Returns
    -------
    return_value : integer
        A return exit code used by the calling Condor/OWL workflow code:
        0 (zero) for success, 1 for error
    """
    # This routine needs to return an exit code, return_value, for use by the calling
    # Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
    return_value = 0
    log.setLevel(log_level)

    # Define trailer file (log file) that will contain the log entries for all processing
    if isinstance(input_filename, str):  # input file is a poller file -- easy case
        logname = input_filename.replace('.out', '.log')
    else:
        logname = 'svm_process.log'

    # Initialize total trailer filename as temp logname
    logging.basicConfig(filename=logname, format=SPLUNK_MSG_FORMAT, datefmt=MSG_DATEFMT)

    # Start processing
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))
    total_obj_list = []
    product_list = []
    try:
        # Parse the poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects.
        # A poller file contains visit data for a single instrument.  The TotalProduct discriminant
        # is the detector.  A TotalProduct object is comprised of FilterProducts and ExposureProducts
        # where its FilterProduct is distinguished by the filter in use, and the ExposureProduct
        # is the atomic exposure data.
        log.info("Parse the poller and determine what exposures need to be combined into separate products.\n")
        obs_info_dict, total_obj_list = poller_utils.interpret_obset_input(input_filename, log_level)

        # Generate the name for the manifest file which is for the entire visit.  It is fine
        # to use only one of the Total Products to generate the manifest name as the name is
        # not dependent on the detector.
        # Example: instrument_programID_obsetID_manifest.txt (e.g., wfc3_b46_06_manifest.txt)
        manifest_name = total_obj_list[0].manifest_name
        log.info("\nGenerate the manifest name for this visit.")
        log.info("The manifest will contain the names of all the output products.")

        # The product_list is a list of all the output products which will be put into the manifest file
        product_list = []

        # Update all of the product objects with their associated configuration information.
        for total_item in total_obj_list:
            log.info("Preparing configuration parameter values for total product {}".format(
                total_item.drizzle_filename))
            total_item.configobj_pars = config_utils.HapConfig(total_item,
                                                               log_level=log_level,
                                                               use_defaults=use_defaults_configs,
                                                               input_custom_pars_file=input_custom_pars_file,
                                                               output_custom_pars_file=output_custom_pars_file)
            for filter_item in total_item.fdp_list:
                log.info("Preparing configuration parameter values for filter product {}".format(
                    filter_item.drizzle_filename))
                filter_item.configobj_pars = config_utils.HapConfig(filter_item,
                                                                    log_level=log_level,
                                                                    use_defaults=use_defaults_configs,
                                                                    input_custom_pars_file=input_custom_pars_file,
                                                                    output_custom_pars_file=output_custom_pars_file)
            for expo_item in total_item.edp_list:
                log.info("Preparing configuration parameter values for exposure product {}".format(
                    expo_item.drizzle_filename))
                expo_item.configobj_pars = config_utils.HapConfig(expo_item,
                                                                  log_level=log_level,
                                                                  use_defaults=use_defaults_configs,
                                                                  input_custom_pars_file=input_custom_pars_file,
                                                                  output_custom_pars_file=output_custom_pars_file)
                expo_item = poller_utils.add_primary_fits_header_as_attr(expo_item, log_level)
        log.info("The configuration parameters have been read and applied to the drizzle objects.")

        reference_catalog = run_align_to_gaia(total_obj_list, log_level=log_level,
                                              diagnostic_mode=diagnostic_mode)
        if reference_catalog:
            product_list += [reference_catalog]

        # Run AstroDrizzle to produce drizzle-combined products
        log.info("\n{}: Create drizzled imagery products.".format(str(datetime.datetime.now())))
        driz_list = create_drizzle_products(total_obj_list)
        product_list += driz_list

        # Create source catalogs from newly defined products (HLA-204).  Initialize catalog_list
        # here so the quality-assurance step below cannot hit a NameError when no total
        # detection product exists.
        catalog_list = []
        log.info("{}: Create source catalog from newly defined product.\n".format(str(datetime.datetime.now())))
        if "total detection product 00" in obs_info_dict.keys():
            catalog_list = create_catalog_products(total_obj_list, log_level,
                                                   diagnostic_mode=diagnostic_mode,
                                                   phot_mode=phot_mode)
            product_list += catalog_list
        else:
            log.warning("No total detection product has been produced. "
                        "The sourcelist generation step has been skipped.")

        # Store total_obj_list to a pickle file to speed up development
        if False:
            pickle_filename = "total_obj_list_full.pickle"
            if os.path.exists(pickle_filename):
                os.remove(pickle_filename)
            with open(pickle_filename, "wb") as pickle_out:
                pickle.dump(total_obj_list, pickle_out)
            log.info("Successfully wrote total_obj_list to pickle file {}!".format(pickle_filename))

        # Quality assurance portion of the processing - done only if the environment
        # variable, SVM_QUALITY_TESTING, is set to 'on', 'yes', or 'true'.
        qa_switch = _get_envvar_switch(envvar_qa_svm)

        # If requested, generate quality assessment statistics for the SVM products
        if qa_switch:
            log.info("SVM Quality Assurance statistics have been requested for this dataset, {}.".format(
                input_filename))

            # Number of sources in Point and Segment catalogs
            total_catalog_list = [i for i in catalog_list if 'total' in i]
            fits_list = [i for i in driz_list if 'fits' in i]
            total_drizzle_list = [i for i in fits_list if 'total' in i]
            svm_qa.compare_num_sources(total_catalog_list, total_drizzle_list, log_level=log_level)

            # Get point/segment cross-match RA/Dec statistics
            for total_obj in total_obj_list:
                for filter_obj in total_obj.fdp_list:
                    svm_qa.compare_ra_dec_crossmatches(filter_obj, log_level=log_level)

            # Identify the number of GAIA sources in final product footprints
            for total_obj in total_obj_list:
                svm_qa.find_gaia_sources(total_obj, log_level=log_level)
                for filter_obj in total_obj.fdp_list:
                    svm_qa.find_gaia_sources(filter_obj, log_level=log_level)
                    for exp_obj in filter_obj.edp_list:
                        svm_qa.find_gaia_sources(exp_obj, log_level=log_level)

            # Photometry of cross-matched sources in Point and Segment catalogs for Filter products
            filter_drizzle_list = []
            for tot in total_obj_list:
                filter_drizzle_list.extend([x.drizzle_filename for x in tot.fdp_list])
            svm_qa.compare_photometry(filter_drizzle_list, log_level=log_level)

        # 9: Compare results to HLA classic counterparts (if possible)
        if diagnostic_mode:
            run_sourcelist_comparision(total_obj_list, diagnostic_mode=diagnostic_mode,
                                       log_level=log_level)

        # Write out manifest file listing all products generated during processing
        log.info("Creating manifest file {}.".format(manifest_name))
        log.info("  The manifest contains the names of products generated during processing.")
        with open(manifest_name, mode='w') as catfile:
            [catfile.write("{}\n".format(name)) for name in product_list]

        # 10: Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success,
        # 1 for error condition
        return_value = 0
    except Exception:
        return_value = 1
        print("\a\a\a")
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
        logging.exception("message")
    finally:
        end_dt = datetime.datetime.now()
        log.info('Processing completed at {}'.format(str(end_dt)))
        log.info('Total processing time: {} sec'.format((end_dt - starting_dt).total_seconds()))
        log.info("Return exit code for use by calling Condor/OWL workflow code: "
                 "0 (zero) for success, 1 for error")
        log.info("Return condition {}".format(return_value))
        logging.shutdown()

        # Append total trailer file (from astrodrizzle) to all total log files
        if total_obj_list:
            for tot_obj in total_obj_list:
                proc_utils.append_trl_file(tot_obj.trl_filename, logname, clean=False)

        # Now remove single temp log file
        if os.path.exists(logname):
            os.remove(logname)
        else:
            print("Master log file not found. Please check logs to locate processing messages.")
        return return_value
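# Illustrative sketch (hypothetical helper, not called anywhere in the pipeline):
# how a caller might opt in to the SVM quality-assurance statistics above.  The
# sequencer reads the SVM_QUALITY_TESTING environment variable via
# _get_envvar_switch(), accepting 'on', 'yes', or 'true'.  The poller filename
# below is an assumed example input.
def _example_run_with_svm_qa(poller_file='j92c01.out'):
    os.environ['SVM_QUALITY_TESTING'] = 'on'
    return run_hap_processing(poller_file)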
#!/usr/bin/env python
import pdb
import sys

import drizzlepac
from drizzlepac import hapsequencer
from drizzlepac.hlautils import config_utils
from drizzlepac.hlautils import poller_utils

# input_filename = sys.argv[1]
obs_info_dict, total_list = poller_utils.interpret_obset_input('j92c01.out')

# Flatten the product hierarchy: total products plus their filter and exposure products
product_list = total_list.copy()
for total_item in total_list:
    product_list += total_item.fdp_list
    product_list += total_item.edp_list

param_filename = "superparamfile.json"
for item in product_list:
    item.pars = config_utils.HapConfig(item, output_custom_pars_file=param_filename, use_defaults=False)

# print(" ")
print(product_list[1].pars.get_pars("alignment"))

# rv = hapsequencer.run_hla_processing(input_filename)
# pdb.set_trace()
#!/usr/bin/env python
import pdb
import sys

import drizzlepac
from drizzlepac import hapsequencer
from drizzlepac.hlautils import config_utils
from drizzlepac.hlautils import poller_utils

input_filename = sys.argv[1]
obs_info_dict, total_list = poller_utils.interpret_obset_input(input_filename)

out_pars_file = None
for total_item in total_list:
    total_item.configobj_pars = config_utils.HapConfig(total_item,
                                                       output_custom_pars_file=out_pars_file,
                                                       use_defaults=True)
    for filter_item in total_item.fdp_list:
        filter_item.configobj_pars = config_utils.HapConfig(filter_item,
                                                            output_custom_pars_file=out_pars_file,
                                                            use_defaults=True)
    for expo_item in total_item.edp_list:
        expo_item.configobj_pars = config_utils.HapConfig(expo_item,
                                                          output_custom_pars_file=out_pars_file,
                                                          use_defaults=True)

# print(" ")
# print(expo_list[0].configobj_pars.get_pars("alignment"))
def run_hla_processing(input_filename, result=None, debug=False, use_defaults_configs=True,
                       input_custom_pars_file=None, output_custom_pars_file=None, phot_mode='both'):
    # This routine needs to return an exit code, return_value, for use by the calling
    # Condor/OWL workflow code: 0 (zero) for success, 1 for error condition
    return_value = 0
    starting_dt = datetime.datetime.now()
    log.info("Run start time: {}".format(str(starting_dt)))
    product_list = []
    try:
        # Parse the poller file and generate the obs_info_dict, as well as the total detection
        # product lists which contain the ExposureProduct, FilterProduct, and TotalProduct objects
        log.info("Parse the poller and determine what exposures need to be combined into separate products")
        obs_info_dict, total_list = poller_utils.interpret_obset_input(input_filename)

        # Generate the name for the manifest file which is for the entire visit.  It is fine
        # to use only one of the Total Products to generate the manifest name as it is not
        # dependent on the detector.
        # instrument_programID_obsetID_manifest.txt (e.g., wfc3_b46_06_manifest.txt)
        manifest_name = total_list[0].manifest_name

        # Set up all pipeline parameter values that will be used later on in the run.
        for total_item in total_list:
            total_item.pars = config_utils.HapConfig(total_item,
                                                     use_defaults=use_defaults_configs,
                                                     input_custom_pars_file=input_custom_pars_file,
                                                     output_custom_pars_file=output_custom_pars_file)
            for filter_item in total_item.fdp_list:
                filter_item.pars = config_utils.HapConfig(filter_item,
                                                          use_defaults=use_defaults_configs,
                                                          input_custom_pars_file=input_custom_pars_file,
                                                          output_custom_pars_file=output_custom_pars_file)
            for expo_item in total_item.edp_list:
                expo_item.pars = config_utils.HapConfig(expo_item,
                                                        use_defaults=use_defaults_configs,
                                                        input_custom_pars_file=input_custom_pars_file,
                                                        output_custom_pars_file=output_custom_pars_file)

        # Run alignimages.py on images on a filter-by-filter basis.
        # Process each filter object which contains a list of exposure objects/products,
        # regardless of detector.
        log.info("Run alignimages.py on images on a filter-by-filter basis.")
        exposure_filenames = []
        for tot_obj in total_list:
            for filt_obj in tot_obj.fdp_list:
                align_table, filt_exposures = filt_obj.align_to_gaia()

                # Report results and track the output files
                # FIX - Add info here in the case of alignment working on data that should not be aligned
                # as well as outright failure (exception vs msgs)
                if align_table:
                    log.info("ALIGN_TABLE: {}".format(align_table))
                    # FIX
                    # os.remove("alignimages.log")
                    # FIX This log needs to be included in total product trailer file
                    for row in align_table:
                        if row['status'] == 0:
                            log.info("Successfully aligned {} to {} astrometric frame\n".format(
                                row['imageName'], row['catalog']))
                        # Alignment did not work for this particular image
                        # FIX - If alignment did not work for an image, it seems this exposure should
                        # be removed from the exposure lists.  TotalProduct and FilterProduct need
                        # methods to do this.
                        else:
                            log.info("Could not align {} to absolute astrometric frame\n".format(
                                row['imageName']))

                    hdrlet_list = align_table['headerletFile'].tolist()
                    product_list += hdrlet_list
                    exposure_filenames += filt_exposures
                else:
                    log.info("Alignimages step skipped.")

        # Run meta wcs code to get common WCS for all images in this obset_id, regardless of detector.
        # FIX (1) Intended for this to be a method of TotalProduct, but it should be
        # associated with all the exposures really used in the alignment (the "as built")
        # as is done here.  This function was used based upon WH analysis, but make sure to
        # set the size of the output image.  This comment is related to the previously
        # mentioned issue.  This produced incompatible results; perhaps it was accessing the
        # wrong dimension information.
        """
        log.info("Run make_mosaic_wcs to create a common WCS for all images aligned in the previous step.")
        log.info("The following images will be used: ")
        for imgname in exposure_filenames:
            log.info("{}".format(imgname))
        if exposure_filenames:
            meta_wcs = wcs_functions.make_mosaic_wcs(exposure_filenames)
        """
        # Not using meta_wcs at this time
        meta_wcs = []

        # Run AstroDrizzle to produce drizzle-combined products
        log.info("Create drizzled imagery products")
        driz_list = create_drizzle_products(obs_info_dict, total_list, meta_wcs)
        product_list += driz_list

        # Create source catalogs from newly defined products (HLA-204)
        log.info("Create source catalog from newly defined product")
        if 'total detection product 00' in obs_info_dict.keys():
            catalog_list = create_catalog_products(total_list, debug=debug, phot_mode=phot_mode)
            product_list += catalog_list
        else:
            print("Sourcelist generation step skipped.")
        """
        # 8: (OPTIONAL) Determine whether there are any problems with alignment or photometry of product
        log.info("8: (TODO) (OPTIONAL) Determine whether there are any problems with alignment or photometry "
                 "of product")
        # TODO: QUALITY CONTROL SUBROUTINE CALL GOES HERE.
        """

        # Write out manifest file listing all products generated during processing
        log.info("Creating manifest file {}".format(manifest_name))
        log.info("  Manifest contains the names of products generated during processing.")
        with open(manifest_name, mode='w') as catfile:
            [catfile.write("{}\n".format(name)) for name in product_list]

        # 10: Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success,
        # 1 for error condition
        return_value = 0
    except Exception:
        return_value = 1
        if debug:
            log.info("\a\a\a")
            exc_type, exc_value, exc_tb = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_tb, file=sys.stdout)
    finally:
        log.info('Total processing time: {} sec'.format(
            (datetime.datetime.now() - starting_dt).total_seconds()))
        log.info("9: Return exit code for use by calling Condor/OWL workflow code: 0 (zero) for success, "
                 "1 for error condition")
        return return_value
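# Illustrative sketch (hypothetical helper): the manifest written above is a plain
# text file with one product filename per line (e.g. wfc3_b46_06_manifest.txt), so
# it can be read back trivially when verifying a completed run:
def _example_read_manifest(manifest_name):
    with open(manifest_name) as manifest_file:
        return [line.strip() for line in manifest_file if line.strip()]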
def run_hla_flag_filter():
    from drizzlepac.hlautils import hla_flag_filter
    # + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    # All below lines are to get it working, not actual final code.
    out_file = glob.glob("??????.out")[0]
    # out_file = "j92c01.out"  # acs_10265_01
    # out_file = "j9es06.out"  # acs_10595_06

    # Get parameter values
    if os.getcwd().endswith("orig"):
        sys.exit("Don't run in the orig dir! YOU'LL RUIN EVERYTHING!")
    for cmd in ['rm -f *.*', 'cp orig/* .']:
        print(cmd)
        os.system(cmd)

    obs_info_dict, total_list = poller_utils.interpret_obset_input(out_file)
    out_pars_file = "pars.json"
    for total_item in total_list:
        total_item.configobj_pars = config_utils.HapConfig(total_item,
                                                           output_custom_pars_file=out_pars_file,
                                                           use_defaults=True)
        for filter_item in total_item.fdp_list:
            filter_item.configobj_pars = config_utils.HapConfig(filter_item,
                                                                output_custom_pars_file=out_pars_file,
                                                                use_defaults=True)
        for expo_item in total_item.edp_list:
            expo_item.configobj_pars = config_utils.HapConfig(expo_item,
                                                              output_custom_pars_file=out_pars_file,
                                                              use_defaults=True)

    # * * * * hla_flag_filter.run_source_list_flagging inputs for HLA Classic test run * * * *
    if out_file == "j92c01.out":  # acs_10265_01
        # Settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10265_01/flag_testing/hla
        mode = "dao"
        drizzled_image = "hst_10265_01_acs_wfc_f606w_drz.fits"
        flt_list = ["j92c01b4q_flc.fits", "j92c01b5q_flc.fits", "j92c01b7q_flc.fits", "j92c01b9q_flc.fits"]
        param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
        param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0  # force the HLA classic value
        param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0  # force the HLA classic value
        exptime = 5060.0
        catalog_name = "hst_10265_01_acs_wfc_f606w_{}phot.txt".format(mode)
        catalog_data = Table.read(catalog_name, format='ascii')
        proc_type = "{}phot".format(mode)
        drz_root_dir = os.getcwd()
        # for filt_key in filter_sorted_flt_dict.keys():
        #     flt_list = filter_sorted_flt_dict[filt_key]
        # os.remove("hst_10265_01_acs_wfc_f606w_msk.fits")
        # from devutils import make_mask_file
        # make_mask_file.make_mask_file_old(all_drizzled_filelist[0].replace("drz.fits", "wht.fits"))
        comp_cmd = ("python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/"
                    "comparison_tools/compare_sourcelists.py "
                    "orig/hst_10265_01_acs_wfc_f606w_{}phot_orig.txt hst_10265_01_acs_wfc_f606w_{}phot.txt "
                    "-i hst_10265_01_acs_wfc_f606w_drz.fits hst_10265_01_acs_wfc_f606w_drz.fits "
                    "-m absolute -p none".format(mode, mode))

    if out_file == "j9es06.out":  # acs_10595_06
        # Settings for testing ~/Documents/HLAtransition/runhlaprocessing_testing/acs_10595_06_flag_testing/
        mode = "sex"
        drizzled_image = "hst_10595_06_acs_wfc_f435w_drz.fits"
        flt_list = ["j9es06rbq_flc.fits", "j9es06rcq_flc.fits", "j9es06req_flc.fits", "j9es06rgq_flc.fits"]
        param_dict = total_list[0].fdp_list[0].configobj_pars.as_single_giant_dict()
        param_dict['quality control']['ci filter']['sourcex_bthresh'] = 5.0  # force the HLA classic value
        param_dict['quality control']['ci filter']['dao_bthresh'] = 5.0  # force the HLA classic value
        exptime = 710.0
        catalog_name = "hst_10595_06_acs_wfc_f435w_{}phot.txt".format(mode)  # follows the j92c01 branch pattern
        catalog_data = Table.read(catalog_name, format='ascii')
        # Leftover from an earlier iteration; references names undefined in this scope:
        # catalog_data = Table.read(dict_newTAB_matched2drz[all_drizzled_filelist[0]], format='ascii')
        proc_type = "{}phot".format(mode)
        drz_root_dir = os.getcwd()
        # os.remove("hst_10595_06_acs_wfc_f435w_msk.fits")
        # from devutils import make_mask_file
        # make_mask_file.make_mask_file("hst_10595_06_acs_wfc_f435w_wht.fits")
        comp_cmd = ("python /Users/dulude/Documents/Code/HLATransition/drizzlepac/drizzlepac/devutils/"
                    "comparison_tools/compare_sourcelists.py "
                    "orig_cats/hst_10595_06_acs_wfc_f435w_{}phot.txt hst_10595_06_acs_wfc_f435w_{}phot.txt "
                    "-i hst_10595_06_acs_wfc_f435w_drz.fits hst_10595_06_acs_wfc_f435w_drz.fits "
                    "-m absolute -p none".format(mode, mode))
    # + + + + + + + + + + + + + + + + + + + + + + + + + + + +

    # Execute hla_flag_filter.run_source_list_flaging
    catalog_data = hla_flag_filter.run_source_list_flaging(drizzled_image, flt_list, param_dict, exptime,
                                                           catalog_name, catalog_data, proc_type,
                                                           drz_root_dir, debug=True)
    catalog_data.write(catalog_name, delimiter=",", format='ascii', overwrite=True)
    print("Wrote {}".format(catalog_name))
    try:
        os.system(comp_cmd)
    except Exception:
        print("skipping automatic comparison run")