def check_output_is_zero(output_hdul):
    """Verify that input-minus-itself imprint subtraction yields zero.

    Runs the ImprintStep using a copy of the input file as the MSA imprint
    structure (instead of an actual MSA imprint file), so the subtraction
    input - imprint is expected to be identically zero.

    :param output_hdul: list; as produced by the output_hdul fixture
                        (hdul, step_output_file, step_input_file, run_pytests)
    :return: result: boolean (True if the summed residual is exactly 0.0),
             or None if the data is neither IFU nor MOS.
    """
    # output_hdul = hdul, step_output_file, step_input_file, run_pytests
    step_input_file = output_hdul[2]
    step_output_file = output_hdul[1]
    # Only run test if data is IFU or MSA
    inhdu = core_utils.read_hdrfits(step_input_file, info=False, show_hdr=False)
    if core_utils.check_IFU_true(inhdu) or core_utils.check_MOS_true(inhdu):
        # use the input file itself as the imprint structure so the
        # subtraction should produce an all-zero image
        msa_imprint_structure = copy.deepcopy(step_input_file)
        result_to_check = step_output_file.replace(".fits", "_zerotest.fits")
        # run the step with the specifics
        stp = ImprintStep()
        res = stp.call(step_input_file, msa_imprint_structure)
        res.save(result_to_check)
        # check that the end product of image - image is zero
        c = fits.getdata(result_to_check)
        subtraction = sum(c.flatten())
        result = bool(subtraction == 0.0)
        # erase test output file; os.remove is portable, unlike spawning
        # an external 'rm' process (previous implementation)
        os.remove(result_to_check)
        return result
def output_hdul(set_inandout_filenames, config):
    """Fixture helper for the extract_2d step.

    Decides (from the PTT config) whether calwebb_spec2 was run in full,
    whether to run the extract_2d pipeline step here, or whether to skip,
    and returns the header plus all the info the extract_2d/assign_wcs
    pytests need.

    :param set_inandout_filenames: fixture with per-step file-name info
    :param config: parsed PTT configuration object
    :return: tuple (hdul, step_output_file, msa_conf_name, truth_file,
             run_pytests, mode_used, wcs_threshold_diff, save_wcs_plots,
             extract_2d_threshold_diff,
             compare_assign_wcs_and_extract_2d_with_esa)
    """
    # determine if the pipeline is to be run in full, per steps, or skipped
    run_calwebb_spec2 = config.get("run_calwebb_spec2_in_full", "run_calwebb_spec2")
    if run_calwebb_spec2 == "skip":
        print('\n * PTT finished processing run_calwebb_spec2 is set to skip. \n')
        pytest.exit("Skipping pipeline run and tests for spec2, run_calwebb_spec2 is set to skip in PTT_config file.")
    elif "T" in run_calwebb_spec2:
        # any value containing "T" (e.g. "True") enables the full-run branch
        run_calwebb_spec2 = True
    else:
        run_calwebb_spec2 = False

    # get the general info
    set_inandout_filenames_info = core_utils.read_info4outputhdul(config, set_inandout_filenames)
    step, txt_name, step_input_file, step_output_file, outstep_file_suffix = set_inandout_filenames_info
    run_pipe_step = config.getboolean("run_pipe_steps", step)

    # determine which tests are to be run
    extract_2d_completion_tests = config.getboolean("run_pytest", "_".join((step, "completion", "tests")))
    extract_2d_validation_tests = config.getboolean("run_pytest", "_".join((step, "validation", "tests")))
    # NOTE(review): this reads the exact same config key as the line above —
    # confirm whether assign_wcs should have its own "validation tests" key
    assign_wcs_validation_tests = config.getboolean("run_pytest", "_".join((step, "validation", "tests")))
    run_pytests = [extract_2d_completion_tests, extract_2d_validation_tests, assign_wcs_validation_tests]

    # get other relevant info from PTT config file
    compare_assign_wcs_and_extract_2d_with_esa = config.getboolean(
        "benchmark_intermediary_products", "compare_assign_wcs_and_extract_2d_with_esa")
    esa_files_path = config.get("benchmark_intermediary_products", "esa_files_path")
    data_directory = config.get("calwebb_spec2_input_file", "data_directory")
    truth_file = os.path.join(data_directory, config.get("benchmark_intermediary_products", "truth_file_assign_wcs"))
    if compare_assign_wcs_and_extract_2d_with_esa:
        # when comparing against ESA products, the ESA path replaces the truth file
        truth_file = esa_files_path
    print("Will use this 'truth' file to compare result of extract_2d: ")
    print(truth_file)
    msa_conf_name = config.get("benchmark_intermediary_products", "msa_conf_name")
    extract_2d_threshold_diff = int(config.get("additional_arguments", "extract_2d_threshold_diff"))

    # Check if the mode used is MOS_sim and get the threshold for the assign_wcs test
    mode_used = config.get("calwebb_spec2_input_file", "mode_used").lower()
    wcs_threshold_diff = config.get("additional_arguments", "wcs_threshold_diff")
    save_wcs_plots = config.getboolean("additional_arguments", "save_wcs_plots")

    # if run_calwebb_spec2 is True calwebb_spec2 will be called, else individual steps will be ran
    step_completed = False
    end_time = '0.0'

    # only do this step if data is NOT IFU
    output_directory = config.get("calwebb_spec2_input_file", "output_directory")
    initial_input_file = config.get("calwebb_spec2_input_file", "input_file")
    initial_input_file = os.path.join(output_directory, initial_input_file)
    if os.path.isfile(initial_input_file):
        inhdu = core_utils.read_hdrfits(initial_input_file, info=False, show_hdr=False)
        detector = fits.getval(initial_input_file, "DETECTOR", 0)
    else:
        pytest.skip("Skipping " + step + " because the initial input file given in PTT_config.cfg does not exist.")

    if not core_utils.check_IFU_true(inhdu):
        if run_calwebb_spec2:
            # full pipeline already ran; just read the existing output header
            hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)
            return hdul, step_output_file, msa_conf_name, truth_file, run_pytests, mode_used, wcs_threshold_diff, \
                save_wcs_plots, extract_2d_threshold_diff, compare_assign_wcs_and_extract_2d_with_esa
        else:
            if run_pipe_step:
                # Create the logfile for PTT, but erase the previous one if it exists
                PTTcalspec2_log = os.path.join(output_directory, 'PTT_calspec2_' + detector + '_' + step + '.log')
                if os.path.isfile(PTTcalspec2_log):
                    os.remove(PTTcalspec2_log)
                print("Information outputed to screen from PTT will be logged in file: ", PTTcalspec2_log)
                # reset any previously installed logging handlers so basicConfig takes effect
                for handler in logging.root.handlers[:]:
                    logging.root.removeHandler(handler)
                logging.basicConfig(filename=PTTcalspec2_log, level=logging.INFO)
                # print pipeline version
                import jwst
                pipeline_version = "\n *** Using jwst pipeline version: " + jwst.__version__ + " *** \n"
                print(pipeline_version)
                logging.info(pipeline_version)
                if os.path.isfile(step_input_file):
                    msg = " The input file " + step_input_file + " exists... will run step " + step
                    print(msg)
                    logging.info(msg)
                    stp = Extract2dStep()
                    # check that previous pipeline steps were run up to this point
                    core_utils.check_completed_steps(step, step_input_file)
                    # get the right configuration files to run the step
                    local_pipe_cfg_path = config.get("calwebb_spec2_input_file", "local_pipe_cfg_path")
                    # start the timer to compute the step running time
                    start_time = time.time()
                    if local_pipe_cfg_path == "pipe_source_tree_code":
                        result = stp.call(step_input_file)
                    else:
                        result = stp.call(step_input_file, config_file=local_pipe_cfg_path + '/extract_2d.cfg')
                    result.save(step_output_file)
                    step_completed = True
                    hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)
                    # end the timer to compute the step running time
                    end_time = repr(time.time() - start_time)  # this is in seconds
                    msg = "Step " + step + " took " + end_time + " seconds to finish"
                    print(msg)
                    logging.info(msg)
                    # rename and move the pipeline log file
                    pipelog = "pipeline_" + detector + ".log"
                    try:
                        calspec2_pilelog = "calspec2_pipeline_" + step + "_" + detector + ".log"
                        pytest_workdir = TESTSDIR
                        logfile = glob(pytest_workdir + "/" + pipelog)[0]
                        os.rename(logfile, os.path.join(output_directory, calspec2_pilelog))
                    except IndexError:
                        # glob found no pipeline log file — warn but carry on
                        print("\n* WARNING: Something went wrong. Could not find a ", pipelog, " file \n")
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                    return hdul, step_output_file, msa_conf_name, truth_file, run_pytests, mode_used, \
                        wcs_threshold_diff, save_wcs_plots, extract_2d_threshold_diff, compare_assign_wcs_and_extract_2d_with_esa
                else:
                    msg = " The input file does not exist. Skipping step."
                    print(msg)
                    logging.info(msg)
                    core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                    pytest.skip("Skiping " + step + " because the input file does not exist.")
            else:
                msg = "Skipping running pipeline step " + step
                print(msg)
                logging.info(msg)
                # recover the step run time recorded by an earlier (screen-logged) run
                end_time = core_utils.get_stp_run_time_from_screenfile(step, detector, output_directory)
                if os.path.isfile(step_output_file):
                    hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)
                    step_completed = True
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                    return hdul, step_output_file, msa_conf_name, truth_file, run_pytests, mode_used, \
                        wcs_threshold_diff, save_wcs_plots, extract_2d_threshold_diff, compare_assign_wcs_and_extract_2d_with_esa
                else:
                    step_completed = False
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                    pytest.skip("Test skipped because input file " + step_output_file + " does not exist.")
    else:
        # extract_2d is not run for IFU data
        core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
        pytest.skip("Skipping " + step + " because data is IFU.")
def output_hdul(set_inandout_filenames, config):
    """Fixture helper for the cube_build step (IFU data only).

    Decides from the PTT config whether calwebb_spec2 ran in full, whether
    to run cube_build here, or whether to skip (non-IFU data, OPAQUE filter,
    or missing files), and returns the info the cube_build pytests need.

    :param set_inandout_filenames: fixture with per-step file-name info
    :param config: parsed PTT configuration object
    :return: tuple (hdul, step_output_file, run_pytests)
    """
    # determine if the pipeline is to be run in full, per steps, or skipped
    run_calwebb_spec2 = config.get("run_calwebb_spec2_in_full", "run_calwebb_spec2")
    if run_calwebb_spec2 == "skip":
        print('\n * PTT finished processing run_calwebb_spec2 is set to skip. \n')
        pytest.exit("Skipping pipeline run and tests for spec2, run_calwebb_spec2 is set to skip in PTT_config file.")
    elif "T" in run_calwebb_spec2:
        # any value containing "T" (e.g. "True") enables the full-run branch
        run_calwebb_spec2 = True
    else:
        run_calwebb_spec2 = False

    # get the general info
    set_inandout_filenames_info = core_utils.read_info4outputhdul(config, set_inandout_filenames)
    step, txt_name, step_input_file, step_output_file, outstep_file_suffix = set_inandout_filenames_info
    run_pipe_step = config.getboolean("run_pipe_steps", step)

    # determine which tests are to be run
    cube_build_completion_tests = config.getboolean("run_pytest", "_".join((step, "completion", "tests")))
    #cube_build_reffile_tests = config.getboolean("run_pytest", "_".join((step, "reffile", "tests")))
    #cube_build_validation_tests = config.getboolean("run_pytest", "_".join((step, "validation", "tests")))
    run_pytests = [cube_build_completion_tests]  #, cube_build_reffile_tests, cube_build_validation_tests]

    # Only run step if data is IFU
    output_directory = config.get("calwebb_spec2_input_file", "output_directory")
    initial_input_file = config.get("calwebb_spec2_input_file", "input_file")
    initial_input_file = os.path.join(output_directory, initial_input_file)
    if os.path.isfile(initial_input_file):
        inhdu = core_utils.read_hdrfits(initial_input_file, info=False, show_hdr=False)
        detector = fits.getval(initial_input_file, "DETECTOR", 0)
        filt = fits.getval(initial_input_file, 'filter')
        grat = fits.getval(initial_input_file, 'grating')
        # grating-filter pattern used later to glob for the cube product
        gratfilt = grat + "-" + filt + "_s3d"
    else:
        pytest.skip("Skipping " + step + " because the initial input file given in PTT_config.cfg does not exist.")

    end_time = '0.0'
    if core_utils.check_IFU_true(inhdu):
        # if run_calwebb_spec2 is True calwebb_spec2 will be called, else individual steps will be ran
        step_completed = False
        # check if the filter is to be changed
        change_filter_opaque = config.getboolean("calwebb_spec2_input_file", "change_filter_opaque")
        if change_filter_opaque:
            is_filter_opaque, step_input_filename = change_filter_opaque2science.change_filter_opaque(
                step_input_file, step=step)
            if is_filter_opaque:
                filter_opaque_msg = "With FILTER=OPAQUE, the calwebb_spec2 will run up to the extract_2d step. " \
                                    "Cube build pytest now set to Skip."
                print(filter_opaque_msg)
                core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                pytest.skip("Skipping " + step + " because FILTER=OPAQUE.")
        if run_calwebb_spec2:
            # full pipeline already ran; just read the existing output header
            hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)
            return hdul, step_output_file, run_pytests
        else:
            if run_pipe_step:
                # Create the logfile for PTT, but erase the previous one if it exists
                PTTcalspec2_log = os.path.join(output_directory, 'PTT_calspec2_' + detector + '_' + step + '.log')
                if os.path.isfile(PTTcalspec2_log):
                    os.remove(PTTcalspec2_log)
                print("Information outputed to screen from PTT will be logged in file: ", PTTcalspec2_log)
                # reset any previously installed logging handlers so basicConfig takes effect
                for handler in logging.root.handlers[:]:
                    logging.root.removeHandler(handler)
                logging.basicConfig(filename=PTTcalspec2_log, level=logging.INFO)
                # print pipeline version
                import jwst
                pipeline_version = "\n *** Using jwst pipeline version: " + jwst.__version__ + " *** \n"
                print(pipeline_version)
                logging.info(pipeline_version)
                if change_filter_opaque:
                    logging.info(filter_opaque_msg)
                if os.path.isfile(step_input_file):
                    msg = " *** Step " + step + " set to True"
                    print(msg)
                    logging.info(msg)
                    stp = CubeBuildStep()
                    # check that previous pipeline steps were run up to this point
                    core_utils.check_completed_steps(step, step_input_file)
                    # get the right configuration files to run the step
                    local_pipe_cfg_path = config.get("calwebb_spec2_input_file", "local_pipe_cfg_path")
                    # start the timer to compute the step running time
                    start_time = time.time()
                    if local_pipe_cfg_path == "pipe_source_tree_code":
                        result = stp.call(step_input_file)
                    else:
                        result = stp.call(step_input_file, config_file=local_pipe_cfg_path + '/cube_build.cfg')
                    result.save(step_output_file)
                    # end the timer to compute the step running time
                    end_time = repr(time.time() - start_time)  # this is in seconds
                    msg = "Step " + step + " took " + end_time + " seconds to finish"
                    print(msg)
                    logging.info(msg)
                    # determine the specific output of the cube step
                    specific_output_file = glob(step_output_file.replace('cube.fits', (gratfilt + '*.fits').lower()))[0]
                    cube_suffix = specific_output_file.split('cube_build_')[-1].replace('.fits', '')
                    # record info
                    step_completed = True
                    hdul = core_utils.read_hdrfits(specific_output_file, info=False, show_hdr=False)
                    # rename and move the pipeline log file
                    pipelog = "pipeline_" + detector + ".log"
                    try:
                        calspec2_pilelog = "calspec2_pipeline_" + step + "_" + detector + ".log"
                        pytest_workdir = TESTSDIR
                        logfile = glob(pytest_workdir + "/" + pipelog)[0]
                        os.rename(logfile, os.path.join(output_directory, calspec2_pilelog))
                    except IndexError:
                        # glob found no pipeline log file — warn but carry on
                        print("\n* WARNING: Something went wrong. Could not find a ", pipelog, " file \n")
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step, "_" + cube_suffix, step_completed, end_time)
                    return hdul, step_output_file, run_pytests
                else:
                    msg = " The input file does not exist. Skipping step."
                    print(msg)
                    logging.info(msg)
                    core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                    pytest.skip("Skipping " + step + " because the input file does not exist.")
            else:
                msg = "Skipping running pipeline step " + step
                print(msg)
                logging.info(msg)
                # recover the step run time recorded by an earlier (screen-logged) run
                end_time = core_utils.get_stp_run_time_from_screenfile(step, detector, output_directory)
                # record info
                # specific cube step suffix
                cube_suffix = "_s3d"
                if os.path.isfile(step_output_file):
                    hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)
                    step_completed = True
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step, cube_suffix, step_completed, end_time)
                    return hdul, step_output_file, run_pytests
                else:
                    step_completed = False
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step, cube_suffix, step_completed, end_time)
                    pytest.skip("Test skipped because input file " + step_output_file + " does not exist.")
    else:
        pytest.skip("Skipping " + step + " because data is not IFU.")
def output_hdul(set_inandout_filenames, config):
    """Fixture helper for the imprint_subtract step (IFU or MOS data only).

    Decides from the PTT config whether calwebb_spec2 ran in full, whether
    to run the imprint subtraction step here, or whether to skip, and
    returns the info the imprint_subtract pytests need.

    Fix: the FITS header keywords (DETECTOR) were previously read from
    initial_input_file BEFORE checking that the file exists, so the
    "initial input file does not exist" skip branch was unreachable —
    fits.getval would raise first. The header reads now live inside the
    os.path.isfile branch; the success path is unchanged.

    :param set_inandout_filenames: fixture with per-step file-name info
    :param config: parsed PTT configuration object
    :return: tuple (hdul, step_output_file, step_input_file, run_pytests)
    """
    # determine if the pipeline is to be run in full, per steps, or skipped
    run_calwebb_spec2 = config.get("run_calwebb_spec2_in_full", "run_calwebb_spec2")
    if run_calwebb_spec2 == "skip":
        print('\n * PTT finished processing run_calwebb_spec2 is set to skip. \n')
        pytest.exit("Skipping pipeline run and tests for spec2, run_calwebb_spec2 is set to skip in PTT_config file.")
    elif "T" in run_calwebb_spec2:
        # any value containing "T" (e.g. "True") enables the full-run branch
        run_calwebb_spec2 = True
    else:
        run_calwebb_spec2 = False

    # get the general info
    set_inandout_filenames_info = core_utils.read_info4outputhdul(config, set_inandout_filenames)
    step, txt_name, step_input_file, step_output_file, outstep_file_suffix = set_inandout_filenames_info

    # determine which steps are to be run, if not run in full
    run_pipe_step = config.getboolean("run_pipe_steps", step)

    # determine which tests are to be run
    imprint_subtract_completion_tests = config.getboolean("run_pytest", "_".join((step, "completion", "tests")))
    imprint_subtract_numerical_tests = config.getboolean("run_pytest", "_".join((step, "numerical", "tests")))
    #imprint_subtract_validation_tests = config.getboolean("run_pytest", "_".join((step, "validation", "tests")))
    run_pytests = [imprint_subtract_completion_tests, imprint_subtract_numerical_tests]  #, imprint_subtract_validation_tests]

    end_time = '0.0'

    # Only run step if data is IFU or MSA
    output_directory = config.get("calwebb_spec2_input_file", "output_directory")
    initial_input_file = config.get("calwebb_spec2_input_file", "input_file")
    initial_input_file = os.path.join(output_directory, initial_input_file)
    if os.path.isfile(initial_input_file):
        inhdu = core_utils.read_hdrfits(initial_input_file, info=False, show_hdr=False)
        # read header keywords only once the file is known to exist (bug fix)
        detector = fits.getval(initial_input_file, "DETECTOR", 0)
        calspec2_pilelog = "calspec2_pipeline_" + step + "_" + detector + ".log"
        pytest_workdir = TESTSDIR
    else:
        pytest.skip("Skipping " + step + " because the initial input file given in PTT_config.cfg does not exist.")

    if core_utils.check_IFU_true(inhdu) or core_utils.check_MOS_true(inhdu):
        # if run_calwebb_spec2 is True calwebb_spec2 will be called, else individual steps will be ran
        step_completed = False
        if run_calwebb_spec2:
            # full pipeline already ran; just read the existing output header
            if os.path.isfile(step_output_file):
                hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)
            else:
                pytest.skip("Skipping " + step + " because the output file does not exist.")
            return hdul, step_output_file, step_input_file, run_pytests
        else:
            if run_pipe_step:
                # Create the logfile for PTT, but erase the previous one if it exists
                PTTcalspec2_log = os.path.join(output_directory, 'PTT_calspec2_' + detector + '_' + step + '.log')
                if os.path.isfile(PTTcalspec2_log):
                    os.remove(PTTcalspec2_log)
                print("Information outputed to screen from PTT will be logged in file: ", PTTcalspec2_log)
                # reset any previously installed logging handlers so basicConfig takes effect
                for handler in logging.root.handlers[:]:
                    logging.root.removeHandler(handler)
                logging.basicConfig(filename=PTTcalspec2_log, level=logging.INFO)
                # print pipeline version
                import jwst
                pipeline_version = "\n *** Using jwst pipeline version: " + jwst.__version__ + " *** \n"
                print(pipeline_version)
                logging.info(pipeline_version)
                if os.path.isfile(step_input_file):
                    msg = " The input file " + step_input_file + " exists... will run step " + step
                    print(msg)
                    logging.info(msg)
                    msa_imprint_structure = config.get("additional_arguments", "msa_imprint_structure")
                    msg = "msa_imprint_structure file: " + msa_imprint_structure
                    print(msg)
                    logging.info(msg)
                    if not os.path.isfile(msa_imprint_structure):
                        print(" Need msa_imprint_structure file to continue. Step will be skipped.")
                        core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                        pytest.skip("Skipping " + step + " because msa_imprint_structure file in the configuration "
                                    "file does not exist.")
                    else:
                        msg = "*** Step " + step + " set to True"
                        print(msg)
                        logging.info(msg)
                        stp = ImprintStep()
                        # check that previous pipeline steps were run up to this point
                        core_utils.check_completed_steps(step, step_input_file)
                        # get the right configuration files to run the step
                        local_pipe_cfg_path = config.get("calwebb_spec2_input_file", "local_pipe_cfg_path")
                        # start the timer to compute the step running time
                        start_time = time.time()
                        if local_pipe_cfg_path == "pipe_source_tree_code":
                            result = stp.call(step_input_file, msa_imprint_structure)
                        else:
                            result = stp.call(step_input_file, msa_imprint_structure,
                                              config_file=local_pipe_cfg_path + '/imprint.cfg')
                        if result is not None:
                            result.save(step_output_file)
                            # end the timer to compute the step running time
                            end_time = repr(time.time() - start_time)  # this is in seconds
                            msg = "Step " + step + " took " + end_time + " seconds to finish"
                            print(msg)
                            logging.info(msg)
                            hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)
                            step_completed = True
                        else:
                            # step produced nothing; fall back to the input file's header
                            hdul = core_utils.read_hdrfits(step_input_file, info=False, show_hdr=False)
                        # rename and move the pipeline log file
                        pipelog = "pipeline_" + detector + ".log"
                        try:
                            calspec2_pilelog = "calspec2_pipeline_" + step + "_" + detector + ".log"
                            pytest_workdir = TESTSDIR
                            logfile = glob(pytest_workdir + "/" + pipelog)[0]
                            os.rename(logfile, os.path.join(output_directory, calspec2_pilelog))
                        except IndexError:
                            # glob found no pipeline log file — warn but carry on
                            print("\n* WARNING: Something went wrong. Could not find a ", pipelog, " file \n")
                        # add the running time for this step
                        core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                        return hdul, step_output_file, step_input_file, run_pytests
                else:
                    msg = " The input file does not exist. Skipping step."
                    print(msg)
                    logging.info(msg)
                    core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                    pytest.skip("Skipping " + step + " because the input file does not exist.")
            else:
                msg = "Skipping running pipeline step " + step
                print(msg)
                logging.info(msg)
                # recover the step run time recorded by an earlier (screen-logged) run
                end_time = core_utils.get_stp_run_time_from_screenfile(step, detector, output_directory)
                if os.path.isfile(step_output_file):
                    hdul = core_utils.read_hdrfits(step_output_file, info=False, show_hdr=False)
                    step_completed = True
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                    return hdul, step_output_file, step_input_file, run_pytests
                else:
                    step_completed = False
                    # add the running time for this step
                    core_utils.add_completed_steps(txt_name, step, outstep_file_suffix, step_completed, end_time)
                    pytest.skip("Test skipped because input file " + step_output_file + " does not exist.")
    else:
        pytest.skip("Skipping " + step + " because data is neither IFU or MOS.")
def validate_flat_field(output_hdul):
    """Run the flat-field validation test matching the observation mode.

    Dispatches to the FS/BOTS, MOS, or IFU flavor of the flattest routine
    based on the header checks (falling back to substrings of the output
    file name), then logs the result and skips/passes the pytest.

    :param output_hdul: list/tuple from the output_hdul fixture:
                        [0] header, [2] (step_output_file, msa_shutter_conf,
                        dflat_path, sflat_path, fflat_path),
                        [3] (threshold, save_plot_flag, write_files_flag)
    :return: median_diff: result of the flattest routine
    """
    hdu = output_hdul[0]
    step_output_file, msa_shutter_conf, dflat_path, sflat_path, fflat_path = output_hdul[2]
    flattest_threshold_diff, save_flattest_plot, write_flattest_files = output_hdul[3]

    # show the figures
    show_figs = False
    log_msgs = None

    # determine the testing mode; header keywords take priority, with the
    # output file name as a fallback hint
    do_fs_flattest = False
    if core_utils.check_FS_true(hdu) or core_utils.check_BOTS_true(hdu):
        do_fs_flattest = True
    elif 'bots' in step_output_file.lower() or 'fs' in step_output_file.lower():
        do_fs_flattest = True
    do_mos_flattest = False
    if core_utils.check_MOS_true(hdu) or 'mos' in step_output_file.lower():
        do_mos_flattest = True
    do_ifu_flattest = False
    if core_utils.check_IFU_true(hdu) or 'ifu' in step_output_file.lower():
        do_ifu_flattest = True

    # run the test (FS takes precedence over MOS over IFU if several match)
    if do_fs_flattest:
        median_diff, result_msg, log_msgs = flattest_fs.flattest(
            step_output_file,
            dflat_path=dflat_path,
            sflat_path=sflat_path,
            fflat_path=fflat_path,
            writefile=write_flattest_files,
            show_figs=show_figs,
            save_figs=save_flattest_plot,
            interpolated_flat=None,
            threshold_diff=flattest_threshold_diff,
            output_directory=None,
            debug=False)
    elif do_mos_flattest:
        median_diff, result_msg, log_msgs = flattest_mos.flattest(
            step_output_file,
            dflat_path=dflat_path,
            sflat_path=sflat_path,
            fflat_path=fflat_path,
            msa_shutter_conf=msa_shutter_conf,
            writefile=write_flattest_files,
            show_figs=show_figs,
            save_figs=save_flattest_plot,
            interpolated_flat=None,
            threshold_diff=flattest_threshold_diff,
            debug=False)
    elif do_ifu_flattest:
        median_diff, result_msg, log_msgs = flattest_ifu.flattest(
            step_output_file,
            dflat_path=dflat_path,
            sflat_path=sflat_path,
            fflat_path=fflat_path,
            writefile=write_flattest_files,
            mk_all_slices_plt=False,
            show_figs=show_figs,
            save_figs=save_flattest_plot,
            interpolated_flat=None,
            threshold_diff=flattest_threshold_diff,
            debug=False)
    else:
        pytest.skip("Skipping pytest: The input fits file is not FS, MOS, or IFU. This tool does not yet include the "
                    "routine to verify this kind of file.")

    if log_msgs is not None:
        for msg in log_msgs:
            logging.info(msg)

    if median_diff == "skip":
        # the flattest routine itself decided this data cannot be tested
        logging.info(result_msg)
        pytest.skip(result_msg)
    else:
        print(result_msg)
        logging.info(result_msg)

    return median_diff
def validate_pathloss(output_hdul):
    """Run the pathloss validation test matching the mode and source type.

    Selects the FS/BOTS, MOS, or IFU flavor of the pathtest routine (point
    or extended source, per the SRCTYPE keyword), downloading the pathloss
    reference file from CRDS if it is not present locally, then logs the
    result and skips/passes the pytest.

    Fixes: renamed the misspelled local 'srouce_type' to 'source_type', and
    added an early skip for an unrecognized SRCTYPE — previously such a
    value left 'median_diff' unassigned and the function crashed with a
    NameError at the final check.

    :param output_hdul: list/tuple from the output_hdul fixture:
                        [0] header, [1] comparison file, [2] step input file,
                        [4] (threshold_diff, save_figs, writefile)
    :return: median_diff: result of the pathtest routine
    """
    hdu = output_hdul[0]
    step_input_filename = output_hdul[2]
    comparison_filename = output_hdul[1]
    threshold_diff, save_figs, writefile = output_hdul[4]

    # other default variables
    show_figs = False
    log_msgs = None
    debug = False

    # determine the type of source
    source_type = fits.getval(comparison_filename, "SRCTYPE", "SCI", 1)
    msg = "Source type is: " + source_type
    print(msg)
    logging.info(msg)

    # guard against an unrecognized SRCTYPE (bug fix: median_diff would
    # otherwise never be assigned and a NameError would be raised below)
    if "point" not in source_type.lower() and "extend" not in source_type.lower():
        pytest.skip("Skipping pytest: SRCTYPE " + source_type + " is not recognized (expected POINT or EXTENDED).")

    # get the corresponding reference file
    reffile = hdu["R_PTHLOS"].replace("crds://", "")
    # download the file if necessary
    if not os.path.isfile(reffile):
        reffile_url = "https://jwst-crds.stsci.edu/unchecked_get/references/jwst/" + reffile
        urllib.request.urlretrieve(reffile_url, reffile)

    if core_utils.check_FS_true(hdu) or core_utils.check_BOTS_true(hdu):
        if "point" in source_type.lower():
            median_diff, result_msg, log_msgs = pathloss_fs_ps.pathtest(
                step_input_filename, reffile, comparison_filename,
                writefile=writefile, show_figs=show_figs, save_figs=save_figs,
                threshold_diff=threshold_diff, debug=debug)
        elif "extend" in source_type.lower():
            median_diff, result_msg, log_msgs = pathloss_fs_uni.pathtest(
                step_input_filename, reffile, comparison_filename,
                writefile=writefile, show_figs=show_figs, save_figs=save_figs,
                threshold_diff=threshold_diff, debug=debug)
    elif core_utils.check_MOS_true(hdu):
        if "point" in source_type.lower():
            median_diff, result_msg, log_msgs = pathloss_mos_ps.pathtest(
                step_input_filename, reffile, comparison_filename,
                writefile=writefile, show_figs=show_figs, save_figs=save_figs,
                threshold_diff=threshold_diff, debug=debug)
        elif "extend" in source_type.lower():
            median_diff, result_msg, log_msgs = pathloss_mos_uni.pathtest(
                step_input_filename, reffile, comparison_filename,
                writefile=writefile, show_figs=show_figs, save_figs=save_figs,
                threshold_diff=threshold_diff, debug=debug)
    elif core_utils.check_IFU_true(hdu):
        if "point" in source_type.lower():
            median_diff, result_msg, log_msgs = pathloss_ifu_ps.pathtest(
                step_input_filename, reffile, comparison_filename,
                writefile=writefile, show_figs=show_figs, save_figs=save_figs,
                threshold_diff=threshold_diff, debug=debug)
        elif "extend" in source_type.lower():
            median_diff, result_msg, log_msgs = pathloss_ifu_uni.pathtest(
                step_input_filename, reffile, comparison_filename,
                writefile=writefile, show_figs=show_figs, save_figs=save_figs,
                threshold_diff=threshold_diff, debug=debug)
    else:
        pytest.skip("Skipping pytest: The input fits file is not FS, MOS, or IFU. This tool does not yet include the "
                    "routine to verify this kind of file.")

    if log_msgs is not None:
        for msg in log_msgs:
            logging.info(msg)

    if median_diff == "skip":
        # the pathtest routine itself decided this data cannot be tested
        logging.info(result_msg)
        pytest.skip(result_msg)
    else:
        print(result_msg)
        logging.info(result_msg)

    return median_diff
def validate_wcs(output_hdul):
    """Run the WCS validation test matching the observation mode.

    Dispatches to the FS, MOS, or IFU flavor of the compare_wcs routine,
    comparing the pipeline WCS against either a truth file or ESA files,
    and maps the routine's string result to a boolean / pytest skip.

    :param output_hdul: list/tuple from the output_hdul fixture:
                        [0] header, [1] input file, [2] msa_conf_name,
                        [3] truth file (or ESA files path — see note below),
                        [4] wcs threshold, [5] save-plots flag,
                        [7] mode_used, [8] compare-with-ESA flag
    :return: result: boolean (True if the comparison PASSed)
    """
    # get the input information for the wcs routine
    hdu = output_hdul[0]
    infile_name = output_hdul[1]
    msa_conf_name = output_hdul[2]
    truth_file = output_hdul[3]
    mode_used = output_hdul[7]
    compare_assign_wcs_and_extract_2d_with_esa = output_hdul[8]
    esa_files_path = None
    if compare_assign_wcs_and_extract_2d_with_esa:
        # NOTE(review): in this mode the fixture stored the ESA files path in
        # the truth_file slot (index 3) — verify against the output_hdul fixture
        esa_files_path = output_hdul[3]
        truth_file = None

    # define the threshold difference between the pipeline output and the truth files for the pytest to pass or fail
    threshold_diff = float(output_hdul[4])

    # save the output plots
    save_wcs_plots = output_hdul[5]

    # show the figures
    show_figs = False

    msg = "\n Performing WCS validation test... "
    print(msg)
    logging.info(msg)
    log_msgs = None
    if core_utils.check_FS_true(hdu):
        result, log_msgs = compare_wcs_fs.compare_wcs(
            infile_name,
            truth_file=truth_file,
            esa_files_path=esa_files_path,
            show_figs=show_figs,
            save_figs=save_wcs_plots,
            threshold_diff=threshold_diff,
            raw_data_root_file=None,
            output_directory=None,
            debug=False)
    elif core_utils.check_MOS_true(hdu) and mode_used != "MOS_sim":
        # simulated MOS data is excluded from the MOS comparison
        result, log_msgs = compare_wcs_mos.compare_wcs(
            infile_name,
            msa_conf_name=msa_conf_name,
            truth_file=truth_file,
            esa_files_path=esa_files_path,
            show_figs=show_figs,
            save_figs=save_wcs_plots,
            threshold_diff=threshold_diff,
            raw_data_root_file=None,
            output_directory=None,
            debug=False)
    elif core_utils.check_IFU_true(hdu):
        result, log_msgs = compare_wcs_ifu.compare_wcs(
            infile_name,
            truth_file=truth_file,
            esa_files_path=esa_files_path,
            show_figs=show_figs,
            save_figs=save_wcs_plots,
            threshold_diff=threshold_diff,
            raw_data_root_file=None,
            output_directory=None,
            debug=False)
    else:
        # We do not have truth data to compare with for BOTS
        pytest.skip("Skipping pytest: The fits file is not FS, MOS, or IFU.")

    if log_msgs is not None:
        for msg in log_msgs:
            logging.info(msg)

    # map the comparison routine's string verdict to skip/True/False
    if "skip" in result:
        pytest.skip("Skipping assign_wcs pytest.")
    elif "PASS" in result:
        result = True
    else:
        result = False

    return result