def test_ramp_fit_nircam(_bigdata):
    """
    Regression test of ramp_fit step performed on NIRCam data.

    Runs RampFitStep on a jump-flagged exposure and compares both the
    primary product and the optional fitting-info product against
    reference files under ``_bigdata``.
    """
    suffix = 'rampfit'
    output_file_base, output_files = add_suffix('rampfit_output.fits', suffix,
                                                list(range(1)))

    # Remove leftovers from a previous run so the files compared below are
    # guaranteed to come from this call.  Only "file missing / not
    # removable" errors are ignored; the original bare `except:` silently
    # swallowed everything, including KeyboardInterrupt and SystemExit.
    for stale in (output_files[0], "rampfit_opt_out.fits"):
        try:
            os.remove(stale)
        except OSError:
            pass

    RampFitStep.call(
        _bigdata + '/nircam/test_ramp_fit/jw00017001001_01101_00001_NRCA1_jump.fits',
        output_file=output_file_base,
        suffix=suffix,
        save_opt=True,
        opt_name='rampfit_opt_out.fits')

    # compare primary output
    n_priout = output_files[0]
    h = pf.open(n_priout)
    n_priref = _bigdata + '/nircam/test_ramp_fit/jw00017001001_01101_00001_NRCA1_ramp_fit.fits'
    href = pf.open(n_priref)
    newh = pf.HDUList([h['primary'], h['sci'], h['err'], h['dq']])
    newhref = pf.HDUList(
        [href['primary'], href['sci'], href['err'], href['dq']])
    result = pf.diff.FITSDiff(
        newh, newhref,
        ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
        rtol=0.00001)
    assert result.identical, result.report()

    # compare optional output
    n_optout = 'rampfit_opt_out_fitopt.fits'
    h = pf.open(n_optout)
    n_optref = _bigdata + '/nircam/test_ramp_fit/rampfit_opt_out.fits'
    href = pf.open(n_optref)
    newh = pf.HDUList([
        h['primary'], h['slope'], h['sigslope'], h['yint'], h['sigyint'],
        h['pedestal'], h['weights'], h['crmag']
    ])
    newhref = pf.HDUList([
        href['primary'], href['slope'], href['sigslope'], href['yint'],
        href['sigyint'], href['pedestal'], href['weights'], href['crmag']
    ])
    result = pf.diff.FITSDiff(
        newh, newhref,
        ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
        rtol=0.00001)
    assert result.identical, result.report()
def test_ramp_fit_niriss(_bigdata):
    """
    Regression test of ramp_fit step performed on NIRISS data.
    """
    suffix = 'rampfit'
    output_file_base, output_files = add_suffix('rampfit_output.fits', suffix,
                                                list(range(1)))

    RampFitStep.call(
        _bigdata + '/niriss/test_ramp_fit/jw00034001001_01101_00001_NIRISS_jump.fits',
        save_opt=True,
        output_file=output_file_base,
        suffix=suffix,
        opt_name='rampfit_opt_out.fits')

    # Keywords that legitimately differ between a fresh product and the
    # stored reference file.
    ignored = ['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX']

    # primary output
    actual = fits.open(output_files[0])
    reference = fits.open(
        _bigdata + '/niriss/test_ramp_fit/jw00034001001_01101_00001_NIRISS_ramp_fit.fits')
    prim_exts = ['primary', 'sci', 'err', 'dq']
    newh = fits.HDUList([actual[name] for name in prim_exts])
    newhref = fits.HDUList([reference[name] for name in prim_exts])
    result = fits.diff.FITSDiff(newh, newhref,
                                ignore_keywords=ignored, rtol=0.00001)
    assert result.identical, result.report()

    # optional output
    actual = fits.open('rampfit_opt_out_fitopt.fits')
    reference = fits.open(
        _bigdata + '/niriss/test_ramp_fit/jw00034001001_01101_00001_NIRISS_uncal_opt.fits')
    opt_exts = ['primary', 'slope', 'sigslope', 'yint', 'sigyint',
                'pedestal', 'weights', 'crmag']
    newh = fits.HDUList([actual[name] for name in opt_exts])
    newhref = fits.HDUList([reference[name] for name in opt_exts])
    result = fits.diff.FITSDiff(newh, newhref,
                                ignore_keywords=ignored, rtol=0.00001)
    assert result.identical, result.report()
def test_int_times2(generate_miri_reffiles, setup_inputs):
    # Test whether int_times table gets copied to output when it should
    override_gain, override_readnoise = generate_miri_reffiles
    ingain, inreadnoise = 6, 7
    grouptime = 3.0
    nints, ngroups, nrows, ncols = 5, 3, 2, 2
    model, gdq, rnModel, pixdq, err, gain = setup_inputs(ngroups=ngroups,
        readnoise=inreadnoise, nints=nints, nrows=nrows, ncols=ncols,
        gain=ingain, deltatime=grouptime)

    # Set TSOVISIT True, in which case the int_times table should be
    # propagated to the output with one row per integration (checked by
    # the final assertion).  The previous comment claimed the opposite
    # ("false ... zero length"), contradicting the code below.
    model.meta.visit.tsovisit = True

    # Call ramp fit through the step class
    slopes, cube_model = RampFitStep.call(model,
        override_gain=override_gain, override_readnoise=override_readnoise,
        maximum_cores="none")

    assert slopes is not None
    assert cube_model is not None
    # One int_times row per integration is expected for a TSO visit.
    assert (len(cube_model.int_times) == nints)
def test_ramp_fit_step(generate_miri_reffiles, setup_inputs):
    """
    Create a simple input to instantiate RampFitStep and execute a call to
    test the step class and class method.
    """
    override_gain, override_readnoise = generate_miri_reffiles
    ingain, inreadnoise = 6, 7
    grouptime = 3.0
    nints, ngroups, nrows, ncols = 1, 5, 2, 2
    model, gdq, rnModel, pixdq, err, gain = setup_inputs(
        ngroups=ngroups, readnoise=inreadnoise, nints=nints, nrows=nrows,
        ncols=ncols, gain=ingain, deltatime=grouptime)

    # Add basic ramps to each pixel: pixel k gets the base ramp scaled by
    # (k + 1), so every pixel has a distinct, perfectly linear slope.
    base_ramp = np.arange(1, ngroups + 1)
    ans_slopes = np.zeros(shape=(2, 2))
    coords = ((0, 0), (0, 1), (1, 0), (1, 1))
    for scale, (row, col) in enumerate(coords, start=1):
        ramp = base_ramp * scale  # A simple linear ramp
        model.data[0, :, row, col] = ramp
        ans_slopes[row, col] = ramp[0] / grouptime

    # Call ramp fit through the step class
    slopes, cube_model = RampFitStep.call(
        model, override_gain=override_gain,
        override_readnoise=override_readnoise, maximum_cores="none")

    assert slopes is not None
    assert cube_model is not None

    # Test to make sure the ramps are as expected and that the step is complete
    np.testing.assert_allclose(slopes.data, ans_slopes, rtol=1e-5)
    assert slopes.meta.cal_step.ramp_fit == "COMPLETE"
def test_subarray_5groups(tmpdir_factory):
    # all pixel values are zero. So slope should be zero
    gainfile = str(tmpdir_factory.mktemp("data").join("gain.fits"))
    readnoisefile = str(tmpdir_factory.mktemp("data").join('readnoise.fits'))

    model1, gdq, rnModel, pixdq, err, gain = setup_subarray_inputs(
        ngroups=5, subxstart=10, subystart=20, subxsize=5, subysize=15,
        readnoise=50)

    gain.save(gainfile)
    rnModel.save(readnoisefile)

    model1.meta.exposure.ngroups = 11

    # Give a single pixel a known, non-linear ramp; everything else stays 0.
    yvalues = np.array([10, 15, 25, 33, 60])
    for group, value in enumerate(yvalues):
        model1.data[0, group, 12, 1] = value

    # Call ramp fit through the step class
    slopes, cube_model = RampFitStep.call(
        model1, override_gain=gainfile, override_readnoise=readnoisefile,
        maximum_cores="none", save_opt=True)

    assert slopes is not None
    assert cube_model is not None

    # The fitted slope of that pixel must match a least-squares line fit.
    xvalues = np.arange(5) * 1.0
    coeff = np.polyfit(xvalues, yvalues, 1)
    np.testing.assert_allclose(slopes.data[12, 1], coeff[0], 1e-6)
with open(txt_outputs_summary, "a") as tf: tf.write(line2write + "\n") else: # steps to be ran, in order steps_to_run = [ GroupScaleStep(), DQInitStep(), SaturationStep(), SuperBiasStep(), RefPixStep(), RSCD_Step(), LastFrameStep(), LinearityStep(), DarkCurrentStep(), JumpStep(), RampFitStep(), GainScaleStep() ] comp_keys = [ "S_GRPSCL", "S_DQINIT", "S_SATURA", "S_SUPERB", "S_REFPIX", "S_RSCD", "S_LASTFR", "S_LINEAR", "S_DARK", "S_JUMP", "S_RAMP", "S_GANSCL" ] # run the pipeline step by step for i, stp_instance in enumerate(steps_to_run): stp = stp_instance if i == 0: step_input_file = fits_input_uncal_file else: # check if step was completed and find the appropriate input file j = 1
def one_group_suppressed(nints, suppress, setup_inputs):
    """
    Creates three pixel ramps.  The first ramp has no good groups.  The
    second ramp has one good group.  The third ramp has all good groups.
    Sets up the models to be used by the tests for the one group
    suppression flag.
    """
    # Define the data.
    ngroups, nrows, ncols = 5, 1, 3
    dims = nints, ngroups, nrows, ncols
    rnoise, gain = 10, 1
    group_time, frame_time = 5.0, 1

    rampmodel, gdq, rnModel, pixdq, err, gmodel = setup_inputs(
        ngroups=ngroups, readnoise=rnoise, nints=nints,
        nrows=nrows, ncols=ncols, gain=gain, deltatime=group_time)
    rampmodel.meta.exposure.frame_time = frame_time

    # Ramp counts 1..ngroups; DQ templates for fully saturated and fully
    # good ramps, in the model's own groupdq dtype.
    ramp = np.arange(1, ngroups + 1, dtype=float)
    dq_dtype = rampmodel.groupdq.dtype
    all_sat = np.full(ngroups, dqflags.pixel["SATURATED"], dtype=dq_dtype)
    all_good = np.zeros(ngroups, dtype=dq_dtype)

    # Integration 0: identical ramp data in all three pixels ...
    for col in range(ncols):
        rampmodel.data[0, :, 0, col] = ramp

    # ... but different DQ per pixel.
    rampmodel.groupdq[0, :, 0, 0] = all_sat   # All groups sat
    rampmodel.groupdq[0, :, 0, 1] = all_sat   # 0th good, all others sat
    rampmodel.groupdq[0, 0, 0, 1] = 0
    rampmodel.groupdq[0, :, 0, 2] = all_good  # All groups good

    if nints > 1:
        # Integration 1: all good ramps everywhere.
        for col in range(ncols):
            rampmodel.data[1, :, 0, col] = ramp
            rampmodel.groupdq[1, :, 0, col] = all_good

    rampmodel.suppress_one_group_ramps = suppress

    # Call ramp fit through the step class
    slopes, cube_model = RampFitStep.call(
        rampmodel, override_gain=gmodel, override_readnoise=rnModel,
        suppress_one_group=suppress, maximum_cores="none")

    return slopes, cube_model, dims
def run_caldet1(fits_input_uncal_file, step_by_step=False):
    """
    Run the calwebb_detector1 pipeline on an uncalibrated exposure.

    Parameters
    ----------
    fits_input_uncal_file : str
        Name of the uncalibrated (_uncal) FITS file; must exist in the
        current working directory, otherwise the script exits.
    step_by_step : bool
        If False, run the whole pipeline in a single call.  If True, run
        each detector1 step individually, feeding each step the previous
        step's output product.

    Side effects: writes intermediary FITS products, a per-step timing
    summary text file, and log files in the current directory, then moves
    the products to the configured output directory.
    """
    if not os.path.isfile(fits_input_uncal_file):
        print("Input file not found in the current directory. Unable to proceed, exiting script.")
        exit()

    # Get the detector used
    detector = fits.getval(fits_input_uncal_file, "DETECTOR", 0)

    # Get the cfg file.  BOTS data uses the TSO configuration, darks use
    # the dark configuration, everything else the regular detector1 one.
    calwebb_detector1_cfg, calwebb_tso1_cfg, calwebb_dark_cfg, output_dir, mode_used, rawdatrt = get_caldet1cfg_and_workingdir()
    if mode_used != "BOTS" and mode_used.lower() != "dark":
        cfg_file = calwebb_detector1_cfg
    elif mode_used == "BOTS":
        cfg_file = calwebb_tso1_cfg
    elif mode_used.lower() == "dark":
        cfg_file = calwebb_dark_cfg
    configfile_used = "Using this configuration file: "+cfg_file

    # Initiate the PTT log file.  Any pre-existing root handlers are
    # removed first so basicConfig actually re-targets logging to our file.
    PTTcaldetector1_log = os.path.join(output_dir, 'PTT_caldetector1_'+detector+'.log')
    print("Information outputed to screen from this script will be logged in file: ", PTTcaldetector1_log)
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(filename=PTTcaldetector1_log, level=logging.DEBUG)
    print(configfile_used)
    logging.info(configfile_used)

    # create the text file to record the names of the output files and the
    # time the pipeline took to run
    txt_outputs_summary = "cal_detector1_outputs_and_times_"+detector+".txt"
    end_time_total = []
    line0 = "# {:<20}".format("Input file: "+fits_input_uncal_file)
    line1 = "# {:<16} {:<19} {:<20}".format("Step", "Output file", "Time to run [s]")
    with open(txt_outputs_summary, "w+") as tf:
        tf.write(line0+"\n")
        tf.write(line1+"\n")

    # Name of the file containing all the pipeline output
    caldetector1_pipeline_log = "pipeline.log"

    # copy the configuration file to create the pipeline log
    stpipelogcfg = calwebb_detector1_cfg.replace("calwebb_detector1.cfg", "stpipe-log.cfg")
    subprocess.run(["cp", stpipelogcfg, "."])

    #final_output_caldet1 = "gain_scale.fits"
    final_output_caldet1 = "final_output_caldet1_"+detector+".fits"
    # Expected product file name of each step, in execution order.
    output_names = ["group_scale.fits", "dq_init.fits", "saturation.fits",
                    "superbias.fits", "refpix.fits", "lastframe.fits",
                    "linearity.fits", "dark_current.fits", "jump.fits",
                    "ramp_fit.fits", final_output_caldet1]

    if not step_by_step:
        print("Got arguments and will run the calwebb_detector1 pipeline in full. This may take a while...")

        # start the timer to compute the step running time
        start_time = time.time()
        result = Detector1Pipeline.call(fits_input_uncal_file, config_file=cfg_file)
        result.save(final_output_caldet1)

        # end the timer to compute pipeline running time
        end_time = time.time() - start_time  # this is in seconds
        time2finish_string = " * calwebb_detector1 took "+repr(end_time)+" seconds to finish *"
        print(time2finish_string)
        logging.info(time2finish_string)
        # Human-readable total: minutes above 60 s, hours above 60 min.
        if end_time > 60.0:
            end_time_min = round(end_time / 60.0, 1)  # in minutes
            tot_time = repr(end_time_min)+"min"
            if end_time_min > 60.0:
                end_time_hr = round(end_time_min / 60.0, 1)  # in hrs
                tot_time = repr(end_time_hr)+"hr"
        else:
            tot_time = str(round(end_time, 1))+"sec"
        total_time = "{:<18} {:<20} {:<20}".format("", "total_time = ", repr(end_time)+" ="+tot_time)

        # get the running time for the individual steps
        if os.path.isfile(caldetector1_pipeline_log):
            step_running_times = calculate_step_run_time(caldetector1_pipeline_log, output_names)

            # write step running times in the text file
            end_time_list = []
            for outnm in output_names:
                stp = outnm.replace(".fits", "")
                if stp in step_running_times:
                    step_time = step_running_times[stp]["run_time"]
                else:
                    step_time = "N/A"
                end_time_list.append(step_time)
                line2write = "{:<18} {:<20} {:<20}".format(stp, outnm, step_time)
                with open(txt_outputs_summary, "a") as tf:
                    tf.write(line2write+"\n")
        else:
            print("No pipeline.log found. Unable to record times per step.")
    else:
        print("Got arguments and will run the calwebb_detector1 pipeline step by step.")

        # steps to be ran, in order
        steps_to_run = [GroupScaleStep(), DQInitStep(), SaturationStep(),
                        SuperBiasStep(), RefPixStep(), LastFrameStep(),
                        LinearityStep(), DarkCurrentStep(), JumpStep(),
                        RampFitStep(), GainScaleStep()]
        # Header keyword recording each step's completion status (parallel
        # to steps_to_run).
        comp_keys = ["S_GRPSCL", "S_DQINIT", "S_SATURA", "S_SUPERB",
                     "S_REFPIX", "S_LASTFR", "S_LINEAR", "S_DARK",
                     "S_JUMP", "S_RAMP", "S_GANSCL"]

        # run the pipeline step by step
        for i, stp_instance in enumerate(steps_to_run):
            stp = stp_instance
            if i == 0:
                step_input_file = fits_input_uncal_file
            else:
                # check if step was completed and find the appropriate input
                # file: walk backwards through prior products, skipping any
                # whose completion keyword says SKIPPED.
                j = 1
                continue_while = True
                while continue_while:
                    step_input_file = output_names[i-j]
                    if (i-j == 0):
                        step_input_file = fits_input_uncal_file
                        break
                    if i == len(output_names)-1:
                        # last step consumes the ramp_fit product, whatever
                        # its exact file name ended up being
                        step_input_file = glob("*ramp*fit.fits")[0]
                        break
                    if os.path.isfile(step_input_file):
                        completion_key_val = fits.getval(step_input_file, comp_keys[i-j])
                        msg = "Checking for next step... "
                        completion_keywd_msg = " * Completion keyword: "+comp_keys[i-j]+" and value: "+completion_key_val
                        print(msg)
                        print(completion_keywd_msg)
                        logging.info(msg)
                        logging.info(completion_keywd_msg)
                        if "SKIPPED" in completion_key_val:
                            j += 1
                        elif "COMPLETE" in completion_key_val:
                            continue_while = False

            running_stp_msg = "\n-> Running step: "+str(stp)+" with input file: "+step_input_file
            output_msg = " output will be saved as: "+output_names[i]
            logging.info(running_stp_msg)
            logging.info(output_msg)

            # start the timer to compute the step running time
            start_time = time.time()
            if "ramp" not in output_names[i]:
                result = stp.call(step_input_file)
                result.save(output_names[i])
            else:
                # the pipeline works differently for the ramp_fit step because it has more than one output
                # this step is also hanging from the command line
                #subprocess.call(["strun", "jwst.ramp_fitting.RampFitStep", "jump.fits"])
                (out_slope, int_slope) = stp.call(step_input_file)
                out_slope.save(output_names[i])
                try:
                    int_slope.save("ramp_fit_int.fits")
                except AttributeError:
                    msg = "File has only 1 integration."
                    print(msg)
                    logging.info(msg)

            # end the timer to compute cal_detector1 running time
            et = time.time() - start_time  # this is in seconds
            end_time = repr(et)
            end_time_total.append(et)
            step = output_names[i].replace(".fits", "")
            msg = " * calwebb_detector1 step "+step+" took "+end_time+" seconds to finish * \n"
            print(msg)
            logging.info(msg)
            if et > 60.0:
                end_time_min = round(et / 60.0, 1)  # in minutes
                end_time = repr(end_time_min)+"min"
                if end_time_min > 60.0:
                    end_time_hr = round(end_time_min / 60.0, 1)  # in hrs
                    end_time = repr(end_time_hr)+"hr"
            else:
                end_time = repr(round(et, 1))+"sec"

            # record results in text file
            line2write = "{:<18} {:<20} {:<20}".format(step, output_names[i], end_time)
            with open(txt_outputs_summary, "a") as tf:
                tf.write(line2write+"\n")

        # record total time in text file
        tot_time_sec = sum(end_time_total)
        if tot_time_sec > 60.0:
            tot_time_min = round((tot_time_sec/60.0), 1)
            tot_time = repr(tot_time_min)+"min"
            if tot_time_min > 60.0:
                tot_time_hr = round((tot_time_min/60.0), 1)
                tot_time = repr(tot_time_hr)+"hr"
        else:
            # NOTE(review): this branch divides a seconds total by 60 and
            # leaves tot_time as a float (no "sec" suffix), unlike the
            # analogous branches above; the string concatenation
            # `" ="+tot_time` below would then raise TypeError -- looks
            # like a latent bug, confirm before relying on the <=60s path.
            tot_time = round((tot_time_sec/60.0), 1)
        total_time = "{:<18} {:>20} {:>20}".format("", "total_time ", repr(tot_time_sec)+" ="+tot_time)

    # record total time in text file
    with open(txt_outputs_summary, "a") as tf:
        tf.write(total_time+"\n")
    msg = "\n ** Calwebb_detector 1 took "+repr(tot_time)+" to complete **"
    print(msg)
    logging.info(msg)

    # Move products to working dir

    # rename and move the pipeline log file
    new_name = "caldetector1_pipeline_"+detector+".log"
    if os.path.isfile(caldetector1_pipeline_log):
        os.rename(caldetector1_pipeline_log, os.path.join(output_dir, new_name))

    # move the PTT log file and the fits intermediary product files
    if os.getcwd() != output_dir:
        fits_list = glob("*.fits")
        if len(fits_list) >= 1:
            msg = "Output fits files are located at: "+output_dir
            print(msg)
            logging.info(msg)
            for ff in fits_list:
                if "step_" in ff:
                    ff_newname = os.path.join(output_dir, ff.replace("step_", ""))
                else:
                    ff_newname = os.path.join(output_dir, ff)
                # tag the detector name onto products that do not carry it
                if detector.lower() not in ff.lower():
                    ff_newname = ff_newname.replace(".fits", "_"+detector+".fits")
                subprocess.run(["mv", ff, ff_newname])
            # move text files too
            subprocess.run(["mv", txt_outputs_summary, output_dir])
        else:
            msg = "No fits files detected after calwbb_detector1 finished. Exiting script."
            print(msg)
            logging.info(msg)

    msg = "Script run_cal_detector1.py finished."
    print(msg)
    logging.info(msg)
def test_ramp_fit_miri2(_bigdata):
    """
    Regression test of ramp_fit step performed on MIRI data.
    """
    suffix = 'rampfit'
    output_file_base, output_files = add_suffix('rampfit2_output.fits', suffix,
                                                list(range(2)))

    RampFitStep.call(
        _bigdata + '/miri/test_ramp_fit/jw80600012001_02101_00003_mirimage_jump.fits',
        save_opt=True,
        opt_name='rampfit2_opt_out.fits',
        output_file=output_file_base,
        suffix=suffix)

    ignored = ['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX']
    image_exts = ['primary', 'sci', 'err', 'dq']

    def _assert_matches_reference(out_name, ref_name, ext_names):
        # Compare the selected extensions of a product against a reference.
        got = fits.open(out_name)
        ref = fits.open(ref_name)
        diff = fits.diff.FITSDiff(
            fits.HDUList([got[name] for name in ext_names]),
            fits.HDUList([ref[name] for name in ext_names]),
            ignore_keywords=ignored, rtol=0.00001)
        assert diff.identical, diff.report()

    # compare primary output
    _assert_matches_reference(
        output_files[0],
        _bigdata + '/miri/test_ramp_fit/jw80600012001_02101_00003_mirimage_ramp.fits',
        image_exts)

    # compare integration-specific output
    _assert_matches_reference(
        output_files[1],
        _bigdata + '/miri/test_ramp_fit/jw80600012001_02101_00003_mirimage_int.fits',
        image_exts)

    # compare optional output
    _assert_matches_reference(
        'rampfit2_opt_out_fitopt.fits',
        _bigdata + '/miri/test_ramp_fit/jw80600012001_02101_00003_mirimage_opt.fits',
        ['primary', 'slope', 'sigslope', 'yint', 'sigyint', 'pedestal',
         'weights', 'crmag'])