def test_dark_current_niriss(_bigdata):
    """
    Regression test of dark current step performed on NIRISS data.
    """
    suffix = 'dark_current'
    output_file_base, output_file = add_suffix('darkcurrent1_output.fits',
                                               suffix)

    DarkCurrentStep.call(
        _bigdata +
        '/niriss/test_dark_step/jw00034001001_01101_00001_NIRISS_saturation.fits',
        output_file=output_file_base,
        suffix=suffix)

    # Compare the step output against the reference product, keeping only the
    # science-relevant extensions and ignoring run-dependent header keywords.
    extensions = ['primary', 'sci', 'err', 'pixeldq', 'groupdq']
    result_hdul = fits.open(output_file)
    ref_hdul = fits.open(
        _bigdata +
        '/niriss/test_dark_step/jw00034001001_01101_00001_NIRISS_dark_current.fits'
    )
    trimmed = fits.HDUList([result_hdul[ext] for ext in extensions])
    trimmed_ref = fits.HDUList([ref_hdul[ext] for ext in extensions])
    diff = fits.diff.FITSDiff(
        trimmed,
        trimmed_ref,
        ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
        rtol=0.00001)
    assert diff.identical, diff.report()
def test_dark_current_miri2(_bigdata):
    """
    Regression test of dark current step performed on MIRI data.
    """
    suffix = 'dark_current'
    output_file_base, output_file = add_suffix('darkcurrent2_output.fits',
                                               suffix)

    DarkCurrentStep.call(
        _bigdata +
        '/miri/test_dark_step/jw80600012001_02101_00003_mirimage_lastframe.fits',
        output_file=output_file_base,
        suffix=suffix)

    # Compare the step output against the reference product, keeping only the
    # science-relevant extensions and ignoring run-dependent header keywords.
    extensions = ['primary', 'sci', 'err', 'pixeldq', 'groupdq']
    result_hdul = fits.open(output_file)
    ref_hdul = fits.open(
        _bigdata +
        '/miri/test_dark_step/jw80600012001_02101_00003_mirimage_dark.fits')
    trimmed = fits.HDUList([result_hdul[ext] for ext in extensions])
    trimmed_ref = fits.HDUList([ref_hdul[ext] for ext in extensions])
    diff = fits.diff.FITSDiff(
        trimmed,
        trimmed_ref,
        ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
        rtol=0.00001)
    assert diff.identical, diff.report()
Beispiel #3
0
def test_dark_current_miri(_bigdata):
    """
    Regression test of dark current step performed on MIRI data.

    Runs DarkCurrentStep on a bias-drift-corrected input and compares the
    science-relevant extensions of the output against a reference product.
    """
    suffix = 'dark_current'
    output_file_base, output_file = add_suffix('darkcurrent1_output.fits',
                                               suffix)

    # Remove any leftover output from a previous run; a missing file is fine.
    # Narrowed from a bare `except:` so real errors (e.g. KeyboardInterrupt)
    # are not silently swallowed.
    try:
        os.remove(output_file)
    except OSError:
        pass

    DarkCurrentStep.call(
        _bigdata +
        '/miri/test_dark_step/jw00001001001_01101_00001_MIRIMAGE_bias_drift.fits',
        output_file=output_file_base,
        suffix=suffix)
    h = pf.open(output_file)
    href = pf.open(
        _bigdata +
        '/miri/test_dark_step/jw00001001001_01101_00001_MIRIMAGE_dark_current.fits'
    )
    # Compare only the science-relevant extensions, ignoring header keywords
    # that legitimately differ between runs.
    newh = pf.HDUList(
        [h['primary'], h['sci'], h['err'], h['pixeldq'], h['groupdq']])
    newhref = pf.HDUList([
        href['primary'], href['sci'], href['err'], href['pixeldq'],
        href['groupdq']
    ])
    result = pf.diff.FITSDiff(
        newh,
        newhref,
        ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
        rtol=0.00001)
    assert result.identical, result.report()
Beispiel #4
0
def test_dark_current_nirspec(_bigdata):
    """
    Regression test of dark current step performed on NIRSpec data.

    Runs DarkCurrentStep on a saturation-flagged input and compares the
    science-relevant extensions of the output against a reference product.
    """
    output_file_base, output_file = add_suffix('darkcurrent1_output.fits',
                                               'dark_current')

    # Remove any leftover output from a previous run; a missing file is fine.
    # Narrowed from a bare `except:` so real errors (e.g. KeyboardInterrupt)
    # are not silently swallowed.
    try:
        os.remove(output_file)
    except OSError:
        pass

    DarkCurrentStep.call(
        _bigdata +
        '/nirspec/test_dark_step/jw00023001001_01101_00001_NRS1_saturation.fits',
        output_file=output_file_base,
        name='dark_current')
    h = pf.open(output_file)
    href = pf.open(
        _bigdata +
        '/nirspec/test_dark_step/jw00023001001_01101_00001_NRS1_dark_current.fits'
    )
    # Compare only the science-relevant extensions, ignoring header keywords
    # that legitimately differ between runs.
    newh = pf.HDUList(
        [h['primary'], h['sci'], h['err'], h['pixeldq'], h['groupdq']])
    newhref = pf.HDUList([
        href['primary'], href['sci'], href['err'], href['pixeldq'],
        href['groupdq']
    ])
    result = pf.diff.FITSDiff(
        newh,
        newhref,
        ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
        rtol=0.00001)
    assert result.identical, result.report()
Beispiel #5
0
def test_basic_step(make_rampmodel, make_darkmodel):
    """
    Same as test_more_sci_frames above, but done calling the step code.
    """
    # integration dimensions: (integrations, groups, rows, cols)
    nints, ngroups, nrows, ncols = 1, 10, 200, 200

    # build the raw science ramp used as step input
    dm_ramp = make_rampmodel(nints, ngroups, nrows, ncols)
    dm_ramp.meta.exposure.nframes = 1
    dm_ramp.meta.exposure.groupgap = 0

    # fill the science cube: each group carries its own index as the signal
    for group in range(ngroups - 1):
        dm_ramp.data[0, group] = group

    # dark reference model with more groups than the science data
    refgroups = 15
    dark = make_darkmodel(refgroups, nrows, ncols)

    # fill the reference cube with a distinct, scaled ramp
    for group in range(refgroups - 1):
        dark.data[0, group] = group * 0.1

    dark_model = DarkCurrentStep.call(dm_ramp, override_dark=dark)

    assert dark_model.meta.cal_step.dark_sub == "COMPLETE"

    outdata = np.squeeze(dark_model.data)

    # expected result: dark subtracted group by group from the science data
    expected = dm_ramp.data[0] - dark.data[0, :ngroups]

    # the step output must equal the direct sci-minus-dark difference
    np.testing.assert_array_equal(
        outdata,
        expected,
        err_msg='dark file should be subtracted from sci file ')
Beispiel #6
0
        line2write = "{:<18} {:<20} {:<20}".format(outnm.replace(".fits", ""),
                                                   outnm, "")
        with open(txt_outputs_summary, "a") as tf:
            tf.write(line2write + "\n")
else:
    # steps to be ran, in order
    steps_to_run = [
        GroupScaleStep(),
        DQInitStep(),
        SaturationStep(),
        SuperBiasStep(),
        RefPixStep(),
        RSCD_Step(),
        LastFrameStep(),
        LinearityStep(),
        DarkCurrentStep(),
        JumpStep(),
        RampFitStep(),
        GainScaleStep()
    ]
    comp_keys = [
        "S_GRPSCL", "S_DQINIT", "S_SATURA", "S_SUPERB", "S_REFPIX", "S_RSCD",
        "S_LASTFR", "S_LINEAR", "S_DARK", "S_JUMP", "S_RAMP", "S_GANSCL"
    ]

    # run the pipeline step by step
    for i, stp_instance in enumerate(steps_to_run):
        stp = stp_instance
        if i == 0:
            step_input_file = fits_input_uncal_file
        else:
Beispiel #7
0
def run_caldet1(fits_input_uncal_file, step_by_step=False):
    """
    Run the calwebb_detector1 pipeline on an uncalibrated exposure.

    Parameters
    ----------
    fits_input_uncal_file : str
        Path to the '_uncal' FITS file to process.
    step_by_step : bool
        If True, run each pipeline step individually, saving every
        intermediary product; if False, run the full pipeline in one call.

    Side effects
    ------------
    Writes intermediary FITS products, a per-step timing summary text file,
    and PTT/pipeline log files, then moves them to the configured output
    directory. Exits the interpreter if the input file does not exist.
    """
    if not os.path.isfile(fits_input_uncal_file):
        print("Input file not found in the current directory. Unable to proceed, exiting script.")
        exit()

    # Get the detector used
    detector = fits.getval(fits_input_uncal_file, "DETECTOR", 0)

    # Get the cfg file; the configuration chosen depends on the observing mode
    calwebb_detector1_cfg, calwebb_tso1_cfg, calwebb_dark_cfg, output_dir, mode_used, rawdatrt = get_caldet1cfg_and_workingdir()
    if mode_used != "BOTS" and mode_used.lower() != "dark":
        cfg_file = calwebb_detector1_cfg
    elif mode_used == "BOTS":
        cfg_file = calwebb_tso1_cfg
    elif mode_used.lower() == "dark":
        cfg_file = calwebb_dark_cfg
    configfile_used = "Using this configuration file: "+cfg_file

    # Initiate the PTT log file (remove pre-existing handlers so basicConfig
    # actually takes effect on re-runs)
    PTTcaldetector1_log = os.path.join(output_dir, 'PTT_caldetector1_'+detector+'.log')
    print("Information outputed to screen from this script will be logged in file: ", PTTcaldetector1_log)
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(filename=PTTcaldetector1_log, level=logging.DEBUG)

    print(configfile_used)
    logging.info(configfile_used)

    # create the text file to record the names of the output files and the time the pipeline took to run
    txt_outputs_summary = "cal_detector1_outputs_and_times_"+detector+".txt"
    end_time_total = []
    line0 = "# {:<20}".format("Input file: "+fits_input_uncal_file)
    line1 = "# {:<16} {:<19} {:<20}".format("Step", "Output file", "Time to run [s]")
    with open(txt_outputs_summary, "w+") as tf:
        tf.write(line0+"\n")
        tf.write(line1+"\n")

    # Name of the file containing all the pipeline output
    caldetector1_pipeline_log = "pipeline.log"

    # copy the configuration file to create the pipeline log
    stpipelogcfg = calwebb_detector1_cfg.replace("calwebb_detector1.cfg", "stpipe-log.cfg")
    subprocess.run(["cp", stpipelogcfg, "."])

    #final_output_caldet1 = "gain_scale.fits"
    final_output_caldet1 = "final_output_caldet1_"+detector+".fits"
    output_names = ["group_scale.fits", "dq_init.fits", "saturation.fits", "superbias.fits", "refpix.fits",
                    "lastframe.fits", "linearity.fits", "dark_current.fits", "jump.fits", "ramp_fit.fits",
                    final_output_caldet1]

    if not step_by_step:
        print("Got arguments and will run the calwebb_detector1 pipeline in full. This may take a while...")

        # start the timer to compute the step running time
        start_time = time.time()
        result = Detector1Pipeline.call(fits_input_uncal_file, config_file=cfg_file)
        result.save(final_output_caldet1)

        # end the timer to compute pipeline running time
        end_time = time.time() - start_time  # this is in seconds
        time2finish_string = " * calwebb_detector1 took "+repr(end_time)+" seconds to finish *"
        print(time2finish_string)
        logging.info(time2finish_string)
        if end_time > 60.0:
            end_time_min = round(end_time / 60.0, 1)   # in minutes
            tot_time = repr(end_time_min)+"min"
            if end_time_min > 60.0:
                end_time_hr = round(end_time_min / 60.0, 1)   # in hrs
                tot_time = repr(end_time_hr)+"hr"
        else:
            tot_time = str(round(end_time, 1))+"sec"
        total_time = "{:<18} {:<20} {:<20}".format("", "total_time = ", repr(end_time)+"  ="+tot_time)

        # get the running time for the individual steps
        if os.path.isfile(caldetector1_pipeline_log):
            step_running_times = calculate_step_run_time(caldetector1_pipeline_log, output_names)

            # write step running times in the text file
            end_time_list = []
            for outnm in output_names:
                stp = outnm.replace(".fits", "")
                if stp in step_running_times:
                    step_time = step_running_times[stp]["run_time"]
                else:
                    step_time = "N/A"
                end_time_list.append(step_time)
                line2write = "{:<18} {:<20} {:<20}".format(stp, outnm, step_time)
                with open(txt_outputs_summary, "a") as tf:
                    tf.write(line2write+"\n")
        else:
            print("No pipeline.log found. Unable to record times per step.")

    else:
        print("Got arguments and will run the calwebb_detector1 pipeline step by step.")

        # steps to be ran, in order
        steps_to_run = [GroupScaleStep(), DQInitStep(), SaturationStep(), SuperBiasStep(), RefPixStep(),
                        LastFrameStep(), LinearityStep(), DarkCurrentStep(), JumpStep(), RampFitStep(), GainScaleStep()]
        comp_keys = ["S_GRPSCL", "S_DQINIT", "S_SATURA", "S_SUPERB", "S_REFPIX", "S_LASTFR", "S_LINEAR",
                     "S_DARK", "S_JUMP", "S_RAMP", "S_GANSCL"]

        # run the pipeline step by step
        for i, stp_instance in enumerate(steps_to_run):
            stp = stp_instance
            if i == 0:
                step_input_file = fits_input_uncal_file
            else:
                # check if step was completed and find the appropriate input file;
                # walk backwards (j grows) past any SKIPPED steps
                j = 1
                continue_while = True
                while continue_while:
                    step_input_file = output_names[i-j]
                    if (i-j == 0):
                        step_input_file = fits_input_uncal_file
                        break
                    if i == len(output_names)-1:
                        step_input_file = glob("*ramp*fit.fits")[0]
                        break
                    if os.path.isfile(step_input_file):
                        completion_key_val = fits.getval(step_input_file, comp_keys[i-j])
                        msg = "Checking for next step... "
                        completion_keywd_msg = " * Completion keyword: "+comp_keys[i-j]+"   and value: "+completion_key_val
                        print(msg)
                        print(completion_keywd_msg)
                        logging.info(msg)
                        logging.info(completion_keywd_msg)
                        if "SKIPPED" in completion_key_val:
                            j += 1
                        elif "COMPLETE" in completion_key_val:
                            continue_while = False
            running_stp_msg = "\n-> Running step: "+str(stp)+"   with input file: "+step_input_file
            output_msg = "   output will be saved as: "+output_names[i]
            logging.info(running_stp_msg)
            logging.info(output_msg)

            # start the timer to compute the step running time
            start_time = time.time()

            if "ramp" not in output_names[i]:
                result = stp.call(step_input_file)
                result.save(output_names[i])
            else:
                # the pipeline works differently for the ramp_fit step because it has more than one output
                # this step is also hanging from the command line
                #subprocess.call(["strun", "jwst.ramp_fitting.RampFitStep", "jump.fits"])
                (out_slope, int_slope) = stp.call(step_input_file)
                out_slope.save(output_names[i])
                try:
                    int_slope.save("ramp_fit_int.fits")
                except AttributeError:
                    msg = "File has only 1 integration."
                    print(msg)
                    logging.info(msg)

            # end the timer to compute cal_detector1 running time
            et = time.time() - start_time   # this is in seconds
            end_time = repr(et)
            end_time_total.append(et)
            step = output_names[i].replace(".fits", "")
            msg = " * calwebb_detector1 step "+step+" took "+end_time+" seconds to finish * \n"
            print(msg)
            logging.info(msg)
            if et > 60.0:
                end_time_min = round(et / 60.0, 1)   # in minutes
                end_time = repr(end_time_min)+"min"
                if end_time_min > 60.0:
                    end_time_hr = round(end_time_min / 60.0, 1)   # in hrs
                    end_time = repr(end_time_hr)+"hr"
            else:
                end_time = repr(round(et, 1))+"sec"

            # record results in text file
            line2write = "{:<18} {:<20} {:<20}".format(step, output_names[i], end_time)
            with open(txt_outputs_summary, "a") as tf:
                tf.write(line2write+"\n")

        # record total time in text file
        tot_time_sec = sum(end_time_total)
        if tot_time_sec > 60.0:
            tot_time_min = round((tot_time_sec/60.0), 1)
            tot_time = repr(tot_time_min)+"min"
            if tot_time_min > 60.0:
                tot_time_hr = round((tot_time_min/60.0), 1)
                tot_time = repr(tot_time_hr)+"hr"
        else:
            # BUGFIX: previously this divided the sub-minute total by 60 and
            # left a unitless float; report seconds with a unit, matching the
            # per-step and full-run branches above.
            tot_time = repr(round(tot_time_sec, 1))+"sec"
        total_time = "{:<18} {:>20} {:>20}".format("", "total_time  ", repr(tot_time_sec)+"  ="+tot_time)

    # record total time in text file
    with open(txt_outputs_summary, "a") as tf:
        tf.write(total_time+"\n")
    msg = "\n ** Calwebb_detector 1 took "+repr(tot_time)+" to complete **"
    print(msg)
    logging.info(msg)

    # Move products to working dir

    # rename and move the pipeline log file
    new_name = "caldetector1_pipeline_"+detector+".log"
    if os.path.isfile(caldetector1_pipeline_log):
        os.rename(caldetector1_pipeline_log, os.path.join(output_dir, new_name))

    # move the PTT log file and the fits intermediary product files
    if os.getcwd() != output_dir:
        fits_list = glob("*.fits")
        if len(fits_list) >= 1:
            msg = "Output fits files are located at: "+output_dir
            print(msg)
            logging.info(msg)
            for ff in fits_list:
                if "step_" in ff:
                    ff_newname = os.path.join(output_dir, ff.replace("step_", ""))
                else:
                    ff_newname = os.path.join(output_dir, ff)
                if detector.lower() not in ff.lower():
                    ff_newname = ff_newname.replace(".fits", "_"+detector+".fits")
                subprocess.run(["mv", ff, ff_newname])
            # move text files too
            subprocess.run(["mv", txt_outputs_summary, output_dir])

        else:
            msg = "No fits files detected after calwbb_detector1 finished. Exiting script."
            print(msg)
            logging.info(msg)

    msg = "Script  run_cal_detector1.py  finished."
    print(msg)
    logging.info(msg)