def run_pipeline(filename, outdir='./'):
    """Run calwebb_detector1 on the input file.

    The pipeline call is skipped entirely if both the linearity and the
    rate products already exist on disk.

    Parameters
    ----------
    filename : str
        Name of input raw (``*_uncal``) file

    outdir : str
        Directory into which the pipeline products are saved

    Returns
    -------
    rate_file : str
        Name of fits file with slope image
    """
    lin_file = filename.replace('_uncal', '_linearity')
    rate_file = filename.replace('_uncal', '_rate')

    # Only invoke the (expensive) pipeline when a product is missing
    if not os.path.isfile(lin_file) or not os.path.isfile(rate_file):
        cal = Detector1Pipeline()
        cal.dark_current.skip = True
        cal.persistence.skip = True

        # Keep the intermediate linearity-corrected product
        cal.linearity.save_results = True

        # Raise the CR rejection threshold; use multiple cores for jump
        cal.jump.rejection_threshold = 9
        cal.jump.maximum_cores = 'quarter'

        cal.save_results = True
        cal.output_dir = outdir
        cal.run(filename)

    # Return the slope-image name the docstring promises (was missing)
    return rate_file
# --- Example 2 ---
    def test_fgs_detector1_1(self):
        """
        Regression test of calwebb_detector1 pipeline performed on FGS imaging mode data.
        """
        input_file = self.get_data(
            'test_sloperpipeline',
            'jw86500007001_02101_00001_GUIDER2_uncal.fits')

        pipe = Detector1Pipeline()

        # Skip IPC and persistence; tune the refpix and jump steps.
        pipe.ipc.skip = True
        pipe.persistence.skip = True
        for attr, value in (('odd_even_columns', True),
                            ('use_side_ref_pixels', True),
                            ('side_smoothing_length', 11),
                            ('side_gain', 1.0),
                            ('odd_even_rows', True)):
            setattr(pipe.refpix, attr, value)
        pipe.jump.rejection_threshold = 250.0
        pipe.ramp_fit.save_opt = False
        pipe.save_calibrated_ramp = True
        pipe.output_file = 'jw86500007001_02101_00001_GUIDER2_rate.fits'

        pipe.run(input_file)

        # (product, reference, extensions-to-compare) triplets.
        ramp_exts = ['primary', 'sci', 'err', 'groupdq', 'pixeldq']
        image_exts = ['primary', 'sci', 'err', 'dq']
        outputs = [('jw86500007001_02101_00001_GUIDER2_ramp.fits',
                    'jw86500007001_02101_00001_GUIDER2_ramp_ref.fits',
                    ramp_exts),
                   ('jw86500007001_02101_00001_GUIDER2_rateints.fits',
                    'jw86500007001_02101_00001_GUIDER2_rateints_ref.fits',
                    image_exts),
                   ('jw86500007001_02101_00001_GUIDER2_rate.fits',
                    'jw86500007001_02101_00001_GUIDER2_rate_ref.fits',
                    image_exts)]
        self.compare_outputs(outputs)
# --- Example 3 ---
def _compare_fits_products(out_file, ref_file, extensions):
    """Diff the listed extensions of a pipeline product against its reference.

    Raises an AssertionError carrying the FITSDiff report on mismatch.
    Files are opened as context managers so handles are always closed
    (the inline originals leaked them).
    """
    with pf.open(out_file) as h, pf.open(ref_file) as href:
        newh = pf.HDUList([h[ext] for ext in extensions])
        newhref = pf.HDUList([href[ext] for ext in extensions])
        result = pf.diff.FITSDiff(
            newh,
            newhref,
            ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
            rtol=0.00001)
        assert result.identical, result.report()


def test_fgs_detector1_1(_bigdata):
    """
    Regression test of calwebb_detector1 pipeline performed on FGS imaging mode data.
    """
    pipe = Detector1Pipeline()
    pipe.ipc.skip = True
    pipe.refpix.odd_even_columns = True
    pipe.refpix.use_side_ref_pixels = True
    pipe.refpix.side_smoothing_length = 11
    pipe.refpix.side_gain = 1.0
    pipe.refpix.odd_even_rows = True
    pipe.jump.rejection_threshold = 250.0
    pipe.persistence.skip = True
    pipe.ramp_fit.save_opt = False
    pipe.save_calibrated_ramp = True
    pipe.output_file = 'jw86500007001_02101_00001_GUIDER2_rate.fits'

    pipe.run(_bigdata+'/fgs/test_sloperpipeline/jw86500007001_02101_00001_GUIDER2_uncal.fits')

    # Compare calibrated ramp product
    _compare_fits_products(
        'jw86500007001_02101_00001_GUIDER2_ramp.fits',
        _bigdata+'/fgs/test_sloperpipeline/jw86500007001_02101_00001_GUIDER2_ramp_ref.fits',
        ['primary', 'sci', 'err', 'groupdq', 'pixeldq'])

    # Compare multi-integration countrate image product
    _compare_fits_products(
        'jw86500007001_02101_00001_GUIDER2_rateints.fits',
        _bigdata+'/fgs/test_sloperpipeline/jw86500007001_02101_00001_GUIDER2_rateints_ref.fits',
        ['primary', 'sci', 'err', 'dq'])

    # Compare countrate image product
    _compare_fits_products(
        'jw86500007001_02101_00001_GUIDER2_rate.fits',
        _bigdata+'/fgs/test_sloperpipeline/jw86500007001_02101_00001_GUIDER2_rate_ref.fits',
        ['primary', 'sci', 'err', 'dq'])
def run_calwebb_detector1(filename):
    """Run calwebb_detector1 on one uncal file with project-specific tweaks."""
    m = Detector1Pipeline(
        config_file='pipeline_config_files/calwebb_detector1.cfg')

    # Parameter/reference-file adjustments
    m.refpix.odd_even_rows = False

    # jump step is way too sensitive
    m.jump.rejection_threshold = 91

    # Steps that should not run for this data
    for step_name in ('group_scale', 'ipc', 'rscd', 'firstframe', 'lastframe'):
        getattr(m, step_name).skip = True

    # Output naming: mirror the input name with the rate suffix
    m.save_results = True
    m.output_dir = '/ifs/jwst/wit/nircam/simulationWG/Imaging/CAR-19/Pipeline_Level1/'
    m.output_file = os.path.basename(filename.replace('_uncal', '_rate'))

    # Execute the pipeline and report where the product landed
    m.run(filename)
    print('')
    print("Done running CALDETECTOR1 on {}".format(filename))
    print("Output saved to {}".format(os.path.join(m.output_dir,
                                                   m.output_file)))
    print('')
def run_pipeline(files, flat_field=True):
    """
    Runs all stages of the JWST Pipeline on uncalibrated imaging data.

    Parameters
    ----------
    files : list
        The files to run the pipeline on.

    flat_field : bool
        Option to run the flat field step in image2.

    Returns
    -------
    outname : str
        The name of the final i2d drizzled image.
    """

    # Create a name for image3 association and drizzled files.
    # Read the header once instead of re-opening the file per keyword.
    header = fits.getheader(files[0])
    detector = header['DETECTOR'].lower()
    fltr = header['FILTER'].lower()
    pupil = header['PUPIL'].lower()
    target = header['TARGNAME'].lower()
    name = '{}_{}_{}_{}'.format(detector, fltr, pupil, target)

    # Run detector1
    for f in files:
        m = Detector1Pipeline()
        #m.jump.override_readnoise = 'jwst_nircam_readnoise_0024_psub.fits'
        m.refpix.odd_even_rows = False  # only for MIRI
        m.ipc.skip = True
        m.persistence.skip = True
        m.save_results = True
        m.output_dir = os.getcwd()
        m.run(f)

    # Run image2 on the rate products from detector1
    files = [f.replace('_uncal.fits', '_rate.fits') for f in files]
    for f in files:
        m = Image2Pipeline()
        if not flat_field:
            m.flat_field.skip = True
        m.save_results = True
        m.output_dir = os.getcwd()
        m.run(f)

    # Run image3 on an association of the cal products.
    # dump() returns (serialized_name, serialized_contents); write the contents.
    files = [f.replace('_rate.fits', '_cal.fits') for f in files]
    asn = asn_from_list(files, rule=Asn_Lv3Image, product_name=name)
    asn_file = '{}.json'.format(name)
    with open(asn_file, 'w') as asn_fh:  # renamed: no longer shadows loop var f
        asn_fh.write(asn.dump()[1])
    m = Image3Pipeline()
    m.save_results = True
    m.output_dir = os.getcwd()
    m.run(asn_file)

    return '{}_i2d.fits'.format(name)
# --- Example 6 ---
def _diff_against_ref(out_file, ref_file, extensions):
    """Assert that the listed extensions of a product match its reference file.

    Opens both files as context managers (the inline originals leaked
    open handles) and raises AssertionError with the FITSDiff report
    when they differ.
    """
    with pf.open(out_file) as h, pf.open(ref_file) as href:
        newh = pf.HDUList([h[ext] for ext in extensions])
        newhref = pf.HDUList([href[ext] for ext in extensions])
        result = pf.diff.FITSDiff(
            newh,
            newhref,
            ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
            rtol=0.00001)
        assert result.identical, result.report()


def test_niriss_detector1(_bigdata):
    """
    Regression test of calwebb_detector1 pipeline performed on NIRISS data.
    """
    step = Detector1Pipeline()
    step.save_calibrated_ramp = True
    step.ipc.skip = True
    step.persistence.skip = True
    step.refpix.odd_even_columns = True
    step.refpix.use_side_ref_pixels = True
    step.refpix.side_smoothing_length = 11
    step.refpix.side_gain = 1.0
    step.refpix.odd_even_rows = True
    step.jump.rejection_threshold = 250.0
    step.ramp_fit.save_opt = False
    step.ramp_fit.suffix = 'ramp'
    step.output_file = 'jw00034001001_01101_00001_NIRISS_rate.fits'

    step.run(_bigdata +
             '/pipelines/jw00034001001_01101_00001_NIRISS_uncal.fits')

    # Compare ramp product
    _diff_against_ref(
        'jw00034001001_01101_00001_NIRISS_ramp.fits',
        _bigdata + '/pipelines/jw00034001001_01101_00001_NIRISS_ramp_ref.fits',
        ['primary', 'sci', 'err', 'groupdq', 'pixeldq'])

    # Compare countrate image product
    _diff_against_ref(
        'jw00034001001_01101_00001_NIRISS_rate.fits',
        _bigdata + '/pipelines/jw00034001001_01101_00001_NIRISS_rate_ref.fits',
        ['primary', 'sci', 'err', 'dq'])
def _compare_nrs_product(out_file, ref_file, extensions):
    """Diff the listed extensions of a pipeline product against its reference.

    Raises AssertionError with the FITSDiff report on mismatch. Both
    files are closed via context managers.
    """
    with fits.open(out_file) as h, fits.open(ref_file) as href:
        newh = fits.HDUList([h[ext] for ext in extensions])
        newhref = fits.HDUList([href[ext] for ext in extensions])
        result = fits.diff.FITSDiff(
            newh,
            newhref,
            ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
            rtol=0.00001)
        assert result.identical, result.report()


def test_detector1pipeline4(_bigdata):
    """
    Regression test of calwebb_detector1 pipeline performed on NIRSpec data.
    """
    step = Detector1Pipeline()
    step.save_calibrated_ramp = True
    step.ipc.skip = True
    step.persistence.skip = True
    step.jump.rejection_threshold = 4.0
    step.ramp_fit.save_opt = False
    step.output_file = 'jw84600007001_02101_00001_nrs1_rate.fits'
    step.run(_bigdata + '/pipelines/jw84600007001_02101_00001_nrs1_uncal.fits')

    # Compare ramp product.
    # BUG FIX: the original built the reference HDUList with h['pixeldq']
    # (the output file's extension) instead of href['pixeldq'], so the
    # pixeldq comparison always trivially passed.
    _compare_nrs_product(
        'jw84600007001_02101_00001_nrs1_ramp.fits',
        _bigdata + '/pipelines/jw84600007001_02101_00001_nrs1_ramp_ref.fits',
        ['primary', 'sci', 'err', 'groupdq', 'pixeldq'])

    # Compare countrate image product
    _compare_nrs_product(
        'jw84600007001_02101_00001_nrs1_rate.fits',
        _bigdata + '/pipelines/jw84600007001_02101_00001_nrs1_rate_ref.fits',
        ['primary', 'sci', 'err', 'dq'])
def test_detector1pipeline1(_bigdata):
    """
    Regression test for gain_scale naming when results are requested to
    be saved for the gain_scale step.
    """

    step = Detector1Pipeline()

    # Disable every step except ramp_fit and gain_scale.
    for name in ('group_scale', 'dq_init', 'saturation', 'ipc', 'superbias',
                 'refpix', 'rscd', 'firstframe', 'lastframe', 'linearity',
                 'dark_current', 'persistence', 'jump'):
        getattr(step, name).skip = True
    step.ramp_fit.skip = False

    step.gain_scale.skip = False
    step.gain_scale.save_results = True

    expfile = 'jw00001001001_01101_00001_MIRIMAGE'
    step.run(_bigdata + '/miri/test_sloperpipeline/' + expfile + '_uncal.fits')

    files = glob('*.fits')

    # Both the exposure-level and the per-integration gain_scale products
    # must exist, and nothing else may have been written.
    for suffix in ('_gain_scale.fits', '_gain_scaleints.fits'):
        output_file = expfile + suffix
        assert output_file in files
        files.remove(output_file)

    assert not len(files)
# --- Example 9 (fragment; leading context lost in extraction) ---
#     tf.write(line1 + "\n")

# Name of the last product calwebb_detector1 writes in this configuration.
final_out = "gain_scale.fits"
# Expected output file of each pipeline step, in execution order; used
# below to report per-step run times in the summary text file.
output_names = [
    "group_scale.fits", "dq_init.fits", "saturation.fits", "superbias.fits",
    "refpix.fits", "rscd.fits", "lastframe.fits", "linearity.fits",
    "dark_current.fits", "jump.fits", "ramp_fit.fits", final_out
]

# Get and save the value of the raw data root name to add at the end of calwebb_detector1
#rawdatrt = fits.getval(fits_input_uncal_file, 'rawdatrt', 0)

if not step_by_step:
    # start the timer to compute the step running time
    start_time = time.time()
    Detector1Pipeline.call(fits_input_uncal_file,
                           config_file=calwebb_detector1_cfg)
    # end the timer to compute calwebb_spec2 running time
    end_time = time.time() - start_time  # this is in seconds
    print(" * calwebb_detector1 took " + repr(end_time) +
          " seconds to finish *")
    total_time = "{:<18} {:<20} {:<20}".format("", "total time = ",
                                               repr(end_time))
    tot_time_min = round((end_time / 60.0), 2)
    for outnm in output_names:
        line2write = "{:<18} {:<20} {:<20}".format(outnm.replace(".fits", ""),
                                                   outnm, "")
        with open(txt_outputs_summary, "a") as tf:
            tf.write(line2write + "\n")
else:
    # steps to be ran, in order
    steps_to_run = [
# --- Example 10 ---
def calwebb_detector1_save_jump(input_file,
                                output_dir,
                                ramp_fit=True,
                                save_fitopt=True):
    """Run ``calwebb_detector1`` on ``input_file``, saving the pre-``ramp_fit``
    (jump) product, and optionally the ramp-fitted slope product as well.

    Parameters
    ----------
    input_file : str
        Name of the fits file to send through the pipeline

    output_dir : str
        Directory where the pipeline products are written

    ramp_fit : bool
        When ``False`` the ``ramp_fit`` step is skipped and only the
        ``*_jump.fits`` product is made. When ``True`` the ``*jump.fits``
        product is saved and the ``ramp_fit`` step additionally produces
        a ``*rate.fits`` or ``*_rateints.fits`` file (``rateints`` when
        the exposure has more than one integration).

    save_fitopt : bool
        When ``True``, also save the file of optional ramp-fitting
        outputs.

    Returns
    -------
    jump_output : str
        Name of the saved pre-``ramp_fit`` (jump) product.

    pipe_output : str
        Name of the saved ramp-fitted product if requested, else ``None``.
    """
    input_file_only = os.path.basename(input_file)

    # Instrument name drives the instrument-specific settings below
    instrument = fits.getheader(input_file)['INSTRUME'].lower()

    # Use the full pipeline via run() so parameters can be set in code
    pipeline = Detector1Pipeline()

    # NIRCam-specific refpix configuration
    if instrument == 'nircam':
        pipeline.refpix.odd_even_rows = False

    # Default CR rejection threshold is too low
    pipeline.jump.rejection_threshold = 15

    pipeline.jump.save_results = True
    pipeline.jump.output_dir = output_dir
    jump_output = os.path.join(output_dir,
                               input_file_only.replace('uncal', 'jump'))

    # Only re-run if the jump product is not already on disk
    run_jump = not os.path.isfile(jump_output)

    # Defaults for the no-ramp_fit case; overridden below when requested
    pipe_output = None
    fitopt_output = None
    run_slope = False
    run_fitopt = False

    if ramp_fit:
        pipeline.ramp_fit.save_results = True
        pipeline.output_dir = output_dir
        pipe_output = os.path.join(
            output_dir, input_file_only.replace('uncal', '0_ramp_fit'))
        run_slope = not os.path.isfile(pipe_output)
        # save_opt mirrors the save_fitopt request
        pipeline.ramp_fit.save_opt = save_fitopt
        if save_fitopt:
            fitopt_output = os.path.join(
                output_dir, input_file_only.replace('uncal', 'fitopt'))
            run_fitopt = not os.path.isfile(fitopt_output)
    else:
        pipeline.ramp_fit.skip = True

    # Run only if at least one requested product is missing
    if run_jump or (ramp_fit and run_slope) or (save_fitopt and run_fitopt):
        pipeline.run(input_file)
    else:
        print((
            "Files with all requested calibration states for {} already present in "
            "output directory. Skipping pipeline call.".format(input_file)))

    return jump_output, pipe_output, fitopt_output
# --- Example 11 ---
def run_caldet1(fits_input_uncal_file, step_by_step=False):
    """Run the calwebb_detector1 pipeline on a raw exposure, timing it.

    Parameters
    ----------
    fits_input_uncal_file : str
        Name of the ``*_uncal`` fits file; must exist in the current
        working directory (the function exits the interpreter otherwise).

    step_by_step : bool
        If True, run each calwebb_detector1 step individually and record
        the run time of each; otherwise call the full pipeline once.

    Notes
    -----
    Side effects: writes the calibrated products, a per-step timing
    summary text file, and a PTT log file, then moves the products to
    the configured output directory.
    """
    if not os.path.isfile(fits_input_uncal_file):
        print("Input file not found in the current directory. Unable to proceed, exiting script.")
        exit()

    # Get the detector used
    detector = fits.getval(fits_input_uncal_file, "DETECTOR", 0)

    # Get the cfg file; BOTS and dark observations use dedicated configs
    calwebb_detector1_cfg, calwebb_tso1_cfg, calwebb_dark_cfg, output_dir, mode_used, rawdatrt = get_caldet1cfg_and_workingdir()
    if mode_used != "BOTS" and mode_used.lower() != "dark":
        cfg_file = calwebb_detector1_cfg
    elif mode_used == "BOTS":
        cfg_file = calwebb_tso1_cfg
    elif mode_used.lower() == "dark":
        cfg_file = calwebb_dark_cfg
    configfile_used = "Using this configuration file: "+cfg_file

    # Initiate the PTT log file
    PTTcaldetector1_log = os.path.join(output_dir, 'PTT_caldetector1_'+detector+'.log')
    print("Information outputed to screen from this script will be logged in file: ", PTTcaldetector1_log)
    # Remove any previously installed handlers so basicConfig takes effect
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(filename=PTTcaldetector1_log, level=logging.DEBUG)

    print(configfile_used)
    logging.info(configfile_used)

    # create the text file to record the names of the output files and the time the pipeline took to run
    txt_outputs_summary = "cal_detector1_outputs_and_times_"+detector+".txt"
    end_time_total = []
    line0 = "# {:<20}".format("Input file: "+fits_input_uncal_file)
    line1 = "# {:<16} {:<19} {:<20}".format("Step", "Output file", "Time to run [s]")
    with open(txt_outputs_summary, "w+") as tf:
        tf.write(line0+"\n")
        tf.write(line1+"\n")

    # Name of the file containing all the pipeline output
    caldetector1_pipeline_log = "pipeline.log"

    # copy the configuration file to create the pipeline log
    stpipelogcfg = calwebb_detector1_cfg.replace("calwebb_detector1.cfg", "stpipe-log.cfg")
    subprocess.run(["cp", stpipelogcfg, "."])

    #final_output_caldet1 = "gain_scale.fits"
    # Expected product of each step, in execution order; also used to
    # find each step's input file in step-by-step mode
    final_output_caldet1 = "final_output_caldet1_"+detector+".fits"
    output_names = ["group_scale.fits", "dq_init.fits", "saturation.fits", "superbias.fits", "refpix.fits",
                    "lastframe.fits", "linearity.fits", "dark_current.fits", "jump.fits", "ramp_fit.fits",
                    final_output_caldet1]

    if not step_by_step:
        print("Got arguments and will run the calwebb_detector1 pipeline in full. This may take a while...")

        # start the timer to compute the step running time
        start_time = time.time()
        result = Detector1Pipeline.call(fits_input_uncal_file, config_file=cfg_file)
        result.save(final_output_caldet1)

        # end the timer to compute pipeline running time
        end_time = time.time() - start_time  # this is in seconds
        time2finish_string = " * calwebb_detector1 took "+repr(end_time)+" seconds to finish *"
        print(time2finish_string)
        logging.info(time2finish_string)
        # Express total time in sec/min/hr, whichever is most readable
        if end_time > 60.0:
            end_time_min = round(end_time / 60.0, 1)   # in minutes
            tot_time = repr(end_time_min)+"min"
            if end_time_min > 60.0:
                end_time_hr = round(end_time_min / 60.0, 1)   # in hrs
                tot_time = repr(end_time_hr)+"hr"
        else:
            tot_time = str(round(end_time, 1))+"sec"
        total_time = "{:<18} {:<20} {:<20}".format("", "total_time = ", repr(end_time)+"  ="+tot_time)

        # get the running time for the individual steps
        if os.path.isfile(caldetector1_pipeline_log):
            step_running_times = calculate_step_run_time(caldetector1_pipeline_log, output_names)

            # write step running times in the text file
            end_time_list = []
            for outnm in output_names:
                stp = outnm.replace(".fits", "")
                if stp in step_running_times:
                    step_time = step_running_times[stp]["run_time"]
                else:
                    step_time = "N/A"
                end_time_list.append(step_time)
                line2write = "{:<18} {:<20} {:<20}".format(stp, outnm, step_time)
                with open(txt_outputs_summary, "a") as tf:
                    tf.write(line2write+"\n")
        else:
            print("No pipeline.log found. Unable to record times per step.")

    else:
        print("Got arguments and will run the calwebb_detector1 pipeline step by step.")

        # steps to be ran, in order
        steps_to_run = [GroupScaleStep(), DQInitStep(), SaturationStep(), SuperBiasStep(), RefPixStep(),
                        LastFrameStep(), LinearityStep(), DarkCurrentStep(), JumpStep(), RampFitStep(), GainScaleStep()]
        # Header keywords recording each step's completion status,
        # parallel to steps_to_run
        comp_keys = ["S_GRPSCL", "S_DQINIT", "S_SATURA", "S_SUPERB", "S_REFPIX", "S_LASTFR", "S_LINEAR",
                     "S_DARK", "S_JUMP", "S_RAMP", "S_GANSCL"]

        # run the pipeline step by step
        for i, stp_instance in enumerate(steps_to_run):
            stp = stp_instance
            if i == 0:
                step_input_file = fits_input_uncal_file
            else:
                # check if step was completed and find the appropriate input file:
                # walk backwards through the products until one was COMPLETEd
                # (skipping any marked SKIPPED)
                j = 1
                continue_while = True
                while continue_while:
                    step_input_file = output_names[i-j]
                    if (i-j == 0):
                        step_input_file = fits_input_uncal_file
                        break
                    if i == len(output_names)-1:
                        # last (gain_scale) step feeds from the ramp-fit product
                        step_input_file = glob("*ramp*fit.fits")[0]
                        break
                    if os.path.isfile(step_input_file):
                        completion_key_val = fits.getval(step_input_file, comp_keys[i-j])
                        msg = "Checking for next step... "
                        completion_keywd_msg = " * Completion keyword: "+comp_keys[i-j]+"   and value: "+completion_key_val
                        print(msg)
                        print(completion_keywd_msg)
                        logging.info(msg)
                        logging.info(completion_keywd_msg)
                        if "SKIPPED" in completion_key_val:
                            j += 1
                        elif "COMPLETE" in completion_key_val:
                            continue_while = False
            running_stp_msg = "\n-> Running step: "+str(stp)+"   with input file: "+step_input_file
            output_msg = "   output will be saved as: "+output_names[i]
            logging.info(running_stp_msg)
            logging.info(output_msg)

            # start the timer to compute the step running time
            start_time = time.time()

            if "ramp" not in output_names[i]:
                result = stp.call(step_input_file)
                result.save(output_names[i])
            else:
                # the pipeline works differently for the ramp_fit step because it has more than one output
                # this step is also hanging from the command line
                #subprocess.call(["strun", "jwst.ramp_fitting.RampFitStep", "jump.fits"])
                (out_slope, int_slope) = stp.call(step_input_file)
                out_slope.save(output_names[i])
                try:
                    int_slope.save("ramp_fit_int.fits")
                except AttributeError:
                    # single-integration exposures have no rateints product
                    msg = "File has only 1 integration."
                    print(msg)
                    logging.info(msg)

            # end the timer to compute cal_detector1 running time
            et = time.time() - start_time   # this is in seconds
            end_time = repr(et)
            end_time_total.append(et)
            step = output_names[i].replace(".fits", "")
            msg = " * calwebb_detector1 step "+step+" took "+end_time+" seconds to finish * \n"
            print(msg)
            logging.info(msg)
            if et > 60.0:
                end_time_min = round(et / 60.0, 1)   # in minutes
                end_time = repr(end_time_min)+"min"
                if end_time_min > 60.0:
                    end_time_hr = round(end_time_min / 60.0, 1)   # in hrs
                    end_time = repr(end_time_hr)+"hr"
            else:
                end_time = repr(round(et, 1))+"sec"

            # record results in text file
            line2write = "{:<18} {:<20} {:<20}".format(step, output_names[i], end_time)
            with open(txt_outputs_summary, "a") as tf:
                tf.write(line2write+"\n")

        # record total time in text file
        tot_time_sec = sum(end_time_total)
        if tot_time_sec > 60.0:
            tot_time_min = round((tot_time_sec/60.0), 1)
            tot_time = repr(tot_time_min)+"min"
            if tot_time_min > 60.0:
                tot_time_hr = round((tot_time_min/60.0), 1)
                tot_time = repr(tot_time_hr)+"hr"
        else:
            # NOTE(review): this branch leaves tot_time a float (minutes,
            # no unit suffix), and the string concatenation on the next
            # line would raise TypeError when total time <= 60 s — likely
            # intended to be repr(round(tot_time_sec, 1))+"sec"; confirm.
            tot_time = round((tot_time_sec/60.0), 1)
        total_time = "{:<18} {:>20} {:>20}".format("", "total_time  ", repr(tot_time_sec)+"  ="+tot_time)

    # record total time in text file
    with open(txt_outputs_summary, "a") as tf:
        tf.write(total_time+"\n")
    msg = "\n ** Calwebb_detector 1 took "+repr(tot_time)+" to complete **"
    print(msg)
    logging.info(msg)

    # Move products to working dir

    # rename and move the pipeline log file
    new_name = "caldetector1_pipeline_"+detector+".log"
    if os.path.isfile(caldetector1_pipeline_log):
        os.rename(caldetector1_pipeline_log, os.path.join(output_dir, new_name))

    # move the PTT log file and the fits intermediary product files
    if os.getcwd() != output_dir:
        fits_list = glob("*.fits")
        if len(fits_list) >= 1:
            msg = "Output fits files are located at: "+output_dir
            print(msg)
            logging.info(msg)
            for ff in fits_list:
                if "step_" in ff:
                    ff_newname = os.path.join(output_dir, ff.replace("step_", ""))
                else:
                    ff_newname = os.path.join(output_dir, ff)
                # tag files with the detector name so products don't collide
                if detector.lower() not in ff.lower():
                    ff_newname = ff_newname.replace(".fits", "_"+detector+".fits")
                subprocess.run(["mv", ff, ff_newname])
            # move text files too
            subprocess.run(["mv", txt_outputs_summary, output_dir])

        else:
            msg = "No fits files detected after calwbb_detector1 finished. Exiting script."
            print(msg)
            logging.info(msg)

    msg = "Script  run_cal_detector1.py  finished."
    print(msg)
    logging.info(msg)
# --- Example 12 ---
def calwebb_detector1_save_jump(input_file,
                                output_dir,
                                ramp_fit=True,
                                save_fitopt=True):
    """Call ``calwebb_detector1`` on the provided file, running all
    steps up to the ``ramp_fit`` step, and save the result. Optionally
    run the ``ramp_fit`` step and save the resulting slope file as well.

    Parameters
    ----------
    input_file : str
        Name of fits file to run on the pipeline

    output_dir : str
        Directory into which the pipeline outputs are saved

    ramp_fit : bool
        If ``False``, the ``ramp_fit`` step is not run. The output file
        will be a ``*_jump.fits`` file.
        If ``True``, the ``*jump.fits`` file will be produced and saved.
        In addition, the ``ramp_fit`` step will be run and a
        ``*rate.fits`` or ``*_rateints.fits`` file will be saved.
        (``rateints`` if the input file has >1 integration)

    save_fitopt : bool
        If ``True``, the file of optional outputs from the ramp fitting
        step of the pipeline is saved.

    Returns
    -------
    jump_output : str
        Name of the saved file containing the output prior to the
        ``ramp_fit`` step.

    pipe_output : str
        Name of the saved file containing the output after ramp-fitting
        is performed (if requested). Otherwise ``None``.

    fitopt_output : str
        Name of the file of optional ramp-fitting outputs (if
        requested). Otherwise ``None``.
    """
    input_file_only = os.path.basename(input_file)

    # Find the instrument used to collect the data
    datamodel = datamodels.RampModel(input_file)
    instrument = datamodel.meta.instrument.name.lower()

    # If the data pre-date jwst version 1.2.1, then they will have
    # the NUMDTHPT keyword (with string value of the number of dithers)
    # rather than the newer NRIMDTPT keyword (with an integer value of
    # the number of dithers). If so, we need to update the file here so
    # that it doesn't cause the pipeline to crash later. Both old and
    # new keywords are mapped to the model.meta.dither.dither_points
    # metadata entry. So we should be able to focus on that.
    if isinstance(datamodel.meta.dither.dither_points, str):
        # If we have a string, change it to an integer
        datamodel.meta.dither.dither_points = int(
            datamodel.meta.dither.dither_points)
    elif datamodel.meta.dither.dither_points is None:
        # If the information is missing completely, put in a dummy value
        datamodel.meta.dither.dither_points = 1

    # Switch to calling the pipeline rather than individual steps,
    # and use the run() method so that we can set parameters
    # programmatically.
    model = Detector1Pipeline()

    # For NIRCam only, disable the odd/even-row reference pixel
    # correction.
    if instrument == 'nircam':
        model.refpix.odd_even_rows = False

    # Default CR rejection threshold is too low
    model.jump.rejection_threshold = 15

    # Turn off IPC step until it is put in the right place
    model.ipc.skip = True

    model.jump.save_results = True
    model.jump.output_dir = output_dir
    jump_output = os.path.join(output_dir,
                               input_file_only.replace('uncal', 'jump'))

    # Check to see if the jump version of the requested file is already
    # present
    run_jump = not os.path.isfile(jump_output)

    if ramp_fit:
        model.ramp_fit.save_results = True
        # model.save_results = True
        model.output_dir = output_dir
        # pipe_output = os.path.join(output_dir, input_file_only.replace('uncal', 'rate'))
        pipe_output = os.path.join(
            output_dir, input_file_only.replace('uncal', '0_ramp_fit'))
        run_slope = not os.path.isfile(pipe_output)
        if save_fitopt:
            model.ramp_fit.save_opt = True
            fitopt_output = os.path.join(
                output_dir, input_file_only.replace('uncal', 'fitopt'))
            run_fitopt = not os.path.isfile(fitopt_output)
        else:
            model.ramp_fit.save_opt = False
            fitopt_output = None
            run_fitopt = False
    else:
        # ramp fitting not requested: no slope or fitopt products
        model.ramp_fit.skip = True
        pipe_output = None
        fitopt_output = None
        run_slope = False
        run_fitopt = False

    # Call the pipeline if any of the files at the requested calibration
    # states are not present in the output directory
    if run_jump or (ramp_fit and run_slope) or (save_fitopt and run_fitopt):
        # Run on the (possibly metadata-patched) datamodel, not the raw file
        model.run(datamodel)
    else:
        print((
            "Files with all requested calibration states for {} already present in "
            "output directory. Skipping pipeline call.".format(input_file)))

    return jump_output, pipe_output, fitopt_output
# --- Example 13 ---
#final_output_caldet1 = "gain_scale.fits"
# Name of the final calwebb_detector1 product, tagged with the detector name
final_output_caldet1 = "final_output_caldet1_" + detector + ".fits"
# Per-step intermediate product names, listed in detector1 pipeline order and
# ending with the final output file
output_names = [
    "group_scale.fits", "dq_init.fits", "saturation.fits", "superbias.fits",
    "refpix.fits", "lastframe.fits", "linearity.fits", "dark_current.fits",
    "jump.fits", "ramp_fit.fits", final_output_caldet1
]

# Full-pipeline mode: run all of calwebb_detector1 in a single call and time it
if not step_by_step:
    print(
        "Got arguments and will run the calwebb_detector1 pipeline in full. This may take a while..."
    )

    # start the timer to compute the step running time
    start_time = time.time()
    # Run the whole detector1 pipeline at once, configured from cfg_file
    result = Detector1Pipeline.call(fits_input_uncal_file,
                                    config_file=cfg_file)
    result.save(final_output_caldet1)

    # end the timer to compute pipeline running time
    end_time = time.time() - start_time  # this is in seconds
    time2finish_string = " * calwebb_detector1 took " + repr(
        end_time) + " seconds to finish *"
    print(time2finish_string)
    logging.info(time2finish_string)
    # Re-express the total runtime in minutes (or hours) for readability
    if end_time > 60.0:
        end_time_min = round(end_time / 60.0, 1)  # in minutes
        tot_time = repr(end_time_min) + "min"
        if end_time_min > 60.0:
            end_time_hr = round(end_time_min / 60.0, 1)  # in hrs
            tot_time = repr(end_time_hr) + "hr"
    else:
Exemple #14
0
def _compare_fits_products(actual_name, reference_name, extensions):
    """Assert that the named HDUs of two FITS files are identical.

    Parameters
    ----------
    actual_name : str
        Path of the pipeline output product to check.
    reference_name : str
        Path of the reference ("truth") file.
    extensions : list of str
        Names of the HDU extensions to compare.
    """
    # Context managers guarantee the files are closed even when the
    # assertion fails (the original code leaked both handles).
    with fits.open(actual_name) as h, fits.open(reference_name) as href:
        newh = fits.HDUList([h[ext] for ext in extensions])
        newhref = fits.HDUList([href[ext] for ext in extensions])
        result = fits.diff.FITSDiff(
            newh,
            newhref,
            ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
            rtol=0.00001)
        # report() gives a per-HDU breakdown of any differences found
        assert result.identical, result.report()


def test_detector1pipeline1(_bigdata):
    """
    Regression test of calwebb_detector1 pipeline performed on MIRI data.

    Runs the pipeline on a raw MIRI exposure, then compares the calibrated
    ramp, per-integration countrate, and countrate image products against
    reference files stored under ``_bigdata``.
    """
    step = Detector1Pipeline()
    step.save_calibrated_ramp = True
    step.ipc.skip = True
    step.refpix.odd_even_columns = True
    step.refpix.use_side_ref_pixels = True
    step.refpix.side_smoothing_length = 11
    step.refpix.side_gain = 1.0
    step.refpix.odd_even_rows = True
    step.persistence.skip = True
    step.jump.rejection_threshold = 250.0
    step.ramp_fit.save_opt = False
    step.output_file = 'jw00001001001_01101_00001_MIRIMAGE'
    step.suffix = 'rate'

    step.run(_bigdata + '/miri/test_sloperpipeline/'
                        'jw00001001001_01101_00001_MIRIMAGE_uncal.fits')

    # Compare calibrated ramp product
    _compare_fits_products(
        'jw00001001001_01101_00001_MIRIMAGE_ramp.fits',
        _bigdata + '/miri/test_sloperpipeline/jw00001001001_01101_00001_MIRIMAGE_uncal_jump.fits',
        ['primary', 'sci', 'err', 'groupdq', 'pixeldq'])

    # Compare multi-integration countrate image product
    _compare_fits_products(
        'jw00001001001_01101_00001_MIRIMAGE_rateints.fits',
        _bigdata + '/miri/test_sloperpipeline/jw00001001001_01101_00001_MIRIMAGE_uncal_integ.fits',
        ['primary', 'sci', 'err', 'dq'])

    # Compare countrate image product
    _compare_fits_products(
        'jw00001001001_01101_00001_MIRIMAGE_rate.fits',
        _bigdata + '/miri/test_sloperpipeline/jw00001001001_01101_00001_MIRIMAGE_uncal_MiriSloperPipeline.fits',
        ['primary', 'sci', 'err', 'dq'])