def test_superbias_step(fits_input):
    """Make sure the DQInitStep runs without error."""
    fname = fits_input[0].header['filename'].replace('.fits',
                                                     '_superbiasstep.fits')
    SuperBiasStep.call(datamodels.open(fits_input),
                       output_file=fname,
                       save_results=True)
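For comparison, the step can also be pointed at a file path rather than an open datamodel; a minimal sketch (the input file name is a placeholder, not from the source):

from jwst.superbias import SuperBiasStep

# save_results=True writes the calibrated product to disk in addition to
# returning the datamodel; the input name here is purely illustrative.
result = SuperBiasStep.call('jw00011001001_01106_00001_nrs2_uncal.fits',
                            save_results=True)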
Example #2
    def run_jump_step(self, infile, threshold, run_steps):
        '''Function to run the jump detection step.'''

        # output file name
        out = infile[:-5] + "_jump_CRthresh" + str(threshold) + ".fits"

        # if run_steps, run all steps prior to jump
        if run_steps:

            m = DQInitStep.call(infile)
            m = SaturationStep.call(m)
            m = SuperBiasStep.call(m)
            m = RefPixStep.call(m, config_file='refpix.cfg')
            m = LinearityStep.call(m)
            m = DarkCurrentStep.call(m)

            # if threshold is given, use that rejection threshold
            if threshold is not None:
                m = JumpStep.call(m,
                                  output_file=out,
                                  rejection_threshold=threshold)
            else:
                m = JumpStep.call(m, output_file=out)

        # else, run only jump_step
        else:
            if threshold is not None:
                m = JumpStep.call(infile,
                                  output_file=out,
                                  rejection_threshold=threshold)
            else:
                m = JumpStep.call(infile, output_file=out)

        return m
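A minimal sketch of how this helper might be invoked (the owning object and file names are hypothetical, not from the source):

# Hypothetical driver object that defines run_jump_step.
result = tester.run_jump_step('jw_nrca1_uncal.fits', threshold=5, run_steps=True)

# Jump detection alone, falling back to the step's default rejection threshold:
result = tester.run_jump_step('jw_nrca1_linearity.fits', threshold=None, run_steps=False)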
Example #3
    def run_superbias_step(self):
        # Run the superbias step (whether dq_init and saturation
        # flagging have been run is irrelevant)
        m = SuperBiasStep.call(self.infile, override_superbias=self.sbfile)
        if self.outfile is not None:
            m.save(self.outfile)
        return m
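A usage sketch, assuming the owning object exposes the infile, sbfile, and outfile attributes referenced above (the object and file names are hypothetical):

# Hypothetical attribute values; sbfile overrides the CRDS-selected superbias.
tool.infile = 'ramp_uncal.fits'
tool.sbfile = 'nrca1_superbias.fits'
tool.outfile = 'ramp_superbias.fits'
model = tool.run_superbias_step()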
Example #4
    def run_early_pipeline(self,
                           filename,
                           odd_even_rows=False,
                           odd_even_columns=True,
                           use_side_ref_pixels=True,
                           group_scale=False):
        """Runs the early steps of the jwst pipeline (dq_init, saturation,
        superbias, refpix) on uncalibrated files and outputs the result.

        Parameters
        ----------
        filename : str
            File on which to run the pipeline steps

        odd_even_rows : bool
            Option to treat odd and even rows separately during refpix step

        odd_even_columns : bool
            Option to treat odd and even columns separately during refpix step

        use_side_ref_pixels : bool
            Option to perform the side refpix correction during refpix step

        group_scale : bool
            Option to rescale pixel values to correct for instances where
            on-board frame averaging did not result in the proper values

        Returns
        -------
        output_filename : str
            The full path to the calibrated file
        """

        output_filename = filename.replace('_uncal', '').replace(
            '.fits', '_superbias_refpix.fits')

        if not os.path.isfile(output_filename):
            # Run the group_scale and dq_init steps on the input file
            if group_scale:
                model = GroupScaleStep.call(filename)
                model = DQInitStep.call(model)
            else:
                model = DQInitStep.call(filename)

            # Run the saturation and superbias steps
            model = SaturationStep.call(model)
            model = SuperBiasStep.call(model)

            # Run the refpix step and save the output
            model = RefPixStep.call(model,
                                    odd_even_rows=odd_even_rows,
                                    odd_even_columns=odd_even_columns,
                                    use_side_ref_pixels=use_side_ref_pixels)
            model.save(output_filename)
            set_permissions(output_filename)
        else:
            logging.info('\t{} already exists'.format(output_filename))

        return output_filename
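A usage sketch for the helper above (the instance name and file path are hypothetical):

# Runs group_scale (optional), dq_init, saturation, superbias, and refpix,
# and returns the path of the '_superbias_refpix.fits' product.
output_path = monitor.run_early_pipeline('jw01234001001_01101_00001_nrca1_uncal.fits',
                                         odd_even_columns=True,
                                         use_side_ref_pixels=True,
                                         group_scale=False)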
Example #5
def test_superbias_nirspec(_bigdata):
    """
    Regression test of superbias step performed on NIRSpec data.
    """
    output_file_base, output_file = add_suffix('superbias1_output.fits', 'superbias')

    SuperBiasStep.call(_bigdata+'/nirspec/test_superbias/jw00011001001_01106_00001_NRS2_saturation.fits',
                       output_file=output_file_base, name='superbias'
                       )
    h = fits.open(output_file)
    href = fits.open(_bigdata+'/nirspec/test_superbias/jw00011001001_01106_00001_NRS2_superbias.fits')
    newh = fits.HDUList([h['primary'], h['sci'], h['err'], h['pixeldq'], h['groupdq']])
    newhref = fits.HDUList([href['primary'], href['sci'], href['err'], href['pixeldq'], href['groupdq']])
    result = fits.diff.FITSDiff(newh,
                                newhref,
                                ignore_keywords=['DATE', 'CAL_VER', 'CAL_VCS', 'CRDS_VER', 'CRDS_CTX'],
                                rtol=0.00001)
    assert result.identical, result.report()
Example #6
def test_full_step(setup_full_cube):
    '''Test full run of the SuperBiasStep.'''

    # Create inputs, data, and superbiases
    ngroups = 5
    nrows = 2048
    ncols = 2048

    data, bias = setup_full_cube(ngroups, nrows, ncols)

    # Add signal values and bias values
    # Use signal = 0 ADU so value will be negative after superbias step
    data.data[0, :, 500, 500] = 0

    # Run the pipeline
    output = SuperBiasStep.call(data)

    # Check that pixel value is negative after bias is subtracted
    assert np.sign(output.data[0, 0, 500, 500]) == -1
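The call above leaves the superbias reference-file selection to CRDS. If the fixture's own bias array should be subtracted instead, one possible variant is the sketch below (an assumption, not part of the original test; it presumes bias is a plain full-frame 2-D array):

from jwst import datamodels

# Wrap the fixture's bias in a SuperBiasModel, save it, and pass it to the
# step as an explicit override so no CRDS lookup is needed.
bias_model = datamodels.SuperBiasModel()
bias_model.data = bias
bias_model.save('fixture_superbias.fits')
output = SuperBiasStep.call(data, override_superbias='fixture_superbias.fits')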
Example #7
# Read the list of input files (the 'listfile' path is assumed to be defined earlier).
files = []
with open(listfile) as listobj:
    for line in listobj:
        if len(line) > 3:
            files.append(line.strip())

# Assume they've been run through bpm and saturation,
# so now do superbias subtraction, refpix correction,
# linearity correction, and the jump step.

# Hardwire reference files for A1 at the moment:
reffile_dir = '/ifs/jwst/wit/witserv/data7/nrc/reference_files/SSB/CV3/cv3_reffile_conversion/'
sbfile = '/ifs/jwst/wit/witserv/data4/nrc/hilbert/superbias/cv3/A1/A1_superbias_from_list_of_biasfiles.list.fits'
linfile = reffile_dir + 'linearity/NRCA1_17004_LinearityCoeff_ADU0_2016-05-14_ssblinearity_DMSorient.fits'
gainfile = reffile_dir + 'gain/NRCA1_17004_Gain_ISIMCV3_2016-01-23_ssbgain_DMSorient.fits'
ronfile = '/grp/jwst/wit/nircam/reference_files/SSB/CV2/delivery_Dec_2015/Read_Noise/NRCA1_16989_CDSNoise_2014-10-24_ssbreadnoise_DMSorient.fits'

for file in files:
    sbout = file[0:-5] + '_superbias.fits'
    data = SuperBiasStep.call(file,
                              override_superbias=sbfile,
                              output_file=sbout)
    refout = sbout[0:-5] + '_refpix.fits'
    data = RefPixStep.call(data, output_file=refout)
    linout = refout[0:-5] + '_linearity.fits'
    data = LinearityStep.call(data,
                              override_linearity=linfile,
                              output_file=linout)
    jumpout = linout[0:-5] + '_jump.fits'
    data = JumpStep.call(data,
                         override_gain=gainfile,
                         override_readnoise=ronfile,
                         output_file=jumpout)
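As a quick sanity check after a chain like this, the reference files that were actually applied can be read back from the datamodel metadata (the same meta.ref_file pattern used later on this page); a short sketch:

# For the most recently processed file, confirm that the override files,
# rather than CRDS picks, were recorded in the metadata.
print(data.meta.ref_file.superbias.name)
print(data.meta.ref_file.linearity.name)
print(data.meta.ref_file.gain.name)
print(data.meta.ref_file.readnoise.name)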
Example #8
    def linearize_dark(self, darkobj):
        """Beginning with the input dark current ramp, run the dq_init, saturation, superbias
        subtraction, refpix and nonlin pipeline steps in order to produce a linearized
        version of the ramp. This will be used when combining the dark ramp with the
        simulated signal ramp.

        Parameters
        -----------
        darkobj : obj
            Instance of read_fits class containing dark current data and info

        Returns
        -------
        linDarkObj : obj
            Modified read_fits instance with linearized dark current data
        """
        from jwst.dq_init import DQInitStep
        from jwst.saturation import SaturationStep
        from jwst.superbias import SuperBiasStep
        from jwst.refpix import RefPixStep
        from jwst.linearity import LinearityStep

        # First we need to place the read_fits object into a RampModel instance
        if self.runStep['linearized_darkfile']:
            subfile = self.params['Reffiles']['linearized_darkfile']
        else:
            subfile = self.params['Reffiles']['dark']
        dark = darkobj.insert_into_datamodel(subfile)

        print('Creating a linearized version of the dark current input ramp')
        print('using JWST calibration pipeline.')

        # Run the DQ_Init step
        if self.runStep['badpixmask']:
            linDark = DQInitStep.call(dark,
                                      config_file=self.params['newRamp']['dq_configfile'],
                                      override_mask=self.params['Reffiles']['badpixmask'])
        else:
            linDark = DQInitStep.call(dark, config_file=self.params['newRamp']['dq_configfile'])

        # If the saturation map is provided, use it. If not, default to whatever is in CRDS
        if self.runStep['saturation_lin_limit']:
            linDark = SaturationStep.call(linDark,
                                          config_file=self.params['newRamp']['sat_configfile'],
                                          override_saturation=self.params['Reffiles']['saturation'])
        else:
            linDark = SaturationStep.call(linDark,
                                          config_file=self.params['newRamp']['sat_configfile'])

        # If the superbias file is provided, use it. If not, default to whatever is in CRDS
        if self.runStep['superbias']:
            linDark = SuperBiasStep.call(linDark,
                                         config_file=self.params['newRamp']['superbias_configfile'],
                                         override_superbias=self.params['Reffiles']['superbias'])
        else:
            linDark = SuperBiasStep.call(linDark,
                                         config_file=self.params['newRamp']['superbias_configfile'])

        # Reference pixel correction
        linDark = RefPixStep.call(linDark,
                                  config_file=self.params['newRamp']['refpix_configfile'])

        # Save a copy of the superbias- and reference pixel-subtracted
        # dark. This will be used later to add these effects back in
        # after the synthetic signals have been added and the non-linearity
        # effects are added back in when using the PROPER combine method.
        sbAndRefpixEffects = dark.data - linDark.data

        # Linearity correction - save the output so that you won't need to
        # re-run the pipeline when using the same dark current file in the
        # future. Use the linearity coefficient file if provided
        base_name = self.params['Output']['file'].split('/')[-1]
        linearoutfile = base_name[0:-5] + '_linearized_dark_current_ramp.fits'
        linearoutfile = os.path.join(self.params['Output']['directory'], linearoutfile)
        if self.runStep['linearity']:
            linDark = LinearityStep.call(linDark,
                                         config_file=self.params['newRamp']['linear_configfile'],
                                         override_linearity=self.params['Reffiles']['linearity'],
                                         output_file=linearoutfile)
        else:
            linDark = LinearityStep.call(linDark,
                                         config_file=self.params['newRamp']['linear_configfile'],
                                         output_file=linearoutfile)

        print(("Linearized dark (output directly from pipeline saved as {}"
               .format(linearoutfile)))

        # Now we need to put the data back into a read_fits object
        linDarkobj = read_fits.Read_fits()
        linDarkobj.model = linDark
        linDarkobj.rampmodel_to_obj()
        linDarkobj.sbAndRefpix = sbAndRefpixEffects

        return linDarkobj
Example #9
    def run(self):
        '''Main function.'''

        # Read in list of files to use to calculate saturation.
        files = self.read_listfile(self.listfile)

        # Check that input files have the same readpattern, array size, etc.
        detector, xshape, yshape = self.input_consistency_check(files)
        det_short = str(detector[3:])
        if 'long' in det_short:
            det_short = det_short[0] + '5'

        # Set up arrays to hold output data.
        sat_arr, grp_arr, new_dq, big_dq, tmp_mask = \
                                        self.init_arrays(files, xshape, yshape)

        # Create mask to isolate reference pixels
        tmp_mask[:, 4:-4, 4:-4] = True

        # Set reference pixel values so they don't get used in calculations.
        sat_arr[~tmp_mask] = 1e6
        grp_arr[~tmp_mask] = np.nan
        big_dq[~tmp_mask] = np.uint8(0)

        # Loop over files.
        for file, n in zip(files, np.arange(0, len(files))):

            # Run dq, superbias, refpix steps and save outputs (optional)
            bpm = DQInitStep.call(file)
            sup = SuperBiasStep.call(bpm)
            ref = RefPixStep.call(sup, odd_even_rows=False)

            if self.intermediates:
                sup.save(file[:-5] + '_dq_superbias.fits')
                ref.save(file[:-5] + '_dq_superbias_refpix.fits')

            # Grab the name of the mask file used from the headers
            bpmcalfile = ref.meta.ref_file.mask.name
            if 'crds' in bpmcalfile:
                jwst = bpmcalfile.find('jwst')
                bpmfile = '/grp/crds/cache/references/jwst/' + bpmcalfile[jwst:]
            else:
                bpmfile = bpmcalfile

            # Get data values
            mask = fits.getdata(bpmfile, 1)
            data = ref.data
            xstart = ref.meta.subarray.xstart
            ystart = ref.meta.subarray.ystart
            xend = ref.meta.subarray.xsize
            yend = ref.meta.subarray.ysize

            # Loop over pixel combinations for given array (no ref pixels).
            for i, j in itertools.product(np.arange(xstart + 3, xend - 4),
                                          np.arange(ystart + 3, yend - 4)):

                # Set values for bad pixels so they don't get used in calculations.
                if mask[j, i] == np.uint8(1):

                    sat_arr[n, j, i] = np.nan
                    grp_arr[n, j, i] = np.nan
                    big_dq[n, j, i] = np.uint8(1)

                else:

                    # Get signal values for each pixel.
                    signal = data[0, :, j, i].astype('float32')

                    # Get linear region early in the ramp with method 1
                    signal_range = self.get_lin_regime(signal, "method1")

                    # If signal_range can't be determined, must be weird ramp
                    if np.shape(signal_range)[0] == 0:

                        # Try again to get linear region with different method
                        signal_range = self.get_lin_regime(signal, "method2")

                        # If that still doesn't work, quit.
                        if np.shape(signal_range)[0] == 0:

                            sat_arr[n, j, i] = np.nan
                            grp_arr[n, j, i] = np.nan
                            big_dq[n, j, i] = np.uint8(2)

                        else:
                            hard_sat, first_sat_grp = \
                                self.get_saturation(signal, signal_range)

                            # Save all values.
                            sat_arr[n, j, i] = hard_sat.astype('float32')
                            grp_arr[n, j, i] = first_sat_grp.astype('float32')
                            big_dq[n, j, i] = np.uint8(3)

                    # Otherwise, must be good ramp?
                    elif np.shape(signal_range)[0] > 0:

                        # Get hard saturation.
                        hard_sat, first_sat_grp = \
                            self.get_saturation(signal, signal_range)

                        # Save all saturation values.
                        sat_arr[n, j, i] = hard_sat.astype('float32')
                        grp_arr[n, j, i] = first_sat_grp.astype('float32')

                    # Catch errors
                    else:
                        print('ERROR for pixel ', i, j)
                        sys.exit(0)

        # If each file gave same pixel DQs, make sure output DQ matches
        locs = np.all(big_dq == big_dq[0, :], axis=0)
        new_dq[locs] = big_dq[0][locs]

        # Get statistics for saturation values, averaging over exposures.
        avg, err = self.calc_stats(sat_arr, big_dq)

        # Save saturation values for each exposure to a FITS file
        newhdu = fits.PrimaryHDU(sat_arr)
        newhdulist = fits.HDUList([newhdu])
        grpname = detector + '_'  \
                      + str(DET_NUM[detector])  \
                      + '_WellDepthADU_'  \
                      + str(datetime.date.today())  \
                      + '_beforeAverage.fits'
        newhdulist.writeto(grpname, overwrite=True)

        # Save first saturated groups array to a FITS file
        newhdu = fits.PrimaryHDU(grp_arr)
        newhdulist = fits.HDUList([newhdu])
        grpname = detector + '_'  \
                      + str(DET_NUM[detector])  \
                      + '_WellDepthADU_'  \
                      + str(datetime.date.today())  \
                      + '_firstSatGroup.fits'
        newhdulist.writeto(grpname, overwrite=True)

        # Save averaged saturation values to a FITS file.
        outfilename = detector + '_'  \
                          + str(DET_NUM[detector])  \
                          + '_WellDepthADU_'  \
                          + str(datetime.date.today())  \
                          + '_ssbsaturation_DMSorient.fits'
        outfile = self.save_reffile(avg, err, new_dq, files, outfilename)

        # Save saturation errors, since the pipeline doesn't currently use them
        errhdu = fits.PrimaryHDU(err)
        errhdulist = fits.HDUList([errhdu])
        errname = detector + '_'  \
                          + str(DET_NUM[detector])  \
                          + '_WellDepthADU_'  \
                          + str(datetime.date.today())  \
                          + '_saturationErrors.fits'
        errhdulist.writeto(errname, overwrite=True)

        z = fits.open(outfile)
        z0 = z[0]
        z1 = z[1]
        z2 = z[2]
        z3 = z[3]

        # Add other things that the pipeline doesn't use, but are helpful.
        z0.header['S_DQINIT'] = ('COMPLETE', 'Data Quality Initialization')
        z0.header['S_SUPERB'] = ('COMPLETE', 'Superbias Subtraction')
        z0.header['S_REFPIX'] = ('COMPLETE', 'Reference Pixel Correction')
        newhdu = fits.HDUList([z0, z1, z2, z3])
        newhdu.writeto(outfile, overwrite=True)
Example #10
def test_superbias_step(fits_input):
    """Make sure the DQInitStep runs without error."""

    SuperBiasStep.call(datamodels.open(fits_input), save_results=True)

    # The lines below assume 'det' (detector name), 'reffiles' (a dict of
    # reference-file names keyed by detector), 'file', and 'outfile' are
    # defined earlier in the originating script.
    det = det.lower()[3:]
    if 'long' in det:
        det = det[0] + '5'
    refdict = reffiles[det]

    #m = calwebb_detector1.Detector1Pipeline(config_file='calwebb_detector1.cfg')
    #m.saturation.override_saturation = satdir+refdict['saturation']
    #m.superbias.override_superbias = sbdir+refdict['superbias']
    #m.refpix.odd_even_rows = False
    #m.group_scale.skip = True
    #m.ipc.skip = True
    #m.rscd.skip = True
    #m.lastframe.skip = True
    #m.dark_current.skip = True
    #m.persistence.skip = True
    #m.jump.skip = True
    #m.ramp_fit.skip = False #bug in pipeline means this must
    #be run. 
    #m.linearity.override_linearity = lindir+refdict['linearity']
    #m.output_file = outfile
    #m.run(file)

    m = DQInitStep.call(file, config_file='dq_init.cfg')
    m = SaturationStep.call(m, config_file='saturation.cfg')
    m = SuperBiasStep.call(m, config_file='superbias.cfg')
    m = RefPixStep.call(m, config_file='refpix.cfg')
    m = LinearityStep.call(m, config_file='linearity.cfg', output_file=outfile)