Example #1
    def process(self, input):
        '''Process a Stage 0 *_uncal.fits file to Stage 1 *_rate.fits and *_rateints.fits files.

        The steps taken to perform this processing can follow the default JWST pipeline, or use alternative methods.

        Parameters
        ----------
        input : str, tuple, `~astropy.io.fits.HDUList`, ndarray, dict, None

            - None: Create a default data model with no shape.

            - tuple: Shape of the data array.
              Initialize with an empty data array of the given shape.

            - file path: Initialize from the given file (FITS or ASDF).

            - readable file object: Initialize from the given file
              object.

            - `~astropy.io.fits.HDUList`: Initialize from the given
              `~astropy.io.fits.HDUList`.

            - A numpy array: Used to initialize the data array.
        
        Returns
        -------
        out_model : jwst.datamodels.ImageModel
            The output ImageModel to be returned from the ramp fit step.
        int_model : jwst.datamodels.CubeModel
            The output CubeModel to be returned from the ramp fit step.
        
        Notes
        -----
        History:

        - October 2021 Aarynn Carter and Eva-Maria Ahrer
            Initial version
        - February 2022 Aarynn Carter and Eva-Maria Ahrer
            Updated for JWST version 1.3.3, code restructure    
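
        Examples
        --------
        A minimal invocation sketch (the input file name is hypothetical)::

            out_model, int_model = self.process('jw01234001001_uncal.fits')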
        '''
        with datamodels.RampModel(input) as input_model:
            readnoise_filename = self.get_reference_file(input_model, 'readnoise')
            gain_filename = self.get_reference_file(input_model, 'gain')

            log.info('Using READNOISE reference file: %s', readnoise_filename)
            log.info('Using GAIN reference file: %s', gain_filename)

            with datamodels.ReadnoiseModel(readnoise_filename) as readnoise_model, \
                 datamodels.GainModel(gain_filename) as gain_model:

                # Try to retrieve the gain factor from the gain reference file.
                # If found, store it in the science model meta data, so that it's
                # available later in the gain_scale step, which avoids having to
                # load the gain ref file again in that step.
                if gain_model.meta.exposure.gain_factor is not None:
                    input_model.meta.exposure.gain_factor = gain_model.meta.exposure.gain_factor

                # Get readnoise and gain arrays, trimmed to subarrays if necessary.
                frames_per_group = input_model.meta.exposure.nframes
                readnoise_2d, gain_2d = get_reference_file_subarrays(
                    input_model, readnoise_model, gain_model, frames_per_group)

            log.info('Using algorithm = %s', self.algorithm)
            log.info('Using weighting = %s', self.weighting)

            buffsize = ramp_fit.BUFSIZE
            # Keep the integration times table for time-series observations;
            # otherwise clear it.
            if pipe_utils.is_tso(input_model) and hasattr(input_model, 'int_times'):
                input_model.int_times = input_model.int_times
            else:
                input_model.int_times = None

            # DEFAULT RAMP FITTING ALGORITHM
            if self.algorithm == 'default':
                # In our case, default just means Optimal Least Squares
                self.algorithm = 'OLS'
                if self.weighting == 'default':
                    # Want to use the default optimal weighting
                    pass
                elif self.weighting == 'fixed':
                    # Want to use default weighting, but don't want to
                    # change exponent between pixels.
                    if not isinstance(self.fixed_exponent, (int, float)):
                        raise ValueError('Weighting exponent must be of type "int" or "float" for "fixed" weighting.')

                    # Overwrite the exponent calculation function from ols_fit
                    stcal.ramp_fitting.ols_fit.calc_power = partial(fixed_power, weighting_exponent=self.fixed_exponent) #Pipeline version 1.3.3
                elif self.weighting == 'interpolated':
                    # Want to use an interpolated version of the default weighting.

                    # Overwrite the exponent calculation function from ols_fit
                    stcal.ramp_fitting.ols_fit.calc_power = interpolate_power #Pipeline version 1.3.3
                elif self.weighting == 'uniform':
                    # Want each frame and pixel weighted equally

                    # Overwrite the entire optimal calculation function
                    stcal.ramp_fitting.ols_fit.calc_opt_sums = calc_opt_sums_uniform_weight #Pipeline version 1.3.3
                elif self.weighting == 'custom':
                    # Want to manually assign snr bounds for exponent changes

                    # Overwrite the exponent calculation function from ols_fit
                    stcal.ramp_fitting.ols_fit.calc_power = partial(custom_power, snr_bounds=self.custom_snr_bounds, exponents=self.custom_exponents) #Pipeline version 1.3.3
                else:
                    raise ValueError('Could not interpret weighting "{}".'.format(self.weighting))

                # Important! The weighting must be set to 'optimal' for the
                # actual ramp_fit() call; the if statements above will already
                # have changed its underlying functionality.
                self.weighting = 'optimal'

                image_info, integ_info, opt_info, gls_opt_model = ramp_fit.ramp_fit(
                    input_model, buffsize, self.save_opt, readnoise_2d, gain_2d,
                    self.algorithm, self.weighting, self.maximum_cores, dqflags.pixel)
            # FUTURE IMPROVEMENT, WFC3-like differenced frames.
            elif self.algorithm == 'differenced':
                raise ValueError('The "differenced" algorithm is not implemented yet.')
            # PRIMARILY FOR TESTING, MEAN OF RAMP
            elif self.algorithm == 'mean':
                image_info, integ_info, opt_info = mean_ramp_fit_single(
                    input_model, buffsize, self.save_opt, readnoise_2d, gain_2d,
                    self.algorithm, self.weighting, self.maximum_cores, dqflags.pixel)
            else:
                raise ValueError('Ramp fitting algorithm "{}" not implemented.'.format(self.algorithm))

        # Initialise the outputs so a missing product is returned as None
        # rather than raising a NameError.
        out_model, int_model = None, None
        if image_info is not None:
            out_model = create_image_model(input_model, image_info)
            out_model.meta.bunit_data = 'DN/s'
            out_model.meta.bunit_err = 'DN/s'
            out_model.meta.cal_step.ramp_fit = 'COMPLETE'

        if integ_info is not None:
            int_model = create_integration_model(input_model, integ_info)
            int_model.meta.bunit_data = 'DN/s'
            int_model.meta.bunit_err = 'DN/s'
            int_model.meta.cal_step.ramp_fit = 'COMPLETE'

        return out_model, int_model
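
The weighting options above work by monkey-patching helper functions inside stcal.ramp_fitting.ols_fit before ramp_fit() runs. Below is a minimal, self-contained sketch of that pattern; fixed_power_sketch and its (snr, weighting_exponent) signature are illustrative assumptions, not the actual Eureka or stcal implementations.

from functools import partial

import numpy as np


def fixed_power_sketch(snr, weighting_exponent=10.0):
    # Hypothetical calc_power replacement: return the same optimal-weighting
    # exponent for every pixel, regardless of its signal-to-noise ratio.
    power = np.zeros(snr.shape, dtype=np.float32)
    power += weighting_exponent
    return power


# The step above would install the replacement along the lines of:
#   stcal.ramp_fitting.ols_fit.calc_power = partial(
#       fixed_power_sketch, weighting_exponent=4.0)
calc_power = partial(fixed_power_sketch, weighting_exponent=4.0)
print(calc_power(np.array([3.0, 30.0, 300.0])))  # -> [4. 4. 4.]
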
Example #2
def get_subarray_model(sci_model, ref_model):
    """
    Create a subarray version of a reference file model that matches
    the subarray characteristics of a science data model. A new
    model is created that contains subarrays of all data arrays
    contained in the reference file model.

    Parameters
    ----------
    sci_model : JWST data model
        science data model

    ref_model : JWST data model
        reference file data model

    Returns
    -------
    sub_model : JWST data model
        subarray version of the reference file model
    """

    # Get the science model subarray params
    xstart_sci = sci_model.meta.subarray.xstart
    xsize_sci = sci_model.meta.subarray.xsize
    ystart_sci = sci_model.meta.subarray.ystart
    ysize_sci = sci_model.meta.subarray.ysize

    # Get the reference model subarray params
    xstart_ref = ref_model.meta.subarray.xstart
    xsize_ref = ref_model.meta.subarray.xsize
    ystart_ref = ref_model.meta.subarray.ystart
    ysize_ref = ref_model.meta.subarray.ysize

    # Compute the slice indexes, in 0-indexed python frame
    xstart = xstart_sci - xstart_ref
    ystart = ystart_sci - ystart_ref
    xstop = xstart + xsize_sci
    ystop = ystart + ysize_sci
    log.debug("slice xstart=%d, xstop=%d, ystart=%d, ystop=%d", xstart, xstop,
              ystart, ystop)

    # Make sure that the slice limits are within the bounds of
    # the reference file data array
    if xstart < 0 or ystart < 0 or xstop > xsize_ref or ystop > ysize_ref:
        log.error('Computed reference file slice indexes are '
                  'incompatible with size of reference data array')
        log.error('xstart=%d, xstop=%d, ystart=%d, ystop=%d', xstart, xstop,
                  ystart, ystop)
        log.error('Reference xsize=%d, ysize=%d', xsize_ref, ysize_ref)
        raise ValueError('Bad reference file slice indexes')

    # Extract subarrays from each data attribute in the particular
    # type of reference file model and return a new copy of the
    # data model
    if isinstance(ref_model, datamodels.FlatModel):
        sub_data = ref_model.data[ystart:ystop, xstart:xstop]
        sub_err = ref_model.err[ystart:ystop, xstart:xstop]
        sub_dq = ref_model.dq[ystart:ystop, xstart:xstop]
        sub_model = datamodels.FlatModel(data=sub_data, err=sub_err, dq=sub_dq)
        sub_model.update(ref_model)
    elif isinstance(ref_model, datamodels.GainModel):
        sub_data = ref_model.data[ystart:ystop, xstart:xstop]
        sub_model = datamodels.GainModel(data=sub_data)
        sub_model.update(ref_model)
    elif isinstance(ref_model, datamodels.LinearityModel):
        sub_data = ref_model.coeffs[:, ystart:ystop, xstart:xstop]
        sub_dq = ref_model.dq[ystart:ystop, xstart:xstop]
        sub_model = datamodels.LinearityModel(coeffs=sub_data, dq=sub_dq)
        sub_model.update(ref_model)
    elif isinstance(ref_model, datamodels.MaskModel):
        sub_dq = ref_model.dq[ystart:ystop, xstart:xstop]
        sub_model = datamodels.MaskModel(dq=sub_dq)
        sub_model.update(ref_model)
    elif isinstance(ref_model, datamodels.ReadnoiseModel):
        sub_data = ref_model.data[ystart:ystop, xstart:xstop]
        sub_model = datamodels.ReadnoiseModel(data=sub_data)
        sub_model.update(ref_model)
    elif isinstance(ref_model, datamodels.SaturationModel):
        sub_data = ref_model.data[ystart:ystop, xstart:xstop]
        sub_dq = ref_model.dq[ystart:ystop, xstart:xstop]
        sub_model = datamodels.SaturationModel(data=sub_data, dq=sub_dq)
        sub_model.update(ref_model)
    elif isinstance(ref_model, datamodels.SuperBiasModel):
        sub_data = ref_model.data[ystart:ystop, xstart:xstop]
        sub_err = ref_model.err[ystart:ystop, xstart:xstop]
        sub_dq = ref_model.dq[ystart:ystop, xstart:xstop]
        sub_model = datamodels.SuperBiasModel(data=sub_data,
                                              err=sub_err,
                                              dq=sub_dq)
        sub_model.update(ref_model)
    else:
        log.warning('Unsupported reference file model type')
        sub_model = None

    return sub_model
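
A small worked example of the slice arithmetic above, using plain numpy in place of real data models (the coordinate values are illustrative; FITS subarray keywords are 1-indexed, while numpy slices are 0-indexed):

import numpy as np

# Full-frame reference array, i.e. xstart_ref = ystart_ref = 1.
ref_data = np.arange(2048 * 2048, dtype=np.float32).reshape(2048, 2048)

# Hypothetical 64x64 science subarray starting at FITS pixel (257, 129).
xstart_sci, ystart_sci, xsize_sci, ysize_sci = 257, 129, 64, 64
xstart = xstart_sci - 1              # 256 in the 0-indexed python frame
ystart = ystart_sci - 1              # 128
xstop, ystop = xstart + xsize_sci, ystart + ysize_sci

sub_data = ref_data[ystart:ystop, xstart:xstop]
assert sub_data.shape == (64, 64)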

    def save_readnoise_file(self, im, infile, outfilebasename):

        #create readnoise model instance and add data
        self.outputmodel = models.ReadnoiseModel()
        self.outputmodel.data = im
        
        #update info on the detector
        try:
            self.outputmodel.meta.instrument.detector = self.hdr0['DETECTOR']
            self.outputmodel.meta.instrument.channel = self.hdr0['CHANNEL']
            self.outputmodel.meta.instrument.module = self.hdr0['MODULE']
        except KeyError:
            print('DETECTOR-related keywords not found in header of {}. Assuming this file is NOT in SSB format, and attempting to proceed.'.format(infile))
            #get the runID so we know how to populate the detector-related header keywords
            runID = self.getRunID(filename=infile)

            #update the info on the detector
            self.cryo_update_meta_detector(runID=runID,reffileflag=False)
            

        #other required keywords
        self.outputmodel.meta.reffile.type = 'READNOISE'
        self.outputmodel.meta.subarray.name = 'FULL'
        self.outputmodel.meta.subarray.xstart = 1
        self.outputmodel.meta.subarray.xsize = self.hdr['NAXIS1']
        self.outputmodel.meta.subarray.ystart = 1
        self.outputmodel.meta.subarray.ysize = self.hdr['NAXIS2']
        self.outputmodel.meta.instrument.name = 'NIRCAM'
        self.outputmodel.meta.telescope = 'JWST'
        try:
            #for fitswriter format files
            self.outputmodel.meta.exposure.readpatt = self.hdr['READOUT']
        except KeyError:
            #for SSB-format files
            self.outputmodel.meta.exposure.readpatt = self.hdr0['READPATT']

        self.outputmodel.meta.reffile.author = 'Hilbert'
        self.outputmodel.meta.reffile.description = 'Readnoise reffile from CV3 data'
        self.outputmodel.meta.reffile.pedigree = 'GROUND'
        self.outputmodel.meta.reffile.useafter = '2015-10-01'

        #look for the FASTAXIS and SLOWAXIS keywords in the input data.
        #if they are present, propagate those values into the readnoise
        #model. If they are not present, the data must be in native
        #orientation, so use the appropriate values.
        try:
            self.outputmodel.meta.subarray.fastaxis = self.hdr0['FASTAXIS']
            self.outputmodel.meta.subarray.slowaxis = self.hdr0['SLOWAXIS']
        except KeyError:
            print('===============================================')
            print("FASTAXIS and SLOWAXIS header keywords not found in the input data.")
            print("Assuming they are in native (fitswriter) orientation, and adding the")
            print("native orientation values for those keywords to the static pixel mask.")
            print('===============================================')
            model.meta.subarray.fastaxis = 1
            model.meta.subarray.slowaxis = 2

        #HISTORY keyword
        self.outputmodel.history.append('Description of Reference File Creation')

        self.outputmodel.history.append('DOCUMENT:')
        self.outputmodel.history.append('JWST-STScI-TR-XXXX')

        self.outputmodel.history.append('SOFTWARE:')
        self.outputmodel.history.append('/grp/jwst/wit/nircam/nircam-tools/pythonmodules/')
        self.outputmodel.history.append('mkimrdnoise_ssboutput.py')
        
        #put the list of input files into the HISTORY keyword
        self.outputmodel.history.append('DATA USED:')
        self.outputmodel.history.append(infile)
        
        self.outputmodel.history.append('DIFFERENCES:')
        self.outputmodel.history.append('N/A. No previous version.')

        #save output file
        outfilename = re.sub(r'fits$', 'bx%d.by%d_ssbreadnoise.fits' % (self.boxsizex, self.boxsizey), outfilebasename)
        print(outfilename + ' inside save_readnoise_file')
        if not re.search('fits$', outfilename):
            outfilename += '_ssbreadnoise.fits'
        print('Saving %s' % outfilename)
        self.outputmodel.save(outfilename)
        return outfilename
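
For context, here is a minimal standalone sketch of the pattern used above: build a ReadnoiseModel, fill in the required reference-file metadata, and save it. The array contents and output name are illustrative, and this assumes the current jwst.datamodels import path rather than the older models module used in the snippet.

import numpy as np
from jwst import datamodels

# Illustrative full-frame readnoise array (arbitrary constant values).
readnoise = np.full((2048, 2048), 12.0, dtype=np.float32)

model = datamodels.ReadnoiseModel()
model.data = readnoise

# Minimal reference-file metadata, mirroring the keywords set above.
model.meta.reffile.type = 'READNOISE'
model.meta.reffile.pedigree = 'GROUND'
model.meta.reffile.useafter = '2015-10-01'
model.meta.instrument.name = 'NIRCAM'
model.meta.telescope = 'JWST'
model.meta.subarray.name = 'FULL'
model.meta.subarray.xstart = 1
model.meta.subarray.ystart = 1
model.meta.subarray.xsize = 2048
model.meta.subarray.ysize = 2048

model.history.append('Created as an illustrative example.')
model.save('example_readnoise.fits')  # hypothetical output name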