Example #1
    def insert_into_datamodel(self, subfile):
        #read in a dummy/substitute file as a datamodel,
        #and insert the data and self.header metadata
        #into it

        h = RampModel(subfile)
        h.data = self.data
        try:
            h.zeroframe = self.zeroframe
        except AttributeError:
            #not all inputs carry a zeroframe
            pass

        h.err = np.zeros_like(self.data)
        #DQ arrays are bit masks, so use unsigned integer dtypes
        h.groupdq = np.zeros(self.data.shape, dtype=np.uint8)
        nint, ng, ny, nx = self.data.shape
        h.pixeldq = np.zeros((ny, nx), dtype=np.uint32)

        h.meta.exposure.readpatt = self.header['READPATT']
        h.meta.exposure.nints = self.header['NINTS']
        h.meta.exposure.ngroups = self.header['NGROUPS']
        h.meta.exposure.nframes = self.header['NFRAMES']
        h.meta.exposure.nskip = self.header['NSKIP']
        h.meta.exposure.groupgap = self.header['GROUPGAP']
        h.meta.exposure.type = self.header['EXP_TYPE']
        h.meta.instrument.detector = self.header['DETECTOR']
        h.meta.instrument.name = self.header['INSTRUME']
        h.meta.subarray.fastaxis = self.header['FASTAXIS']
        h.meta.subarray.slowaxis = self.header['SLOWAXIS']

        return h
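The method above fills in a template file; a RampModel can also be created empty and populated directly from arrays. A minimal standalone sketch of that variant (shapes and metadata values are illustrative, not from the original source):

import numpy as np
from jwst.datamodels import RampModel

#illustrative shapes: 1 integration, 5 groups, 32x32 pixels
nint, ng, ny, nx = 1, 5, 32, 32

h = RampModel()
h.data = np.zeros((nint, ng, ny, nx), dtype=np.float32)
h.err = np.zeros_like(h.data)
h.groupdq = np.zeros(h.data.shape, dtype=np.uint8)  #group DQ bit mask
h.pixeldq = np.zeros((ny, nx), dtype=np.uint32)     #pixel DQ bit mask
h.meta.instrument.name = 'NIRCAM'  #illustrative metadata
h.meta.exposure.ngroups = ng
h.meta.exposure.nints = nint
h.save('fake_uncal.fits')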
Example #2
def setup_cube(ngroups, nrows, ncols):
    ''' Set up fake data to test.'''

    nints = 1

    data_model = RampModel()
    data_model.data = np.zeros(shape=(nints, ngroups, nrows, ncols),
                               dtype=np.float32)
    data_model.pixeldq = np.zeros(shape=(nrows, ncols), dtype=np.int32)
    data_model.meta.subarray.xstart = 1
    data_model.meta.subarray.ystart = 1
    data_model.meta.subarray.xsize = ncols
    data_model.meta.subarray.ysize = nrows
    data_model.meta.instrument.name = 'NIRCAM'

    bias_model = SuperBiasModel()
    bias_model.data = np.zeros(shape=(2048, 2048), dtype=np.float32)
    bias_model.dq = np.zeros(shape=(2048, 2048), dtype=np.int32)
    bias_model.meta.subarray.xstart = 1
    bias_model.meta.subarray.ystart = 1
    bias_model.meta.subarray.xsize = 2048
    bias_model.meta.subarray.ysize = 2048
    bias_model.meta.instrument.name = 'NIRCAM'
    bias_model.meta.description = 'Fake data.'
    bias_model.meta.telescope = 'JWST'
    bias_model.meta.reftype = 'SuperBiasModel'
    bias_model.meta.author = 'Alicia'
    bias_model.meta.pedigree = 'Dummy'
    bias_model.meta.useafter = '2015-10-01T00:00:00'

    return data_model, bias_model
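These fixtures are presumably handed to the pipeline's superbias step, with the fake reference file passed as an override so no CRDS lookup is needed. A hedged sketch of such a call (override_superbias is the standard jwst mechanism for overriding a reference file; the exact invocation in the original test is not shown):

from jwst.superbias import SuperBiasStep

data_model, bias_model = setup_cube(ngroups=5, nrows=2048, ncols=2048)
result = SuperBiasStep.call(data_model, override_superbias=bias_model)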
Example #3
def setup_cube(ngroups, nrows, ncols):
    ''' Set up fake data to test.'''

    nints = 1

    data_model = RampModel()
    data_model.data = np.zeros(shape=(nints, ngroups, nrows, ncols), dtype=np.float32)
    data_model.pixeldq = np.zeros(shape=(nrows, ncols), dtype=np.int32)
    data_model.groupdq = np.zeros(shape=(nints, ngroups, nrows, ncols), dtype=np.uint8)
    data_model.meta.subarray.xstart = 1
    data_model.meta.subarray.ystart = 1
    data_model.meta.subarray.xsize = ncols
    data_model.meta.subarray.ysize = nrows
    data_model.meta.exposure.ngroups = ngroups
    data_model.meta.instrument.name = 'NIRCAM'

    saturation_model = SaturationModel()
    saturation_model.data = np.zeros(shape=(2048, 2048), dtype=np.float32)
    saturation_model.dq = np.zeros(shape=(2048, 2048), dtype=np.int32)
    saturation_model.meta.subarray.xstart = 1
    saturation_model.meta.subarray.ystart = 1
    saturation_model.meta.subarray.xsize = 2048
    saturation_model.meta.subarray.ysize = 2048
    saturation_model.meta.instrument.name = 'NIRCAM'
    saturation_model.meta.description = 'Fake data.'
    saturation_model.meta.telescope = 'JWST'
    saturation_model.meta.reftype = 'SaturationModel'
    saturation_model.meta.author = 'Alicia'
    saturation_model.meta.pedigree = 'Dummy'
    saturation_model.meta.useafter = '2015-10-01T00:00:00'

    return data_model, saturation_model
Example #4
    def run(self):
        #check the proposed output name. If it exists, remove it.
        if self.outfile is None:
            dot = self.infile.rfind('.')
            self.outfile = self.infile[0:dot] + '_refpixg0.fits'

        if os.path.isfile(self.outfile):
            os.remove(self.outfile)

        #read in data
        ramp = RampModel(self.infile)
        data = ramp.data

        #get data shape
        nint, ngroup, ny, nx = data.shape

        #make a copy of the 0th read
        zero_read = copy.deepcopy(ramp.data[:, 0, :, :])

        #subtract the zeroth read from all subsequent reads
        for integration in range(nint):
            data[integration, :, :, :] -= zero_read[integration, :, :]
        ramp.data = data

        #run the SSB pipeline's refpix step
        ramp = RefPixStep.call(ramp,
                               use_side_ref_pixels=True,
                               odd_even_columns=True,
                               odd_even_rows=False,
                               config_file='refpix.cfg')

        #now add the original 0th read back in
        data = ramp.data
        for integration in range(nint):
            data[integration, :, :, :] += zero_read[integration, :, :]
        ramp.data = data

        #save the result
        ramp.save(self.outfile)
Example #5
def test_fake_pedestals(darkcases, rates, pedestals):
    '''Test ramp-fit step with fake data.'''

    # open ramp to get data shape and headers
    m = RampModel(darkcases)
    tgroup = m.meta.exposure.group_time
    rates = float(rates)
    pedestals = float(pedestals)

    nrows = int(m.meta.subarray.ysize)
    ncols = int(m.meta.subarray.xsize)
    ngroups = int(m.meta.exposure.ngroups)
    nints = int(m.meta.exposure.nints)

    # create fake ramps with known slope and pedestal
    new_data = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    for i in np.arange(0, ngroups):
        for j in np.arange(4, 2044):
            for k in np.arange(4, 2044):
                new_data[0, i, j, k] = pedestals + rates * ((i + 1) * tgroup)

    # save it
    m.data = new_data
    m.err = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    fake_data_outname = '{}_rate{}_pedestal{}_test_fake_pedestals_uncal.fits'.format(
        darkcases[:-5], rates, pedestals)
    m.save(fake_data_outname, overwrite=True)

    output, outint = RampFitStep.call(
        m,
        output_file=fake_data_outname[:-5] + "_rate.fits",
        save_opt=True,
        opt_name=fake_data_outname[:-5] + "_rate_opt.fits")
    optoutput = fits.open(fake_data_outname[:-5] + "_rate_opt.fits")

    # check pedestal
    clip = sigma_clip(optoutput['PEDESTAL'].data)
    clip.data[clip.mask] = np.nan
    meanped = np.nanmean(clip.data)
    assert np.allclose(pedestals, meanped, rtol=2, atol=2)

    optoutput.close()
Example #6
    def _cube():

        # create a JWST datamodel for NIRSPEC IRS2 data
        data_model = RampModel((1, 5, 3200, 2048))
        data_model.data = np.ones((1, 5, 3200, 2048))
        data_model.groupdq = np.zeros((1, 5, 3200, 2048))
        data_model.pixeldq = np.zeros((3200, 2048))
        data_model.meta.instrument.name = 'NIRSPEC'
        data_model.meta.instrument.detector = 'NRS1'
        data_model.meta.instrument.filter = 'F100LP'
        data_model.meta.observation.date = '2019-07-19'
        data_model.meta.observation.time = '23:23:30.912'
        data_model.meta.exposure.type = 'NRS_LAMP'
        data_model.meta.subarray.name = 'FULL'
        data_model.meta.subarray.xstart = 1
        data_model.meta.subarray.xsize = 2048
        data_model.meta.subarray.ystart = 1
        data_model.meta.subarray.ysize = 2048
        data_model.meta.exposure.nrs_normal = 16
        data_model.meta.exposure.nrs_reference = 4
        data_model.meta.exposure.readpatt = 'NRSIRS2RAPID'

        # create a saturation model for the saturation step
        saturation_model = SaturationModel((2048, 2048))
        saturation_model.data = np.ones(
            (2048, 2048)) * 60000  # saturation limit for every pixel is 60000
        saturation_model.meta.description = 'Fake data.'
        saturation_model.meta.telescope = 'JWST'
        saturation_model.meta.reftype = 'SaturationModel'
        saturation_model.meta.useafter = '2015-10-01T00:00:00'
        saturation_model.meta.instrument.name = 'NIRSPEC'
        saturation_model.meta.instrument.detector = 'NRS1'
        saturation_model.meta.author = 'Clare'
        saturation_model.meta.pedigree = 'Dummy'
        saturation_model.meta.subarray.xstart = 1
        saturation_model.meta.subarray.xsize = 2048
        saturation_model.meta.subarray.ystart = 1
        saturation_model.meta.subarray.ysize = 2048

        return data_model, saturation_model
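As with the superbias fixture, these models are presumably fed to the saturation step, with the fake reference file supplied as an override. A sketch under that assumption:

from jwst.saturation import SaturationStep

data_model, saturation_model = _cube()
result = SaturationStep.call(data_model, override_saturation=saturation_model)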
Example #7
def test_1overf(cases, sigmas, smoothing_lengths, tolerances, side_gains):
    '''Test amp average and 1/f noise subtraction.'''

    test_name = 'only_1overf'

    # pipeline refpix correction results
    # ----------------
    refq = RefPixStep.call(cases,
                           odd_even_columns=False,
                           use_side_ref_pixels=True,
                           side_smoothing_length=smoothing_lengths,
                           side_gain=side_gains,
                           odd_even_rows=False)
    outname = cases[:-5] + '_refpix_only1overf_pipeline.fits'
    refq.save(outname)

    # manual refpix correction results
    # --------------

    # read in input file
    ramp = RampModel(cases)
    rampdata = np.copy(ramp.data)
    pixeldq = ramp.pixeldq
    goodpix = pixeldq == 0

    #extract subarray if necessary
    xs, xe, ys, ye = get_coords_rampmodel(ramp)

    # get bounds
    num_left = np.max([4 - xs, 0])
    num_right = np.max([xe - 2044, 0])
    num_bottom = np.max([4 - ys, 0])
    num_top = np.max([ye - 2044, 0])

    # do the manual subtraction
    refq_manual, table = amp_only_refpix_corr(rampdata, sigmas, num_left,
                                              num_right, num_bottom, num_top,
                                              goodpix)
    refq_manual, outtable = include_1overf(refq_manual, sigmas, num_left,
                                           num_right, num_bottom, num_top,
                                           pixeldq, smoothing_lengths,
                                           side_gains)

    # save table to compare groups and amps
    save_df_table(outtable, cases[:-5] + '_include_1overf_amponly.dat')

    # compare manual to pipeline
    diff = refq_manual - refq.data

    # save an image of the differences between manual and pipeline subtraction
    images = RampModel()
    images.data = diff
    images.save(cases[:-5] + '_refpix_only1overf_differences.fits',
                overwrite=True)

    # check some values
    print("Group, Diffs: Amp1 through Amp4")
    for i in np.arange(0, diff.shape[1]):
        print('')
        print('Pipeline: {},{},{},{},{}'.format(
            i, refq.data[0, i, 12, 12], refq.data[0, i, 12, 600],
            refq.data[0, i, 12, 1030], refq.data[0, i, 12, 1600]))
        print('Manual: {},{},{},{},{}'.format(
            i, refq_manual[0, i, 12, 12], refq_manual[0, i, 12, 600],
            refq_manual[0, i, 12, 1030], refq_manual[0, i, 12, 1600]))

    # save out data
    outramp = RampModel()
    outramp.data = refq_manual
    outramp.save(cases[:-5] + '_refpix_only1overf_manual.fits', overwrite=True)

    # pytest to make sure pipeline ~= manual
    if not np.allclose(refq_manual,
                       refq.data,
                       rtol=tolerances[1],
                       atol=tolerances[0],
                       equal_nan=True):

        # if test fails, get image of (manual - pipeline) for each group
        display_multi_image(diff, np.shape(diff)[1], tolerances[1], test_name)

        print('')
        print("Group, Max Difference")
        for i in np.arange(0, diff.shape[1]):
            print('Max difference between pipeline and manual: {},{}'.format(
                i, np.max(np.absolute(diff[0, i, :, :]))))
        print('')

    assert np.allclose(refq_manual,
                       refq.data,
                       rtol=tolerances[1],
                       atol=tolerances[0],
                       equal_nan=True)
Example #8
def test_CR_handling(darkcases, rates, pedestals):
    '''Test ramp-fit step with fake data.'''

    # open ramp to get data shape and headers
    m = RampModel(darkcases)
    tgroup = m.meta.exposure.group_time
    rates = float(rates)
    pedestals = float(pedestals)

    ngroups = 10
    nints = 1
    nrows = int(m.meta.subarray.ysize)
    ncols = int(m.meta.subarray.xsize)
    m.meta.exposure.ngroups = ngroups
    m.meta.exposure.nints = nints
    m.err = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    m.groupdq = np.zeros((nints, ngroups, nrows, ncols), dtype=np.uint8)

    # create fake ramps with known slope and pedestal
    new_data = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    for ints in np.arange(0, nints):
        for i in np.arange(0, ngroups):
            for j in np.arange(4, 2044):
                for k in np.arange(4, 2044):
                    new_data[ints, i, j,
                             k] = pedestals + rates * ((i + 1) * tgroup)

    # add in jump to one of the pixels
    new_data[0, 2:, 500, 500] = new_data[0, 2:, 500, 500] + (rates * 5)
    m.groupdq[0, 2, 500, 500] = 4  # 4 = JUMP_DET

    # add in two jumps to one of the pixels
    new_data[0, 2:, 740, 740] = new_data[0, 2:, 740, 740] + (rates * 5)
    new_data[0, 6:, 740, 740] = new_data[0, 6:, 740, 740] + (rates * 6)
    m.groupdq[0, 2, 740, 740] = 4  # 4 = JUMP_DET
    m.groupdq[0, 6, 740, 740] = 4

    # save it
    m.data = new_data
    fake_data_outname = '{}_rate{}_pedestal{}_test2_uncal.fits'.format(
        darkcases[:-5], rates, pedestals)
    # m.save(fake_data_outname,overwrite=True)

    output, outint = RampFitStep.call(
        m,
        output_file=fake_data_outname[:-5] + "rate.fits",
        save_opt=True,
        opt_name=fake_data_outname[:-5] + "rate_opt.fits")
    optoutput = fits.open(fake_data_outname[:-5] + "rate_opt.fits")

    # check output rates in regular output
    clip = sigma_clip(output.data)
    clip.data[clip.mask] = np.nan
    clip.data[output.dq != 0] = np.nan
    meanrate = np.nanmean(clip.data)
    assert np.allclose(meanrate, rates, rtol=8, atol=8)

    # check output rates in INTS output
    if nints > 1:
        for i in np.arange(0, nints):
            clip = sigma_clip(outint.data[i, :, :])
            clip.data[clip.mask] = np.nan
            clip.data[output.dq != 0] = np.nan
            meanrate = np.nanmean(clip.data)
            assert np.allclose(meanrate, rates, rtol=8, atol=8)

    # CR rates from rate_opt.fits file
    ratebeforeCR1 = optoutput['SLOPE'].data[0, 0, 740, 740]
    rateafterCR1 = optoutput['SLOPE'].data[0, 1, 740, 740]
    ratebeforeCR2 = optoutput['SLOPE'].data[0, 1, 740, 740]
    rateafterCR2 = optoutput['SLOPE'].data[0, 2, 740, 740]
    assert np.allclose(ratebeforeCR1, rateafterCR1, rtol=1e-2, atol=1e-2)
    assert np.allclose(ratebeforeCR2, rateafterCR2, rtol=1e-2, atol=1e-2)

    # # check to make sure slope is weighted average of intervals
    # weights = optoutput['WEIGHTS'].data
    # interval1 = optoutput['SLOPE'].data[0,0,740,740]*weights[0,0,740,740]
    # interval2 = optoutput['SLOPE'].data[0,1,740,740]*weights[0,1,740,740]
    # interval3 = optoutput['SLOPE'].data[0,2,740,740]*weights[0,2,740,740]
    # calc = (interval1 + interval2 + interval3)/(weights[0,0,740,740] + weights[0,1,740,740] +weights[0,2,740,740])
    # print(calc)

    # other integrations shouldn't have CR hit
    if nints > 1:
        int2_noCRbefore = optoutput['SLOPE'].data[1, 0, 740, 740]
        int2_noCRafter1 = optoutput['SLOPE'].data[1, 1, 740, 740]
        int2_noCRafter2 = optoutput['SLOPE'].data[1, 2, 740, 740]
        assert int2_noCRbefore == rates
        assert int2_noCRafter1 == 0.0
        assert int2_noCRafter2 == 0.0

    # CR rates for pix with no CR hit
    ratebefore = optoutput['SLOPE'].data[0, 0, 800, 800]
    rateafter = optoutput['SLOPE'].data[0, 1, 800, 800]
    assert ratebefore == output.data[800, 800]
    assert rateafter == 0.0

    # Check CR magnitude
    # right now this is just calculated as the difference
    # between the two group values for the pixel. Is that right?
    manualCRmag = new_data[0, 2, 500, 500] - new_data[0, 1, 500, 500]
    pipeCRmag = optoutput['CRMAG'].data[0, 0, 500, 500]
    assert np.allclose(manualCRmag, pipeCRmag, rtol=1, atol=1)

    manualCRmag = new_data[0, 2, 740, 740] - new_data[0, 1, 740, 740]
    pipeCRmag = optoutput['CRMAG'].data[0, 0, 740, 740]
    assert np.allclose(manualCRmag, pipeCRmag, rtol=1, atol=1)

    manualCRmag = new_data[0, 6, 740, 740] - new_data[0, 5, 740, 740]
    pipeCRmag = optoutput['CRMAG'].data[0, 1, 740, 740]
    assert np.allclose(manualCRmag, pipeCRmag, rtol=1, atol=1)

    optoutput.close()
Example #9
    def run(self):
        '''main function'''

        #check for the existance of the output file
        if self.outfile is None:
            self.outfile = (self.infile[0:-5] + '_REGROUP_' + self.readpatt +
                            '_ngroup' + str(self.ngroup) + '.fits')

        if os.path.isfile(self.outfile):
            print("WARNING: Proposed output file {} already exists. Removing.".
                  format(self.outfile))
            os.remove(self.outfile)

        #read in the exposure to use. Read in with RampModel
        exposure = RampModel(self.infile)

        #assume that the readpattern of the input file is 'RAPID'. If not, throw an error.
        rp = exposure.meta.exposure.readpatt
        if rp != 'RAPID':
            print('WARNING: Input data were not collected using the RAPID '
                  'readpattern. Quitting.')
            sys.exit(1)

        #extract data
        data = exposure.data
        err = exposure.err
        groupdq = exposure.groupdq

        #sizes
        integrations = data.shape[0]
        ingroups = data.shape[1]
        ydim = data.shape[2]
        xdim = data.shape[3]

        #if the number of groups was not requested, use the maximum for the given readpattern
        if self.ngroup is None:
            self.ngroup = readpatts[self.readpatt.lower()]['ngroup']

        #group the input groups into collections of frames which will be averaged into the output groups
        #Only group as many input groups as you need to make the requested number of output groups
        frames_per_group = readpatts[self.readpatt.lower()]['nframe']
        frames_to_skip = readpatts[self.readpatt.lower()]['nskip']
        total_frames = (frames_per_group * self.ngroup) + (frames_to_skip *
                                                           (self.ngroup - 1))
        total_exposure_time = total_frames * readpatts['rapid']['tgroup']

        #if the input file does not contain enough frames to build the
        #requested number of output groups, throw an error
        if total_frames > ingroups:
            print("WARNING: Requested regrouping requires more groups than are "
                  "contained in the input file {}. Quitting.".format(self.infile))
            sys.exit(1)

        #starting and ending indexes of the input groups to be averaged to create the new groups
        groupstart_index = np.arange(0, total_frames,
                                     frames_per_group + frames_to_skip)
        groupend_index = groupstart_index + frames_per_group

        #prepare for averaging
        newdata = np.zeros((integrations, self.ngroup, ydim, xdim))
        newerrs = np.zeros((integrations, self.ngroup, ydim, xdim))
        newgroupdq = np.zeros((integrations, self.ngroup, ydim, xdim))

        #average the input data to create the output data
        for integration in range(integrations):
            newgp = 0
            for gs, ge in zip(groupstart_index, groupend_index):

                #average the data frames
                print("Averaging groups {} to {}.".format(gs, ge - 1))
                newframe = self.avg_frame(data[integration, gs:ge, :, :])

                newdata[integration, newgp, :, :] = newframe

                #reduce the error in the new frames by sqrt(number of frames) for now
                newerrs[integration, newgp, :, :] = (
                    err[integration, gs + frames_per_group // 2, :, :] /
                    np.sqrt(frames_per_group))

                #just keep the DQ array from the final frame of the group
                #(the slice gs:ge ends at index ge - 1; indexing with ge would
                #grab a frame outside the group and can run off the array)
                newgroupdq[integration, newgp, :, :] = groupdq[integration,
                                                               ge - 1, :, :]

                #increment the counter for the new group number
                newgp += 1

        #place the updated data back into the model instance
        exposure.data = newdata
        exposure.err = newerrs
        exposure.groupdq = newgroupdq

        #update header
        exposure.meta.exposure.ngroups = self.ngroup
        exposure.meta.exposure.nframes = frames_per_group
        exposure.meta.exposure.groupgap = frames_to_skip
        exposure.meta.exposure.group_time = readpatts[
            self.readpatt.lower()]['tgroup']
        exposure.meta.exposure.exptime = total_exposure_time
        exposure.meta.exposure.readpatt = self.readpatt.upper()

        #write the regrouped file out to a new file
        exposure.save(self.outfile)
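The helper self.avg_frame is not shown in this example. Given that it is called on a (frames, y, x) slice of the ramp and must return a single 2D frame, it presumably averages along the frame axis; a minimal sketch of such a helper (only the name comes from the code above, the body is an assumption):

    def avg_frame(self, frames):
        '''Average a (nframes, ny, nx) stack into a single 2D frame.'''
        return np.mean(frames, axis=0)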