Example #1
# imports assumed by this example (jwst + astropy)
import numpy as np
from astropy.io import fits
from astropy.stats import sigma_clip
from jwst.datamodels import RampModel
from jwst.ramp_fitting import RampFitStep


def test_fake_pedestals(darkcases, rates, pedestals):
    '''Test ramp-fit step with fake data.'''

    # open ramp to get data shape and headers
    m = RampModel(darkcases)
    tgroup = m.meta.exposure.group_time
    rates = float(rates)          # np.float was removed in NumPy 1.24
    pedestals = float(pedestals)

    # ysize gives the number of rows; xsize the number of columns
    nrows = int(m.meta.subarray.ysize)
    ncols = int(m.meta.subarray.xsize)
    ngroups = int(m.meta.exposure.ngroups)
    nints = int(m.meta.exposure.nints)

    # create fake ramps with a known slope and pedestal; only the science
    # pixels (inside the 4-pixel reference border) of the first integration
    # are populated
    new_data = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    group_times = (np.arange(ngroups) + 1) * tgroup
    new_data[0, :, 4:2044, 4:2044] = (pedestals +
                                      rates * group_times)[:, None, None]

    # save it
    m.data = new_data
    m.err = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    fake_data_outname = darkcases[:-5] + "_rate" + str(
        rates) + "_pedestal" + str(
            pedestals) + "_test_fake_pedestals_uncal.fits"
    m.save(fake_data_outname, overwrite=True)

    output, outint = RampFitStep.call(
        m,
        output_file=fake_data_outname[:-5] + "_rate.fits",
        save_opt=True,
        opt_name=fake_data_outname[:-5] + "_rate_opt.fits")
    optoutput = fits.open(fake_data_outname[:-5] + "_rate_opt.fits")

    # check pedestal
    clip = sigma_clip(optoutput['PEDESTAL'].data)
    clip.data[clip.mask] = np.nan
    meanped = np.nanmean(clip.data)
    assert np.allclose(pedestals, meanped, rtol=2, atol=2)

    optoutput.close()
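
In the test suite the arguments are supplied by pytest fixtures; a minimal sketch of calling the test directly, where the dark file name and the rate/pedestal values are hypothetical:

# hypothetical direct invocation of the test above; in practice the
# darkcases, rates, and pedestals arguments come from pytest fixtures
test_fake_pedestals('nrca1_dark_uncal.fits', rates=1.5, pedestals=500.0)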
Example #2
    def run(self):
        #construct a default output name if none was given
        if self.outfile is None:
            dot = self.infile.rfind('.')
            self.outfile = self.infile[0:dot] + '_refpixg0.fits'

        #if the proposed output file already exists, remove it
        if os.path.isfile(self.outfile):
            os.remove(self.outfile)

        #read in data
        ramp = RampModel(self.infile)
        data = ramp.data

        #get data shape
        nint, ngroup, ny, nx = data.shape

        #make a copy of the 0th read
        zero_read = ramp.data[:, 0, :, :].copy()

        #subtract the zeroth read from all subsequent reads
        for integration in range(nint):
            data[integration, :, :, :] -= zero_read[integration, :, :]
        ramp.data = data

        #run the SSB pipeline's refpix step
        ramp = RefPixStep.call(ramp,
                               use_side_ref_pixels=True,
                               odd_even_columns=True,
                               odd_even_rows=False,
                               config_file='refpix.cfg')

        #now add the original 0th read back in
        data = ramp.data
        for integration in range(nint):
            data[integration, :, :, :] += zero_read[integration, :, :]
        ramp.data = data

        #save the result
        ramp.save(self.outfile)
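
Only run() is shown above, so any driver has to guess at the class; a hypothetical usage sketch:

# hypothetical usage; the class name Group0Refpix and its infile/outfile
# attributes are assumptions inferred from the run() method above
step = Group0Refpix()
step.infile = 'ramp_uncal.fits'   # hypothetical input ramp file
step.outfile = None               # default becomes '<infile>_refpixg0.fits'
step.run()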
Example #3
import numpy as np
from jwst.datamodels import RampModel
from jwst.refpix import RefPixStep

# the helper routines used below (get_coords_rampmodel, amp_only_refpix_corr,
# include_1overf, save_df_table, display_multi_image) are assumed to be
# imported from this test suite's local utilities


def test_1overf(cases, sigmas, smoothing_lengths, tolerances, side_gains):
    '''Test amp average and 1/f noise subtraction.'''

    test_name = 'only_1overf'

    # pipeline refpix correction results
    # ----------------
    refq = RefPixStep.call(cases,
                           odd_even_columns=False,
                           use_side_ref_pixels=True,
                           side_smoothing_length=smoothing_lengths,
                           side_gain=side_gains,
                           odd_even_rows=False)
    outname = cases[:-5] + '_refpix_only1overf_pipeline.fits'
    refq.save(outname)

    # manual refpix correction results
    # --------------

    # read in input file
    ramp = RampModel(cases)
    rampdata = np.copy(ramp.data)
    pixeldq = ramp.pixeldq
    goodpix = pixeldq == 0

    #extract subarray if necessary
    xs, xe, ys, ye = get_coords_rampmodel(ramp)

    # get bounds
    num_left = np.max([4 - xs, 0])
    num_right = np.max([xe - 2044, 0])
    num_bottom = np.max([4 - ys, 0])
    num_top = np.max([ye - 2044, 0])

    # do the manual subtraction
    refq_manual, table = amp_only_refpix_corr(rampdata, sigmas, num_left,
                                              num_right, num_bottom, num_top,
                                              goodpix)
    refq_manual, outtable = include_1overf(refq_manual, sigmas, num_left,
                                           num_right, num_bottom, num_top,
                                           pixeldq, smoothing_lengths,
                                           side_gains)

    # save table to compare groups and amps
    save_df_table(outtable, cases[:-5] + '_include_1overf_amponly.dat')

    # compare manual to pipeline
    diff = refq_manual - refq.data

    # save an image of the differences between manual and pipeline subtraction
    images = RampModel()
    images.data = diff
    images.save(cases[:-5] + '_refpix_only1overf_differences.fits',
                overwrite=True)

    # spot-check one column in each of the four 512-column amplifier regions
    print("Group, Diffs: Amp1 through Amp4")
    for i in np.arange(0, diff.shape[1]):
        print('')
        print('Pipeline: {},{},{},{},{}'.format(
            i, refq.data[0, i, 12, 12], refq.data[0, i, 12, 600],
            refq.data[0, i, 12, 1030], refq.data[0, i, 12, 1600]))
        print('Manual: {},{},{},{},{}'.format(
            i, refq_manual[0, i, 12, 12], refq_manual[0, i, 12, 600],
            refq_manual[0, i, 12, 1030], refq_manual[0, i, 12, 1600]))

    # save out data
    outramp = RampModel()
    outramp.data = refq_manual
    outramp.save(cases[:-5] + '_refpix_only1overf_manual.fits', overwrite=True)

    # if pipeline and manual results disagree, dump diagnostics before failing
    if not np.allclose(refq_manual,
                       refq.data,
                       rtol=tolerances[1],
                       atol=tolerances[0],
                       equal_nan=True):

        # if test fails, get image of (manual - pipeline) for each group
        display_multi_image(diff, np.shape(diff)[1], tolerances[1], test_name)

        print('')
        print("Group, Max Difference")
        for i in np.arange(0, diff.shape[1]):
            print('Max difference between pipeline and manual: ' + str(i) +
                  ',' + str(np.max(np.absolute(diff[0, i, :, :]))))
        print('')

    assert np.allclose(refq_manual,
                       refq.data,
                       rtol=tolerances[1],
                       atol=tolerances[0],
                       equal_nan=True)
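
The test's arguments arrive as pytest fixtures; a minimal conftest.py sketch, in which every file name and value is a hypothetical placeholder:

# minimal conftest.py sketch; all names and values below are hypothetical
import pytest

@pytest.fixture(params=['nrca1_uncal.fits'])
def cases(request):
    return request.param

@pytest.fixture
def sigmas():
    return 3.0

@pytest.fixture
def smoothing_lengths():
    return 11

@pytest.fixture
def tolerances():
    return [1e-5, 1e-5]  # indexed as [atol, rtol] in the test

@pytest.fixture
def side_gains():
    return 1.0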
Example #4
    def run(self):
        '''Regroup RAPID data into the requested read pattern.'''

        #check for the existence of the output file
        if self.outfile is None:
            self.outfile = self.infile[
                0:-5] + '_REGROUP_' + self.readpatt + '_ngroup' + str(
                    self.ngroup) + '.fits'

        if os.path.isfile(self.outfile):
            print("WARNING: Proposed output file {} already exists. Removing.".
                  format(self.outfile))
            os.remove(self.outfile)

        #read in the exposure to use. Read in with RampModel
        exposure = RampModel(self.infile)

        #the input file must use the RAPID read pattern; if not, warn and quit
        rp = exposure.meta.exposure.readpatt
        if rp != 'RAPID':
            print(
                'WARNING! INPUT DATA WERE NOT COLLECTED USING THE RAPID READPATTERN. QUITTING.'
            )
            sys.exit(0)

        #extract data
        data = exposure.data
        err = exposure.err
        groupdq = exposure.groupdq

        #sizes
        integrations = data.shape[0]
        ingroups = data.shape[1]
        ydim = data.shape[2]
        xdim = data.shape[3]

        #if the number of groups was not requested, use the maximum for the given readpattern
        if self.ngroup is None:
            self.ngroup = readpatts[self.readpatt.lower()]['ngroup']

        #group the input groups into collections of frames which will be averaged into the output groups
        #Only group as many input groups as you need to make the requested number of output groups
        frames_per_group = readpatts[self.readpatt.lower()]['nframe']
        frames_to_skip = readpatts[self.readpatt.lower()]['nskip']
        total_frames = (frames_per_group * self.ngroup) + (frames_to_skip *
                                                           (self.ngroup - 1))
        total_exposure_time = total_frames * readpatts['rapid']['tgroup']

        #if the total number of frames needed to make the requested integration don't exist
        #throw an error
        if total_frames > ingroups:
            print(
                "WARNING: Requested regrouping requires more groups than are contained in the input file {}. Quitting."
                .format(self.infile))
            sys.exit(0)

        #starting and ending indexes of the input groups to be averaged to create the new groups
        groupstart_index = np.arange(0, total_frames,
                                     frames_per_group + frames_to_skip)
        groupend_index = groupstart_index + frames_per_group

        #prepare for averaging
        newdata = np.zeros((integrations, self.ngroup, ydim, xdim))
        newerrs = np.zeros((integrations, self.ngroup, ydim, xdim))
        newgroupdq = np.zeros((integrations, self.ngroup, ydim, xdim))

        #average the input data to create the output data
        for integration in range(integrations):
            newgp = 0
            for gs, ge in zip(groupstart_index, groupend_index):

                #average the data frames
                print("Averaging groups {} to {}.".format(gs, ge - 1))
                newframe = self.avg_frame(data[integration, gs:ge, :, :])

                newdata[integration, newgp, :, :] = newframe

                #reduce the error in the new frames by sqrt(number of frames)
                #for now, taking the error from the middle frame of the group
                newerrs[integration,
                        newgp, :, :] = err[integration, gs + frames_per_group //
                                           2, :, :] / np.sqrt(frames_per_group)

                #just keep the DQ array from the final frame of the group
                #(the slice gs:ge ends at index ge - 1)
                newgroupdq[integration, newgp, :, :] = groupdq[integration,
                                                               ge - 1, :, :]

                #increment the counter for the new group number
                newgp += 1

        #place the updated data back into the model instance
        exposure.data = newdata
        exposure.err = newerrs
        exposure.groupdq = newgroupdq

        #update header
        exposure.meta.exposure.ngroups = self.ngroup
        exposure.meta.exposure.nframes = frames_per_group
        exposure.meta.exposure.groupgap = frames_to_skip
        exposure.meta.exposure.group_time = readpatts[
            self.readpatt.lower()]['tgroup']
        exposure.meta.exposure.exptime = total_exposure_time
        exposure.meta.exposure.readpatt = self.readpatt.upper()

        #write the regrouped file out to a new file
        exposure.save(self.outfile)
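
run() depends on a module-level readpatts dictionary mapping read-pattern names to their frame structure; a sketch of its assumed shape, with illustrative NIRCam-style numbers rather than values from the source:

# assumed shape of the module-level readpatts lookup used by run();
# the numeric values below are illustrative, not taken from the source
readpatts = {
    'rapid':    {'ngroup': 10, 'nframe': 1, 'nskip': 0, 'tgroup': 10.737},
    'bright1':  {'ngroup': 10, 'nframe': 1, 'nskip': 1, 'tgroup': 21.474},
    'shallow4': {'ngroup': 10, 'nframe': 4, 'nskip': 1, 'tgroup': 53.685},
}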
Example #5
    def export(self, outfile, all_data=False):
        """
        Export the simulated data to a JWST pipeline ingestible FITS file

        Parameters
        ----------
        outfile: str
            The path of the output file
        """
        # Make a RampModel
        data = self.tso
        mod = RampModel(data=data,
                        groupdq=np.zeros_like(data),
                        pixeldq=np.zeros((self.nrows, self.ncols)),
                        err=np.zeros_like(data))
        pix = utils.subarray_specs(self.subarray)

        # Set meta data values for header keywords
        mod.meta.telescope = 'JWST'
        mod.meta.instrument.name = 'NIRISS'
        mod.meta.instrument.detector = 'NIS'
        mod.meta.instrument.filter = self.filter
        mod.meta.instrument.pupil = 'GR700XD'
        mod.meta.exposure.type = 'NIS_SOSS'
        mod.meta.exposure.nints = self.nints
        mod.meta.exposure.ngroups = self.ngrps
        mod.meta.exposure.nframes = self.nframes
        mod.meta.exposure.readpatt = 'NISRAPID'
        mod.meta.exposure.groupgap = 0
        mod.meta.exposure.frame_time = self.frame_time
        mod.meta.exposure.group_time = self.group_time
        mod.meta.exposure.duration = self.time[-1] - self.time[0]
        mod.meta.subarray.name = self.subarray
        mod.meta.subarray.xsize = data.shape[3]
        mod.meta.subarray.ysize = data.shape[2]
        mod.meta.subarray.xstart = pix.get('xloc', 1)
        mod.meta.subarray.ystart = pix.get('yloc', 1)
        mod.meta.subarray.fastaxis = -2
        mod.meta.subarray.slowaxis = -1
        mod.meta.observation.date = self.obs_date
        mod.meta.observation.time = self.obs_time
        mod.meta.target.ra = self.ra
        mod.meta.target.dec = self.dec
        mod.meta.target.source_type = 'POINT'

        # Save the file
        mod.save(outfile, overwrite=True)

        # Save input data
        with fits.open(outfile) as hdul:

            # Save input star data
            hdul.append(
                fits.ImageHDU(data=np.array([i.value for i in self.star],
                                            dtype=np.float64),
                              name='STAR'))
            hdul['STAR'].header.set('FUNITS', str(self.star[1].unit))
            hdul['STAR'].header.set('WUNITS', str(self.star[0].unit))

            # Save input planet data
            if self.planet is not None:
                hdul.append(
                    fits.ImageHDU(data=np.asarray(self.planet,
                                                  dtype=np.float64),
                                  name='PLANET'))
                for param, val in self.tmodel.__dict__.items():
                    if isinstance(val, (float, int, str)):
                        hdul['PLANET'].header.set(param.upper()[:8], val)
                    elif isinstance(val, np.ndarray) and len(val) == 1:
                        hdul['PLANET'].header.set(param.upper(), val[0])
                    elif isinstance(val, type(None)):
                        hdul['PLANET'].header.set(param.upper(), '')
                    elif param == 'u':
                        for n, v in enumerate(val):
                            hdul['PLANET'].header.set('U{}'.format(n + 1), v)
                    else:
                        print(param, val, type(val))

            # Write to file
            hdul.writeto(outfile, overwrite=True)

        print('File saved as', outfile)
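
The STAR and PLANET extensions written above are non-standard, so a short read-back sketch may help; the file name is a hypothetical placeholder:

from astropy.io import fits

# read back the extra extensions written by export(); the file name
# 'simulation_uncal.fits' is hypothetical
with fits.open('simulation_uncal.fits') as hdul:
    star = hdul['STAR'].data                # input stellar spectrum
    wunits = hdul['STAR'].header['WUNITS']  # wavelength units
    funits = hdul['STAR'].header['FUNITS']  # flux units
    try:
        planet = hdul['PLANET'].data        # input planet spectrum, if any
    except KeyError:
        planet = None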