def insert_into_datamodel(self, subfile):
    """Read a dummy/substitute file in as a RampModel and insert this
    object's data and header metadata into it.

    Parameters
    ----------
    subfile : str
        Path to a file readable as a RampModel; used only as a template
        whose data arrays and metadata are overwritten.

    Returns
    -------
    RampModel
        Model populated with self.data, zeroed err/dq arrays, and
        selected keywords copied from self.header.
    """
    h = RampModel(subfile)
    h.data = self.data
    # Not every input carries a zeroframe; skip it when absent.
    # (Was a bare `except:` that could mask unrelated errors.)
    try:
        h.zeroframe = self.zeroframe
    except AttributeError:
        pass
    h.err = np.zeros_like(self.data)
    h.groupdq = np.zeros_like(self.data)
    nint, ng, ny, nx = self.data.shape
    h.pixeldq = np.zeros((ny, nx))
    # Copy exposure/instrument/subarray keywords from the FITS-style header.
    h.meta.exposure.readpatt = self.header['READPATT']
    h.meta.exposure.nints = self.header['NINTS']
    h.meta.exposure.ngroups = self.header['NGROUPS']
    h.meta.exposure.nframes = self.header['NFRAMES']
    h.meta.exposure.nskip = self.header['NSKIP']
    h.meta.exposure.groupgap = self.header['GROUPGAP']
    h.meta.exposure.type = self.header['EXP_TYPE']
    h.meta.instrument.detector = self.header['DETECTOR']
    h.meta.instrument.name = self.header['INSTRUME']
    h.meta.subarray.fastaxis = self.header['FASTAXIS']
    h.meta.subarray.slowaxis = self.header['SLOWAXIS']
    return h
def test_fake_pedestals(darkcases, rates, pedestals):
    """Test the ramp-fit step with synthetic ramps of known slope and pedestal.

    Builds a noiseless linear ramp (pedestal + rate * group_time) inside
    the template exposure `darkcases`, runs RampFitStep, and checks that
    the sigma-clipped mean of the PEDESTAL extension recovers the input.
    """
    # open ramp to get data shape and headers
    m = RampModel(darkcases)
    tgroup = m.meta.exposure.group_time
    # np.float was removed in NumPy 1.24; use the builtin.
    rates = float(rates)
    pedestals = float(pedestals)
    # NOTE(review): xsize->nrows / ysize->ncols looks transposed; both are
    # 2048 for a full frame so it is harmless here -- confirm for subarrays.
    nrows = int(m.meta.subarray.xsize)
    ncols = int(m.meta.subarray.ysize)
    ngroups = int(m.meta.exposure.ngroups)
    nints = int(m.meta.exposure.nints)
    # Create fake ramps with known slope and pedestal in the science pixels
    # (the 4-pixel reference-pixel border is left at zero). Vectorized with
    # broadcasting instead of the original per-pixel triple loop.
    new_data = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    ramp = (pedestals + rates * tgroup
            * np.arange(1, ngroups + 1)).astype(np.float32)
    new_data[0, :, 4:2044, 4:2044] = ramp[:, None, None]
    # save it
    m.data = new_data
    m.err = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    fake_data_outname = (darkcases[:-5] + "_rate" + str(rates)
                         + "_pedestal" + str(pedestals)
                         + "_test_fake_pedestals_uncal.fits")
    m.save(fake_data_outname, overwrite=True)
    opt_outname = fake_data_outname[:-5] + "_rate_opt.fits"
    output, outint = RampFitStep.call(
        m, output_file=fake_data_outname[:-5] + "_rate.fits",
        save_opt=True, opt_name=opt_outname)
    # BUG FIX: the original opened "...rate_opt.fits" (missing underscore),
    # which is not the optional-results file written above.
    optoutput = fits.open(opt_outname)
    # check pedestal
    clip = sigma_clip(optoutput['PEDESTAL'].data)
    clip.data[clip.mask] = np.nan
    meanped = np.nanmean(clip.data)
    assert np.allclose(pedestals, meanped, rtol=2, atol=2)
    optoutput.close()
def test_CR_handling(darkcases, rates, pedestals):
    """Test the ramp-fit step's cosmic-ray (jump) handling with fake data.

    Builds noiseless ramps, injects one jump at pixel (500, 500) and two
    jumps at pixel (740, 740), flags them in groupdq, runs RampFitStep,
    and checks the fitted rates, per-interval slopes, and CR magnitudes
    from the optional-results file.
    """
    # open ramp to get data shape and headers
    m = RampModel(darkcases)
    tgroup = m.meta.exposure.group_time
    # np.float was removed in NumPy 1.24; use the builtin.
    rates = float(rates)
    pedestals = float(pedestals)
    ngroups = 10
    nints = 1
    # NOTE(review): xsize->nrows / ysize->ncols looks transposed; both are
    # 2048 for a full frame so it is harmless here -- confirm for subarrays.
    nrows = int(m.meta.subarray.xsize)
    ncols = int(m.meta.subarray.ysize)
    # BUG FIX: the original also set a misspelled 'meta.exposure.ngroup'
    # attribute; only 'ngroups' is meaningful.
    m.meta.exposure.ngroups = ngroups
    m.meta.exposure.nints = nints
    m.err = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    # NOTE(review): groupdq is conventionally an integer DQ array; float32
    # kept from the original -- confirm the pipeline accepts it.
    m.groupdq = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    # Create fake ramps with known slope and pedestal in the science pixels
    # (4-pixel reference border left at zero). Vectorized with broadcasting
    # instead of the original per-pixel quadruple loop.
    new_data = np.zeros((nints, ngroups, nrows, ncols), dtype=np.float32)
    ramp = (pedestals + rates * tgroup
            * np.arange(1, ngroups + 1)).astype(np.float32)
    new_data[:, :, 4:2044, 4:2044] = ramp[None, :, None, None]
    # add in a jump to one of the pixels, flagged at group 2
    new_data[0, 2:, 500, 500] = new_data[0, 2:, 500, 500] + (rates * 5)
    m.groupdq[0, 2, 500, 500] = 4.0
    # add in two jumps to another pixel, flagged at groups 2 and 6
    new_data[0, 2:, 740, 740] = new_data[0, 2:, 740, 740] + (rates * 5)
    new_data[0, 6:, 740, 740] = new_data[0, 6:, 740, 740] + (rates * 6)
    m.groupdq[0, 2, 740, 740] = 4.0
    m.groupdq[0, 6, 740, 740] = 4.0
    # save it
    m.data = new_data
    fake_data_outname = (darkcases[:-5] + "_rate" + str(rates)
                         + "_pedestal" + str(pedestals) + "_test2_uncal.fits")
    # m.save(fake_data_outname, overwrite=True)
    output, outint = RampFitStep.call(
        m, output_file=fake_data_outname[:-5] + "rate.fits",
        save_opt=True, opt_name=fake_data_outname[:-5] + "rate_opt.fits")
    optoutput = fits.open(fake_data_outname[:-5] + "rate_opt.fits")
    # check output rates in regular output
    clip = sigma_clip(output.data)
    clip.data[clip.mask] = np.nan
    clip.data[output.dq != 0] = np.nan
    meanrate = np.nanmean(clip.data)
    assert np.allclose(meanrate, rates, rtol=8, atol=8)
    # check output rates in the per-integration (INTS) output
    if nints > 1:
        for i in np.arange(0, nints):
            # BUG FIX: the original indexed outint.data[nints, :, :],
            # which is out of bounds; use the loop variable.
            clip = sigma_clip(outint.data[i, :, :])
            clip.data[clip.mask] = np.nan
            clip.data[output.dq != 0] = np.nan
            meanrate = np.nanmean(clip.data)
            assert np.allclose(meanrate, rates, rtol=8, atol=8)
    # Per-interval slopes from the optional-results file: segments on
    # either side of each flagged jump should fit the same rate.
    ratebeforeCR1 = optoutput['SLOPE'].data[0, 0, 740, 740]
    rateafterCR1 = optoutput['SLOPE'].data[0, 1, 740, 740]
    ratebeforeCR2 = optoutput['SLOPE'].data[0, 1, 740, 740]
    rateafterCR2 = optoutput['SLOPE'].data[0, 2, 740, 740]
    assert np.allclose(ratebeforeCR1, rateafterCR1, rtol=1e-2, atol=1e-2)
    assert np.allclose(ratebeforeCR2, rateafterCR2, rtol=1e-2, atol=1e-2)
    # other integrations shouldn't have the CR hit
    if nints > 1:
        int2_noCRbefore = optoutput['SLOPE'].data[1, 0, 740, 740]
        int2_noCRafter1 = optoutput['SLOPE'].data[1, 1, 740, 740]
        int2_noCRafter2 = optoutput['SLOPE'].data[1, 2, 740, 740]
        assert int2_noCRbefore == rates
        assert int2_noCRafter1 == 0.0
        assert int2_noCRafter2 == 0.0
    # Segment slopes for a pixel with no CR hit: only the first segment
    # exists, and it should match the regular-output rate.
    ratebefore = optoutput['SLOPE'].data[0, 0, 800, 800]
    rateafter = optoutput['SLOPE'].data[0, 1, 800, 800]
    assert ratebefore == output.data[800, 800]
    assert rateafter == 0.0
    # Check CR magnitude. Right now this is just calculated as the
    # difference between the two group values bracketing the jump.
    manualCRmag = new_data[0, 2, 500, 500] - new_data[0, 1, 500, 500]
    pipeCRmag = optoutput['CRMAG'].data[0, 0, 500, 500]
    assert np.allclose(manualCRmag, pipeCRmag, rtol=1, atol=1)
    manualCRmag = new_data[0, 2, 740, 740] - new_data[0, 1, 740, 740]
    pipeCRmag = optoutput['CRMAG'].data[0, 0, 740, 740]
    assert np.allclose(manualCRmag, pipeCRmag, rtol=1, atol=1)
    manualCRmag = new_data[0, 6, 740, 740] - new_data[0, 5, 740, 740]
    pipeCRmag = optoutput['CRMAG'].data[0, 1, 740, 740]
    assert np.allclose(manualCRmag, pipeCRmag, rtol=1, atol=1)
    optoutput.close()
def run(self):
    """Regroup a RAPID exposure into the requested read pattern.

    Averages consecutive RAPID frames into new groups according to
    self.readpatt and self.ngroup, updates the exposure metadata, and
    writes the result to self.outfile. Exits if the input is not RAPID
    or does not contain enough groups.
    """
    # Check for the existence of the output file.
    if self.outfile is None:
        self.outfile = (self.infile[0:-5] + '_REGROUP_' + self.readpatt
                        + '_ngroup' + str(self.ngroup) + '.fits')
    if os.path.isfile(self.outfile):  # & self.clobber == False):
        print("WARNING: Proposed output file {} already exists. Removing.".
              format(self.outfile))
        os.remove(self.outfile)

    # Read in the exposure to use. Read in with RampModel.
    exposure = RampModel(self.infile)

    # Assume that the readpattern of the input file is 'RAPID'.
    # If not, throw an error.
    rp = exposure.meta.exposure.readpatt
    if rp != 'RAPID':
        print(
            'WARNING! INPUT DATA WERE NOT COLLECTED USING THE RAPID READPATTERN. QUITTING.'
        )
        sys.exit(0)

    # Extract data.
    data = exposure.data
    err = exposure.err
    groupdq = exposure.groupdq

    # Sizes: (integrations, groups, y, x).
    integrations, ingroups, ydim, xdim = data.shape

    # If the number of groups was not requested, use the maximum for the
    # given readpattern.
    if self.ngroup is None:
        self.ngroup = readpatts[self.readpatt.lower()]['ngroup']

    # Group the input groups into collections of frames which will be
    # averaged into the output groups. Only group as many input groups
    # as you need to make the requested number of output groups.
    frames_per_group = readpatts[self.readpatt.lower()]['nframe']
    frames_to_skip = readpatts[self.readpatt.lower()]['nskip']
    total_frames = (frames_per_group * self.ngroup
                    + frames_to_skip * (self.ngroup - 1))
    total_exposure_time = total_frames * readpatts['rapid']['tgroup']

    # If the total number of frames needed to make the requested
    # integration don't exist, throw an error.
    if total_frames > ingroups:
        print(
            "WARNING: Requested regrouping requires more groups than are contained in the input file {}. Quitting."
            .format(self.infile))
        sys.exit(0)

    # Starting and ending (exclusive) indexes of the input groups to be
    # averaged to create the new groups.
    groupstart_index = np.arange(0, total_frames,
                                 frames_per_group + frames_to_skip)
    groupend_index = groupstart_index + frames_per_group

    # Prepare for averaging.
    newdata = np.zeros((integrations, self.ngroup, ydim, xdim))
    newerrs = np.zeros((integrations, self.ngroup, ydim, xdim))
    newgroupdq = np.zeros((integrations, self.ngroup, ydim, xdim))

    # Average the input data to create the output data.
    # BUG FIXES vs original: Python-2-only xrange/izip replaced with
    # range/zip (NameError on Python 3); the error-frame index now uses
    # integer division (frames_per_group / 2 is a float index on
    # Python 3); the group DQ now comes from the final frame of the
    # group (ge is exclusive, so groupdq[..., ge, ...] read the first
    # frame of the NEXT group and could index past the array end).
    for integration in range(integrations):
        for newgp, (gs, ge) in enumerate(zip(groupstart_index,
                                             groupend_index)):
            # average the data frames
            print("Averaging groups {} to {}.".format(gs, ge - 1))
            newframe = self.avg_frame(data[integration, gs:ge, :, :])
            newdata[integration, newgp, :, :] = newframe
            # Reduce the error in the new frames by sqrt(number of
            # frames) for now.
            newerrs[integration, newgp, :, :] = (
                err[integration, gs + frames_per_group // 2, :, :]
                / np.sqrt(frames_per_group))
            # Just keep the DQ array from the final frame of the group.
            newgroupdq[integration, newgp, :, :] = \
                groupdq[integration, ge - 1, :, :]

    # Place the updated data back into the model instance.
    exposure.data = newdata
    exposure.err = newerrs
    exposure.groupdq = newgroupdq

    # Update header metadata to describe the new grouping.
    exposure.meta.exposure.ngroups = self.ngroup
    exposure.meta.exposure.nframes = frames_per_group
    exposure.meta.exposure.groupgap = frames_to_skip
    exposure.meta.exposure.group_time = readpatts[
        self.readpatt.lower()]['tgroup']
    exposure.meta.exposure.exptime = total_exposure_time
    exposure.meta.exposure.readpatt = self.readpatt.upper()

    # Write the regrouped file out to a new file.
    exposure.save(self.outfile)