def run_early_pipeline(self, filename, odd_even_rows=False,
                       odd_even_columns=True, use_side_ref_pixels=True,
                       group_scale=False):
    """Run the early jwst pipeline steps (dq_init, saturation,
    superbias, refpix) on an uncalibrated file and write out the result.

    Parameters
    ----------
    filename : str
        File on which to run the pipeline steps
    odd_even_rows : bool
        Option to treat odd and even rows separately during refpix step
    odd_even_columns : bool
        Option to treat odd and even columns separately during refpix step
    use_side_ref_pixels : bool
        Option to perform the side refpix correction during refpix step
    group_scale : bool
        Option to rescale pixel values to correct for instances where
        on-board frame averaging did not result in the proper values

    Returns
    -------
    output_filename : str
        The full path to the calibrated file
    """
    output_filename = filename.replace('_uncal', '').replace(
        '.fits', '_superbias_refpix.fits')

    # Skip all processing if the calibrated product already exists
    if os.path.isfile(output_filename):
        logging.info('\t{} already exists'.format(output_filename))
        return output_filename

    # Optional group_scale step, then dq_init
    if group_scale:
        model = GroupScaleStep.call(filename)
        model = DQInitStep.call(model)
    else:
        model = DQInitStep.call(filename)

    # Saturation flagging followed by superbias subtraction
    model = SaturationStep.call(model)
    model = SuperBiasStep.call(model)

    # Reference pixel correction, then save the final product
    model = RefPixStep.call(model,
                            odd_even_rows=odd_even_rows,
                            odd_even_columns=odd_even_columns,
                            use_side_ref_pixels=use_side_ref_pixels)
    model.save(output_filename)
    set_permissions(output_filename)

    return output_filename
def run_jump_step(self, infile, threshold, run_steps):
    """Run the jump detection step, optionally preceded by the earlier
    calibration steps.

    Parameters
    ----------
    infile : str
        Input FITS file (``*.fits``) on which to run the step(s).
    threshold : float or None
        Rejection threshold to pass to the jump step; if None the step's
        default is used.
    run_steps : bool
        If True, run dq_init, saturation, superbias, refpix, linearity,
        and dark current steps before jump detection.

    Returns
    -------
    m : obj
        Data model output by the jump step.
    """
    # output file name
    out = infile[:-5] + "_jump_CRthresh" + str(threshold) + ".fits"

    # if run_steps, run all steps prior to jump
    if run_steps:
        m = DQInitStep.call(infile)
        m = SaturationStep.call(m)
        m = SuperBiasStep.call(m)
        # BUG FIX: these outputs were previously assigned to unused
        # variables (m_ref, m_lin, m_dark), so the jump step ran on the
        # superbias output and the refpix/linearity/dark corrections
        # were silently discarded. Chain each output into the next step.
        m = RefPixStep.call(m, config_file='refpix.cfg')
        m = LinearityStep.call(m)
        m = DarkCurrentStep.call(m)

        # if threshold is given, use that rejection threshold
        if threshold is not None:
            m = JumpStep.call(m, output_file=out,
                              rejection_threshold=threshold)
        else:
            m = JumpStep.call(m, output_file=out)
    # else, run only jump_step
    else:
        if threshold is not None:
            m = JumpStep.call(infile, output_file=out,
                              rejection_threshold=threshold)
        else:
            m = JumpStep.call(infile, output_file=out)

    return m
def run(tup):
    """Run dq_init, bias_drift, and (optionally) IPC correction on a file.

    Pipeline steps applied here:
      dq_init    - yes (skipped when ipconly is set)
      bias_drift - yes (skipped when ipconly is set)
      ipc        - yes/no (run when a kernel is given, or when ipconly)
    saturation, superbias, linearity, dark, jump, and ramp fit are not run.

    Parameters
    ----------
    tup : tuple
        (file, cfile, outfile, maskfile, ipckernel, ipconly); cfile is
        currently unused.
    """
    # Unpack the work item. Fixed: locals no longer shadow the builtins
    # `file` and `input`.
    infile, _cfile, outfile, maskfile, ipckernel, ipconly = tup[:6]

    # Fixed idiom: `not ipconly` / `is not None` instead of `== False`
    # and `!= None` comparisons.
    if not ipconly:
        result = DQInitStep.call(infile, override_mask=maskfile)
        result = BiasDriftStep.call(result, config_file='bias_drift.cfg')
        if ipckernel is not None:
            result = IPCStep.call(result, override_ipc=ipckernel)
    else:
        result = IPCStep.call(infile, override_ipc=ipckernel)

    result.save(outfile)
def run_sat_step(self, file, maskfile, satfile, outfile, run_dq=True):
    """Run the saturation pipeline step, optionally preceded by dq_init.

    Parameters
    ----------
    file : str
        Input file to process.
    maskfile : str or None
        Bad pixel mask to override the default in the dq_init step.
    satfile : str
        Saturation reference file to override the default.
    outfile : str
        Name for the saturation step's output file.
    run_dq : bool
        If True, run the dq_init step before saturation flagging.

    Returns
    -------
    obj
        Data model output by the saturation step.
    """
    source = file
    if run_dq:
        # dq_init first; only pass the override mask when one was supplied
        if maskfile is None:
            source = DQInitStep.call(file)
        else:
            source = DQInitStep.call(file, override_mask=maskfile)
    # Saturation flagging on either the raw file or the dq_init output
    return SaturationStep.call(source, override_saturation=satfile,
                               output_file=outfile)
def pipeline_check(reference_filename, filename):
    """Check that the pipeline's dq_init step runs successfully with the
    provided bad pixel mask reference file.

    Parameters
    ----------
    reference_filename : str
        Name of the bad pixel mask reference filename
    filename : str
        Name of the fit exposure on which to test the reference file
    """
    # A successful call is the entire check; the output model is discarded
    model = DQInitStep.call(filename, override_mask=reference_filename)
def run_cal(file):
    """Run dq_init, saturation, and group-0 reference pixel correction on
    a file, using the bad pixel mask and well depth map that match its
    detector.

    Parameters
    ----------
    file : str
        Name of the uncalibrated FITS file to process.
    """
    # get list of bad pixel masks and saturation maps that can be used
    bpmlist = glob.glob(
        '/ifs/jwst/wit/witserv/data7/nrc/reference_files/SSB/CV3/cv3_reffile_conversion/bpm/*DMSorient.fits'
    )
    satlist = glob.glob(
        '/ifs/jwst/wit/witserv/data7/nrc/reference_files/SSB/CV3/cv3_reffile_conversion/welldepth/*ADU*DMSorient.fits'
    )

    # Detector name from the header; long-wave detectors appear as '5'
    # in the reference file names rather than 'LONG'
    detstr = fits.getval(file, 'DETECTOR')
    if 'LONG' in detstr:
        detstr = detstr[0:4] + '5'

    # find the appropriate bad pixel mask and saturation file for the detector
    # NOTE(review): an empty match list still raises IndexError at bpm[0]
    # below — confirm whether that case needs explicit handling.
    bpm = [s for s in bpmlist if detstr in s]
    if len(bpm) > 1:
        # BUG FIX: previously hit the undefined name `stophere`, which
        # crashed with an unhelpful NameError. Raise a real error instead.
        raise RuntimeError(
            "More than one bad pixel mask found. Need a better list of possibilities."
        )
    welldepth = [s for s in satlist if detstr in s]
    if len(welldepth) > 1:
        raise RuntimeError(
            "More than one saturation map found. Need a better list of possibilities."
        )

    # run the dq_init step
    dqfile = file[0:-5] + '_dq_init.fits'
    dqstep = DQInitStep.call(file, config_file='dq_init.cfg',
                             override_mask=bpm[0], output_file=dqfile)

    # run saturation flagging
    satfile = dqfile[0:-5] + '_saturation.fits'
    satstep = SaturationStep.call(dqstep, config_file='saturation.cfg',
                                  override_saturation=welldepth[0],
                                  output_file=satfile)

    # run the reference pixel subtraction step using the group 0 subtraction
    # and re-addition, which is what the Build 4 pipeline used to use.
    refcor = refpix_g0()
    refcor.infile = satfile
    refcor.outfile = None
    refcor.run()
def test_fullstep(xstart, ystart, xsize, ysize, nints, ngroups,
                  instrument, exp_type, detector):
    """Test that the full step runs"""
    # Build raw ramp input and attach the metadata the step requires
    ramp = make_rawramp(instrument, nints, ngroups, ysize, xsize,
                        ystart, xstart, exp_type)
    ramp.meta.instrument.name = instrument
    ramp.meta.instrument.detector = detector
    ramp.meta.observation.date = '2016-06-01'
    ramp.meta.observation.time = '00:00:00'

    # run the full step
    result = DQInitStep.call(ramp)

    # A 2-d pixeldq frame (named `dq` for FGS) must have been initialized
    dq_frame = result.dq if instrument == "FGS" else result.pixeldq
    assert dq_frame.ndim == 2
def linearize_dark(self, darkobj):
    """Beginning with the input dark current ramp, run the dq_init,
    saturation, superbias subtraction, refpix and nonlin pipeline steps
    in order to produce a linearized version of the ramp. This will be
    used when combining the dark ramp with the simulated signal ramp.

    Parameters
    -----------
    darkobj : obj
        Instance of read_fits class containing dark current data and info

    Returns
    -------
    linDarkObj : obj
        Modified read_fits instance with linearized dark current data
    """
    from jwst.dq_init import DQInitStep
    from jwst.saturation import SaturationStep
    from jwst.superbias import SuperBiasStep
    from jwst.refpix import RefPixStep
    from jwst.linearity import LinearityStep

    # First we need to place the read_fits object into a RampModel instance.
    # The subfile chosen here supplies the model structure the dark data is
    # inserted into.
    if self.runStep['linearized_darkfile']:
        subfile = self.params['Reffiles']['linearized_darkfile']
    else:
        subfile = self.params['Reffiles']['dark']
    dark = darkobj.insert_into_datamodel(subfile)

    print('Creating a linearized version of the dark current input ramp')
    print('using JWST calibration pipeline.')

    # Run the DQ_Init step, overriding the bad pixel mask if one is provided
    if self.runStep['badpixmask']:
        linDark = DQInitStep.call(dark,
                                  config_file=self.params['newRamp']['dq_configfile'],
                                  override_mask=self.params['Reffiles']['badpixmask'])
    else:
        linDark = DQInitStep.call(dark,
                                  config_file=self.params['newRamp']['dq_configfile'])

    # If the saturation map is provided, use it. If not, default to
    # whatever is in CRDS
    if self.runStep['saturation_lin_limit']:
        linDark = SaturationStep.call(linDark,
                                      config_file=self.params['newRamp']['sat_configfile'],
                                      override_saturation=self.params['Reffiles']['saturation'])
    else:
        linDark = SaturationStep.call(linDark,
                                      config_file=self.params['newRamp']['sat_configfile'])

    # If the superbias file is provided, use it. If not, default to
    # whatever is in CRDS
    if self.runStep['superbias']:
        linDark = SuperBiasStep.call(linDark,
                                     config_file=self.params['newRamp']['superbias_configfile'],
                                     override_superbias=self.params['Reffiles']['superbias'])
    else:
        linDark = SuperBiasStep.call(linDark,
                                     config_file=self.params['newRamp']['superbias_configfile'])

    # Reference pixel correction
    linDark = RefPixStep.call(linDark,
                              config_file=self.params['newRamp']['refpix_configfile'])

    # Save a copy of the superbias- and reference pixel-subtracted
    # dark. This will be used later to add these effects back in
    # after the synthetic signals have been added and the non-linearity
    # effects are added back in when using the PROPER combine method.
    sbAndRefpixEffects = dark.data - linDark.data

    # Linearity correction - save the output so that you won't need to
    # re-run the pipeline when using the same dark current file in the
    # future. Use the linearity coefficient file if provided
    base_name = self.params['Output']['file'].split('/')[-1]
    linearoutfile = base_name[0:-5] + '_linearized_dark_current_ramp.fits'
    linearoutfile = os.path.join(self.params['Output']['directory'],
                                 linearoutfile)
    if self.runStep['linearity']:
        linDark = LinearityStep.call(linDark,
                                     config_file=self.params['newRamp']['linear_configfile'],
                                     override_linearity=self.params['Reffiles']['linearity'],
                                     output_file=linearoutfile)
    else:
        linDark = LinearityStep.call(linDark,
                                     config_file=self.params['newRamp']['linear_configfile'],
                                     output_file=linearoutfile)

    print(("Linearized dark (output directly from pipeline saved as {}"
           .format(linearoutfile)))

    # Now we need to put the data back into a read_fits object
    linDarkobj = read_fits.Read_fits()
    linDarkobj.model = linDark
    linDarkobj.rampmodel_to_obj()
    # Keep the extracted superbias+refpix contribution on the object so the
    # caller can re-apply it later
    linDarkobj.sbAndRefpix = sbAndRefpixEffects
    return linDarkobj
def test_dq_init_step(fits_input):
    """Make sure the DQInitStep runs without error."""
    # Derive the output name from the input's primary header
    base = fits_input['PRIMARY'].header['filename']
    fname = base.replace('.fits', '_dqinitstep.fits')
    DQInitStep.call(fits_input, output_file=fname, save_results=True)
def test_dq_init_step(fits_input):
    """Make sure the DQInitStep runs without error."""
    # save_results=True also exercises the step's result-writing path
    DQInitStep.call(fits_input, save_results=True)
def run(self):
    '''Main function: measure per-pixel saturation levels from a list of
    exposures and write the averaged result as a reference file.

    Workflow: run dq_init/superbias/refpix on each input file, find each
    pixel's linear regime and saturation level, combine across exposures,
    then write the saturation map, errors, and intermediate products to
    FITS files.
    '''
    # Read in list of files to use to calculate saturation.
    files = self.read_listfile(self.listfile)

    # Check that input files have the same readpattern, array size, etc.
    detector, xshape, yshape = self.input_consistency_check(files)
    det_short = str(detector[3:])
    if 'long' in det_short:
        det_short = det_short[0] + '5'

    # Set up arrays to hold output data.
    sat_arr, grp_arr, new_dq, big_dq, tmp_mask = \
        self.init_arrays(files, xshape, yshape)

    # Create mask to isolate reference pixels (the 4-pixel border)
    tmp_mask[:, 4:-4, 4:-4] = True

    # Set reference pixel values so they don't get used in calculations.
    sat_arr[~tmp_mask] = 1e6
    grp_arr[~tmp_mask] = np.nan
    big_dq[~tmp_mask] = np.uint8(0)

    # Loop over files.
    for file, n in zip(files, np.arange(0, len(files))):

        # Run dq, superbias, refpix steps and save outputs (optional)
        bpm = DQInitStep.call(file)
        sup = SuperBiasStep.call(bpm)
        ref = RefPixStep.call(sup, odd_even_rows=False)
        if self.intermediates:
            sup.save(file[:-5] + '_dq_superbias.fits')
            ref.save(file[:-5] + '_dq_superbias_refpix.fits')

        # Grab the name of the mask file used from the headers; CRDS-style
        # names are mapped to the local CRDS cache path
        bpmcalfile = ref.meta.ref_file.mask.name
        if 'crds' in bpmcalfile:
            jwst = bpmcalfile.find('jwst')
            bpmfile = '/grp/crds/cache/references/jwst/' + bpmcalfile[jwst:]
        else:
            bpmfile = bpmcalfile

        # Get data values
        mask = fits.getdata(bpmfile, 1)
        data = ref.data
        xstart = ref.meta.subarray.xstart
        ystart = ref.meta.subarray.ystart
        xend = ref.meta.subarray.xsize
        yend = ref.meta.subarray.ysize

        # Loop over pixel combinations for given array (no ref pixels).
        for i, j in itertools.product(np.arange(xstart + 3, xend - 4),
                                      np.arange(ystart + 3, yend - 4)):

            # Set values for bad pixels so they don't get used in
            # calculations; DQ=1 marks a masked-bad pixel.
            if mask[j, i] == np.uint8(1):
                sat_arr[n, j, i] = np.nan
                grp_arr[n, j, i] = np.nan
                big_dq[n, j, i] = np.uint8(1)

            else:
                # Get signal values for each pixel (first integration).
                signal = data[0, :, j, i].astype('float32')

                # Get linear region early in the ramp with method 1
                signal_range = self.get_lin_regime(signal, "method1")

                # If signal_range can't be determined, must be weird ramp
                if np.shape(signal_range)[0] == 0:

                    # Try again to get linear region with different method
                    signal_range = self.get_lin_regime(signal, "method2")

                    # If that still doesn't work, flag with DQ=2 and move on.
                    if np.shape(signal_range)[0] == 0:
                        sat_arr[n, j, i] = np.nan
                        grp_arr[n, j, i] = np.nan
                        big_dq[n, j, i] = np.uint8(2)

                    else:
                        hard_sat, first_sat_grp = \
                            self.get_saturation(signal, signal_range)

                        # Save all values; DQ=3 marks method2 fallback.
                        sat_arr[n, j, i] = hard_sat.astype('float32')
                        grp_arr[n, j, i] = first_sat_grp.astype('float32')
                        big_dq[n, j, i] = np.uint8(3)

                # Otherwise, must be good ramp?
                elif np.shape(signal_range)[0] > 0:

                    # Get hard saturation.
                    hard_sat, first_sat_grp = \
                        self.get_saturation(signal, signal_range)

                    # Save all saturation values.
                    sat_arr[n, j, i] = hard_sat.astype('float32')
                    grp_arr[n, j, i] = first_sat_grp.astype('float32')

                # Catch errors
                else:
                    print('ERROR for pixel ', i, j)
                    sys.exit(0)

    # If each file gave same pixel DQs, make sure output DQ matches
    locs = np.all(big_dq == big_dq[0, :], axis=0)
    new_dq[locs] = big_dq[0][locs]

    # Get statistics for saturation values, averaging over exposures.
    avg, err = self.calc_stats(sat_arr, big_dq)

    # Save saturation values for each exposure to a FITS file
    newhdu = fits.PrimaryHDU(sat_arr)
    newhdulist = fits.HDUList([newhdu])
    grpname = detector + '_' \
        + str(DET_NUM[detector]) \
        + '_WellDepthADU_' \
        + str(datetime.date.today()) \
        + '_beforeAverage.fits'
    newhdulist.writeto(grpname, overwrite=True)

    # Save first saturated groups array to a FITS file
    newhdu = fits.PrimaryHDU(grp_arr)
    newhdulist = fits.HDUList([newhdu])
    grpname = detector + '_' \
        + str(DET_NUM[detector]) \
        + '_WellDepthADU_' \
        + str(datetime.date.today()) \
        + '_firstSatGroup.fits'
    newhdulist.writeto(grpname, overwrite=True)

    # Save averaged saturation values to a FITS file.
    outfilename = detector + '_' \
        + str(DET_NUM[detector]) \
        + '_WellDepthADU_' \
        + str(datetime.date.today()) \
        + '_ssbsaturation_DMSorient.fits'
    outfile = self.save_reffile(avg, err, new_dq, files, outfilename)

    # Save saturation errors, since the pipeline doesn't currently use them
    errhdu = fits.PrimaryHDU(err)
    errhdulist = fits.HDUList([errhdu])
    errname = detector + '_' \
        + str(DET_NUM[detector]) \
        + '_WellDepthADU_' \
        + str(datetime.date.today()) \
        + '_saturationErrors.fits'
    errhdulist.writeto(errname, overwrite=True)

    z = fits.open(outfile)
    z0 = z[0]
    z1 = z[1]
    z2 = z[2]
    z3 = z[3]

    # Add other things that the pipeline doesn't use, but are helpful.
    z0.header['S_DQINIT'] = ('COMPLETE', 'Data Quality Initialization')
    z0.header['S_SUPERB'] = ('COMPLETE', 'Superbias Subtraction')
    z0.header['S_REFPIX'] = ('COMPLETE', 'Reference Pixel Correction')
    newhdu = fits.HDUList([z0, z1, z2, z3])
    newhdu.writeto(outfile, overwrite=True)
def run_dq_step(self, file, maskfile, outfile):
    """Run the dq_init pipeline step with an override bad pixel mask.

    Parameters
    ----------
    file : str
        Input file to process.
    maskfile : str
        Bad pixel mask reference file to override the default.
    outfile : str
        Name for the step's output file.

    Returns
    -------
    obj
        Data model output by the dq_init step.
    """
    return DQInitStep.call(file, override_mask=maskfile,
                           output_file=outfile)
# Normalize the detector name used to key into the reference file dict:
# strip the first three characters; long-wave detectors are keyed as
# e.g. 'a5' instead of 'along'.
det = det.lower()[3:]
if 'long' in det:
    det = det[0] + '5'
refdict = reffiles[det]

# NOTE(review): earlier approach using the full Detector1Pipeline with
# per-step overrides, kept here for reference:
#m = calwebb_detector1.Detector1Pipeline(config_file='calwebb_detector1.cfg')
#m.saturation.override_saturation = satdir+refdict['saturation']
#m.superbias.override_superbias = sbdir+refdict['superbias']
#m.refpix.odd_even_rows = False
#m.group_scale.skip = True
#m.ipc.skip = True
#m.rscd.skip = True
#m.lastframe.skip = True
#m.dark_current.skip = True
#m.persistence.skip = True
#m.jump.skip = True
#m.ramp_fit.skip = False #bug in pipeline means this must
#be run.
#m.linearity.override_linearity = lindir+refdict['linearity']
#m.output_file = outfile
#m.run(file)

# Run the individual early steps instead, chaining each output into the
# next, and write the linearity result to `outfile`.
# NOTE(review): `refdict` is computed above but unused by these calls —
# confirm whether the per-step overrides should be applied here too.
m = DQInitStep.call(file,config_file = 'dq_init.cfg')
m = SaturationStep.call(m,config_file = 'saturation.cfg')
m = SuperBiasStep.call(m,config_file = 'superbias.cfg')
m = RefPixStep.call(m,config_file='refpix.cfg')
m = LinearityStep.call(m,config_file='linearity.cfg',output_file = outfile)