def extract(self, global_sky, sobjs_obj): """ Main method to extract spectra from the ScienceImage Args: global_sky (ndarray): Sky estimate sobjs_obj (:class:`pypeit.specobjs.SpecObjs`): List of SpecObj that have been found and traced """ # This holds the objects, pre-extraction self.sobjs_obj = sobjs_obj if self.par['scienceimage']['extraction']['skip_optimal']: # Boxcar only with global sky subtraction msgs.info("Skipping optimal extraction") # This will hold the extracted objects self.sobjs = self.sobjs_obj.copy() # Only extract positive objects self.sobjs.purge_neg() # Quick loop over the objects for iobj in range(self.sobjs.nobj): sobj = self.sobjs[iobj] plate_scale = self.get_platescale(sobj) ''' if self.pypeline == 'Echelle': # Grab the positive object only thisobj = (self.sobjs.ech_orderindx == iord) & ( self.sobjs.ech_objid > 0) # pos indices of objects for this slit sobj = self.sobjs[np.where(thisobj)[0][0]] # Plate scale plate_scale = self.spectrograph.order_platescale(sobj.ECH_ORDER, binning=self.binning)[0] else: sobj = self.sobjs[iord] plate_scale = self.spectrograph.detector[self.det - 1]['platescale'] ''' # True = Good, False = Bad for inmask thismask = (self.slitmask == iobj) # pixels for this slit inmask = (self.sciImg.mask == 0) & thismask # Do it extract.extract_boxcar(self.sciImg.image, self.sciImg.ivar, inmask, self.caliBrate.mswave, global_sky, self.sciImg.rn2img, self.par['scienceimage']['extraction']['boxcar_radius']/plate_scale, sobj) # Fill up extra bits and pieces self.objmodel = np.zeros_like(self.sciImg.image) self.ivarmodel = np.copy(self.sciImg.ivar) self.outmask = self.sciImg.mask self.skymodel = global_sky.copy() else: # Local sky subtraction and optimal extraction. self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs = \ self.local_skysub_extract(self.caliBrate.mswave, global_sky, self.sobjs_obj, model_noise=(not self.ir_redux), show_profile=self.reduce_show, show=self.reduce_show) # Return return self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs
def extract(self, global_sky, sobjs_obj): """ Main method to extract spectra from the ScienceImage Args: global_sky (ndarray): Sky estimate sobjs_obj (:class:`pypeit.specobjs.SpecObjs`): List of SpecObj that have been found and traced """ # This holds the objects, pre-extraction self.sobjs_obj = sobjs_obj if self.par['reduce']['extraction'][ 'skip_optimal']: # Boxcar only with global sky subtraction msgs.info("Skipping optimal extraction") # This will hold the extracted objects self.sobjs = self.sobjs_obj.copy() # Only extract positive objects self.sobjs.purge_neg() # Quick loop over the objects for iobj in range(self.sobjs.nobj): sobj = self.sobjs[iobj] plate_scale = self.get_platescale(sobj) # True = Good, False = Bad for inmask thismask = self.slitmask == sobj.SLITID # pixels for this slit inmask = (self.sciImg.fullmask == 0) & thismask # Do it extract.extract_boxcar( self.sciImg.image, self.sciImg.ivar, inmask, self.waveimg, global_sky, self.sciImg.rn2img, self.par['reduce']['extraction']['boxcar_radius'] / plate_scale, sobj) # Fill up extra bits and pieces self.objmodel = np.zeros_like(self.sciImg.image) self.ivarmodel = np.copy(self.sciImg.ivar) self.outmask = self.sciImg.fullmask self.skymodel = global_sky.copy() else: # Local sky subtraction and optimal extraction. self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs = \ self.local_skysub_extract(global_sky, self.sobjs_obj, model_noise=(not self.ir_redux), show_profile=self.reduce_show, show=self.reduce_show) # Return return self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs
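# Illustrative sketch (not part of the pipeline): the boxcar branch above converts the
# 'boxcar_radius' parameter from arcseconds to pixels with the per-object plate scale and then
# sums sky-subtracted counts in a fixed window around the object trace. The toy function below
# shows that idea with plain numpy; the array names and shapes are assumptions for the example only.
import numpy as np

def toy_boxcar(sciimg, skyimg, trace_spat, radius_arcsec, plate_scale):
    """Sum sky-subtracted counts within +/- radius (arcsec) of a spatial trace, row by row."""
    radius_pix = radius_arcsec / plate_scale
    nspec, nspat = sciimg.shape
    spat = np.arange(nspat)
    flux = np.zeros(nspec)
    for ispec in range(nspec):
        window = np.abs(spat - trace_spat[ispec]) <= radius_pix
        flux[ispec] = np.sum((sciimg - skyimg)[ispec, window])
    return flux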
def ech_objfind(image, ivar, ordermask, slit_left, slit_righ, inmask=None, plate_scale=0.2,
                npca=2, ncoeff=5, min_snr=0.0, nabove_min_snr=0, pca_percentile=20.0,
                snr_pca=3.0, box_radius=2.0, show_peaks=False, show_fits=False, show_trace=False):

    if inmask is None:
        inmask = (ordermask > 0)

    frameshape = image.shape
    nspec = frameshape[0]
    norders = slit_left.shape[1]

    if isinstance(plate_scale, (float, int)):
        plate_scale_ord = np.full(norders, plate_scale)  # 0.12 binned by 3 spatially for HIRES
    elif isinstance(plate_scale, (np.ndarray, list, tuple)):
        if len(plate_scale) == norders:
            plate_scale_ord = plate_scale
        elif len(plate_scale) == 1:
            plate_scale_ord = np.full(norders, plate_scale[0])
        else:
            msgs.error('Invalid size for plate_scale. It must either have one element or norders elements')
    else:
        msgs.error('Invalid type for plate scale')

    specmid = nspec // 2
    slit_width = slit_righ - slit_left
    spec_vec = np.arange(nspec)
    slit_spec_pos = nspec/2.0
    slit_spat_pos = np.zeros((norders, 2))
    for iord in range(norders):
        slit_spat_pos[iord, :] = (np.interp(slit_spec_pos, spec_vec, slit_left[:, iord]),
                                  np.interp(slit_spec_pos, spec_vec, slit_righ[:, iord]))

    # Loop over orders and find objects
    sobjs = specobjs.SpecObjs()
    # Pre-allocate the mask images that are filled in slit-by-slit below
    skymask = np.zeros_like(image, dtype=bool)
    objmask = np.zeros_like(image, dtype=bool)
    # NOTE: debugging overrides; these force the diagnostic plots on regardless of the keyword arguments
    show_peaks = True
    show_fits = True
    # ToDo replace orderindx with the true order number here? Maybe not. Clean up slitid and orderindx!
    for iord in range(norders):
        msgs.info('Finding objects on slit # {:d}'.format(iord + 1))
        thismask = ordermask == (iord + 1)
        inmask_iord = inmask & thismask
        specobj_dict = {'setup': 'HIRES', 'slitid': iord + 1, 'scidx': 0, 'det': 1, 'objtype': 'science'}
        sobjs_slit, skymask[thismask], objmask[thismask], proc_list = \
            extract.objfind(image, thismask, slit_left[:, iord], slit_righ[:, iord],
                            inmask=inmask_iord, show_peaks=show_peaks, show_fits=show_fits,
                            show_trace=False, specobj_dict=specobj_dict)  #, sig_thresh = 3.0
        # ToDo make the specobjs _set_item_ work with expressions like this: spec[:].orderindx = iord
        for spec in sobjs_slit:
            spec.ech_orderindx = iord
        sobjs.add_sobj(sobjs_slit)

    nfound = len(sobjs)

    # Compute the FOF linking length based on the instrument plate scale and matching length FOFSEP = 1.0"
    FOFSEP = 1.0  # separation of FOF algorithm in arcseconds
    FOF_frac = FOFSEP/(np.median(slit_width)*np.median(plate_scale_ord))

    # Feige: make the code also work when only one object is found in one order
    # Run the FOF. We use fake coordinates
    fracpos = sobjs.spat_fracpos
    ra_fake = fracpos/1000.0  # Divide all angles by 1000 to make the geometry Euclidean
    dec_fake = 0.0*fracpos
    if nfound > 1:
        (ingroup, multgroup, firstgroup, nextgroup) = spheregroup(ra_fake, dec_fake, FOF_frac/1000.0)
        group = ingroup.copy()
        uni_group, uni_ind = np.unique(group, return_index=True)
        nobj = len(uni_group)
        msgs.info('FOF matching found {:d}'.format(nobj) + ' unique objects')
    elif nfound == 1:
        group = np.zeros(1, dtype='int')
        uni_group, uni_ind = np.unique(group, return_index=True)
        nobj = len(group)
        msgs.warn('Only one object was found; no FOF matching is needed')

    gfrac = np.zeros(nfound)
    for jj in range(nobj):
        this_group = group == uni_group[jj]
        gfrac[this_group] = np.median(fracpos[this_group])

    uni_frac = gfrac[uni_ind]

    sobjs_align = sobjs.copy()
    # Now fill in the missing objects and their traces
    for iobj in range(nobj):
        for iord in range(norders):
            # Is there an object on this order that grouped into the current group in question?
            on_slit = (group == uni_group[iobj]) & (sobjs_align.ech_orderindx == iord)
            if not np.any(on_slit):
                # Add this to the sobjs_align, and assign required tags
                thisobj = specobjs.SpecObj(frameshape, slit_spat_pos[iord, :], slit_spec_pos,
                                           det=sobjs_align[0].det, setup=sobjs_align[0].setup,
                                           slitid=(iord + 1), scidx=sobjs_align[0].scidx,
                                           objtype=sobjs_align[0].objtype)
                thisobj.ech_orderindx = iord
                thisobj.spat_fracpos = uni_frac[iobj]
                thisobj.trace_spat = slit_left[:, iord] + slit_width[:, iord]*uni_frac[iobj]  # new trace
                thisobj.trace_spec = spec_vec
                thisobj.spat_pixpos = thisobj.trace_spat[specmid]
                thisobj.set_idx()
                # Use the real detections of this object for the FWHM
                this_group = group == uni_group[iobj]
                # Assign the fwhm of the nearest detected order (index within this group)
                imin = np.argmin(np.abs(sobjs_align[this_group].ech_orderindx - iord))
                thisobj.fwhm = sobjs_align[this_group][imin].fwhm
                thisobj.maskwidth = sobjs_align[this_group][imin].maskwidth
                thisobj.ech_fracpos = uni_frac[iobj]
                thisobj.ech_group = uni_group[iobj]
                thisobj.ech_usepca = True
                sobjs_align.add_sobj(thisobj)
                group = np.append(group, uni_group[iobj])
                gfrac = np.append(gfrac, uni_frac[iobj])
            else:
                # ToDo fix specobjs to get rid of these crappy loops!
                for spec in sobjs_align[on_slit]:
                    spec.ech_fracpos = uni_frac[iobj]
                    spec.ech_group = uni_group[iobj]
                    spec.ech_usepca = False

    # Make sure the objects in sobjs_align are sorted by fractional position on the order and by
    # order, respectively
    sobjs_sort = specobjs.SpecObjs()
    for iobj in range(nobj):
        this_group = group == uni_group[iobj]
        this_sobj = sobjs_align[this_group]
        sobjs_sort.add_sobj(this_sobj[np.argsort(this_sobj.ech_orderindx)])

    # Loop over the objects and perform a quick and dirty extraction to assess S/N.
    varimg = utils.calc_ivar(ivar)
    flux_box = np.zeros((nspec, norders, nobj))
    ivar_box = np.zeros((nspec, norders, nobj))
    mask_box = np.zeros((nspec, norders, nobj))
    SNR_arr = np.zeros((norders, nobj))
    for iobj in range(nobj):
        for iord in range(norders):
            indx = (sobjs_sort.ech_group == uni_group[iobj]) & (sobjs_sort.ech_orderindx == iord)
            spec = sobjs_sort[indx]
            thismask = ordermask == (iord + 1)
            inmask_iord = inmask & thismask
            box_rad_pix = box_radius/plate_scale_ord[iord]
            flux_tmp = extract.extract_boxcar(image*inmask_iord, spec.trace_spat, box_rad_pix,
                                              ycen=spec.trace_spec)
            var_tmp = extract.extract_boxcar(varimg*inmask_iord, spec.trace_spat, box_rad_pix,
                                             ycen=spec.trace_spec)
            ivar_tmp = utils.calc_ivar(var_tmp)
            pixtot = extract.extract_boxcar(ivar*0 + 1.0, spec.trace_spat, box_rad_pix,
                                            ycen=spec.trace_spec)
            mask_tmp = (extract.extract_boxcar(ivar*inmask_iord == 0.0, spec.trace_spat, box_rad_pix,
                                               ycen=spec.trace_spec) != pixtot)
            flux_box[:, iord, iobj] = flux_tmp*mask_tmp
            ivar_box[:, iord, iobj] = np.fmax(ivar_tmp*mask_tmp, 0.0)
            mask_box[:, iord, iobj] = mask_tmp
            (mean, med_sn, stddev) = sigma_clipped_stats(flux_box[mask_tmp, iord, iobj]
                                                         * np.sqrt(ivar_box[mask_tmp, iord, iobj]),
                                                         sigma_lower=5.0, sigma_upper=5.0)
            SNR_arr[iord, iobj] = med_sn

    # Purge objects with low SNR that don't show up in enough orders
    keep_obj = np.zeros(nobj, dtype=bool)
    sobjs_trim = specobjs.SpecObjs()
    uni_group_trim = np.array([], dtype=int)
    uni_frac_trim = np.array([], dtype=float)
    for iobj in range(nobj):
        if (np.sum(SNR_arr[:, iobj] > min_snr) >= nabove_min_snr):
            keep_obj[iobj] = True
            ikeep = sobjs_sort.ech_group == uni_group[iobj]
            sobjs_trim.add_sobj(sobjs_sort[ikeep])
            uni_group_trim = np.append(uni_group_trim, uni_group[iobj])
            uni_frac_trim = np.append(uni_frac_trim, uni_frac[iobj])
        else:
            msgs.info('Purging object #{:d}'.format(iobj)
                      + ' which does not satisfy min_snr > {:5.2f}'.format(min_snr)
                      + ' on at least nabove_min_snr >= {:d}'.format(nabove_min_snr) + ' orders')

    nobj_trim = np.sum(keep_obj)
    if nobj_trim == 0:
        return specobjs.SpecObjs()

    SNR_arr_trim = SNR_arr[:, keep_obj]

    # Do a final loop over objects and make the final decision about which orders will be
    # interpolated/extrapolated by the PCA
    for iobj in range(nobj_trim):
        SNR_now = SNR_arr_trim[:, iobj]
        indx = (sobjs_trim.ech_group == uni_group_trim[iobj])
        # PCA interp/extrap if:
        #   (SNR is below pca_percentile of the total SNRs) AND (SNR < snr_pca)
        #     OR
        #   (this order was not originally traced by the object finding, see above)
        usepca = ((SNR_now < np.percentile(SNR_now, pca_percentile)) & (SNR_now < snr_pca)) \
                 | sobjs_trim[indx].ech_usepca
        # ToDo fix specobjs to get rid of these crappy loops!
        for iord, spec in enumerate(sobjs_trim[indx]):
            spec.ech_usepca = usepca[iord]
            if usepca[iord]:
                msgs.info('Using PCA to predict trace for object #{:d}'.format(iobj)
                          + ' on order #{:d}'.format(iord))

    sobjs_final = sobjs_trim.copy()
    # Loop over the objects one by one and adjust/predict the traces
    npoly_cen = 3
    pca_fits = np.zeros((nspec, norders, nobj_trim))
    for iobj in range(nobj_trim):
        igroup = sobjs_final.ech_group == uni_group_trim[iobj]
        # PCA predict the masked orders which were not traced
        pca_fits[:, :, iobj] = pca_trace((sobjs_final[igroup].trace_spat).T, usepca=None,
                                         npca=npca, npoly_cen=npoly_cen)
        # usepca = sobjs_final[igroup].ech_usepca,
        # Perform iterative flux weighted centroiding using the new PCA predictions
        xinit_fweight = pca_fits[:, :, iobj].copy()
        inmask_now = inmask & (ordermask > 0)
        xfit_fweight = extract.iter_tracefit(image, xinit_fweight, ncoeff, inmask=inmask_now,
                                             show_fits=show_fits)
        # Perform iterative Gaussian weighted centroiding
        xinit_gweight = xfit_fweight.copy()
        xfit_gweight = extract.iter_tracefit(image, xinit_gweight, ncoeff, inmask=inmask_now,
                                             gweight=True, show_fits=show_fits)
        # Assign the new traces
        for iord, spec in enumerate(sobjs_final[igroup]):
            spec.trace_spat = xfit_gweight[:, iord]
            spec.spat_pixpos = spec.trace_spat[specmid]

    # Set the IDs
    sobjs_final.set_idx()
    if show_trace:
        # NOTE: objminsky (the sky-subtracted image) is not defined in this function; this
        # debugging block assumes it is available in the enclosing scope.
        viewer, ch = ginga.show_image(objminsky*(ordermask > 0))
        for iobj in range(nobj_trim):
            for iord in range(norders):
                ginga.show_trace(viewer, ch, pca_fits[:, iord, iobj], str(uni_frac[iobj]),
                                 color='yellow')
        for spec in sobjs_trim:
            color = 'green' if spec.ech_usepca else 'magenta'
            ginga.show_trace(viewer, ch, spec.trace_spat, spec.idx, color=color)
        #for spec in sobjs_final:
        #    color = 'red' if spec.ech_usepca else 'green'
        #    ginga.show_trace(viewer, ch, spec.trace_spat, spec.idx, color=color)

    return sobjs_final
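# Illustrative sketch (not part of the pipeline): ech_objfind reuses a spherical friends-of-friends
# grouper (spheregroup) by mapping the 1-D fractional slit positions onto fake (ra, dec)
# coordinates. The same grouping can be written directly in 1-D: detections closer than the
# linking length share a group. This toy stand-in is for illustration only.
import numpy as np

def toy_fof_1d(fracpos, link_length):
    """Group 1-D positions with a friends-of-friends linking length; returns a group id per input."""
    order = np.argsort(fracpos)
    group = np.zeros(len(fracpos), dtype=int)
    gid = 0
    for i, idx in enumerate(order):
        if i > 0 and (fracpos[idx] - fracpos[order[i - 1]]) > link_length:
            gid += 1  # a gap larger than the linking length starts a new group
        group[idx] = gid
    return group

# Example: two objects detected at fractional positions ~0.3 and ~0.7 across several orders
frac = np.array([0.29, 0.31, 0.30, 0.71, 0.69, 0.70])
print(toy_fof_1d(frac, link_length=0.05))   # -> [0 0 0 1 1 1]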
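# Illustrative sketch (not part of the pipeline): the per-order PCA decision in ech_objfind reduces
# to a boolean combination of the boxcar S/N measured on each order. Given an (norders,) array of
# S/N values and a flag for orders that were never directly traced, the rule is simply:
import numpy as np

def toy_usepca(snr_per_order, never_traced, pca_percentile=20.0, snr_pca=3.0):
    """Flag orders whose trace should come from the PCA rather than the direct detection."""
    low_rel = snr_per_order < np.percentile(snr_per_order, pca_percentile)  # weak relative to this object
    low_abs = snr_per_order < snr_pca                                       # weak in an absolute sense
    return (low_rel & low_abs) | never_traced

snr = np.array([15.0, 9.0, 2.0, 0.5, 12.0])
missing = np.array([False, False, False, False, True])
print(toy_usepca(snr, missing))   # the weakest order and the never-traced order come back True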
def trace_tilts_work(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=None, gauss=False,
                     tilts_guess=None, fwhm=4.0, spat_order=3, maxdev_tracefit=0.02,
                     sigrej_trace=3.0, max_badpix_frac=0.30, tcrude_maxerr=1.0,
                     tcrude_maxshift=3.0, tcrude_maxshift0=3.0, tcrude_nave=5,
                     show_tracefits=False):
    """
    Trace the tilts of the arc (or sky) lines across a single slit/order, starting from the line
    positions along the slit center, and return a dictionary describing the traced and fitted tilts.

    Parameters
    ----------
    arcimg: ndarray, float (nspec, nspat)
        Image of arc or sky that will be used for tracing tilts.
    lines_spec: ndarray, float (nlines,)
        Array containing arc line centroids along the center of the slit for each arc line that
        will be traced. This is in pixels in image coordinates.
    lines_spat: ndarray, float (nlines,)
        Array containing the spatial position of the center of the slit along which the arc was
        extracted. This is in pixels in image coordinates.
    thismask: ndarray, boolean (nspec, nspat)
        Boolean mask image specifying the pixels which lie on the slit/order to search for
        objects on. The convention is: True = on the slit/order, False = off the slit/order.
        This must be the same size as the arcimg.

    Optional Parameters
    -------------------
    inmask: float ndarray, default = None
        Input mask image.
    gauss: bool, default = False
        If True the code will trace the arc lines using Gaussian weighted centroiding
        (trace_gweight) instead of the default, which is flux weighted centroiding
        (trace_fweight).
    tilts_guess: float ndarray, default = None
        A guess for the tilts used for running this tilt tracing in an iterative manner. If
        tilts_guess is not None, it should be an array containing the tilts from a previous
        iteration which will be used as a crutch for the tracing of the tilts. The default is
        None, which is how this code is run on a first iteration. In that case the crutches are
        determined via trace_crude, and then the flux (or Gaussian) weighted tracing is performed.
    fwhm: float
        Expected FWHM of the arc lines.
    spat_order: int, default = 3
        Order of the legendre polynomial that will be fit to the tilts.
    maxdev_tracefit: float, default = 0.02
        Maximum absolute deviation for the arc tilt fits during iterative trace fitting,
        expressed in units of the fwhm.
    sigrej_trace: float, default = 3.0
        From each line we compute a median absolute deviation of the trace from the polynomial
        fit. We then analyze the distribution of maximum absolute deviations (MADs) for all the
        lines, and reject sigrej_trace outliers from that distribution.
    max_badpix_frac: float, default = 0.30
        Maximum fraction of total pixels that can be masked by the trace_gweight algorithm
        (because the residuals are too large) to still be usable for tilt fitting.
    tcrude_maxerr: float, default = 1.0
        maxerr parameter for trace crude.
    tcrude_maxshift: float, default = 3.0
        maxshift parameter for trace crude.
    tcrude_maxshift0: float, default = 3.0
        maxshift0 parameter for trace crude.
    tcrude_nave: int, default = 5
        Trace crude is used to determine the initial arc line tilts, which are then iteratively
        fit. Trace crude can optionally boxcar smooth the image (along the spatial direction of
        the image, i.e. roughly along the arc line tilts) to improve the tracing.
show_tracefits: bool, default = False If true the fits will be shown to each arc line trace by iterative_fitting """ nspec, nspat = arcimg.shape spec_vec = np.arange(nspec) spat_vec = np.arange(nspat) slit_widp2 = int(np.ceil((np.sum(thismask,axis=1)).max()) + 2) slit_width_even = np.fmin(slit_widp2 if slit_widp2 % 2 == 0 else slit_widp2 + 1, nspat-1) trace_int = slit_width_even//2 maxdev = maxdev_tracefit*fwhm # maxdev is fraction of fwhm do_crude = True if tilts_guess is None else False nlines = len(lines_spec) nsub = 2 * trace_int + 1 lines_spat_int = np.round(lines_spat).astype(int) spat_min = np.zeros(nlines, dtype=int) spat_max = np.zeros(nlines, dtype=int) if inmask is None: inmask = thismask # The sub arrays hold the sub-imaged tilts #tilts_sub = np.zeros((nsub, nlines)) # Thee trace_fweight (or gweighed) tilts #tilts_sub_err = np.zeros((nsub, nlines)) # errors on the tilts (only used for masking but not weighted fitting) #tilts_sub_mask = np.zeros((nsub, nlines), dtype=bool) # mask indicating where the tilts are actually covered in the sub image, i.e. where thismask != False #tilts_sub_spec = np.outer(np.ones(nsub), lines_spec) # spectral coordinate of each tilt, which is the arc line spectral pixel location #tilts_sub_spec_fit = np.zeros((nsub, nlines)) # spectral position determined by evaluating the tilt fit at the center of the slit #tilts_sub_dspat = np.zeros_like(tilts_sub_spat) # delta position of the tilt in pixels, i.e. difference between slitcen and the spatial coordinate above # PCA fitting uses the sub-imaged fits, so we need them tilts_sub_fit = np.zeros((nsub, nlines)) # legendre polynomial fits to the tilt traces tilts_sub_spat = np.outer(np.arange(nsub), np.ones(nlines)) # spatial coordinate along each tilt tilts = np.zeros((nspat, nlines)) # The trace_fweight (or gweighed) tilts tilts_fit = np.zeros((nspat, nlines)) # legendre polynomial fits to the tilt traces tilts_err = np.zeros((nspat, nlines)) # errors on the tilts (only used for masking but not weighted fitting) tilts_mask = np.zeros((nspat, nlines), dtype=bool) # This is true if the pixel was in a region traced tilts_spec = np.zeros((nspat, nlines)) # spectral position determined by evaluating the tilt fit at the center of the slit tilts_spat = np.outer(np.arange(nspat), np.ones(nlines)) # spatial coordinate along each tilt tilts_dspat= np.zeros_like(tilts_spat) # delta position of the tilt in pixels, i.e. difference between slitcen and the spatial coordinate above # Transposed image and masks for traceing arcimg_trans = (arcimg * thismask).T inmask_trans = (inmask * thismask).T.astype(float) thismask_trans = thismask.T # 1) Trace the tilts from a guess. If no guess is provided from a previous iteration use trace_crude for iline in range(nlines): # We sub-image each tilt using a symmetric window about the (integer) spatial location of each line, # which is the slitcen evaluated at the line spectral position. 
spat_min[iline] = lines_spat_int[iline] - trace_int # spat_min is the minium location of the sub-image spat_max[iline] = lines_spat_int[iline] + trace_int + 1 # spat_max is the maximum location of the sub-image min_spat = np.fmax(spat_min[iline], 0) # These min_spat and max_spat are to prevent leaving the image max_spat = np.fmin(spat_max[iline], nspat - 1) sub_img = arcimg_trans[min_spat:max_spat, :] sub_inmask = inmask_trans[min_spat:max_spat,:] sub_thismask = thismask_trans[min_spat:max_spat,:] if do_crude: # First time tracing, do a trace crude tilts_guess_now, err_now = trace_slits.trace_crude_init( sub_img, np.array([lines_spec[iline]]), (sub_img.shape[0] - 1) // 2, invvar=sub_inmask, radius=fwhm, nave=tcrude_nave, maxshift0=tcrude_maxshift0, maxshift=tcrude_maxshift, maxerr=tcrude_maxerr) tilts_guess_now=tilts_guess_now.flatten() else: # A guess was provided, use that as the crutch, but determine if it is a full trace or a sub-trace if tilts_guess.shape[0] == nspat: # This is full image size tilt trace, sub-window it tilts_guess_now = tilts_guess[min_spat:max_spat, iline] else: # If it is a sub-trace, deal with falling off the image if spat_min[iline] < 0: tilts_guess_now = tilts_guess[-spat_min[iline]:,iline] elif spat_max[iline] > (nspat-1): tilts_guess_now = tilts_guess[:-(spat_max[iline] - nspat + 1),iline] else: tilts_guess_now = tilts_guess[:, iline] # Boxcar extract the thismask to have a mask indicating whether a tilt is defined along the spatial direction tilts_sub_mask_box = (extract.extract_boxcar(sub_thismask, tilts_guess_now, fwhm/2.0) > 0.99*fwhm) # If more than 80% of the pixels are masked, then don't mask at all. This happens when the traces leave the good # part of the slit. If we proceed with everything masked the iter_tracefit fitting will crash. if (np.sum(tilts_sub_mask_box) < 0.8*nsub): tilts_sub_mask_box = np.ones_like(tilts_sub_mask_box) # Do iterative flux weighted tracing and polynomial fitting to refine these traces. This must also be done in a loop # since the sub image is different for every aperture, i.e. each aperature has its own image tilts_sub_fit_out, tilts_sub_out, tilts_sub_err_out, tset_out = extract.iter_tracefit( sub_img, tilts_guess_now, spat_order, inmask=sub_inmask, trc_inmask = tilts_sub_mask_box, fwhm=fwhm, maxdev=maxdev, niter=6, idx=str(iline),show_fits=show_tracefits, xmin=0.0,xmax=float(nsub-1)) tilts_sub_mask_box = (extract.extract_boxcar(sub_thismask, tilts_sub_fit_out, fwhm/2.0) > 0.99*fwhm) if gauss: # If gauss is set, do a Gaussian refinement to the flux weighted tracing if (np.sum(tilts_sub_mask_box) < 0.8 * nsub): tilts_sub_mask_box = np.ones_like(tilts_sub_mask_box) tilts_sub_fit_gw, tilts_sub_gw, tilts_sub_err_gw, tset_gw = extract.iter_tracefit( sub_img, tilts_sub_fit_out, spat_order, inmask=sub_inmask, trc_inmask = tilts_sub_mask_box, fwhm=fwhm, maxdev=maxdev, niter=3, idx=str(iline),show_fits=show_tracefits, xmin=0.0, xmax=float(nsub-1)) tilts_sub_fit_out = tilts_sub_fit_gw tilts_sub_out = tilts_sub_gw tilts_sub_err_out = tilts_sub_err_gw tilts_sub_mask_box = (extract.extract_boxcar(sub_thismask, tilts_sub_fit_out, fwhm/2.0) > 0.99*fwhm) # Pack the results into arrays, accounting for possibly falling off the image # Deal with possibly falling off the chip # This is the same for all cases since it is the evaluation of a fit tilts_sub_fit[:, iline] = tset_out.xy(tilts_sub_spat[:, iline].reshape(1, nsub))[1] # We use the tset_out.xy to evaluate the trace across the whole sub-image even for pixels off the slit. 
        # This guarantees that the fits are always evaluated across the whole sub-image, which is
        # required for the PCA step.
        if spat_min[iline] < 0:
            #tilts_sub[      -spat_min[iline]:,iline] = tilts_sub_out.flatten()
            #tilts_sub_err[  -spat_min[iline]:,iline] = tilts_sub_err_out.flatten()
            #tilts_sub_mask[ -spat_min[iline]:,iline] = tilts_sub_mask_box.flatten()
            #tilts_sub_dspat[-spat_min[iline]:,iline] = tilts_dspat[min_spat:max_spat,iline]
            tilts[     min_spat:max_spat, iline] = tilts_sub_out.flatten()       # tilts_sub[ -spat_min[iline]:,iline]
            tilts_fit[ min_spat:max_spat, iline] = tilts_sub_fit[-spat_min[iline]:, iline]
            tilts_err[ min_spat:max_spat, iline] = tilts_sub_err_out.flatten()   # tilts_sub_err[ -spat_min[iline]:,iline]
            tilts_mask[min_spat:max_spat, iline] = tilts_sub_mask_box.flatten()  # tilts_sub_mask[-spat_min[iline]:,iline]
        elif spat_max[iline] > (nspat - 1):
            #tilts_sub[      :-(spat_max[iline] - nspat + 1),iline] = tilts_sub_out.flatten()
            #tilts_sub_err[  :-(spat_max[iline] - nspat + 1),iline] = tilts_sub_err_out.flatten()
            #tilts_sub_mask[ :-(spat_max[iline] - nspat + 1),iline] = tilts_sub_mask_box.flatten()
            #tilts_sub_dspat[:-(spat_max[iline] - nspat + 1),iline] = tilts_dspat[min_spat:max_spat,iline]
            tilts[     min_spat:max_spat, iline] = tilts_sub_out.flatten()       # tilts_sub[ :-(spat_max[iline] - nspat + 1),iline]
            tilts_fit[ min_spat:max_spat, iline] = tilts_sub_fit[:-(spat_max[iline] - nspat + 1), iline]
            tilts_err[ min_spat:max_spat, iline] = tilts_sub_err_out.flatten()   # tilts_sub_err[ :-(spat_max[iline] - nspat + 1),iline]
            tilts_mask[min_spat:max_spat, iline] = tilts_sub_mask_box.flatten()  # tilts_sub_mask[:-(spat_max[iline] - nspat + 1),iline]
        else:
            #tilts_sub[      :,iline] = tilts_sub_out.flatten()
            #tilts_sub_err[  :,iline] = tilts_sub_err_out.flatten()
            #tilts_sub_mask[ :,iline] = tilts_sub_mask_box.flatten()
            #tilts_sub_dspat[:,iline] = tilts_dspat[min_spat:max_spat,iline]
            tilts[     min_spat:max_spat, iline] = tilts_sub_out.flatten()       # tilts_sub[ :,iline]
            tilts_fit[ min_spat:max_spat, iline] = tilts_sub_fit[:, iline]
            tilts_err[ min_spat:max_spat, iline] = tilts_sub_err_out.flatten()   # tilts_sub_err[ :,iline]
            tilts_mask[min_spat:max_spat, iline] = tilts_sub_mask_box.flatten()  # tilts_sub_mask[:,iline]

        # Now use these fits to the traces to get a more robust value of the tilt spectral position and spatial
        # offset from the trace than what was initially determined from the 1d arc line spectrum. This is technically
        # where the slit_cen crosses the tilts_fit, but it is tricky since they are parameterized by different
        # independent variables (slit_cen uses spec_vec, whereas tilts_fit uses spat_vec). This code uses
        # a trick of interpolating the slit_cen onto the arc pixels. If we find it fails, then replace with something
        # simpler that simply iterates to zero on where the two cross.
        # ToDO Fix this later with an iterative thing that also updates the spatial reference position of the tilt
        #imask = tilts_mask[:, iline]
        #slit_cen_spat_ontilt = np.interp(tilts_fit[imask,iline],spec_vec, slit_cen)
        #delta_spat = slit_cen_spat_ontilt - tilts_spat[imask,iline]
        # Grab the monotonic indices
        #ediff = np.ediff1d(delta_spat,to_begin=0.0)
        #mono_ind = np.sign(ediff) == np.sign(np.median(ediff))
        #zero_cross_spat = (scipy.interpolate.interp1d(delta_spat[mono_ind],(tilts_spat[imask,iline])[mono_ind],assume_sorted=False))(0.0)
        #spec_fit_now = np.interp(zero_cross_spat,tilts_spat[imask,iline], tilts_fit[imask,iline])
        #spat_fit_now = np.interp(spec_fit_now,spec_vec, slit_cen)
        #tilts_spec[:, iline] = np.full(nspat, spec_fit_now)
        tilts_dspat[:, iline] = (spat_vec - lines_spat[iline])
        imask = tilts_mask[:, iline]
        try:
            spec_fit_now = np.interp(0.0, tilts_dspat[imask, iline], tilts_fit[imask, iline])
        except ValueError:
            spec_fit_now = lines_spec[iline]
        tilts_spec[:, iline] = np.full(nspat, spec_fit_now)

    # Create the mask for the bad lines. A tilt pixel is flagged as bad where the tracing error
    # took the sentinel value (> 900) or where the tilt was not traced at all.
    bad_mask = (tilts_err > 900) | np.logical_not(tilts_mask)
    on_slit = np.sum(tilts_mask, 0)
    on_slit_bad = np.sum((tilts_mask & (tilts_err > 900)), 0)
    bad_frac = on_slit_bad/on_slit
    dev_mean, dev_median, dev_sig = sigma_clipped_stats(np.abs(tilts - tilts_fit), mask=bad_mask,
                                                        sigma=4.0, axis=0)
    good_line = np.any(np.logical_not(bad_mask), axis=0)  # Is the line unmasked anywhere?
    # The median absolute deviation for each line quantifies the goodness of the tracing
    dev_mad = 1.4826*dev_median
    # Now reject outliers from this distribution
    dev_mad_dist_median = np.median(dev_mad[good_line])
    dev_mad_dist_mad = 1.4826*np.median(np.abs(dev_mad[good_line] - dev_mad_dist_median))  # i.e. this is like the sigma
    # Reject lines that are sigrej_trace outliers
    mad_rej = ((dev_mad - dev_mad_dist_median)/dev_mad_dist_mad) < sigrej_trace

    # Do we need this dev_mad < maxdev step?
    use_tilt = (mad_rej) & (bad_frac < max_badpix_frac) & good_line & (dev_mad < maxdev)
    nuse = np.sum(use_tilt)
    msgs.info('Number of usable arc lines for tilts: {:d}/{:d}'.format(nuse, nlines))

    tilts_mad = np.outer(np.ones(nspat), dev_mad)

    if (nuse < 0.05*nlines):
        msgs.warn('This slit/order would have too many rejected lines.' + msgs.newline() +
                  'Only nuse = {:d} out of nlines = {:d} lines would be kept.'.format(nuse, nlines) + msgs.newline() +
                  'We will proceed without rejecting anything, but something is probably very wrong with this slit/order.')
        use_tilt = np.ones(nlines, dtype=bool)
        nuse = nlines

    trc_tilt_dict = dict(nspec=nspec, nspat=nspat, nsub=nsub, nlines=nlines, nuse=nuse,
                         spat_min=spat_min, spat_max=spat_max, do_crude=do_crude, fwhm=fwhm,
                         use_tilt=use_tilt, tilts_sub_spat=tilts_sub_spat, tilts_sub_fit=tilts_sub_fit,
                         tilts_mad=tilts_mad, tilts_spec=tilts_spec, tilts_spat=tilts_spat,
                         tilts_dspat=tilts_dspat, tilts=tilts, tilts_fit=tilts_fit,
                         tilts_err=tilts_err, tilts_mask=tilts_mask)

    return trc_tilt_dict
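# Illustrative sketch (not part of the pipeline): the rejection above scores each traced line by a
# robust scatter (1.4826 x the median absolute residual, i.e. a MAD converted to a sigma
# equivalent) and then rejects lines whose score is itself a > sigrej outlier of the distribution
# of scores over all lines. A self-contained toy version:
import numpy as np

def toy_reject_lines(resid, sigrej=3.0):
    """resid: (nspat, nlines) residuals (trace - fit); returns a boolean keep mask per line."""
    dev_mad = 1.4826 * np.median(np.abs(resid), axis=0)        # robust per-line scatter
    center = np.median(dev_mad)
    scatter = 1.4826 * np.median(np.abs(dev_mad - center))     # robust scatter of the scores
    return (dev_mad - center) / scatter < sigrej               # keep lines that are not outliers

rng = np.random.default_rng(0)
resid = rng.normal(scale=0.05, size=(100, 20))
resid[:, 7] += rng.normal(scale=1.0, size=100)                 # one badly traced line
print(toy_reject_lines(resid))                                 # line 7 comes back False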
for islit in range(0, nslits):
    ginga.show_trace(viewer, ch, specobjs_pos[islit].trace_spat, trc_name=specobjs_pos[islit].idx, color='blue')
    ginga.show_trace(viewer, ch, specobjs_neg[islit].trace_spat, trc_name=specobjs_neg[islit].idx, color='orange')

# Boxcar extraction
from pypeit.core.extract import extract_boxcar
outmask = (slitpix > 0) & np.logical_not(edgmask) & mask_AB
box_rad = 8.0
# ToDo -- Check for indexes in islit [0-based or 1-based?]
for islit in range(1, nslits + 1):
    # Positive trace
    flux = extract_boxcar(image, specobjs_pos[islit - 1].trace_spat, box_rad)
    mvarimg = 1.0 / (ivar_AB + (ivar_AB == 0))
    mvar_box = extract_boxcar(mvarimg, specobjs_pos[islit - 1].trace_spat, box_rad,
                              ycen=specobjs_pos[islit - 1].trace_spec)
    pixtot = extract_boxcar(0 * mvarimg + 1.0, specobjs_pos[islit - 1].trace_spat, box_rad,
                            ycen=specobjs_pos[islit - 1].trace_spec)
    mask_box = (extract_boxcar(~outmask, specobjs_pos[islit - 1].trace_spat, box_rad,
                               ycen=specobjs_pos[islit - 1].trace_spec) != pixtot)
    box_denom = extract_boxcar(waveimg > 0.0, specobjs_pos[islit - 1].trace_spat, box_rad,
                               ycen=specobjs_pos[islit - 1].trace_spec)
    wave = extract_boxcar(waveimg, specobjs_pos[islit - 1].trace_spat, box_rad,
                          ycen=specobjs_pos[islit - 1].trace_spec) / (box_denom + (box_denom == 0.0))
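# Illustrative sketch (not part of the pipeline): the block above also needs a variance and a
# wavelength for each boxcar sum. Variances add in a straight sum over the aperture, and the
# wavelength is the mean over pixels with a defined wavelength, guarded against empty apertures
# with the "denom + (denom == 0)" trick used above. Toy per-row version; the names are for
# illustration only.
import numpy as np

def toy_boxcar_var_wave(varimg_row, waveimg_row, trace_pos, radius_pix):
    spat = np.arange(varimg_row.size)
    window = np.abs(spat - trace_pos) <= radius_pix
    var_box = np.sum(varimg_row[window])                             # variances add in the sum
    denom = np.sum(waveimg_row[window] > 0.0)                        # pixels with a defined wavelength
    wave_box = np.sum(waveimg_row[window]) / (denom + (denom == 0))  # 0/1 if the aperture is empty
    return var_box, wave_box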
def extract(self, global_sky, model_noise=None, spat_pix=None): """ Main method to extract spectra from the ScienceImage Args: global_sky (`numpy.ndarray`_): Sky estimate sobjs_obj (:class:`pypeit.specobjs.SpecObjs`): List of SpecObj that have been found and traced model_noise (bool): If True, construct and iteratively update a model inverse variance image using :func:`~pypeit.core.procimg.variance_model`. If False, a variance model will not be created and instead the input sciivar will always be taken to be the inverse variance. See :func:`~pypeit.core.skysub.local_skysub_extract` for more info. Default is None, which is to say pypeit will use the bkg_redux attribute to decide whether or not to model the noise. spat_pix (`numpy.ndarray`_): Image containing the spatial coordinates. This option is used for 2d coadds where the spat_pix image is generated as a coadd of images. For normal reductions spat_pix is not required as it is trivially created from the image itself. Default is None. """ # This holds the objects, pre-extraction # JFH Commenting this out. Not sure why we need this. It overwrites the previous stuff from the init #self.sobjs_obj = sobjs_obj if self.par['reduce']['extraction']['skip_optimal']: # Boxcar only with global sky subtraction msgs.info("Skipping optimal extraction") # This will hold the extracted objects self.sobjs = self.sobjs_obj.copy() # Quick loop over the objects for iobj in range(self.sobjs.nobj): sobj = self.sobjs[iobj] # True = Good, False = Bad for inmask thismask = self.slitmask == sobj.SLITID # pixels for this slit inmask = self.sciImg.select_flag(invert=True) & thismask # Do it extract.extract_boxcar(self.sciImg.image, self.sciImg.ivar, inmask, self.waveimg, global_sky, sobj, base_var=self.sciImg.base_var, count_scale=self.sciImg.img_scale, noise_floor=self.sciImg.noise_floor) # Fill up extra bits and pieces self.objmodel = np.zeros_like(self.sciImg.image) self.ivarmodel = np.copy(self.sciImg.ivar) # NOTE: fullmask is a bit mask, make sure it's treated as such, not # a boolean (e.g., bad pixel) mask. self.outmask = self.sciImg.fullmask self.skymodel = global_sky.copy() else: # Local sky subtraction and optimal extraction. model_noise_1 = not self.bkg_redux if model_noise is None else model_noise self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs = \ self.local_skysub_extract(global_sky, self.sobjs_obj, model_noise=model_noise_1, spat_pix = spat_pix, show_profile=self.extract_show, show=self.extract_show) # Remove sobjs that don't have both OPT_COUNTS and BOX_COUNTS remove_idx = [] for idx, sobj in enumerate(self.sobjs): # Find them if sobj.OPT_COUNTS is None and sobj.BOX_COUNTS is None: remove_idx.append(idx) msgs.warn(f'Removing object at pixel {sobj.SPAT_PIXPOS} because ' f'both optimal and boxcar extraction could not be performed') elif sobj.OPT_COUNTS is None: msgs.warn(f'Optimal extraction could not be performed for object at pixel {sobj.SPAT_PIXPOS}') # Remove them if len(remove_idx) > 0: self.sobjs.remove_sobj(remove_idx) # Return return self.skymodel, self.objmodel, self.ivarmodel, self.outmask, self.sobjs
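# Illustrative sketch (not PypeIt's implementation): "optimal" extraction in the local-sky branch
# above means profile-weighted, inverse-variance-weighted summation along the spatial direction
# (Horne 1986), which down-weights noisy pixels relative to a plain boxcar sum. A minimal per-row
# version, assuming a known spatial profile P normalized to sum to 1 in each row:
import numpy as np

def toy_optimal_row(sci_row, ivar_row, profile_row):
    """Return the optimally extracted flux and its inverse variance for one spectral row."""
    num = np.sum(profile_row * sci_row * ivar_row)
    den = np.sum(profile_row**2 * ivar_row)
    flux = num / den if den > 0 else 0.0
    return flux, den    # den is the inverse variance of the extracted flux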