def parse_nist_tbl(tbl, parse_dict):
    '''Parses a NIST table using various criteria

    Parameters
    ----------
    tbl : Table
      Read previously from NIST ASCII file
    parse_dict : dict
      Dict of parsing criteria.  Read from load_parse_dict

    Returns
    -------
    tbl : Table
      Rows meeting the criteria
    '''
    # Parse
    gdI = tbl['RelInt'] >= parse_dict['min_intensity']
    try:
        gdA = tbl['Aki'] >= parse_dict['min_Aki']
    except TypeError:
        debugger.set_trace()
    gdw = tbl['wave'] >= parse_dict['min_wave']
    # Combine
    allgd = gdI & gdA & gdw
    # Return
    return tbl[allgd]
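# Usage sketch for parse_nist_tbl (illustrative only; the column names follow
# the function above, but the table contents and threshold values here are
# made up):
#
#   from astropy.table import Table
#   nist_tbl = Table({'wave': [3100., 4358.3, 5460.7],
#                     'RelInt': [10., 200., 500.],
#                     'Aki': [5e5, 5e7, 1e8]})
#   parse_dict = dict(min_intensity=50., min_Aki=1e6, min_wave=3200.)
#   strong = parse_nist_tbl(nist_tbl, parse_dict)   # -> keeps the last 2 rows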
def MasterPinhole(self, fitsdict, det, msbias):
    """ Generate Master pinhole frame for a given detector

    Parameters
    ----------
    fitsdict : dict
      Contains relevant information from fits header files
    det : int
      Index of the detector
    msbias : ndarray or str
      Master bias frame (or bias-subtraction scheme) passed to arload.load_frames

    Returns
    -------
    boolean : bool
      Should other ScienceExposure classes be updated?
    """
    dnum = settings.get_dnum(det)
    # If the master pinhole is already made, use it
    if self._mspinhole[det - 1] is not None:
        msgs.info("An identical master pinhole frame already exists")
        return False
    if settings.argflag['reduce']['slitcen']['useframe'] in ['trace', 'pinhole']:
        try:
            mspinhole = armasters.get_master_frame(self, "pinhole")
        except IOError:
            msgs.info("Preparing a master pinhole frame with {0:s}".format(
                settings.argflag['reduce']['slitcen']['useframe']))
            ind = self._idx_cent
            # Load the pinhole frames
            frames = arload.load_frames(fitsdict, ind, det, frametype='pinhole',
                                        msbias=msbias,  # self._msbias[det - 1],
                                        trim=settings.argflag['reduce']['trim'])
            if settings.argflag['pinhole']['combine']['match'] > 0.0:
                sframes = arsort.match_frames(
                    frames, settings.argflag['pinhole']['combine']['match'],
                    frametype='pinhole',
                    satlevel=settings.spect[dnum]['saturation'] *
                             settings.spect['det'][det - 1]['nonlinear'])
                subframes = np.zeros((frames.shape[0], frames.shape[1], len(sframes)))
                numarr = np.array([])
                for i in range(len(sframes)):
                    numarr = np.append(numarr, sframes[i].shape[2])
                    mspinhole = arcomb.comb_frames(sframes[i], det, 'pinhole')
                    subframes[:, :, i] = mspinhole.copy()
                del sframes
                # Combine all sub-frames
                mspinhole = arcomb.comb_frames(subframes, det, 'pinhole', weights=numarr)
                del subframes
            else:
                mspinhole = arcomb.comb_frames(frames, det, 'pinhole')
            del frames
    else:  # It must be the name of a file the user wishes to load
        mspinhole_name = settings.argflag['run']['directory']['master'] + '/' + \
            settings.argflag['reduce']['slitcen']['useframe']
        mspinhole, head = armasters.load_master(mspinhole_name, frametype=None)
        debugger.set_trace()  # NEED TO LOAD EXTRAS AS ABOVE
    # Set and then delete the Master Trace frame
    self.SetMasterFrame(mspinhole, "pinhole", det)
    # armasters.save_masters(self, det, mftype='pinhole')
    del mspinhole
    return True
def SetBaseName(self, fitsdict):
    """ Set the base name that is used for all outputs

    Parameters
    ----------
    fitsdict : dict
      Contains relevant information from fits header files
    """
    import datetime
    #
    scidx = self._idx_sci[0]
    tbname = None
    try:
        if "T" in fitsdict['date'][scidx]:
            tbname = fitsdict['date'][scidx]
    except IndexError:
        debugger.set_trace()
    else:
        if tbname is None:
            if settings.spect["fits"]["timeunit"] == "mjd":
                # Not ideal, but convert MJD into a date+time
                timval = Time(fitsdict['time'][scidx] / 24.0, scale='tt', format='mjd')
                tbname = timval.isot
            else:
                # Really not ideal... just append date and time
                tbname = fitsdict['date'][scidx] + "T" + str(fitsdict['time'][scidx])
    tval = Time(tbname, format='isot')  # '%Y-%m-%dT%H:%M:%S.%f'
    dtime = datetime.datetime.strptime(tval.value, '%Y-%m-%dT%H:%M:%S.%f')
    self._inst_name = settings.spect['mosaic']['camera']
    self._target_name = fitsdict['target'][self._idx_sci[0]].replace(" ", "")
    self._basename = self._target_name + '_' + self._inst_name + '_' + \
        datetime.datetime.strftime(dtime, '%Y%b%dT') + \
        tbname.split("T")[1].replace(':', '')
    # Save Time object
    self._time = tval
    return
def one_d_coadd(spectra, smask, weights, debug=False, **kwargs):
    """ Performs a weighted coadd of the spectra in 1D.

    Parameters
    ----------
    spectra : XSpectrum1D
    smask : ndarray
      Data mask (True = masked)
    weights : ndarray
      Should be masked

    Returns
    -------
    coadd : XSpectrum1D
    """
    from linetools.spectra.xspectrum1d import XSpectrum1D
    # Setup
    fluxes, sigs, wave = unpack_spec(spectra)
    variances = (sigs > 0.) * sigs**2
    inv_variances = (sigs > 0.) / (sigs**2 + (sigs == 0.))

    # Sum weights
    mweights = np.ma.array(weights, mask=smask)
    sum_weights = np.ma.sum(mweights, axis=0).filled(0.)

    # Coadd
    new_flux = np.ma.sum(mweights * fluxes, axis=0) / (
        sum_weights + (sum_weights == 0.0).astype(int))
    var = (variances != 0.0).astype(float) / (
        inv_variances + (inv_variances == 0.0).astype(float))
    new_var = np.ma.sum((mweights**2.) * var, axis=0) / (
        (sum_weights + (sum_weights == 0.0).astype(int))**2.)

    # Replace masked values with zeros
    new_flux = new_flux.filled(0.)
    new_sig = np.sqrt(new_var.filled(0.))

    # New obj (for passing around)
    new_spec = XSpectrum1D.from_tuple((wave, new_flux, new_sig), masking='none')

    if False:
        debugger.plot1d(wave, new_flux, new_sig)
        debugger.set_trace()
    # Return
    return new_spec
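# The coadd above is a standard weighted mean with error propagation:
#   F = sum_i(w_i f_i) / sum_i(w_i),  Var(F) = sum_i(w_i^2 var_i) / (sum_i w_i)^2
# A minimal numpy check of the same arithmetic (no masking; made-up numbers,
# with inverse-variance weights chosen just as an example):
#
#   import numpy as np
#   f = np.array([[1.0, 2.0], [3.0, 2.0]])     # two spectra, two pixels
#   var = np.array([[1.0, 4.0], [1.0, 1.0]])
#   w = 1. / var
#   F = (w * f).sum(axis=0) / w.sum(axis=0)
#   VarF = (w**2 * var).sum(axis=0) / w.sum(axis=0)**2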
def clean_cr(spectra, smask, n_grow_mask=1, cr_nsig=7., nrej_low=5.,
             debug=False, cr_everyn=6, cr_bsigma=5., cr_two_alg='bspline', **kwargs):
    """ Sigma-clips the flux arrays to remove obvious CR

    Parameters
    ----------
    spectra : XSpectrum1D
    smask : ndarray
      Data mask
    n_grow_mask : int, optional
      Number of pixels to grow the initial mask by on each side.
      Defaults to 1 pixel
    cr_nsig : float, optional
      Number of sigma for rejection for CRs

    Returns
    -------
    None (smask is modified in place)
    """
    # Init
    fluxes, sigs, wave = unpack_spec(spectra)
    npix = wave.size

    if spectra.nspec == 2:
        msgs.info("Only 2 exposures.  Using custom procedure")
        if cr_two_alg == 'diff':
            diff = fluxes[0, :] - fluxes[1, :]
            # Robust mean/median
            med, mad = arutils.robust_meanstd(diff)
            # Spec0?
            cr0 = (diff - med) > cr_nsig * mad
            if n_grow_mask > 0:
                cr0 = grow_mask(cr0, n_grow=n_grow_mask)
            msgs.info("Rejecting {:d} CRs in exposure 0".format(np.sum(cr0)))
            smask[0, cr0] = True
            if debug:
                debugger.plot1d(wave, fluxes[0, :], xtwo=wave[cr0],
                                ytwo=fluxes[0, cr0], mtwo='s')
            # Spec1?
            cr1 = (-1 * (diff - med)) > cr_nsig * mad
            if n_grow_mask > 0:
                cr1 = grow_mask(cr1, n_grow=n_grow_mask)
            smask[1, cr1] = True
            if debug:
                debugger.plot1d(wave, fluxes[1, :], xtwo=wave[cr1],
                                ytwo=fluxes[1, cr1], mtwo='s')
            msgs.info("Rejecting {:d} CRs in exposure 1".format(np.sum(cr1)))
        elif cr_two_alg == 'ratio':
            diff = fluxes[0, :] - fluxes[1, :]
            rtio = fluxes[0, :] / fluxes[1, :]
            # Robust mean/median
            rmed, rmad = arutils.robust_meanstd(rtio)
            dmed, dmad = arutils.robust_meanstd(diff)
            # Spec0?
            cr0 = ((rtio - rmed) > cr_nsig * rmad) & ((diff - dmed) > cr_nsig * dmad)
            if n_grow_mask > 0:
                cr0 = grow_mask(cr0, n_grow=n_grow_mask)
            msgs.info("Rejecting {:d} CRs in exposure 0".format(np.sum(cr0)))
            smask[0, cr0] = True
            if debug:
                debugger.plot1d(wave, fluxes[0, :], xtwo=wave[cr0],
                                ytwo=fluxes[0, cr0], mtwo='s')
            # Spec1?
            cr1 = (-1 * (rtio - rmed) > cr_nsig * rmad) & \
                  (-1 * (diff - dmed) > cr_nsig * dmad)
            if n_grow_mask > 0:
                cr1 = grow_mask(cr1, n_grow=n_grow_mask)
            smask[1, cr1] = True
            if debug:
                debugger.plot1d(wave, fluxes[1, :], xtwo=wave[cr1],
                                ytwo=fluxes[1, cr1], mtwo='s')
            msgs.info("Rejecting {:d} CRs in exposure 1".format(np.sum(cr1)))
        elif cr_two_alg == 'bspline':
            # Package Data for convenience
            waves = spectra.data['wave'].flatten()  # Packed 0,1
            flux = fluxes.flatten()
            sig = sigs.flatten()
            #
            gd = np.where(sig > 0.)[0]
            srt = np.argsort(waves[gd])
            idx = gd[srt]
            # The following may eliminate bright, narrow emission lines
            mask, spl = arutils.robust_polyfit(waves[idx], flux[idx], 3,
                                               function='bspline',
                                               weights=1. / sig[gd][srt],
                                               sigma=cr_bsigma, maxone=False,
                                               everyn=cr_everyn)
            # Reject CR (with grow)
            spec_fit = arutils.func_val(spl, wave, 'bspline')
            for ii in range(2):
                diff = fluxes[ii, :] - spec_fit
                cr = (diff > cr_nsig * sigs[ii, :]) & (sigs[ii, :] > 0.)
                if debug:
                    debugger.plot1d(spectra.data['wave'][0, :],
                                    spectra.data['flux'][ii, :], spec_fit,
                                    xtwo=spectra.data['wave'][0, cr],
                                    ytwo=spectra.data['flux'][ii, cr], mtwo='s')
                if n_grow_mask > 0:
                    cr = grow_mask(cr, n_grow=n_grow_mask)
                # Mask
                smask[ii, cr] = True
                msgs.info("Cleaning {:d} CRs in exposure {:d}".format(np.sum(cr), ii))
            # Reject Low
            if nrej_low > 0.:
                for ii in range(2):
                    diff = spec_fit - fluxes[ii, :]
                    rej_low = (diff > nrej_low * sigs[ii, :]) & (sigs[ii, :] > 0.)
                    if False:
                        debugger.plot1d(spectra.data['wave'][0, :],
                                        spectra.data['flux'][ii, :], spec_fit,
                                        xtwo=spectra.data['wave'][0, rej_low],
                                        ytwo=spectra.data['flux'][ii, rej_low], mtwo='s')
                    msgs.info("Removing {:d} low values in exposure {:d}".format(
                        np.sum(rej_low), ii))
                    smask[ii, rej_low] = True
        else:
            msgs.error("Bad algorithm for combining two spectra!")
        # Check
        if debug:
            gd0 = ~smask[0, :]
            gd1 = ~smask[1, :]
            debugger.plot1d(wave[gd0], fluxes[0, gd0], xtwo=wave[gd1],
                            ytwo=fluxes[1, gd1])
            debugger.set_trace()
    else:
        # Median of the masked array -- Best for 3 or more spectra
        mflux = np.ma.array(fluxes, mask=smask)
        refflux = np.ma.median(mflux, axis=0)
        diff = fluxes - refflux.filled(0.)
        # Loop on spectra
        for ispec in range(spectra.nspec):
            # Generate ivar
            gds = (~smask[ispec, :]) & (sigs[ispec, :] > 0.)
            ivar = np.zeros(npix)
            ivar[gds] = 1. / sigs[ispec, gds]**2
            #
            chi2 = diff[ispec]**2 * ivar
            badchi = (ivar > 0.0) & (chi2 > cr_nsig**2)
            nbad = np.sum(badchi)
            if nbad > 0:
                # Grow?
                if n_grow_mask > 0:
                    badchi = grow_mask(badchi, n_grow=n_grow_mask)
                # Mask
                smask[ispec, badchi] = True
                msgs.info("Rejecting {:d} CRs in exposure {:d}".format(nbad, ispec))
    # Return
    return
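# grow_mask() (used throughout clean_cr) is assumed to dilate a boolean mask by
# n_grow pixels on each side, per the n_grow_mask docstring above.  A minimal
# sketch of that behavior for a 1D mask:
#
#   import numpy as np
#   def grow_mask(mask, n_grow=1):
#       grown = mask.copy()
#       for off in range(1, n_grow + 1):
#           grown[off:] |= mask[:-off]    # grow rightwards
#           grown[:-off] |= mask[off:]    # grow leftwards
#       return grown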
def reduce_multislit(slf, tilts, sciframe, bpix, datasec_img, scidx, fitsdict,
                     det, mswave, mspixelflatnrm=None, standard=False,
                     slitprof=None, debug=False):
    """ Run standard extraction steps on a multislit frame

    Parameters
    ----------
    tilts : ndarray
      Tilts image, used for the sky subtraction
    sciframe : image
      Bias subtracted image (using arload.load_frame)
    bpix : ndarray
      Bad pixel mask
    datasec_img : ndarray
      Data section image, used by the variance model
    scidx : int
      Index of the frame
    fitsdict : dict
      Contains relevant information from fits header files
    det : int
      Detector index
    mswave : ndarray
      Master wavelength image
    standard : bool, optional
      Standard star frame?
    """
    #
    dnum = settings.get_dnum(det)
    sciframe, rawvarframe, crmask = reduce_prepare(
        slf, sciframe, bpix, datasec_img, scidx, fitsdict, det,
        mspixelflatnrm=mspixelflatnrm, slitprof=slitprof)

    # Save sciframe
    slf._sciframe[det - 1] = sciframe.copy()

    ###############
    # Estimate Sky Background
    if settings.argflag['reduce']['skysub']['perform']:
        # Perform an iterative background/science extraction
        if debug:
            debugger.set_trace()  # JXP says THIS MAY NOT WORK AS EXPECTED
            msgs.warn("Reading background from 2D image on disk")
            datfil = settings.argflag['run']['directory']['science'] + \
                '/spec2d_{:s}.fits'.format(slf._basename.replace(":", "_"))
            hdu = fits.open(datfil)
            bgframe = hdu[1].data - hdu[2].data
        else:
            msgs.info("First estimate of the sky background")
            bgframe = bg_subtraction(slf, tilts, det, sciframe, rawvarframe,
                                     bpix, crmask)
        modelvarframe = arprocimg.variance_frame(datasec_img, det, sciframe,
                                                 scidx, settings.spect[dnum],
                                                 fitsdict=fitsdict, skyframe=bgframe)
    else:
        modelvarframe = rawvarframe.copy()
        bgframe = np.zeros_like(sciframe)
    if not standard:  # Need to save
        slf._modelvarframe[det - 1] = modelvarframe
        slf._bgframe[det - 1] = bgframe

    ###############
    # Find objects and estimate their traces
    scitrace = artrace.trace_objects_in_slits(slf, det, sciframe - bgframe,
                                              modelvarframe, crmask,
                                              bgreg=20, doqa=False, standard=standard)
    if scitrace is None:
        msgs.info("Not performing extraction for science frame" + msgs.newline()
                  + fitsdict['filename'][scidx[0]])
        debugger.set_trace()
        # continue

    # Make sure that there are objects
    noobj = True
    for sl in range(len(scitrace)):
        if 'nobj' in scitrace[sl].keys():  # There can be empty dict's (skipped slits)
            if scitrace[sl]['nobj'] != 0:
                noobj = False
    if noobj is True:
        msgs.warn("No objects to extract for science frame" + msgs.newline()
                  + fitsdict['filename'][scidx])
        return True

    ###############
    # Finalize the Sky Background image
    if settings.argflag['reduce']['skysub']['perform']:
        # Perform an iterative background/science extraction
        msgs.info("Finalizing the sky background image")
        # Create a trace mask of the object
        trcmask = np.zeros_like(sciframe)
        for sl in range(len(scitrace)):
            if 'nobj' in scitrace[sl].keys():
                if scitrace[sl]['nobj'] > 0:
                    trcmask += scitrace[sl]['object'].sum(axis=2)
        trcmask[np.where(trcmask > 0.0)] = 1.0
        # Do it
        bgframe = bg_subtraction(slf, tilts, det, sciframe, modelvarframe,
                                 bpix, crmask, tracemask=trcmask)
        # Redetermine the variance frame based on the new sky model
        modelvarframe = arprocimg.variance_frame(datasec_img, det, sciframe,
                                                 scidx, settings.spect[dnum],
                                                 fitsdict=fitsdict, skyframe=bgframe)
        # Save
        if not standard:
            slf._modelvarframe[det - 1] = modelvarframe
            slf._bgframe[det - 1] = bgframe

    ###############
    # Flexure down the slit? -- Not currently recommended
    if settings.argflag['reduce']['flexure']['method'] == 'slitcen':
        flex_dict = arwave.flexure_slit(slf, det)
        arwave.flexure_qa(slf, det, flex_dict, slit_cen=True)

    # Perform an optimal extraction
    msgs.work("For now, perform extraction -- really should do this after "
              "the flexure+heliocentric correction")
    return reduce_frame(slf, sciframe, rawvarframe, modelvarframe, bpix,
                        datasec_img, bgframe, scidx, fitsdict, det, crmask,
                        tilts, mswave, standard=standard)
def reduce_echelle(slf, sciframe, scidx, fitsdict, det, standard=False,
                   triml=1, trimr=1, mspixelflatnrm=None, doqa=True,
                   datasec_img=None, slitprof=None):
    """ Run standard extraction steps on an echelle frame

    Parameters
    ----------
    sciframe : image
      Bias subtracted image (using arload.load_frame)
    scidx : int
      Index of the frame
    fitsdict : dict
      Contains relevant information from fits header files
    det : int
      Detector index
    standard : bool, optional
      Standard star frame?
    triml : int (optional)
      Number of pixels to trim from the left slit edge
    trimr : int (optional)
      Number of pixels to trim from the right slit edge
    datasec_img : ndarray, optional
      Data section image, used by the variance model
    slitprof : ndarray, optional
      Slit profile, passed to reduce_prepare
    """
    msgs.work("Multiprocess this algorithm")
    nspec = sciframe.shape[0]
    nord = slf._lordloc[det - 1].shape[1]
    # Prepare the frames for tracing and extraction
    sciframe, rawvarframe, crmask = reduce_prepare(
        slf, sciframe, scidx, fitsdict, det,
        mspixelflatnrm=mspixelflatnrm, standard=standard, slitprof=slitprof)
    bgframe = np.zeros_like(sciframe)
    bgnl, bgnr = np.zeros(nord, dtype=np.int), np.zeros(nord, dtype=np.int)
    skysub = True
    if settings.argflag['reduce']['skysub']['perform']:
        # Identify background pixels, and generate an image of the sky spectrum in each slit
        for o in range(nord):
            word = np.where((slf._slitpix[det - 1] == o + 1) &
                            (slf._scimask[det - 1] == 0))
            if word[0].size == 0:
                msgs.warn("There are no pixels in slit {0:d}".format(o + 1))
                continue
            tbgframe, nl, nr = background_subtraction(slf, sciframe, rawvarframe, o, det)
            bgnl[o], bgnr[o] = nl, nr
            bgframe += tbgframe
            if nl == 0 and nr == 0:
                pass
                # If just one slit cannot do sky subtraction, don't do sky subtraction
                # msgs.warn("A sky subtraction will not be performed")
                # skysub = False
                # bgframe = np.zeros_like(sciframe)
                # modelvarframe = rawvarframe.copy()
                # break
        if skysub:
            # Provided the for loop above didn't break early, model the variance frame
            dnum = settings.get_dnum(det)
            modelvarframe = arprocimg.variance_frame(
                datasec_img, det, sciframe, scidx, settings.spect[dnum],
                fitsdict=fitsdict, skyframe=bgframe)
    else:
        modelvarframe = rawvarframe.copy()
        bgframe = np.zeros_like(sciframe)
    if not standard:  # Need to save
        slf._modelvarframe[det - 1] = modelvarframe
        slf._bgframe[det - 1] = bgframe
    # Obtain a first estimate of the object trace then
    # fit the traces and perform a PCA for the refinements
    trccoeff = np.zeros((settings.argflag['trace']['object']['order'] + 1, nord))
    trcxfit = np.arange(nspec)
    extrap_slit = np.zeros(nord)
    for o in range(nord):
        trace, error = artrace.trace_weighted(sciframe - bgframe,
                                              slf._lordloc[det - 1][:, o],
                                              slf._rordloc[det - 1][:, o],
                                              mask=slf._scimask[det - 1],
                                              wght="flux")
        if trace is None:
            extrap_slit[o] = 1
            continue
        # Find only the good pixels
        w = np.where((error != 0.0) & (~np.isnan(error)))
        if w[0].size <= 2 * settings.argflag['trace']['object']['order']:
            extrap_slit[o] = 1
            continue
        # Convert the trace locations to be a fraction of the slit length,
        # measured from the left slit edge.
        trace -= slf._lordloc[det - 1][:, o]
        trace /= (slf._rordloc[det - 1][:, o] - slf._lordloc[det - 1][:, o])
        try:
            msk, trccoeff[:, o] = arutils.robust_polyfit(
                trcxfit[w], trace[w],
                settings.argflag['trace']['object']['order'],
                function=settings.argflag['trace']['object']['function'],
                weights=1.0 / error[w]**2, minv=0.0, maxv=nspec - 1.0)
        except:
            msgs.info("arproc.reduce_echelle")
            debugger.set_trace()
    refine = 0.0
    if settings.argflag['trace']['object']['method'] == "pca":
        # Identify the orders to be extrapolated during reconstruction
        orders = 1.0 + np.arange(nord)
        msgs.info("Performing a PCA on the object trace")
        ofit = settings.argflag['trace']['object']['params']
        lnpc = len(ofit) - 1
        maskord = np.where(extrap_slit == 1)[0]
        xcen = trcxfit[:, np.newaxis].repeat(nord, axis=1)
        trccen = arutils.func_val(trccoeff, trcxfit,
                                  settings.argflag['trace']['object']['function'],
                                  minv=0.0, maxv=nspec - 1.0).T
        if np.sum(1.0 - extrap_slit) > ofit[0] + 1:
            fitted, outpar = arpca.basis(
                xcen, trccen, trccoeff, lnpc, ofit, skipx0=False, mask=maskord,
                function=settings.argflag['trace']['object']['function'])
            if doqa:
                # arqa.pca_plot(slf, outpar, ofit, "Object_Trace", pcadesc="PCA of object trace")
                arpca.pca_plot(slf.setup, outpar, ofit, "Object_Trace",
                               pcadesc="PCA of object trace")
            # Extrapolate the remaining orders requested
            trccen, outpar = arpca.extrapolate(
                outpar, orders,
                function=settings.argflag['trace']['object']['function'])
            # refine = trccen - trccen[nspec//2, :].reshape((1, nord))
        else:
            msgs.warn("Could not perform a PCA on the object trace" + msgs.newline()
                      + "Not enough well-traced orders")
            msgs.info("Using direct determination of the object trace instead")
            pass
    else:
        msgs.error("Not ready for object trace method:" + msgs.newline()
                   + settings.argflag['trace']['object']['method'])
    # Construct the left and right traces of the object profile
    # The following code ensures that the fraction of the slit
    # containing the object remains constant along the spectral
    # direction
    trcmean = np.mean(trccen, axis=0)
    trobjl = (trcmean - (1 + bgnl) / slf._pixwid[det - 1].astype(np.float)).reshape(
        (1, nord)).repeat(nspec, axis=0)
    trobjl = trccen - trobjl
    trobjr = (-trcmean + (slf._pixwid[det - 1] - bgnr - 1) /
              slf._pixwid[det - 1].astype(np.float)).reshape(
        (1, nord)).repeat(nspec, axis=0)
    trobjr = trccen + trobjr
    # Convert trccen to the actual trace locations
    trccen *= (slf._rordloc[det - 1] - slf._lordloc[det - 1])
    trccen += slf._lordloc[det - 1]
    trobjl *= (slf._rordloc[det - 1] - slf._lordloc[det - 1])
    trobjl += slf._lordloc[det - 1]
    trobjr *= (slf._rordloc[det - 1] - slf._lordloc[det - 1])
    trobjr += slf._lordloc[det - 1]

    # Generate an image of pixel weights for each object.  Each weight can
    # take any floating point value from 0 to 1 (inclusive).  For the rec_obj_img,
    # a weight of 1 means that the pixel is fully contained within the object
    # region, and 0 means that the pixel is fully contained within the background
    # region.  The opposite is true for the rec_bg_img array.  A pixel that is on
    # the border of object/background is assigned a value between 0 and 1.
    msgs.work("Eventually allow ARMED to find multiple objects in the one slit")
    nobj = 1
    rec_obj_img = np.zeros(sciframe.shape + (nobj,))
    rec_bg_img = np.zeros(sciframe.shape + (nobj,))
    for o in range(nord):
        # Prepare object/background regions
        objl = np.array([bgnl[o]])
        objr = np.array([slf._pixwid[det - 1][o] - bgnr[o] - triml - trimr])
        bckl = np.zeros((slf._pixwid[det - 1][o] - triml - trimr, 1))
        bckr = np.zeros((slf._pixwid[det - 1][o] - triml - trimr, 1))
        bckl[:bgnl[o]] = 1
        if bgnr[o] != 0:
            bckr[-bgnr[o]:] = 1
        tobj_img, tbg_img = artrace.trace_objbg_image(
            slf, det, sciframe - bgframe, o, [objl, objr], [bckl, bckr],
            triml=triml, trimr=trimr)
        rec_obj_img += tobj_img
        rec_bg_img += tbg_img
    # Create trace dict
    scitrace = artrace.trace_object_dict(
        nobj, trccen[:, 0].reshape(trccen.shape[0], 1),
        object=rec_obj_img, background=rec_bg_img)
    for o in range(1, nord):
        scitrace = artrace.trace_object_dict(
            nobj, trccen[:, o].reshape(trccen.shape[0], 1), tracelist=scitrace)
    # Save the quality control
    if doqa:
        artrace.obj_trace_qa(slf, sciframe, trobjl, trobjr, None, det,
                             root="object_trace", normalize=False)

    # Finalize the Sky Background image
    if settings.argflag['reduce']['skysub']['perform'] and (nobj > 0) and skysub:
        msgs.info("Finalizing the sky background image")
        # Identify background pixels, and generate an image of the sky spectrum in each slit
        bgframe = np.zeros_like(sciframe)
        for o in range(nord):
            tbgframe, nl, nr = background_subtraction(slf, sciframe, rawvarframe,
                                                      o, det, refine=refine)
            bgnl[o], bgnr[o] = nl, nr
            bgframe += tbgframe
        modelvarframe = arprocimg.variance_frame(
            datasec_img, det, sciframe, scidx, settings.spect[dnum],
            fitsdict=fitsdict, skyframe=bgframe)

    # Perform an optimal extraction
    return reduce_frame(slf, sciframe, rawvarframe, modelvarframe, bgframe,
                        scidx, fitsdict, det, crmask, scitrace=scitrace,
                        standard=standard)
def background_subtraction(slf, sciframe, varframe, slitn, det, refine=0.0, doqa=True):
    """ Generate a frame containing the background sky spectrum

    Parameters
    ----------
    slf : Class
      Science Exposure Class
    sciframe : ndarray
      science frame
    varframe : ndarray
      variance frame
    slitn : int
      Slit number
    det : int
      Detector index
    refine : float or ndarray
      refine the object traces.  This should be a small value around 0.0.
      If a float, a constant offset will be applied.
      Otherwise, an array needs to be specified of the same length as
      sciframe.shape[0] that contains the refinement of each pixel along
      the spectral direction.
    doqa : bool, optional
      Generate QA output?

    Returns
    -------
    bgframe : ndarray
      An image, the same size as sciframe, that contains
      the background spectrum within the specified slit.
    nl : int
      number of pixels from the left slit edge to use as background pixels
    nr : int
      number of pixels from the right slit edge to use as background pixels
    """
    # Obtain all pixels that are within the slit edges, and are not masked
    word = np.where((slf._slitpix[det - 1] == slitn + 1) &
                    (slf._scimask[det - 1] == 0))
    if word[0].size == 0:
        msgs.warn("There are no pixels in slit {0:d}".format(slitn))
        debugger.set_trace()
        nl, nr = 0, 0
        return np.zeros_like(sciframe), nl, nr
    # Calculate the oversampled object profiles
    oversampling_factor = 3  # should be an integer according to the description in object_profile()
    xedges, modvals = object_profile(slf, sciframe, slitn, det, refine=refine,
                                     factor=oversampling_factor)
    bincent = 0.5 * (xedges[1:] + xedges[:-1])
    npix = slf._pixwid[det - 1][slitn]
    tilts = slf._tilts[det - 1].copy()
    lordloc = slf._lordloc[det - 1][:, slitn]
    rordloc = slf._rordloc[det - 1][:, slitn]
    # For each pixel, calculate the fraction along the slit's spatial direction
    spatval = (word[1] - lordloc[word[0]] + refine) / (rordloc[word[0]] - lordloc[word[0]])
    # Cumulative sum and normalize
    csum = np.cumsum(modvals)
    csum -= csum[0]
    csum /= csum[-1]
    # Find a first guess of the edges of the object profile -
    # assume this is the innermost 90 percent of the flux
    argl = np.argmin(np.abs(csum - 0.05))
    argr = np.argmin(np.abs(csum - 0.95))
    # Considering the possible background pixels that are left of the object,
    # find the first time where the object profile no longer decreases as you
    # move toward the edge of the slit.  This is the beginning of the noisy
    # object profile, which is where the object can no longer be distinguished
    # from the background.
    wl = np.where((modvals[1:] < modvals[:-1]) & (bincent[1:] < bincent[argl]))
    wr = np.where((modvals[1:] > modvals[:-1]) & (bincent[1:] > bincent[argr]))
    nl, nr = 0, 0
    if wl[0].size != 0:
        # This is the index of the first time where the object profile
        # no longer decreases as you move towards the slit edge
        nl_index = np.max(wl[0])
        # Calculate nl, defined as:
        # "number of pixels from the left slit edge to use as background pixels",
        # which is just nl_index with the sampling factor taken out
        nl_index_origscale = int(nl_index / oversampling_factor + 0.5)
        nl = nl_index_origscale
    if wr[0].size != 0:
        # This is the index of the first time where the object profile
        # no longer decreases as you move towards the slit edge
        nr_index = np.min(wr[0])
        # Calculate nr, defined as:
        # "number of pixels from the right slit edge to use as background pixels",
        # which is npix minus nr_index with the sampling factor taken out
        nr_index_origscale = int(nr_index / oversampling_factor + 0.5)
        nr = npix - nr_index_origscale
    if nl + nr < 5:
        msgs.warn("The object profile appears to extrapolate to the edge of the slit")
        msgs.info("A background subtraction will not be performed for slit {0:d}".format(slitn + 1))
        nl, nr = 0, 0
        return np.zeros_like(sciframe), nl, nr
    # Find background pixels and fit
    wbgpix_spatval = np.where((spatval <= float(nl) / npix) |
                              (spatval >= float(npix - nr) / npix))  # this cannot be used to index the 2D array tilts
    wbgpix = (word[0][wbgpix_spatval], word[1][wbgpix_spatval])  # this may be appropriate for indexing the 2D array tilts
    if settings.argflag['reduce']['skysub']['method'].lower() == 'bspline':
        msgs.info("Using bspline sky subtraction")
        srt = np.argsort(tilts[wbgpix])
        ivar = arutils.calc_ivar(varframe)
        # Perform a weighted b-spline fit to the sky background pixels
        mask, bspl = arutils.robust_polyfit(
            tilts[wbgpix][srt], sciframe[wbgpix][srt], 3, function='bspline',
            weights=np.sqrt(ivar)[wbgpix][srt], sigma=5., maxone=False,
            **settings.argflag['reduce']['skysub']['bspline'])
        bgf_flat = arutils.func_val(bspl, tilts.flatten(), 'bspline')
        bgframe = bgf_flat.reshape(tilts.shape)
        if doqa:
            plt_bspline_sky(tilts, sciframe, bgf_flat, wbgpix)  # background pixels used in the fit
            debugger.set_trace()
    else:
        msgs.error('Not ready for this method for skysub {:s}'.format(
            settings.argflag['reduce']['skysub']['method'].lower()))
    if np.any(np.isnan(bgframe)):
        msgs.warn("NAN in bgframe.  Replacing with 0")
        bad = np.isnan(bgframe)
        bgframe[bad] = 0.
    return bgframe, nl, nr
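# arutils.calc_ivar() (used above and in the extraction routines below) is
# assumed to return the inverse variance with zero-variance pixels mapped to
# zero rather than inf, matching the (sigs > 0.) guards used elsewhere in this
# code.  A minimal sketch of that convention:
#
#   import numpy as np
#   def calc_ivar(var):
#       return (var > 0.) / (var + (var == 0.))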
def global_skysub(sciimg, sciivar, piximg, slitmask, edgmask, skymask=None,
                  bsp=0.6, islit=None, sigrej=3., debug=False):
    # Python indexing
    ny = sciimg.shape[0]
    #
    nslit = np.max(slitmask)
    sky_image = np.zeros_like(sciimg)
    if skymask is None:
        skymask = np.ones_like(slitmask, dtype=int)
    # Mask edges and more
    sky_slitmask = slitmask * (skymask * (sciivar > 0) & (edgmask == 0) &
                               (sciimg != maskval))

    if islit is None:
        nreduce = nslit
        slit_vec = np.arange(nslit) + 1
    else:
        nreduce = 1
        slit_vec = [islit]

    for jj in range(nreduce):
        slitid = slit_vec[jj]
        # Select only the pixels on this slit
        all = (slitmask == slitid) & (sciimg != maskval) & (sciivar > 0.)
        isky = sky_slitmask == slitid
        debugger.set_trace()  # The 2 lines above seem a bit wrong
        if (np.sum(isky) < 10):
            msgs.warn('Not enough sky pixels found in slit {:d} ({:d} sky / {:d} total)'.format(
                slitid, np.sum(isky), np.sum(all)))
            continue

        # Setup (sort)
        isrt = np.argsort(piximg[isky])
        wsky = piximg[isky][isrt]
        sky = sciimg[isky][isrt]
        sky_ivar = sciivar[isky][isrt]

        pos_sky = np.where((sky > 1.0) & (sky_ivar > 0.))[0]
        # Pre-fit!
        if len(pos_sky) > ny:
            lsky = np.log(sky[pos_sky])
            lsky_ivar = lsky * 0. + 0.1
            # Init bspline to get the sky breakpoints (kludgy)
            tmp = bspline(wsky[pos_sky], nord=4, bkspace=bsp)
            # skybkpt = bspline_bkpts(wsky[pos_sky], nord=4, bkspace=bsp, /silent)
            lskyset, outmask, lsky_fit, red_chi = dev_extract.bspline_longslit(
                wsky[pos_sky], lsky, lsky_ivar, np.ones_like(pos_sky),
                fullbkpt=tmp.breakpoints, upper=sigrej, lower=sigrej,
                kwargs_reject={'groupbadpix': True})
            res = (sky[pos_sky] - np.exp(lsky_fit)) * np.sqrt(sky_ivar[pos_sky])
            lmask = (res < 5.0) & (res > -4.0)
            sky_ivar[pos_sky] = sky_ivar[pos_sky] * lmask

        # Full
        full_bspline = bspline(wsky, nord=4, bkspace=bsp)
        skyset, full_out, yfit, _ = dev_extract.bspline_longslit(
            wsky, sky, sky_ivar, np.ones_like(sky),
            fullbkpt=full_bspline.breakpoints, upper=sigrej, lower=sigrej,
            kwargs_reject={'groupbadpix': True, 'maxrej': 10})
        sky_image[all] = skyset.value(piximg[all])[0]  # , skyset)
        # debugger.set_trace()
        if debug:
            from matplotlib import pyplot as plt
            plt.clf()
            ax = plt.gca()
            ax.scatter(wsky[full_out], sky[full_out])
            ax.scatter(wsky[~full_out], sky[~full_out], color='red')
            ax.plot(wsky, yfit, color='green')
            plt.show()
    # Return
    return sky_image
# Command line execution
if __name__ == '__main__':
    # Load the test image
    hdul = fits.open('data/LRIS/Sky/lrisr_sky_test.fits')
    scifrcp = hdul[0].data
    ivar = hdul[1].data
    ordpix = hdul[2].data
    tilts = hdul[3].data * (scifrcp.shape[0])
    #
    edgemask = np.zeros_like(scifrcp)
    # Run me
    sky_image = global_skysub(scifrcp, ivar, tilts, ordpix, edgemask)
    # Show
    ginga.show_image(scifrcp - sky_image)
    debugger.set_trace()
def simple_calib(slf, det, get_poly=False):
    """Simple calibration algorithm for longslit wavelengths

    Uses slf._arcparam to guide the analysis

    Parameters
    ----------
    get_poly : bool, optional
      Pause to record the polynomial pix = b0 + b1*lambda + b2*lambda**2

    Returns
    -------
    final_fit : dict
      Dict of fit info
    """
    # Extract the arc
    msgs.work("Detecting lines...")
    tampl, tcent, twid, w, satsnd, yprep = detect_lines(slf, det, slf._msarc[det - 1])

    # Cut down to the good ones
    tcent = tcent[w]
    tampl = tampl[w]
    msgs.info('Detected {:d} lines in the arc spectrum.'.format(len(w[0])))

    # Parameters (just for convenience)
    aparm = slf._arcparam[det - 1]

    # Read Arc linelist
    llist = aparm['llist']

    # IDs were input by hand
    if len(settings.argflag['arc']['calibrate']['IDpixels']) > 0:
        # Check that there are at least 5 values
        pixels = np.array(settings.argflag['arc']['calibrate']['IDpixels'])
        if np.sum(pixels > 0.) < 5:
            msgs.error("Need to give at least 5 pixel values!")
        #
        msgs.info("Using input lines to seed the wavelength solution")
        # Calculate median offset
        mdiff = [np.min(np.abs(tcent - pix))
                 for pix in settings.argflag['arc']['calibrate']['IDpixels']]
        med_poff = np.median(np.array(mdiff))
        msgs.info("Will apply a median offset of {:g} pixels".format(med_poff))

        # Match input lines to observed spectrum
        nid = len(settings.argflag['arc']['calibrate']['IDpixels'])
        idx_str = np.ones(nid).astype(int)
        ids = np.zeros(nid)
        idsion = np.array(['     '] * nid)
        gd_str = np.arange(nid).astype(int)
        for jj, pix in enumerate(settings.argflag['arc']['calibrate']['IDpixels']):
            diff = np.abs(tcent - pix - med_poff)
            if np.min(diff) > 2.:
                debugger.set_trace()
                msgs.error("No match with input pixel {:g}!".format(pix))
            else:
                imn = np.argmin(diff)
            # Set
            idx_str[jj] = imn
            # Take wavelength from linelist instead of input value
            wdiff = np.abs(llist['wave'] -
                           settings.argflag['arc']['calibrate']['IDwaves'][jj])
            imnw = np.argmin(wdiff)
            if wdiff[imnw] > 0.015:  # Arbitrary tolerance
                msgs.error("Input IDwaves={:g} is not in the linelist.  Fix".format(
                    settings.argflag['arc']['calibrate']['IDwaves'][jj]))
            else:
                ids[jj] = llist['wave'][imnw]
                idsion[jj] = llist['Ion'][imnw]
                msgs.info("Identifying arc line: {:s} {:g}".format(idsion[jj], ids[jj]))
    else:
        # Generate dpix pairs
        msgs.info("Using pair algorithm for wavelength solution")
        nlist = len(llist)
        dpix_list = np.zeros((nlist, nlist))
        for kk, row in enumerate(llist):
            # dpix_list[kk,:] = (np.array(row['wave'] - llist['wave']))/disp
            dpix_list[kk, :] = slf._msarc[det - 1].shape[0] * (
                aparm['b1'] * (np.array(row['wave'] - llist['wave'])) +
                aparm['b2'] * np.array(row['wave']**2 - llist['wave']**2))

        # Lambda pairs for the strongest N lines
        srt = np.argsort(tampl)
        idx_str = srt[-aparm['Nstrong']:]
        idx_str.sort()
        dpix_obs = np.zeros((aparm['Nstrong'], aparm['Nstrong']))
        for kk, idx in enumerate(idx_str):
            dpix_obs[kk, :] = np.array(tcent[idx] - tcent[idx_str])

        # Match up (ugly loops)
        ids = np.zeros(aparm['Nstrong'])
        idsion = np.array([' '] * aparm['Nstrong'])
        for kk in range(aparm['Nstrong']):
            med_off = np.zeros(nlist)
            for ss in range(nlist):
                dpix = dpix_list[ss]
                min_off = []
                for jj in range(aparm['Nstrong']):
                    min_off.append(np.min(np.abs(dpix_obs[kk, jj] - dpix)))
                med_off[ss] = np.median(min_off)
            # Set by minimum
            idm = np.argmin(med_off)
            ids[kk] = llist['wave'][idm]
            idsion[kk] = llist['Ion'][idm]

        # Calculate disp of the strong lines
        disp_str = np.zeros(aparm['Nstrong'])
        for kk in range(aparm['Nstrong']):
            disp_val = (ids[kk] - ids) / (tcent[idx_str[kk]] - tcent[idx_str])
            isf = np.isfinite(disp_val)
            disp_str[kk] = np.median(disp_val[isf])
        # Consider calculating the RMS with clipping
        gd_str = np.where(np.abs(disp_str - aparm['disp']) / aparm['disp'] <
                          aparm['disp_toler'])[0]
        msgs.info('Found {:d} lines within the dispersion threshold'.format(len(gd_str)))
        if len(gd_str) < 5:
            if msgs._debug['arc']:
                msgs.warn('You should probably try your best to ID lines now.')
                debugger.set_trace()
                debugger.plot1d(yprep)
            else:
                msgs.error('Insufficient lines to auto-fit.')

    # Debug
    if msgs._debug['arc']:
        debugger.set_trace()

    msgs.work('Cross correlate here?')

    # Setup for fitting
    ifit = idx_str[gd_str]
    sv_ifit = list(ifit)  # Keep the originals
    all_ids = -999. * np.ones(len(tcent))
    all_idsion = np.array(['12345'] * len(tcent))
    all_ids[ifit] = ids[gd_str]
    all_idsion[ifit] = idsion[gd_str]

    # Fit
    n_order = aparm['n_first']
    flg_quit = False
    fmin, fmax = -1., 1.
    msgs.info('Iterative wavelength fitting...')
    while (n_order <= aparm['n_final']) and (flg_quit is False):
        # msgs.info('n_order={:d}'.format(n_order))
        # Fit with rejection
        xfit, yfit = tcent[ifit], all_ids[ifit]
        mask, fit = arutils.robust_polyfit(xfit, yfit, n_order,
                                           function=aparm['func'],
                                           sigma=aparm['nsig_rej'],
                                           minv=fmin, maxv=fmax)
        # Reject but keep originals (until final fit)
        ifit = list(ifit[mask == 0]) + sv_ifit
        # Find new points (should we allow removal of the originals?)
        twave = arutils.func_val(fit, tcent, aparm['func'], minv=fmin, maxv=fmax)
        for ss, iwave in enumerate(twave):
            mn = np.min(np.abs(iwave - llist['wave']))
            if mn / aparm['disp'] < aparm['match_toler']:
                imn = np.argmin(np.abs(iwave - llist['wave']))
                # Update and append
                all_ids[ss] = llist['wave'][imn]
                all_idsion[ss] = llist['Ion'][imn]
                ifit.append(ss)
        # Keep unique ones
        ifit = np.unique(np.array(ifit, dtype=int))
        # Increment order
        if n_order < aparm['n_final']:
            n_order += 1
        else:
            # This does 2 iterations at the final order
            flg_quit = True

    # Final fit (originals can now be rejected)
    fmin, fmax = 0., 1.
    xfit, yfit = tcent[ifit] / (slf._msarc[det - 1].shape[0] - 1), all_ids[ifit]
    mask, fit = arutils.robust_polyfit(xfit, yfit, n_order,
                                       function=aparm['func'],
                                       sigma=aparm['nsig_rej_final'],
                                       minv=fmin, maxv=fmax)  # , debug=True)
    irej = np.where(mask == 1)[0]
    if len(irej) > 0:
        xrej = xfit[irej]
        yrej = yfit[irej]
        for imask in irej:
            msgs.info('Rejecting arc line {:g}'.format(yfit[imask]))
    else:
        xrej = []
        yrej = []
    xfit = xfit[mask == 0]
    yfit = yfit[mask == 0]
    ions = all_idsion[ifit][mask == 0]
    #
    if msgs._debug['arc']:
        msarc = slf._msarc[det - 1]
        wave = arutils.func_val(fit, np.arange(msarc.shape[0]) / float(msarc.shape[0]),
                                'legendre', minv=fmin, maxv=fmax)
        debugger.set_trace()

    # 2nd order Poly fit for archival
    if get_poly:
        poly_fit = arutils.func_fit(yfit, xfit, 'polynomial', 2, minv=fmin, maxv=fmax)
        print(' Most likely you wish to record these values:')
        print(poly_fit)
        debugger.set_trace()

    # Pack up fit
    final_fit = dict(fitc=fit, function=aparm['func'], xfit=xfit, yfit=yfit,
                     ions=ions, fmin=fmin, fmax=fmax,
                     xnorm=float(slf._msarc[det - 1].shape[0]),
                     xrej=xrej, yrej=yrej, mask=mask, spec=yprep,
                     nrej=aparm['nsig_rej_final'], shift=0., tcent=tcent)
    # QA
    arqa.arc_fit_qa(slf, final_fit)
    # RMS
    rms_ang = arutils.calc_fit_rms(xfit, yfit, fit, aparm['func'],
                                   minv=fmin, maxv=fmax)
    wave = arutils.func_val(fit,
                            np.arange(slf._msarc[det - 1].shape[0]) /
                            float(slf._msarc[det - 1].shape[0]),
                            aparm['func'], minv=fmin, maxv=fmax)
    rms_pix = rms_ang / np.median(np.abs(wave - np.roll(wave, 1)))
    msgs.info("Fit RMS = {} pix".format(rms_pix))
    # Return
    return final_fit
def coadd_spectra(spectra, wave_grid_method='concatenate', niter=5,
                  scale_method='auto', do_offset=False, sigrej_final=3.,
                  do_var_corr=True, qafile=None, outfile=None,
                  do_cr=True, **kwargs):
    """ Coadd the input spectra onto a common wavelength grid

    Parameters
    ----------
    spectra : XSpectrum1D
    wave_grid_method : str, optional
      Method used to generate the final wavelength grid (passed to new_wave_grid)

    Returns
    -------
    spec1d : XSpectrum1D
    """
    # Init
    if niter <= 0:
        msgs.error('Not prepared for zero iterations')
    # Single spectrum?
    if spectra.nspec == 1:
        msgs.info('Only one spectrum.  Writing, as desired, and ending...')
        if outfile is not None:
            write_to_disk(spectra, outfile)
        return spectra

    # Final wavelength array
    new_wave = new_wave_grid(spectra.data['wave'], method=wave_grid_method, **kwargs)

    # Rebin
    rspec = spectra.rebin(new_wave * u.AA, all=True, do_sig=True,
                          grow_bad_sig=True, masking='none')

    # Define mask -- THIS IS THE ONLY ONE TO USE
    rmask = rspec.data['sig'].filled(0.) <= 0.

    # S/N**2, weights
    sn2, weights = sn_weight(rspec, rmask)

    # Scale (modifies rspec in place)
    scales, omethod = scale_spectra(rspec, rmask, sn2, scale_method=scale_method, **kwargs)

    # Clean bad CR :: Should be run *after* scaling
    if do_cr:
        clean_cr(rspec, rmask, **kwargs)

    # Initial coadd
    spec1d = one_d_coadd(rspec, rmask, weights)

    # Init standard deviation
    std_dev, _ = get_std_dev(rspec, rmask, spec1d, **kwargs)
    msgs.info("Initial std_dev = {:g}".format(std_dev))

    iters = 0
    std_dev = 0.
    var_corr = 1.

    # Scale the standard deviation
    while np.absolute(std_dev - 1.) >= 0.1 and iters < niter:
        iters += 1
        msgs.info("Iterating on coadding... iter={:d}".format(iters))

        # Setup (strip out masks, if any)
        tspec = spec1d.copy()
        tspec.unmask()
        newvar = tspec.data['sig'][0, :].filled(0.)**2  # JFH Interpolates over bad values?
        newflux = tspec.data['flux'][0, :].filled(0.)
        newflux_now = newflux  # JFH interpolates
        # Convenient for coadding
        uspec = rspec.copy()
        uspec.unmask()

        # Loop on images to update noise model for rejection
        for qq in range(rspec.nspec):
            # Grab full spectrum (unmasked)
            flux = uspec.data['flux'][qq, :].filled(0.)
            sig = uspec.data['sig'][qq, :].filled(0.)
            ivar = np.zeros_like(sig)
            gd = sig > 0.
            ivar[gd] = 1. / sig[gd]**2

            # var_tot
            var_tot = newvar + arutils.calc_ivar(ivar)
            ivar_real = arutils.calc_ivar(var_tot)
            # smooth out possible outliers in noise
            var_med = medfilt(var_tot, 5)
            var_smooth = medfilt(var_tot, 99)  # , boundary='reflect')
            # conservatively always take the largest variance
            var_final = np.maximum(var_med, var_smooth)
            ivar_final = arutils.calc_ivar(var_final)
            # Cap S/N ratio at SN_MAX to prevent overly aggressive rejection
            SN_MAX = 20.0
            ivar_cap = np.minimum(ivar_final,
                                  (SN_MAX / newflux_now + (newflux_now <= 0.0))**2)
            # Adjust rejection to reflect the statistics of the distribution
            # of errors.  This fixes cases where, for not totally understood
            # reasons, the noise model is not quite right and many pixels are
            # rejected.
            # Is the model offset relative to the data? If so take it out
            if do_offset:
                diff1 = flux - newflux_now
                # idum = np.where(arrmask[*, j] EQ 0, nnotmask)
                debugger.set_trace()  # GET THE MASK RIGHT!
                nnotmask = np.sum(~mask)
                nmed_diff = np.maximum(nnotmask // 20, 10)
                # take out the smoothly varying piece
                # JXP -- This isn't going to work well if the data has a bunch of
                #        null values in it
                w = np.ones(5, 'd')
                diff_sm = np.convolve(w / w.sum(),
                                      medfilt(diff1 * (~mask), nmed_diff),
                                      mode='same')
                chi2 = (diff1 - diff_sm)**2 * ivar_real
                goodchi = (~mask) & (ivar_real > 0.0) & (chi2 <= 36.0)  # AND masklam, ngd)
                if np.sum(goodchi) == 0:
                    goodchi = np.array([True] * flux.size)
                # Port next line to Python to use this
                # djs_iterstat, (arrflux[goodchi, j]-newflux_now[goodchi]) $
                #   , invvar = ivar_real[goodchi], mean = offset_mean, median = offset $
            else:
                offset = 0.
            chi2 = (flux - newflux_now - offset)**2 * ivar_real
            goodchi = (~rmask[qq, :]) & (ivar_real > 0.0) & (chi2 <= 36.0)  # AND masklam, ngd)
            ngd = np.sum(goodchi)
            if ngd == 0:
                goodchi = np.array([True] * flux.size)
            # Evaluate statistics of chi2 for good pixels, excluding
            # extreme 6-sigma outliers
            chi2_good = chi2[goodchi]
            chi2_srt = chi2_good.copy()
            chi2_srt.sort()
            # Evaluate at 1-sigma and then scale
            gauss_prob = 1.0 - 2.0 * (1. - scipy.stats.norm.cdf(1.))  # gaussint(-double(1.0d))
            sigind = int(np.round(gauss_prob * ngd))
            chi2_sigrej = chi2_srt[sigind]
            one_sigma = np.minimum(np.maximum(np.sqrt(chi2_sigrej), 1.0), 5.0)
            sigrej_eff = sigrej_final * one_sigma
            chi2_cap = (flux - newflux_now - offset)**2 * ivar_cap
            # Grow??
            chi_mask = (chi2_cap > sigrej_eff**2) & (~rmask[qq, :])
            nrej = np.sum(chi_mask)
            # Apply
            if nrej > 0:
                msgs.info("Rejecting {:d} pixels in exposure {:d}".format(nrej, qq))
                print(rspec.data['wave'][qq, chi_mask])
                rmask[qq, chi_mask] = True
                # rspec.select = qq
                # rspec.add_to_mask(chi_mask)
            # outmask[*, j] = (arrmask[*, j] EQ 1) OR (chi2_cap GT sigrej_eff^2)

        # Incorporate saving of each dev/sig panel onto one page?  Currently only saves last fit
        # qa_plots(wavelengths, masked_fluxes, masked_vars, new_wave, new_flux, new_var)

        # Coadd anew
        spec1d = one_d_coadd(rspec, rmask, weights, **kwargs)
        # Calculate std_dev
        std_dev, _ = get_std_dev(rspec, rmask, spec1d, **kwargs)
        # var_corr = var_corr * std_dev
        msgs.info("Desired variance correction: {:g}".format(var_corr))
        msgs.info("New standard deviation: {:g}".format(std_dev))
        if do_var_corr:
            msgs.info("Correcting variance")
            for ispec in range(rspec.nspec):
                rspec.data['sig'][ispec] *= np.sqrt(std_dev)
            spec1d = one_d_coadd(rspec, rmask, weights)

    if iters == 0:
        msgs.warn("No iterations on coadding done")
        # qa_plots(wavelengths, masked_fluxes, masked_vars, new_wave, new_flux, new_var)
    else:
        msgs.info("Final correction to initial variances: {:g}".format(var_corr))

    # QA
    if qafile is not None:
        msgs.info("Writing QA file: {:s}".format(qafile))
        arqa.coaddspec_qa(spectra, rspec, rmask, spec1d, qafile=qafile)

    # Write to disk?
    if outfile is not None:
        write_to_disk(spec1d, outfile)
    return spec1d
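# Usage sketch for coadd_spectra (illustrative; assumes the individual
# exposures have already been packed into a multi-spectrum linetools
# XSpectrum1D, as the routines above expect, and that linetools' collate()
# is available for that packing):
#
#   from linetools.spectra.utils import collate
#   spectra = collate([spec1, spec2, spec3])   # individual XSpectrum1D objects
#   spec1d = coadd_spectra(spectra, wave_grid_method='concatenate',
#                          qafile='coadd_qa.pdf', outfile='coadd.fits')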
def optimal_extract(slf, det, specobjs, sciframe, varframe, skyframe,
                    crmask, scitrace, pickle_file=None, profiles=None):
    """ Perform optimal extraction
    Standard Horne approach

    Parameters
    ----------
    slf
    det
    specobjs
    sciframe
    varframe
    crmask
    scitrace
    pickle_file

    Returns
    -------
    newvar : ndarray
      Updated variance array that includes object model
    """
    from pypit import arproc
    # Setup
    # rnimg = arproc.rn_frame(slf, det)
    # model_var = np.abs(skyframe + sciframe - np.sqrt(2)*rnimg + rnimg**2)  # sqrt 2 term deals with negative flux/sky
    # model_ivar = 1./model_var
    # Inverse variance
    model_ivar = np.zeros_like(varframe)
    gdvar = varframe > 0.
    model_ivar[gdvar] = arutils.calc_ivar(varframe[gdvar])
    cr_mask = 1.0 - crmask
    # Object model image
    obj_model = np.zeros_like(varframe)
    # Loop on slits
    for sl in range(len(specobjs)):
        # Loop on objects
        nobj = scitrace[sl]['traces'].shape[1]
        for o in range(nobj):
            msgs.info("Performing optimal extraction of object {0:d}/{1:d} in slit {2:d}/{3:d}".format(
                o + 1, nobj, sl + 1, len(specobjs)))
            # Get object pixels
            if scitrace[sl]['background'] is None:
                # The object for all slits is provided in the first extension
                objreg = np.copy(scitrace[0]['object'][:, :, o])
                wzro = np.where(slf._slitpix[det - 1] != sl + 1)
                objreg[wzro] = 0.0
            else:
                objreg = scitrace[sl]['object'][:, :, o]
            # Fit dict
            fit_dict = scitrace[sl]['opt_profile'][o]
            if 'param' not in fit_dict.keys():
                continue
            # Slit image
            slit_img = artrace.slit_image(slf, det, scitrace[sl], o)  # , tilts=tilts)
            # msgs.warn("Turn off tilts")
            # Object pixels
            weight = objreg.copy()
            gdo = (weight > 0) & (model_ivar > 0)
            # Profile image
            prof_img = np.zeros_like(weight)
            prof_img[gdo] = arutils.func_val(fit_dict['param'], slit_img[gdo],
                                             fit_dict['func'])
            # Normalize
            norm_prof = np.sum(prof_img, axis=1)
            prof_img /= np.outer(norm_prof + (norm_prof == 0.),
                                 np.ones(prof_img.shape[1]))
            # Mask (1=good)
            mask = np.zeros_like(prof_img)
            mask[gdo] = 1.
            mask *= cr_mask
            # Optimal flux
            opt_num = np.sum(mask * sciframe * model_ivar * prof_img, axis=1)
            opt_den = np.sum(mask * model_ivar * prof_img**2, axis=1)
            opt_flux = opt_num / (opt_den + (opt_den == 0.))
            # Optimal wave
            opt_num = np.sum(slf._mswave[det - 1] * model_ivar * prof_img**2, axis=1)
            opt_den = np.sum(model_ivar * prof_img**2, axis=1)
            opt_wave = opt_num / (opt_den + (opt_den == 0.))
            if (np.sum(opt_wave < 1.) > 0) and \
                    settings.argflag["reduce"]["calibrate"]["wavelength"] != "pixel":
                debugger.set_trace()
                msgs.error("Zero value in wavelength array. Uh-oh")
            # Optimal ivar
            opt_num = np.sum(mask * model_ivar * prof_img**2, axis=1)
            ivar_den = np.sum(mask * prof_img, axis=1)
            opt_ivar = opt_num * arutils.calc_ivar(ivar_den)

            # Save
            specobjs[sl][o].optimal['wave'] = opt_wave.copy() * u.AA  # Yes, units enter here
            specobjs[sl][o].optimal['counts'] = opt_flux.copy()
            gdiv = (opt_ivar > 0.) & (ivar_den > 0.)
            opt_var = np.zeros_like(opt_ivar)
            opt_var[gdiv] = arutils.calc_ivar(opt_ivar[gdiv])
            specobjs[sl][o].optimal['var'] = opt_var.copy()
            # specobjs[o].boxcar['sky'] = skysum  # per pixel

            # Update object model
            counts_image = np.outer(opt_flux, np.ones(prof_img.shape[1]))
            obj_model += prof_img * counts_image
            '''
            if 'OPTIMAL' in msgs._debug:
                debugger.set_trace()
                debugger.xplot(opt_wave, opt_flux, np.sqrt(opt_var))
            '''
    # Generate new variance image
    newvar = arproc.variance_frame(slf, det, sciframe, -1,
                                   skyframe=skyframe, objframe=obj_model)
    # Return
    return newvar
def bspline_fit(x, y, order=3, knots=None, everyn=20, xmin=None, xmax=None,
                w=None, bkspace=None):
    ''' bspline fit to x,y
    Should probably only be called from func_fit

    Parameters
    ----------
    x : ndarray
    y : ndarray
    order : int
      Order (degree) of the spline.  Default=3 (cubic)
    knots : ndarray, optional
      Internal knots only.  External ones are added by scipy
    everyn : int
      Knot every N good pixels, if used
    xmin : float, optional
      Minimum value in the array  [both must be set to normalize]
    xmax : float, optional
      Maximum value in the array  [both must be set to normalize]
    w : ndarray, optional
      weights to be used in the fitting (weights = 1/sigma)
    bkspace : float
      Spacing of breakpoints in units of x

    Returns
    -------
    tck : tuple
      describes the bspline
    '''
    task = 0  # Default of splrep
    if w is None:
        ngd = x.size
        gd = np.arange(ngd)
        weights = None
    else:
        gd = np.where(w > 0.)[0]
        weights = w[gd]
        ngd = len(gd)
    # Make the knots
    if knots is None:
        if bkspace is not None:
            xrnge = (np.max(x[gd]) - np.min(x[gd]))
            startx = np.min(x[gd])
            nbkpts = max(int(xrnge / bkspace) + 1, 2)
            tempbkspace = xrnge / (nbkpts - 1)
            knots = np.arange(1, nbkpts - 1) * tempbkspace + startx
        elif everyn is not None:
            # A knot every good N pixels
            idx_knots = np.arange(everyn // 2, ngd - everyn // 2, everyn)
            knots = x[gd[idx_knots]]
        else:
            msgs.error("No method specified to generate knots")
    # Interior knots are being supplied to splrep, which requires task=-1
    if knots is not None:
        task = -1
    # Generate spline
    try:
        tck = interpolate.splrep(x[gd], y[gd], w=weights, k=order,
                                 xb=xmin, xe=xmax, t=knots, task=task)
    except ValueError:
        # Knot problem (usually)
        msgs.warn("Problem in the bspline knot")
        debugger.set_trace()
    return tck
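# Usage sketch for bspline_fit on synthetic data (`interpolate` here is
# scipy.interpolate, as used inside the function above; the data are made up):
#
#   import numpy as np
#   from scipy import interpolate
#   x = np.linspace(0., 10., 200)
#   y = np.sin(x) + 0.05 * np.random.randn(x.size)
#   tck = bspline_fit(x, y, order=3, everyn=20)   # one knot every 20 good pixels
#   y_model = interpolate.splev(x, tck)           # evaluate the spline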
def boxcar(slf, det, specobjs, sciframe, varframe, skyframe, crmask, scitrace):
    """ Perform boxcar extraction on the traced objects.
    Also perform a local sky subtraction

    Parameters
    ----------
    det : int
      Detector index
    specobjs : list of dict
      list of SpecObj objects
    sciframe : ndarray
      science frame
    varframe : ndarray
      variance image
    skyframe : ndarray
      sky background frame
    crmask : int ndarray
      mask of cosmic ray hits
    scitrace : dict
      traces, object and background trace images

    Returns
    -------
    bgcorr : ndarray
      Correction to the sky background in the object window
    """
    from pypit import arcyutils
    from astropy.stats import sigma_clip
    bgfitord = 1  # Polynomial order used to fit the background
    nslit = len(scitrace)
    cr_mask = 1.0 - crmask
    bgfit = np.linspace(0.0, 1.0, sciframe.shape[1])
    bgcorr = np.zeros_like(cr_mask)
    # Loop on Slits
    for sl in range(nslit):
        word = np.where(slf._slitpix[det - 1] == sl + 1)
        if word[0].size == 0:
            continue
        mask_slit = np.zeros(sciframe.shape, dtype=np.float)
        mask_slit[word] = 1.0
        # Loop on Objects
        nobj = scitrace[sl]['nobj']
        for o in range(nobj):
            msgs.info("Performing boxcar extraction of object {0:d}/{1:d} in slit {2:d}/{3:d}".format(
                o + 1, nobj, sl + 1, nslit))
            if scitrace[sl]['object'] is None:
                # The object for all slits is provided in the first extension
                objreg = np.copy(scitrace[0]['object'][:, :, o])
                wzro = np.where(slf._slitpix[det - 1] != sl + 1)
                objreg[wzro] = 0.0
            else:
                objreg = scitrace[sl]['object'][:, :, o]
            # Fit the background
            msgs.info("   Fitting the background")
            if scitrace[sl]['background'] is None:
                # The background for all slits is provided in the first extension
                bckreg = np.copy(scitrace[0]['background'][:, :, o])
                wzro = np.where(slf._slitpix[det - 1] != sl + 1)
                bckreg[wzro] = 0.0
            else:
                bckreg = scitrace[sl]['background'][:, :, o]
            # Trim CRs further
            bg_mask = np.zeros_like(sciframe)
            bg_mask[np.where((bckreg * cr_mask <= 0.))] = 1.
            bg_mask[np.where((slf._slitpix[det - 1] != sl + 1))] = 1.
            mask_sci = np.ma.array(sciframe, mask=bg_mask, fill_value=0.)
            clip_image = sigma_clip(mask_sci, axis=1, sigma=3.)  # For the mask only
            # Fit
            bgframe = arcyutils.func2d_fit_val(
                bgfit, sciframe, (~clip_image.mask) * bckreg * cr_mask, bgfitord)
            # Weights
            weight = objreg * mask_slit
            sumweight = np.sum(weight, axis=1)
            # Generate wavelength array (average over the pixels)
            wvsum = np.sum(slf._mswave[det - 1] * weight, axis=1)
            wvsum /= sumweight
            # Generate sky spectrum (flux per pixel)
            skysum = np.sum(skyframe * weight, axis=1)
            skysum /= sumweight
            # Total the object flux
            msgs.info("   Summing object counts")
            scisum = np.sum((sciframe - bgframe) * weight, axis=1)
            # Total the variance array
            msgs.info("   Summing variance array")
            varsum = np.sum(varframe * weight, axis=1)
            # Update background correction image
            tmp = bckreg + objreg
            gdp = np.where((tmp > 0) & (slf._slitpix[det - 1] == sl + 1))
            bgcorr[gdp] = bgframe[gdp]
            # Mask
            boxmask = np.zeros(wvsum.shape, dtype=np.int)
            # Bad detector pixels
            BPs = np.sum(weight * slf._bpix[det - 1], axis=1)
            bp = BPs > 0.
            boxmask[bp] += mask_flags['bad_pix']
            # CR
            CRs = np.sum(weight * cr_mask, axis=1)
            cr = CRs > 0.
            boxmask[cr] += mask_flags['CR']
            # NAN
            NANs = np.isnan(scisum)
            if np.sum(NANs) > 0:
                msgs.warn("   NANs in the spectrum somehow...")
                boxmask[NANs] += mask_flags['NANs']
                scisum[NANs] = 0.
                varsum[NANs] = 0.
                skysum[NANs] = 0.
            # Check on specobjs
            if not specobjs[sl][o].check_trace(scitrace[sl]['traces'][:, o]):
                debugger.set_trace()
                msgs.error("Bad match to specobj in boxcar!")
            # Fill
            specobjs[sl][o].boxcar['wave'] = wvsum.copy() * u.AA  # Yes, units enter here
            specobjs[sl][o].boxcar['counts'] = scisum.copy()
            specobjs[sl][o].boxcar['var'] = varsum.copy()
            if np.sum(specobjs[sl][o].boxcar['var']) == 0.:
                debugger.set_trace()
            specobjs[sl][o].boxcar['sky'] = skysum.copy()  # per pixel
            specobjs[sl][o].boxcar['mask'] = boxmask.copy()
            # Find boxcar size
            slit_sz = []
            inslit = np.where(weight == 1.)
            for ii in range(weight.shape[0]):
                inrow = inslit[0] == ii
                if np.sum(inrow) > 0:
                    slit_sz.append(np.max(inslit[1][inrow]) - np.min(inslit[1][inrow]))
            slit_pix = np.median(slit_sz)  # Pixels
            specobjs[sl][o].boxcar['size'] = slit_pix
    # Return
    return bgcorr
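# mask_flags (used when building boxmask above) is assumed to be a module-level
# dict of bit values, one bit per condition, so that a single boxmask integer
# can carry several flags at once.  The keys below are the ones used above;
# the specific bit values are illustrative:
#
#   mask_flags = dict(bad_pix=1, CR=2, NANs=4)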
def obj_profiles(slf, det, specobjs, sciframe, varframe, skyframe, crmask,
                 scitrace, COUNT_LIM=25., doqa=True, pickle_file=None):
    """ Derive spatial profiles for each object

    Parameters
    ----------
    slf
    det
    specobjs
    sciframe
    varframe
    skyframe
    crmask
    scitrace

    Returns
    -------
    """
    ''' FOR DEVELOPING
    import pickle
    if False:
        tilts = slf._tilts[det-1]
        args = [det, specobjs, sciframe, varframe, skyframe, crmask, scitrace, tilts]
        msgs.warn("Pickling in the profile code")
        with open("trc_pickle.p", 'wb') as f:
            pickle.dump(args, f)
        debugger.set_trace()
    if pickle_file is not None:
        f = open(pickle_file, 'r')
        args = pickle.load(f)
        f.close()
        det, specobjs, sciframe, varframe, skyframe, crmask, scitrace, tilts = args
        slf = None
    else:
        tilts = slf._tilts[det-1]
    '''
    # Init QA
    #
    sigframe = np.sqrt(varframe)
    # Loop on slits
    for sl in range(len(specobjs)):
        # Loop on objects
        nobj = scitrace[sl]['traces'].shape[1]
        scitrace[sl]['opt_profile'] = []
        msgs.work("Should probably loop on S/N")
        for o in range(nobj):
            msgs.info("Deriving spatial profile of object {0:d}/{1:d} in slit {2:d}/{3:d}".format(
                o + 1, nobj, sl + 1, len(specobjs)))
            # Get object pixels
            if scitrace[sl]['background'] is None:
                # The object for all slits is provided in the first extension
                objreg = np.copy(scitrace[0]['object'][:, :, o])
                wzro = np.where(slf._slitpix[det - 1] != sl + 1)
                objreg[wzro] = 0.0
            else:
                objreg = scitrace[sl]['object'][:, :, o]
            # Calculate slit image
            slit_img = artrace.slit_image(slf, det, scitrace[sl], o)  # , tilts=tilts)
            # Object pixels
            weight = objreg.copy()
            # Identify good rows
            gdrow = np.where(specobjs[sl][o].boxcar['counts'] > COUNT_LIM)[0]
            # Normalized image
            norm_img = sciframe / np.outer(specobjs[sl][o].boxcar['counts'],
                                           np.ones(sciframe.shape[1]))
            # Eliminate rows with CRs (wipes out boxcar)
            crspec = np.sum(crmask * weight, axis=1)
            cr_rows = np.where(crspec > 0)[0]
            weight[cr_rows, :] = 0.
            #
            if len(gdrow) > 100:  # Good S/N regime
                msgs.info("Good S/N for profile")
                # Eliminate low count regions
                badrow = np.where(specobjs[sl][o].boxcar['counts'] < COUNT_LIM)[0]
                weight[badrow, :] = 0.
                # Extract profile
                gdprof = (weight > 0) & (sigframe > 0.) & \
                    (~np.isnan(slit_img))  # slit_img=nan if the slit is partially on the chip
                slit_val = slit_img[gdprof]
                flux_val = norm_img[gdprof]
                # weight_val = sciframe[gdprof]/sigframe[gdprof]  # S/N
                weight_val = 1. / sigframe[gdprof]  # 1/N
                msgs.work("Weight by S/N in boxcar extraction? [avoid CRs; smooth?]")
                # Fit
                fdict = dict(func=settings.argflag['science']['extraction']['profile'],
                             deg=3, extrap=False)
                if fdict['func'] == 'gaussian':
                    fdict['deg'] = 2
                elif fdict['func'] == 'moffat':
                    fdict['deg'] = 3
                else:
                    msgs.error("Not ready for this type of object profile")
                msgs.work("Might give our own guess here instead of using default")
                guess = None
                # Check if there are enough pixels in the slit to perform fit
                if slit_val.size <= fdict['deg'] + 1:
                    msgs.warn("Not enough pixels to determine profile of object={0:s} in slit {1:d}".format(
                        specobjs[sl][o].idx, sl + 1) + msgs.newline() + "Skipping Optimal")
                    fdict['extrap'] = True
                    scitrace[sl]['opt_profile'].append(copy.deepcopy(fdict))
                    continue
                # Fit the profile
                try:
                    mask, gfit = arutils.robust_polyfit(slit_val, flux_val,
                                                        fdict['deg'],
                                                        function=fdict['func'],
                                                        weights=weight_val,
                                                        maxone=False,
                                                        guesses=guess)
                except RuntimeError:
                    msgs.warn("Bad profile fit for object={:s}.".format(specobjs[sl][o].idx)
                              + msgs.newline() + "Skipping Optimal")
                    fdict['extrap'] = True
                    scitrace[sl]['opt_profile'].append(copy.deepcopy(fdict))
                    continue
                except ValueError:
                    debugger.set_trace()  # NaNs in the values?  Check
                msgs.work("Consider flagging/removing CRs here")
                # Record
                fdict['param'] = gfit.copy()
                fdict['mask'] = mask
                fdict['slit_val'] = slit_val
                fdict['flux_val'] = flux_val
                scitrace[sl]['opt_profile'].append(copy.deepcopy(fdict))
                specobjs[sl][o].optimal['fwhm'] = fdict['param'][1]  # Pixels
                if msgs._debug['obj_profile']:
                    gdp = mask == 0
                    mn = np.min(slit_val[gdp])
                    mx = np.max(slit_val[gdp])
                    xval = np.linspace(mn, mx, 1000)
                    model = arutils.func_val(gfit, xval, fdict['func'])
                    import matplotlib.pyplot as plt
                    plt.clf()
                    ax = plt.gca()
                    ax.scatter(slit_val[gdp], flux_val[gdp], marker='.', s=0.7,
                               edgecolor='none', facecolor='black')
                    ax.plot(xval, model, 'b')
                    # Gaussian too?
                    if False:
                        fdictg = dict(func='gaussian', deg=2)
                        maskg, gfitg = arutils.robust_polyfit(slit_val, flux_val,
                                                              fdict['deg'],
                                                              function=fdictg['func'],
                                                              weights=weight_val,
                                                              maxone=False)
                        modelg = arutils.func_val(gfitg, xval, fdictg['func'])
                        ax.plot(xval, modelg, 'r')
                    plt.show()
                    debugger.set_trace()
            elif len(gdrow) > 10:
                #
                msgs.warn("Low extracted flux for obj={:s} in slit {:d}.  Not ready for Optimal".format(
                    specobjs[sl][o].idx, sl + 1))
                scitrace[sl]['opt_profile'].append({})
                continue
            elif len(gdrow) >= 0:  # limit is ">= 0" to avoid crash for gdrow=0
                msgs.warn("Low extracted flux for obj={:s} in slit {:d}.  Not ready for Optimal".format(
                    specobjs[sl][o].idx, sl + 1))
                scitrace[sl]['opt_profile'].append({})
                continue
    # QA
    if doqa:  # not msgs._debug['no_qa'] and doqa:
        msgs.info("Preparing QA for spatial object profiles")
        arqa.obj_profile_qa(slf, specobjs, scitrace, det)
    return
def main(pargs):
    xgap = 0.0   # Gap between the square detector pixels (expressed as a fraction of the x pixel size)
    ygap = 0.0   # Gap between the square detector pixels (expressed as a fraction of the x pixel size)
    ysize = 1.0  # The size of a pixel in the y-direction as a multiple of the x pixel size
    binbpx = None
    numamplifiers = None
    saturation = None
    add_user_slits = None
    user_settings = None
    settings = def_settings.copy()

    # Read files
    if pargs.files is not None:
        files = glob.glob(pargs.files + '*')
    else:
        files = None

    # Instrument specific
    if pargs.spectrograph == 'keck_deimos':
        saturation = 65535.0  # The detector saturation level
        numamplifiers = 1
        if files is None:
            #files = glob.glob('../RAW_DATA/Keck_DEIMOS/830G_M/DE.20100913.57*')  # Mask (57006, 57161)
            #files = glob.glob('data/DEIMOS/DE.20100913.57*')  # Mask (57006, 57161)
            # The following are with sigdetect=20; sigdetect=50 gets rid of the junk (I think)
            #   det=1 :: 25 slits including star boxes
            #   det=2 :: 26 slits including stars + a short first one
            #   det=3 :: 27 slits with a fake, short slit at the start
            #   det=4 :: 27 slits with several junk slits in saturated star boxes
            #   det=5 :: 25 slits with one junk slit on a saturated box
            #   det=6 :: 26 slits with a short, legitimate first slit
            #   det=7 :: 26 slits with stars
            #   det=8 :: 25 well-traced slits including a short first one
            #files = ['../RAW_DATA/Keck_DEIMOS/830G_L/'+ifile
            #         for ifile in ['d0914_0014.fits', 'd0914_0015.fits']]  # Longslit in dets 3,7
            #files = glob.glob('data/DEIMOS/Trace_flats/d0526_0*')  # Longslit (Fred's latest)
            files = glob.glob('data/DEIMOS/Trace_flats/d0423_0*')
        if len(files) == 0:
            print("No files!!")
            debugger.set_trace()
        # Bad pixel mask (important!!)
        binbpx = ardeimos.bpm(pargs.det)
        settings['trace']['slits']['sigdetect'] = 50.0
        settings['trace']['slits']['fracignore'] = 0.02  # 0.02 removes star boxes
        settings['trace']['slits']['pca']['params'] = [3, 2, 1, 0]
        # For combine
        user_settings = dict(run={'spectrograph': 'keck_deimos'})
    elif pargs.spectrograph == 'keck_lris_red':
        saturation = 65535.0  # The detector saturation level
        numamplifiers = 2
        if files is None:
            # python dev_trace_slits.py keck_lris_red --outfile=../Cooked/Trace/MasterTrace_KeckLRISr_150420_402 --det=1
            #files = glob.glob('data/LRIS/Trace_flats/r150420_402*')
            #add_user_slits = [[489, 563, 1024]]  # Goes with r150420_402*; and it works
            #   det=1 : missing a slit between two standard stars
            #   det=2 : 12 solid slits
            # python dev_trace_slits.py keck_lris_red --outfile=../Cooked/Trace/MasterTrace_KeckLRISr_20160110_A --det=1
            files = ['data/LRIS/Trace_flats/LR.20160110.10103.fits.gz',  # det=1: finds a ghost slit; crazy edge case
                     'data/LRIS/Trace_flats/LR.20160110.10273.fits.gz']  # det=2: solid
            #files = ['data/LRIS/Trace_flats/LR.20160110.10644.fits.gz',  # det=1: well done, including an overlapping slit
            #         'data/LRIS/Trace_flats/LR.20160110.10717.fits.gz']  # det=2: 21 solid slits including stars
        # Read the binning from the header for the bad pixel mask
        head0 = fits.open(files[0])[0].header
        xbin, ybin = [int(ii) for ii in head0['BINNING'].split(',')]
        binbpx = arlris.core_bpm(xbin, ybin, 'red', pargs.det)
        settings['trace']['slits']['sigdetect'] = 50.0
        settings['trace']['slits']['pca']['params'] = [3, 2, 1, 0]
    elif pargs.spectrograph == 'keck_lris_blue':
        saturation = 65535.0  # The detector saturation level
        numamplifiers = 2
        if files is None:
            files = glob.glob('../RAW_DATA/Keck_LRIS_blue/long_600_4000_d560/b150910_2051*')  # Single twilight
            #files = glob.glob('data/LRIS/Trace_flats/LB.20160109.*')  # det=1: solid; det=2: solid [sigdetect=30]
            #files = glob.glob('data/LRIS/Trace_flats/LB.20160406.*')  # det=1: solid
        settings['trace']['slits']['pca']['params'] = [3, 2, 1, 0]
        settings['trace']['slits']['sigdetect'] = 30.0
    else:
        debugger.set_trace()

    # Combine
    if pargs.pclass:
        from pypit import processimages
        tflats = processimages.ProcessImages(files, user_settings=user_settings)
        mstrace = tflats.combine(bias_subtract='overscan', trim=True)
    else:
        mstrace = combine_frames(pargs.spectrograph, files, pargs.det, settings,
                                 saturation=saturation, numamplifiers=numamplifiers)

    # Bad pixel mask
    if binbpx is None:
        binbpx = np.zeros_like(mstrace)

    # Pixel locations
    pixlocn = arpixels.core_gen_pixloc(mstrace)

    # Trace
    tslits = traceslits.TraceSlits(mstrace, pixlocn, binbpx=binbpx, settings=settings)
    tslit_dict = tslits.run(armlsd=True)  # add_user_slits=add_user_slits
    lordloc = tslit_dict['lcen']
    rordloc = tslit_dict['rcen']

    # Show in Ginga?
    nslit = lordloc.shape[1]
    print("Found {:d} slits".format(nslit))
    if pargs.show:
        viewer, ch = ginga.show_image(mstrace)
        ginga.show_slits(viewer, ch, lordloc, rordloc, np.arange(nslit) + 1, pstep=50)

    # Output to a MasterFrame?
    if pargs.outfile is not None:
        tslits.save_master(pargs.outfile)
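# --- Hedged example --------------------------------------------------------
# A plausible argparse front-end for main(), inferred from the pargs
# attributes used above (spectrograph, files, det, show, outfile, pclass)
# and the example invocations in the comments; the dev script's real parser
# may differ.
import argparse

def example_parse_args():
    parser = argparse.ArgumentParser(description="Develop/test slit tracing")
    parser.add_argument("spectrograph", type=str,
                        help="keck_deimos, keck_lris_red, or keck_lris_blue")
    parser.add_argument("--files", type=str, default=None,
                        help="Root of the trace-flat files (a trailing * is appended)")
    parser.add_argument("--det", type=int, default=1, help="Detector index")
    parser.add_argument("--outfile", type=str, default=None,
                        help="Write the result as a MasterTrace frame to this file")
    parser.add_argument("--show", default=False, action="store_true",
                        help="Show the traced slits in Ginga")
    parser.add_argument("--pclass", default=False, action="store_true",
                        help="Combine frames with the ProcessImages class")
    return parser.parse_args()

if __name__ == '__main__':
    main(example_parse_args())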
def flex_shift(slf, det, obj_skyspec, arx_skyspec):
    """ Calculate shift between object sky spectrum and archive sky spectrum

    Parameters
    ----------
    slf
    det
    obj_skyspec
    arx_skyspec

    Returns
    -------
    flex_dict : dict
      Fit, shift, correlation curve, and the (rebinned) spectra
    """
    # Determine the brightest emission lines
    msgs.warn("If we use Paranal, cut down on wavelength early on")
    arx_amp, arx_cent, arx_wid, arx_w, arx_satsnd, arx_yprep = ararc.detect_lines(
        slf, det, msarc=None, censpec=arx_skyspec.flux.value, MK_SATMASK=False)
    obj_amp, obj_cent, obj_wid, obj_w, obj_satsnd, obj_yprep = ararc.detect_lines(
        slf, det, msarc=None, censpec=obj_skyspec.flux.value, MK_SATMASK=False)

    # Keep only the 5 brightest lines (xxx_keep holds indices within xxx_w of the 5 brightest)
    arx_keep = np.argsort(arx_amp[arx_w])[-5:]
    obj_keep = np.argsort(obj_amp[obj_w])[-5:]

    # Calculate the dispersion (Angstrom per pixel)
    arx_disp = np.append(arx_skyspec.wavelength.value[1] - arx_skyspec.wavelength.value[0],
                         arx_skyspec.wavelength.value[1:] - arx_skyspec.wavelength.value[:-1])
    obj_disp = np.append(obj_skyspec.wavelength.value[1] - obj_skyspec.wavelength.value[0],
                         obj_skyspec.wavelength.value[1:] - obj_skyspec.wavelength.value[:-1])

    # Calculate the resolution (lambda / delta lambda_FWHM)
    arx_idx = (arx_cent + 0.5).astype(int)[arx_w][arx_keep]  # The +0.5 rounds to the nearest pixel
    arx_res = arx_skyspec.wavelength.value[arx_idx] / \
        (arx_disp[arx_idx] * (2 * np.sqrt(2 * np.log(2))) * arx_wid[arx_w][arx_keep])
    obj_idx = (obj_cent + 0.5).astype(int)[obj_w][obj_keep]  # The +0.5 rounds to the nearest pixel
    obj_res = obj_skyspec.wavelength.value[obj_idx] / \
        (obj_disp[obj_idx] * (2 * np.sqrt(2 * np.log(2))) * obj_wid[obj_w][obj_keep])
    msgs.info("Resolution of Archive={:g} and Observation={:g}".format(
        np.median(arx_res), np.median(obj_res)))

    # Determine the sigma of the Gaussian for smoothing
    arx_sig2 = (arx_disp[arx_idx] * arx_wid[arx_w][arx_keep])**2.
    obj_sig2 = (obj_disp[obj_idx] * obj_wid[obj_w][obj_keep])**2.
    arx_med_sig2 = np.median(arx_sig2)
    obj_med_sig2 = np.median(obj_sig2)
    if obj_med_sig2 >= arx_med_sig2:
        smooth_sig = np.sqrt(obj_med_sig2 - arx_med_sig2)  # Angstrom
        smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx])
        # Smooth the higher-resolution (archive) spectrum; flux is conserved
        arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix * 2 * np.sqrt(2 * np.log(2)))
    else:
        smooth_sig_pix = 0.
        msgs.warn("Prefer archival sky spectrum to have higher resolution")
        msgs.warn("New sky has higher resolution than the archive. Not smoothing")

    # Determine the region of wavelength overlap
    min_wave = max(np.amin(arx_skyspec.wavelength.value),
                   np.amin(obj_skyspec.wavelength.value))
    max_wave = min(np.amax(arx_skyspec.wavelength.value),
                   np.amax(obj_skyspec.wavelength.value))
    keep_idx = np.where((obj_skyspec.wavelength.value >= min_wave) &
                        (obj_skyspec.wavelength.value <= max_wave))[0]

    # Rebin both spectra onto the overlapping wavelength range
    if len(keep_idx) <= 50:
        msgs.error("Not enough overlap between sky spectra")
    else:  # Rebin onto the object wavelengths, ALWAYS
        keep_wave = obj_skyspec.wavelength[keep_idx]
        arx_skyspec = arx_skyspec.rebin(keep_wave)
        obj_skyspec = obj_skyspec.rebin(keep_wave)

    # Trim edges (rebinning is junk there)
    arx_skyspec.data['flux'][0, :2] = 0.
    arx_skyspec.data['flux'][0, -2:] = 0.
    obj_skyspec.data['flux'][0, :2] = 0.
    obj_skyspec.data['flux'][0, -2:] = 0.

    # Normalize spectra to unit average sky count
    norm = np.sum(obj_skyspec.flux.value) / obj_skyspec.npix
    obj_skyspec.flux = obj_skyspec.flux / norm
    norm2 = np.sum(arx_skyspec.flux.value) / arx_skyspec.npix
    arx_skyspec.flux = arx_skyspec.flux / norm2
    if norm < 0.:
        msgs.warn("Bad normalization of object in flexure algorithm")
        msgs.warn("Will try the median")
        norm = np.median(obj_skyspec.flux.value)
        if norm < 0.:
            msgs.error("Improper sky spectrum for flexure. Is it too faint??")
    if norm2 < 0.:
        msgs.error("Bad normalization of archive in flexure. "
                   "You are probably using wavelengths well beyond the archive.")

    # Deal with bad pixels
    msgs.work("Need to mask bad pixels")

    # Subtract the underlying continuum from each spectrum
    msgs.work("Consider taking median first [5 pixel]")
    everyn = obj_skyspec.npix // 20
    mask, ct = arutils.robust_polyfit(obj_skyspec.wavelength.value, obj_skyspec.flux.value,
                                      3, function='bspline', sigma=3., everyn=everyn)
    obj_sky_cont = arutils.func_val(ct, obj_skyspec.wavelength.value, 'bspline')
    obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont
    mask, ct_arx = arutils.robust_polyfit(arx_skyspec.wavelength.value, arx_skyspec.flux.value,
                                          3, function='bspline', sigma=3., everyn=everyn)
    arx_sky_cont = arutils.func_val(ct_arx, arx_skyspec.wavelength.value, 'bspline')
    arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont

    # Consider sharpness filtering (e.g. LowRedux)
    msgs.work("Consider taking median first [5 pixel]")

    # Cross-correlate the continuum-subtracted spectra
    corr = np.correlate(arx_sky_flux, obj_sky_flux, "same")

    # Create an array around the peak of the correlation function for subpixel fitting;
    # restrict to pixels within maxshift of zero lag
    lag0 = corr.size // 2
    mxshft = settings.argflag['reduce']['flexure']['maxshift']
    max_corr = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    subpix_grid = np.linspace(max_corr - 3., max_corr + 3., 7)

    # Fit a 2nd-degree polynomial to the peak of the correlation function
    fit = arutils.func_fit(subpix_grid, corr[subpix_grid.astype(int)], 'polynomial', 2)
    max_fit = -0.5 * fit[1] / fit[2]

    # Calculate the shift in pixels
    shift = float(max_fit) - lag0
    msgs.info("Flexure correction of {:g} pixels".format(shift))

    if msgs._debug['flexure']:
        debugger.plot1d(arx_skyspec.wavelength, arx_sky_flux,
                        xtwo=np.roll(obj_skyspec.wavelength, int(-1 * shift)),
                        ytwo=obj_sky_flux)
        debugger.set_trace()

    flex_dict = dict(polyfit=fit, shift=shift, subpix=subpix_grid,
                     corr=corr[subpix_grid.astype(int)],
                     sky_spec=obj_skyspec, arx_spec=arx_skyspec,
                     corr_cen=lag0, smooth=smooth_sig_pix)
    # Return
    return flex_dict
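# --- Hedged example --------------------------------------------------------
# A self-contained sketch of the shift measurement used in flex_shift():
# cross-correlate two continuum-subtracted spectra, then refine the peak to
# subpixel precision with a quadratic fit. np.polyfit stands in for
# arutils.func_fit; the toy spectra are synthetic.
import numpy as np

def example_measure_shift(arx_flux, obj_flux, mxshft=20):
    corr = np.correlate(arx_flux, obj_flux, "same")
    lag0 = corr.size // 2
    # Restrict the peak search to within mxshft pixels of zero lag
    max_corr = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
    subpix = np.arange(max_corr - 3, max_corr + 4)
    # polyfit returns the highest-order coefficient first: corr ~ a*x**2 + b*x + c
    a, b, c = np.polyfit(subpix, corr[subpix], 2)
    return -0.5 * b / a - lag0  # parabola vertex, relative to zero lag

def example_flexure():
    # Two Gaussian "sky lines" offset by 2.3 pixels; the measured shift should
    # recover that offset (its sign follows np.correlate's lag convention)
    x = np.arange(500)
    arx = np.exp(-0.5 * ((x - 250.0) / 3.)**2)
    obj = np.exp(-0.5 * ((x - 252.3) / 3.)**2)
    print("Measured shift: {:.2f} pixels".format(example_measure_shift(arx, obj)))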