def update_model(self):
    """Update the absorbed-flux (DLA) model shown in the spectral widget.

    Collects the DLA lines from every system in
    ``self.abssys_widg.all_abssys``, computes the total Lyman-series
    optical depth on a (optionally finer) wavelength grid, converts it to
    a transmitted flux, smooths it, and stores the result in
    ``self.dla_model`` / ``self.full_model`` / ``self.spec_widg``.
    """
    from linetools.analysis import voigt as lav
    # No systems defined -- clear any previous model and bail out
    if len(self.abssys_widg.all_abssys) == 0:
        self.dla_model = None
        self.spec_widg.model = None
        return
    # use finer wavelength array to resolve absorption features.
    wa = self.full_model.wavelength  # Angstroms
    # should really make this a constant velocity width array instead.
    if not self.skip_wveval:
        # Linear grid at self.dw spacing, keeping the original unit
        wa1 = np.arange(wa[0].value, wa[-1].value, self.dw) * wa.unit
    else:
        wa1 = wa
    #all_tau_model = igmlls.tau_multi_lls(wa1,
    #    self.abssys_widg.all_abssys, skip_wveval=self.skip_wveval)
    # Gather every DLA line from every system into one flat list
    all_lines = []
    for abssys in self.abssys_widg.all_abssys:
        for iline in abssys.dla_lines:
            all_lines.append(iline)
    #QtCore.pyqtRemoveInputHook()
    #import pdb; pdb.set_trace()
    #QtCore.pyqtRestoreInputHook()
    # Total optical depth from all lines at once
    tau_Lyman = lav.voigt_from_abslines(wa1, all_lines, ret='tau',
                                        skip_wveval=self.skip_wveval)
    '''
    # Loop on forest lines
    for forest in self.all_forest:
        tau_Lyman = lav.voigt_from_abslines(wa1, forest.lines, ret='tau',
            skip_wveval=self.skip_wveval)
        all_tau_model += tau_Lyman
    '''
    all_tau_model = tau_Lyman
    # Flux and smooth
    flux = np.exp(-1. * all_tau_model)
    if self.smooth > 0:
        if not self.skip_wveval:
            # Scale the kernel width to the finer pixel size of wa1
            mult = np.median(np.diff(wa.value)) / self.dw
            flux = lsc.convolve_psf(flux, self.smooth * mult)
        else:
            flux = lsc.convolve_psf(flux, self.smooth)
    # Interpolate back onto the native wavelength grid when needed
    if not self.skip_wveval:
        self.dla_model = np.interp(wa.value, wa1.value, flux)
    else:
        self.dla_model = flux
    # Finish
    # NOTE(review): conti_dict['co'] is presumably the continuum level --
    # confirm against where conti_dict is built.
    self.full_model.flux = self.dla_model * self.conti_dict['co']
    # Over-absorbed: pixels where the model absorbs (<0.7) yet falls more
    # than 1.5 sigma below the data
    self.spec_widg.bad_model = np.where(
        (self.dla_model < 0.7) &
        (self.full_model.flux < (self.spec_widg.spec.flux -
                                 self.spec_widg.spec.sig * 1.5)))[0]
    # Model
    self.spec_widg.model = self.full_model
def update_model(self):
    """Update the absorbed-flux (DLA) model shown in the spectral widget.

    Flattens the DLA lines of every system in
    ``self.abssys_widg.all_abssys``, evaluates their combined Lyman-series
    optical depth on a (optionally finer) wavelength grid, turns it into a
    transmitted flux, smooths, and publishes the result via
    ``self.dla_model`` / ``self.full_model`` / ``self.spec_widg``.
    """
    from linetools.analysis import voigt as lav
    # Nothing to model -- reset widget state and return
    if len(self.abssys_widg.all_abssys) == 0:
        self.dla_model = None
        self.spec_widg.model = None
        return
    # use finer wavelength array to resolve absorption features.
    wa = self.full_model.wavelength  # Angstroms
    # should really make this a constant velocity width array instead.
    if not self.skip_wveval:
        # Linear grid with self.dw spacing, same unit as wa
        wa1 = np.arange(wa[0].value, wa[-1].value, self.dw) * wa.unit
    else:
        wa1 = wa
    #all_tau_model = igmlls.tau_multi_lls(wa1,
    #    self.abssys_widg.all_abssys, skip_wveval=self.skip_wveval)
    # One flat list of all DLA lines across all systems
    all_lines = []
    for abssys in self.abssys_widg.all_abssys:
        for iline in abssys.dla_lines:
            all_lines.append(iline)
    #QtCore.pyqtRemoveInputHook()
    #import pdb; pdb.set_trace()
    #QtCore.pyqtRestoreInputHook()
    tau_Lyman = lav.voigt_from_abslines(wa1, all_lines, ret='tau',
                                        skip_wveval=self.skip_wveval)
    '''
    # Loop on forest lines
    for forest in self.all_forest:
        tau_Lyman = lav.voigt_from_abslines(wa1, forest.lines, ret='tau',
            skip_wveval=self.skip_wveval)
        all_tau_model += tau_Lyman
    '''
    all_tau_model = tau_Lyman
    # Flux and smooth
    flux = np.exp(-1. * all_tau_model)
    if self.smooth > 0:
        if not self.skip_wveval:
            # Rescale smoothing width to the finer grid's pixel size
            mult = np.median(np.diff(wa.value)) / self.dw
            flux = lsc.convolve_psf(flux, self.smooth * mult)
        else:
            flux = lsc.convolve_psf(flux, self.smooth)
    # Map the model back onto the native grid when a finer grid was used
    if not self.skip_wveval:
        self.dla_model = np.interp(wa.value, wa1.value, flux)
    else:
        self.dla_model = flux
    # Finish
    # NOTE(review): conti_dict['co'] looks like the continuum level --
    # confirm where conti_dict is populated.
    self.full_model.flux = self.dla_model * self.conti_dict['co']
    # Over-absorbed: absorbing pixels (<0.7) where the model undershoots
    # the data by more than 1.5 sigma
    self.spec_widg.bad_model = np.where(
        (self.dla_model < 0.7) &
        (self.full_model.flux < (self.spec_widg.spec.flux -
                                 self.spec_widg.spec.sig * 1.5)))[0]
    # Model
    self.spec_widg.model = self.full_model
def update_model(self):
    """Update the absorbed-flux (LLS) model shown in the spectral widget.

    Sums the optical depth of every system in
    ``self.abssys_widg.all_abssys`` (via ``igmlls.tau_multi_lls``) and of
    the forest lines in ``self.all_forest``, converts to a transmitted
    flux, smooths, and stores the result in ``self.lls_model`` /
    ``self.full_model`` / ``self.spec_widg``.
    """
    from linetools.analysis import voigt as lav
    # Nothing to model -- clear any previous model and bail out
    if len(self.abssys_widg.all_abssys) == 0:
        self.lls_model = None
        self.spec_widg.model = None
        return
    # use finer wavelength array to resolve absorption features.
    wa = self.full_model.wavelength  # Angstroms
    # should really make this a constant velocity width array instead.
    if not self.skip_wveval:
        wa1 = np.arange(wa[0].value, wa[-1].value, self.dw) * wa.unit
    else:
        wa1 = wa
    # Total tau from all LLS systems
    all_tau_model = igmlls.tau_multi_lls(wa1, self.abssys_widg.all_abssys,
                                         skip_wveval=self.skip_wveval)
    # Loop on forest lines
    for forest in self.all_forest:
        tau_Lyman = lav.voigt_from_abslines(wa1, forest.lines, ret='tau',
                                            skip_wveval=self.skip_wveval)
        all_tau_model += tau_Lyman
    # Flux and smooth
    flux = np.exp(-1. * all_tau_model)
    if self.smooth > 0:
        if not self.skip_wveval:
            # Scale the kernel width to the finer pixel size of wa1
            mult = np.median(np.diff(wa.value)) / self.dw
            flux = lsc.convolve_psf(flux, self.smooth * mult)
        else:
            flux = lsc.convolve_psf(flux, self.smooth)
    # Interpolate back onto the native wavelength grid when needed
    if not self.skip_wveval:
        self.lls_model = np.interp(wa.value, wa1.value, flux)
    else:
        self.lls_model = flux
    # Finish
    self.full_model.flux = self.lls_model * self.continuum.flux
    # Over-absorbed pixels: best-effort only (array/sig mismatches must
    # not kill a GUI redraw), but was a bare `except:` -- narrowed so
    # KeyboardInterrupt/SystemExit still propagate.
    try:
        self.spec_widg.bad_model = np.where(
            (self.lls_model < 0.7) &
            (self.full_model.flux < (self.spec_widg.spec.flux -
                                     self.spec_widg.spec.sig * 1.5)))[0]
    except Exception:
        pass
    # Model
    self.spec_widg.model = self.full_model
def update_model(self):
    """Recompute the LLS absorption model and hand it to the spectral widget."""
    systems = self.abssys_widg.all_abssys
    if not systems:
        # No systems defined: wipe any previous model
        self.lls_model = None
        self.spec_widg.model = None
        return
    dispersion = self.full_model.dispersion
    # Optical depth from every LLS system
    tau_total = xialu.tau_multi_lls(dispersion, systems)
    # Add each forest line's Lyman-series contribution
    for forest in self.all_forest:
        tau_total = tau_total + xsv.voigt_model(dispersion, forest.lines,
                                                flg_ret=2)
    # Transmitted flux, optionally smoothed by the GUI kernel width
    transmission = np.exp(-1. * tau_total)
    if self.smooth > 0:
        self.lls_model = lsc.convolve_psf(transmission, self.smooth)
    else:
        self.lls_model = transmission
    # Apply the continuum and publish
    self.full_model.flux = self.lls_model * self.continuum.flux
    self.spec_widg.model = self.full_model
def gauss_smooth(self, fwhm, **kwargs):
    """Convolve the flux with a Gaussian of the given FWHM.

    The uncertainty array is carried over unchanged (it is not smoothed).

    Parameters
    ----------
    fwhm : float
      FWHM of the Gaussian kernel in pixels (unitless)

    Returns
    -------
    XSpectrum1D
      A new instance holding the smoothed flux
    """
    from linetools.spectra import convolve as lsc
    # Smooth the raw values, then re-attach the original flux unit
    smoothed = lsc.convolve_psf(self.flux.value, fwhm, **kwargs)
    smoothed = smoothed * self.flux.unit
    # Propagate sigma only when one is defined
    sig = self.sig.value if self.sig_is_set else None
    return XSpectrum1D.from_tuple((self.wavelength, smoothed, sig),
                                  meta=self.meta.copy())
def update_model(self):
    """Update the LLS absorption model shown in the spectral widget.

    For each system the optical depth is the sum of the Lyman-limit
    photoionization continuum and the Lyman-series Voigt profiles; forest
    lines are added on top.  The transmitted flux is smoothed and stored
    in ``self.lls_model`` / ``self.full_model`` / ``self.spec_widg``.
    """
    # Nothing to model -- clear and return
    if len(self.abssys_widg.all_abssys) == 0:
        self.lls_model = None
        self.spec_widg.model = None
        return
    #
    all_tau_model = np.zeros(len(self.full_model.flux))
    # Loop on LLS
    for lls in self.abssys_widg.all_abssys:
        # LL: rest-frame wavelengths and photon energies for this system
        wv_rest = self.full_model.dispersion / (lls.zabs + 1)
        energy = wv_rest.to(u.eV, equivalencies=u.spectral())
        # Get photo_cross and calculate tau: N_HI * sigma(HI, energy)
        tau_LL = (10.**lls.NHI / u.cm**2) * xatomi.photo_cross(
            1, 1, energy)
        # Lyman-series Voigt profiles
        tau_Lyman = xsv.voigt_model(self.full_model.dispersion,
                                    lls.lls_lines, flg_ret=2)
        tau_model = tau_LL + tau_Lyman
        # Kludge around the limit: flatten tau over 911.5-912.8 A (rest)
        # to the value at ~911.3 A, papering over the series/continuum seam
        pix_LL = np.argmin(np.fabs(wv_rest - 911.3 * u.AA))
        pix_kludge = np.where((wv_rest > 911.5 * u.AA) &
                              (wv_rest < 912.8 * u.AA))[0]
        tau_model[pix_kludge] = tau_model[pix_LL]
        # Add
        all_tau_model += tau_model
    # Loop on forest lines
    for forest in self.all_forest:
        tau_Lyman = xsv.voigt_model(self.full_model.dispersion,
                                    forest.lines, flg_ret=2)
        all_tau_model += tau_Lyman
    # Flux and smooth
    flux = np.exp(-1. * all_tau_model)
    if self.smooth > 0:
        self.lls_model = lsc.convolve_psf(flux, self.smooth)
    else:
        self.lls_model = flux
    # Finish
    self.full_model.flux = self.lls_model * self.continuum.flux
    self.spec_widg.model = self.full_model
def lls_model(wave, all_lls, smooth=0.):
    """Generate a normalized LLS absorption model.

    Parameters
    ----------
    wave : array
      Wavelength array, passed through to ``tau_multi_lls``
    all_lls : list
      LLS systems to include in the model
    smooth : float, optional
      Gaussian kernel width in pixels; no smoothing when <= 0

    Returns
    -------
    norm_flux : ndarray
      exp(-tau), optionally smoothed
    """
    from linetools.spectra import convolve as lsc
    from xastropy.igm.abs_sys import lls_utils as xialu
    # Total optical depth from the full set of systems
    tau = xialu.tau_multi_lls(wave, all_lls)
    norm_flux = np.exp(-1. * tau)
    # Smooth only when a positive kernel width was requested
    if smooth > 0:
        norm_flux = lsc.convolve_psf(norm_flux, smooth)
    return norm_flux
def update_model(self):
    """Update the LLS absorption model shown in the spectral widget.

    Per system, tau is the Lyman-limit photoionization continuum plus the
    Lyman-series Voigt profiles; forest lines add on top.  The resulting
    transmitted flux is smoothed and stored in ``self.lls_model`` /
    ``self.full_model`` / ``self.spec_widg``.
    """
    # Nothing to model -- clear and return
    if len(self.abssys_widg.all_abssys) == 0:
        self.lls_model = None
        self.spec_widg.model = None
        return
    #
    all_tau_model = np.zeros(len(self.full_model.flux))
    # Loop on LLS
    for lls in self.abssys_widg.all_abssys:
        # LL: rest-frame wavelengths and photon energies for this system
        wv_rest = self.full_model.dispersion / (lls.zabs+1)
        energy = wv_rest.to(u.eV, equivalencies=u.spectral())
        # Get photo_cross and calculate tau: N_HI * sigma(HI, energy)
        tau_LL = (10.**lls.NHI / u.cm**2) * xatomi.photo_cross(1, 1, energy)
        # Lyman-series Voigt profiles
        tau_Lyman = xsv.voigt_model(self.full_model.dispersion,
                                    lls.lls_lines, flg_ret=2)
        tau_model = tau_LL + tau_Lyman
        # Kludge around the limit: flatten tau over 911.5-912.8 A (rest)
        # to its value at ~911.3 A, hiding the series/continuum seam
        pix_LL = np.argmin(np.fabs(wv_rest - 911.3*u.AA))
        pix_kludge = np.where((wv_rest > 911.5*u.AA) &
                              (wv_rest < 912.8*u.AA))[0]
        tau_model[pix_kludge] = tau_model[pix_LL]
        # Add
        all_tau_model += tau_model
    # Loop on forest lines
    for forest in self.all_forest:
        tau_Lyman = xsv.voigt_model(self.full_model.dispersion,
                                    forest.lines, flg_ret=2)
        all_tau_model += tau_Lyman
    # Flux and smooth
    flux = np.exp(-1. * all_tau_model)
    if self.smooth > 0:
        self.lls_model = lsc.convolve_psf(flux, self.smooth)
    else:
        self.lls_model = flux
    # Finish
    self.full_model.flux = self.lls_model * self.continuum.flux
    self.spec_widg.model = self.full_model
def gauss_smooth(self, fwhm, **kwargs): """ Smooth a spectrum with a Gaussian Need to consider smoothing the uncertainty array Parameters ---------- fwhm: float FWHM of the Gaussian in pixels (unitless) Returns: -------- XSpectrum1D of the smoothed spectrum Returns: """ # Import from linetools.spectra import convolve as lsc # Apply to flux new_fx = lsc.convolve_psf(self.flux.value, fwhm, **kwargs) * self.flux.unit # Return return XSpectrum1D.from_array(self.dispersion, new_fx, meta=self.meta.copy(), uncertainty=self.uncertainty)
def auto_plls(self, x, y):
    """Automatically fit a partial LLS (pLLS) at a clicked GUI position.

    Builds a toy LLS whose Lyman limit sits at the clicked wavelength and
    whose NHI reproduces the clicked flux depth, rolls its model spectrum
    across the allowed redshift range, and adds the best offset as a new
    system via ``self.add_LLS`` when the over-absorption criterion passes.

    Parameters
    ----------
    x, y : float
        Wavelength and flux of the click in the GUI.
    """
    spec = self.spec_widg.spec  # For convenience
    # Continuum: the running full model once systems exist, else the raw one
    if len(self.abssys_widg.all_abssys) > 0:
        conti = self.full_model
    else:
        conti = self.continuum
    # Generate toy LLS from click
    ximn = np.argmin(np.abs(spec.dispersion.value - x))
    # NHI from the click's depth relative to the continuum
    NHI = 17.29 + np.log10(-1. * np.log(y / conti.flux.value[ximn]))
    #print('NHI={:g}'.format(NHI))
    z = x / (911.7) - 1  # Lyman-limit redshift of the click
    plls = LLSSystem((0 * u.deg, 0 * u.deg), z,
                     [-300., 300] * u.km / u.s, NHI=NHI)
    plls.bval = 20 * u.km / u.s
    plls.fill_lls_lines(bval=20 * u.km / u.s, do_analysis=0)
    # wrest, Tau model, flux
    wrest = spec.dispersion / (1 + plls.zabs)
    tau = igmlls.tau_multi_lls(spec.dispersion, [plls])
    emtau = np.exp(-1. * tau)
    lls_flux = lsc.convolve_psf(emtau, 3.)
    # zmin (next highest LLS or zem)
    if len(self.abssys_widg.all_abssys) != 0:
        zlls = [lls.zabs for lls in self.abssys_widg.all_abssys
                if lls.zabs > plls.zabs]
        if len(zlls) == 0:
            zmin = self.zqso + 0.01
        else:
            zmin = np.min(np.array(zlls)) - 0.01
    else:
        zmin = self.zqso + 0.01
    # Pixels for analysis and rolling
    # NEED TO CUT ON X-Shooter ARM
    apix = np.where((wrest > 914 * u.AA) &
                    (spec.dispersion < (1 + zmin) * 1026. * u.AA))[0]  # Might go to Lyb
    # Extra 0.01 for bad z
    nroll = (np.argmin(np.abs(spec.dispersion - (911.7 * u.AA * (1 + zmin)))) -
             np.argmin(np.abs(spec.dispersion - (911.7 * u.AA * (1 + plls.zabs)))))
    # Require nroll does not exceed length of spectrum
    if np.max(apix) + nroll > len(spec.dispersion):
        nroll = len(spec.dispersion) - np.max(apix) - 1
    gdpix = np.arange(np.min(apix) - nroll, np.max(apix) + nroll + 1)
    roll_flux = np.concatenate([np.ones(nroll), lls_flux[apix],
                                np.ones(nroll)])
    roll_msk = roll_flux < 0.7  # Only test truly absorbed pixels
    # Generate data arrays over the padded window
    flux_pad = spec.flux[gdpix]
    sig_pad = spec.sig[gdpix]
    # The original had an if/else here whose two branches were identical;
    # `conti` was already selected above, so one assignment suffices.
    conti_pad = conti.flux[gdpix]
    # Generate matricies -- one column per trial redshift offset
    flux_matrix = np.zeros((len(roll_flux), nroll))
    sig_matrix = np.zeros((len(roll_flux), nroll))
    conti_matrix = np.zeros((len(roll_flux), nroll))
    roll_matrix = np.zeros((len(roll_flux), nroll))
    mask_matrix = np.zeros((len(roll_flux), nroll))
    for kk in range(nroll):
        roll_matrix[:, kk] = np.roll(roll_flux, kk)
        mask_matrix[:, kk] = np.roll(roll_msk, kk)
        flux_matrix[:, kk] = flux_pad
        conti_matrix[:, kk] = conti_pad
        sig_matrix[:, kk] = sig_pad
    # Model -- Multiply by continuum
    model = roll_matrix * conti_matrix
    # Condition: model over-absorbs the data by more than 1.5 sigma
    idx = np.where((model < (flux_matrix - sig_matrix * 1.5)) &
                   (mask_matrix == True))
    bad_matrix = np.zeros((len(roll_flux), nroll))
    bad_matrix[idx] = 1
    # Sum on offsets and get redshift of the least-bad offset
    bad = np.sum(bad_matrix, 0)
    ibest = np.argmin(bad)
    zbest = spec.dispersion[ibest + ximn] / (911.7 * u.AA) - 1  # Quantity
    # Add pLLS?
    if bad[ibest] < 10:
        self.add_LLS(zbest.value, bval=20. * u.km / u.s, NHI=NHI)
    else:
        print('No viable pLLS found with our criteria!')
def evaluate(wave, logN, b, z, wrest, f, gamma, fwhm):
    """Evaluate a single Voigt absorption profile, optionally smoothed.

    Wavelengths are divided by 1e8 and b is multiplied by 1e5 before the
    ``voigt_tau`` call -- presumably Angstrom->cm and km/s->cm/s
    conversions; confirm against voigt_tau's expected units.
    """
    params = [logN, z, b * 1e5, wrest / 1e8, f, gamma]
    profile = np.exp(-1 * voigt_tau(wave / 1e8, params))
    # Apply instrumental smoothing only for a positive FWHM
    return lsc.convolve_psf(profile, fwhm) if fwhm > 0. else profile
def auto_plls(self, x, y):
    """Automatically fit a partial LLS (pLLS) at a clicked GUI position.

    Builds a toy LLS whose Lyman limit sits at the clicked wavelength and
    whose NHI reproduces the clicked flux depth, rolls its model spectrum
    across the allowed redshift range, and adds the best offset as a new
    system via ``self.add_LLS`` when the over-absorption criterion passes.

    Parameters
    ----------
    x, y : float
        Wavelength and flux of the click in the GUI.
    """
    spec = self.spec_widg.spec  # For convenience
    # Continuum: the running full model once systems exist, else the raw one
    if len(self.abssys_widg.all_abssys) > 0:
        conti = self.full_model
    else:
        conti = self.continuum
    # Generate toy LLS from click
    ximn = np.argmin(np.abs(spec.wavelength.value - x))
    # A click above the continuum gives log of a negative number below
    if y > conti.flux.value[ximn]:
        print("Type F below the continuum fool!")
        return
    # NHI from the click's depth relative to the continuum
    NHI = 17.29 + np.log10(-1. * np.log(y / conti.flux.value[ximn]))
    #print('NHI={:g}'.format(NHI))
    z = x / (911.7) - 1  # Lyman-limit redshift of the click
    plls = LLSSystem((0 * u.deg, 0 * u.deg), z,
                     [-300., 300] * u.km / u.s, NHI=NHI)
    plls.bval = 20 * u.km / u.s
    plls.fill_lls_lines(bval=20 * u.km / u.s, do_analysis=0)
    # wrest, Tau model, flux
    wrest = spec.wavelength / (1 + plls.zabs)
    tau = igmlls.tau_multi_lls(spec.wavelength, [plls])
    emtau = np.exp(-1. * tau)
    lls_flux = lsc.convolve_psf(emtau, 3.)
    # zmin (next highest LLS or zem)
    if len(self.abssys_widg.all_abssys) != 0:
        zlls = [lls.zabs for lls in self.abssys_widg.all_abssys
                if lls.zabs > plls.zabs]
        if len(zlls) == 0:
            zmin = self.zqso + 0.05
        else:
            zmin = np.min(np.array(zlls)) - 0.01
    else:
        zmin = self.zqso + 0.05
    # Pixels for analysis and rolling
    # NEED TO CUT ON X-Shooter ARM
    apix = np.where((wrest > 914 * u.AA) &
                    (spec.wavelength < (1 + zmin) * 1026. * u.AA))[0]  # Might go to Lyb
    # Extra 0.01 for bad z
    nroll = (np.argmin(np.abs(spec.wavelength - (911.7 * u.AA * (1 + zmin)))) -
             np.argmin(np.abs(spec.wavelength - (911.7 * u.AA * (1 + plls.zabs)))))
    # Require nroll does not exceed length of spectrum
    if np.max(apix) + nroll > len(spec.wavelength):
        nroll = len(spec.wavelength) - np.max(apix) - 1
    gdpix = np.arange(np.min(apix) - nroll, np.max(apix) + nroll + 1)
    roll_flux = np.concatenate([np.ones(nroll), lls_flux[apix],
                                np.ones(nroll)])
    roll_msk = roll_flux < 0.7  # Only test truly absorbed pixels
    # Generate data arrays over the padded window
    flux_pad = spec.flux[gdpix]
    sig_pad = spec.sig[gdpix]
    # The original had an if/else here whose two branches were identical;
    # `conti` was already selected above, so one assignment suffices.
    conti_pad = conti.flux[gdpix]
    # Generate matricies -- one column per trial redshift offset
    flux_matrix = np.zeros((len(roll_flux), nroll))
    sig_matrix = np.zeros((len(roll_flux), nroll))
    conti_matrix = np.zeros((len(roll_flux), nroll))
    roll_matrix = np.zeros((len(roll_flux), nroll))
    mask_matrix = np.zeros((len(roll_flux), nroll))
    for kk in range(nroll):
        roll_matrix[:, kk] = np.roll(roll_flux, kk)
        mask_matrix[:, kk] = np.roll(roll_msk, kk)
        flux_matrix[:, kk] = flux_pad
        conti_matrix[:, kk] = conti_pad
        sig_matrix[:, kk] = sig_pad
    # Model -- Multiply by continuum
    model = roll_matrix * conti_matrix
    # Condition: model over-absorbs the data by more than 1.5 sigma
    idx = np.where((model < (flux_matrix - sig_matrix * 1.5)) &
                   (mask_matrix == True))
    bad_matrix = np.zeros((len(roll_flux), nroll))
    bad_matrix[idx] = 1
    # Sum on offsets and get redshift of the least-bad offset
    bad = np.sum(bad_matrix, 0)
    ibest = np.argmin(bad)
    zbest = spec.wavelength[ibest + ximn] / (911.7 * u.AA) - 1  # Quantity
    # Add pLLS?
    if bad[ibest] < 10:
        self.add_LLS(zbest.value, bval=20. * u.km / u.s, NHI=NHI)
    else:
        print('No viable pLLS found with our criteria!')
def evaluate(wave, logN, b, z, wrest, f, gamma, fwhm):
    """Evaluate a single Voigt absorption profile, optionally smoothed.

    The 1e8 divisors and 1e5 multiplier convert the inputs before the
    ``voigt_tau`` call -- presumably Angstrom->cm and km/s->cm/s; confirm
    against voigt_tau's expected units.
    """
    wave_cm = wave / 1e8
    wrest_cm = wrest / 1e8
    b_cms = b * 1e5
    tau = voigt_tau(wave_cm, [logN, z, b_cms, wrest_cm, f, gamma])
    fx = np.exp(-1 * tau)
    # Instrumental smoothing only when a positive FWHM is supplied
    if fwhm > 0.:
        fx = lsc.convolve_psf(fx, fwhm)
    return fx