Example #1
0
def tau_multi_lls(wave, all_lls):
    '''Compute the summed LLS opacity on an observed wavelength grid.

    Parameters:
    -----------
    wave: Quantity array
      Observed-frame wavelengths
    all_lls: List of LLS Class
      Absorption systems contributing opacity

    Returns:
    --------
    Array of total optical depth, one value per input wavelength
    '''
    from xastropy.atomic import ionization as xai
    from xastropy.spec import voigt as xsv

    total_tau = np.zeros(len(wave))
    for lls_sys in all_lls:
        # Shift to the absorber rest frame and convert to photon energy
        rest_wave = wave / (lls_sys.zabs + 1)
        photon_energy = rest_wave.to(u.eV, equivalencies=u.spectral())

        # Lyman-limit continuum opacity: N_HI times the photoionization cross-section
        tau_limit = (10.**lls_sys.NHI / u.cm**2) * xai.photo_cross(1, 1, photon_energy)

        # Lyman-series line opacity from the Voigt model
        tau_series = xsv.voigt_model(wave, lls_sys.lls_lines, flg_ret=2)
        tau_sys = tau_limit + tau_series

        # Kludge: pin the pixels just redward of the limit to the ~911.3 A value
        anchor = np.argmin(np.fabs(rest_wave - 911.3 * u.AA))
        patch = np.where((rest_wave > 911.5 * u.AA)
                         & (rest_wave < 912.8 * u.AA))[0]
        tau_sys[patch] = tau_sys[anchor]

        total_tau += tau_sys
    return total_tau
Example #2
0
def tau_multi_lls(wave, all_lls):
    '''Sum the optical depth of several LLS systems on an observed grid.

    Parameters:
    -----------
    wave: Quantity array
      Observed-frame wavelengths
    all_lls: List of LLS Class
      The LLS systems to sum over

    Returns:
    --------
    Array of summed optical depth at each wavelength
    '''
    from xastropy.atomic import ionization as xai
    from xastropy.spec import voigt as xsv

    def _system_tau(lls):
        # Rest-frame wavelengths / photon energies for this absorber
        lam_rest = wave / (lls.zabs + 1)
        e_phot = lam_rest.to(u.eV, equivalencies=u.spectral())
        # Continuum (Lyman-limit) plus line (Lyman-series) opacity
        tau = (10.**lls.NHI / u.cm**2) * xai.photo_cross(1, 1, e_phot)
        tau = tau + xsv.voigt_model(wave, lls.lls_lines, flg_ret=2)
        # Kludge: flatten the region just above the limit to the ~911.3 A value
        anchor = np.argmin(np.fabs(lam_rest - 911.3 * u.AA))
        sel = np.where((lam_rest > 911.5 * u.AA) & (lam_rest < 912.8 * u.AA))[0]
        tau[sel] = tau[anchor]
        return tau

    result = np.zeros(len(wave))
    for lls in all_lls:
        result += _system_tau(lls)
    return result
Example #3
0
    def flux_model(self, spec, smooth=0):
        """
        Build the LLS absorption model on the input spectrum's grid.

        Parameters:
          spec:  Spectrum1D
            Input spectrum supplying the dispersion array
          smooth : (0) Number of pixels to smooth by

        Returns:
          A deep copy of `spec` whose flux holds the model
        """
        reload(xsv)

        # ----- Lyman-limit continuum opacity -----
        # Rest-frame photon energies for the photoionization cross-section
        lam_rest = spec.dispersion / (self.zabs + 1)
        e_phot = lam_rest.to(u.eV, equivalencies=u.spectral())
        tau_limit = (10.**self.NHI / u.cm**2) * xatomi.photo_cross(1, 1, e_phot)

        # ----- Lyman-series line opacity -----
        # Make sure the line list has been populated first
        if 'lls_lines' not in self.__dict__.keys():
            self.fill_lls_lines()
        tau_series = xsv.voigt_model(spec.dispersion, self.lls_lines, flg_ret=2)

        tau_total = tau_limit + tau_series

        # Kludge: pin pixels just redward of the limit to the ~911.3 A opacity
        anchor = np.argmin(np.fabs(lam_rest - 911.3 * u.AA))
        sel = np.where((lam_rest > 911.5 * u.AA)
                       & (lam_rest < 912.8 * u.AA))[0]
        tau_total[sel] = tau_total[anchor]

        # Model flux = e^(-tau), on a deep copy so the input is untouched
        model = copy.deepcopy(spec)
        model.flux = np.exp(-1. * tau_total).value

        # Optional Gaussian smoothing
        if smooth > 0:
            model.gauss_smooth(smooth)
        return model
Example #4
0
    def flux_model(self, spec, smooth=0):
        """
        Return a copy of `spec` whose flux is this LLS's absorption model.

        Parameters:
          spec:  Spectrum1D
            Spectrum providing the dispersion (wavelength) array
          smooth : (0) Number of pixels to smooth by

        Returns:
          Spectrum1D holding the model flux
        """
        reload(xsv)

        # Ensure the Lyman-series line list is populated
        if 'lls_lines' not in self.__dict__.keys():
            self.fill_lls_lines()

        # Rest-frame energies for the Lyman-limit cross-section
        lam_rest = spec.dispersion / (self.zabs + 1)
        e_phot = lam_rest.to(u.eV, equivalencies=u.spectral())

        # Total opacity: photoionization continuum + Voigt line profiles
        tau_total = (10.**self.NHI / u.cm**2) * xatomi.photo_cross(1, 1, e_phot)
        tau_total = tau_total + xsv.voigt_model(spec.dispersion, self.lls_lines,
                                                flg_ret=2)

        # Kludge around the limit: copy the ~911.3 A value over nearby pixels
        anchor = np.argmin(np.fabs(lam_rest - 911.3 * u.AA))
        sel = np.where((lam_rest > 911.5 * u.AA) & (lam_rest < 912.8 * u.AA))[0]
        tau_total[sel] = tau_total[anchor]

        # Convert optical depth to normalized flux on a deep copy of the input
        model = copy.deepcopy(spec)
        model.flux = np.exp(-1. * tau_total).value

        if smooth > 0:
            model.gauss_smooth(smooth)
        return model
Example #5
0
    def update_model(self):
        '''Recompute the absorption model from all LLS and forest lines.

        Rebuilds self.lls_model and self.full_model.flux from the systems in
        self.abssys_widg and the lines in self.all_forest, smoothing when
        self.smooth is positive, then pushes the model to the spectrum widget.
        '''
        if len(self.abssys_widg.all_abssys) == 0:
            # Nothing to model: clear state and bail out
            self.lls_model = None
            self.spec_widg.model = None
            return

        disp = self.full_model.dispersion
        total_tau = np.zeros(len(self.full_model.flux))

        # Opacity from each LLS: Lyman-limit continuum plus Lyman series
        for lls in self.abssys_widg.all_abssys:
            lam_rest = disp / (lls.zabs + 1)
            e_phot = lam_rest.to(u.eV, equivalencies=u.spectral())
            tau = (10.**lls.NHI / u.cm**2) * xatomi.photo_cross(1, 1, e_phot)
            tau = tau + xsv.voigt_model(disp, lls.lls_lines, flg_ret=2)

            # Kludge: flatten the pixels just redward of the limit
            anchor = np.argmin(np.fabs(lam_rest - 911.3 * u.AA))
            sel = np.where((lam_rest > 911.5 * u.AA)
                           & (lam_rest < 912.8 * u.AA))[0]
            tau[sel] = tau[anchor]
            total_tau += tau

        # Opacity from the forest lines
        for forest in self.all_forest:
            total_tau += xsv.voigt_model(disp, forest.lines, flg_ret=2)

        # Optical depth -> flux, with optional PSF smoothing
        flux = np.exp(-1. * total_tau)
        if self.smooth > 0:
            self.lls_model = lsc.convolve_psf(flux, self.smooth)
        else:
            self.lls_model = flux

        # Apply the continuum and hand the model to the spectrum widget
        self.full_model.flux = self.lls_model * self.continuum.flux
        self.spec_widg.model = self.full_model
Example #6
0
    def update_model(self):
        '''Refresh the LLS + forest absorption model shown in the GUI.

        Clears the model when no absorption systems are present; otherwise
        sums optical depths over every LLS and forest component, converts to
        flux (optionally PSF-smoothed), and updates self.full_model.
        '''
        if len(self.abssys_widg.all_abssys) == 0:
            self.lls_model = None
            self.spec_widg.model = None
            return

        disp = self.full_model.dispersion

        def _lls_tau(lls):
            # Continuum (photoionization) opacity in the absorber rest frame
            rest = disp / (lls.zabs + 1)
            engy = rest.to(u.eV, equivalencies=u.spectral())
            t = (10.**lls.NHI / u.cm**2) * xatomi.photo_cross(1, 1, engy)
            # Plus the Lyman-series lines
            t = t + xsv.voigt_model(disp, lls.lls_lines, flg_ret=2)
            # Kludge around the limit: copy the ~911.3 A value redward
            near = np.argmin(np.fabs(rest - 911.3 * u.AA))
            band = np.where((rest > 911.5 * u.AA) & (rest < 912.8 * u.AA))[0]
            t[band] = t[near]
            return t

        tau_sum = np.zeros(len(self.full_model.flux))
        for lls in self.abssys_widg.all_abssys:
            tau_sum += _lls_tau(lls)
        for forest in self.all_forest:
            tau_sum += xsv.voigt_model(disp, forest.lines, flg_ret=2)

        flux = np.exp(-1. * tau_sum)
        if self.smooth > 0:
            self.lls_model = lsc.convolve_psf(flux, self.smooth)
        else:
            self.lls_model = flux

        self.full_model.flux = self.lls_model * self.continuum.flux
        self.spec_widg.model = self.full_model
Example #7
0
    def teff_ll(self, z912, zem, N_eval=5000, cosmo=None):
        """ Calculate teff_LL 
        Effective opacity from LL absorption at z912 from zem

        Builds an N_eval x N_eval grid in (log10 N_HI, z), evaluates
        f(N,X) * (1 - exp(-tau)) on it, integrates over N, then accumulates
        in z (from each zval up to zem) to produce the running effective
        opacity.

        Parameters:
        z912: float
          Redshift for evaluation
        zem: float
          Redshift of source
        cosmo: astropy.cosmology (None)
          Cosmological model to adopt (as needed)
        N_eval: int (5000)
          Discretization parameter

        Returns:
        zval, teff_LL: array
          z values and Effective opacity from LL absorption from z912 to zem

        JXP 10 Nov 2014
        """
        # Imports
        from astropy import constants as const

        # NHI array: log10 N_HI spans 11.5 .. 22, evenly spaced
        lgNval = 11.5 + 10.5 * np.arange(N_eval) / (
            N_eval - 1.)  #; This is base 10 [Max at 22]
        dlgN = lgNval[1] - lgNval[0]
        Nval = 10.**lgNval

        #; z array: evenly spaced from z912 to zem
        zval = z912 + (zem - z912) * np.arange(N_eval) / (N_eval - 1.)
        dz = np.fabs(zval[1] - zval[0])

        teff_LL = np.zeros(N_eval)

        # dXdz: absorption-distance Jacobian at each zval
        dXdz = igmu.cosm_xz(zval, cosmo=cosmo, flg=1)
        #if keyword_set(FNZ) then dXdz = replicate(1.,N_eval)

        # Evaluate f(N,X): express the z offsets from zem as velocities
        velo = (zval - zem) / (1 + zem) * (const.c.cgs.value / 1e5
                                           )  # Kludge for eval [km/s]

        # NOTE(review): presumably self.eval returns log10 f(N,X) on the
        # (lgNval, velo) grid -- confirm against the eval() implementation
        log_fnX = self.eval(lgNval, zem, vel_array=velo)
        # Convert f(N,X) -> f(N,z) via the Jacobian (added in log space)
        log_fnz = log_fnX + np.outer(np.ones(N_eval), np.log10(dXdz))

        # Evaluate tau(z,N): Rydberg energy redshifted to each zval
        teff_engy = (const.Ryd.to(u.eV, equivalencies=u.spectral()) /
                     ((1 + zval) / (1 + zem)))
        sigma_z = xai.photo_cross(1, 1, teff_engy)
        #xdb.set_trace()
        #sigma_z = teff_cross * ((1+zval)/(1+zem))**(2.75)  # Not exact but close
        # tau_zN[i, j] = Nval[i] * sigma_z[j]: rows index N, columns index z
        tau_zN = np.outer(Nval, sigma_z)

        # Integrand: f(N,z) * (1 - e^-tau)
        intg = 10.**(log_fnz) * (1. - np.exp(-1. * tau_zN))

        # Sum
        sumz_first = False
        if sumz_first == False:
            #; Sum in N first: the Nval weight comes from the change of
            #; variable dN = N ln(10) dlgN
            N_summed = np.sum(intg * np.outer(Nval, np.ones(N_eval)),
                              0) * dlgN * np.log(10.)
            #xdb.set_trace()
            # Sum in z: reversed cumulative sum, so teff_LL[i] integrates
            # the contributions from zval[i] up to zem
            teff_LL = (np.cumsum(N_summed[::-1]))[::-1] * dz
        #xdb.set_trace()

        # Debug
        debug = False
        if debug == True:
            #        x_splot, lgNval, alog10(10.d^(log_fnX) * dxdz * dz * Nval), /bloc
            #        x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            #     printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            #     writecol, 'debug_file'+strtrim(qq,2)+'.dat', $
            #               lgNval, restEW, log_fnX
            xdb.set_trace()
        # Return
        return zval, teff_LL
Example #8
0
    def teff_ll(self, z912, zem, N_eval=5000, cosmo=None):
        """ Calculate teff_LL 
        Effective opacity from LL absorption at z912 from zem

        Builds an N_eval x N_eval grid in (log10 N_HI, z), evaluates
        f(N,X) * (1 - exp(-tau)) on it, integrates over N, then accumulates
        in z (from each zval up to zem) to produce the running effective
        opacity.

        Parameters:
        z912: float
          Redshift for evaluation
        zem: float
          Redshift of source
        cosmo: astropy.cosmology (None)
          Cosmological model to adopt (as needed)
        N_eval: int (5000)
          Discretization parameter

        Returns:
        zval, teff_LL: array
          z values and Effective opacity from LL absorption from z912 to zem

        JXP 10 Nov 2014
        """
        # Imports
        from astropy import constants as const

        # NHI array: log10 N_HI spans 11.5 .. 22, evenly spaced
        lgNval = 11.5 + 10.5*np.arange(N_eval)/(N_eval-1.) #; This is base 10 [Max at 22]
        dlgN = lgNval[1]-lgNval[0]
        Nval = 10.**lgNval

        #; z array: evenly spaced from z912 to zem
        zval = z912 + (zem-z912)*np.arange(N_eval)/(N_eval-1.)
        dz = np.fabs(zval[1]-zval[0])

        teff_LL = np.zeros(N_eval)

        # dXdz: absorption-distance Jacobian at each zval
        dXdz = igmu.cosm_xz(zval, cosmo=cosmo, flg=1) 
        #if keyword_set(FNZ) then dXdz = replicate(1.,N_eval)

        # Evaluate f(N,X): express the z offsets from zem as velocities
        velo = (zval-zem)/(1+zem) * (const.c.cgs.value/1e5) # Kludge for eval [km/s]

        # NOTE(review): presumably self.eval returns log10 f(N,X) on the
        # (lgNval, velo) grid -- confirm against the eval() implementation
        log_fnX = self.eval(lgNval, zem, vel_array=velo)  
        # Convert f(N,X) -> f(N,z) via the Jacobian (added in log space)
        log_fnz = log_fnX + np.outer(np.ones(N_eval), np.log10(dXdz))

        # Evaluate tau(z,N): Rydberg energy redshifted to each zval
        teff_engy = (const.Ryd.to(u.eV,equivalencies=u.spectral()) /
                     ((1+zval)/(1+zem)) )
        sigma_z = xai.photo_cross(1,1,teff_engy)
        #xdb.set_trace()
        #sigma_z = teff_cross * ((1+zval)/(1+zem))**(2.75)  # Not exact but close
        # tau_zN[i, j] = Nval[i] * sigma_z[j]: rows index N, columns index z
        tau_zN = np.outer(Nval, sigma_z)

        # Integrand: f(N,z) * (1 - e^-tau)
        intg = 10.**(log_fnz) * (1. - np.exp(-1.*tau_zN))

        # Sum
        sumz_first = False
        if sumz_first == False:
            #; Sum in N first: the Nval weight comes from the change of
            #; variable dN = N ln(10) dlgN
            N_summed = np.sum(intg * np.outer(Nval, np.ones(N_eval)),  0) * dlgN * np.log(10.)
            #xdb.set_trace()
            # Sum in z: reversed cumulative sum, so teff_LL[i] integrates
            # the contributions from zval[i] up to zem
            teff_LL = (np.cumsum(N_summed[::-1]))[::-1] * dz 
        #xdb.set_trace()

        # Debug
        debug=False
        if debug == True:
            #        x_splot, lgNval, alog10(10.d^(log_fnX) * dxdz * dz * Nval), /bloc
            #        x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            #     printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            #     writecol, 'debug_file'+strtrim(qq,2)+'.dat', $
            #               lgNval, restEW, log_fnX
            xdb.set_trace()
        # Return
        return zval, teff_LL