Example #1
def test_dxdz():
    # Default cosmology (Vanilla)
    Xz = cosm_xz(3.)
    assert np.isclose(Xz, 7.68841320742732)
    dxdz = cosm_xz(3., flg_return=1)
    assert np.isclose(dxdz, 3.5847294011983)
    # Open
    copen = LambdaCDM(H0=70., Om0=0.3, Ode0=0.)
    dxdz = cosm_xz(3., cosmo=copen, flg_return=1)
    assert np.isclose(dxdz, 2.90092902756)
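The reference values above follow from the definition of the absorption path length, dX/dz = (1+z)^2 H0/H(z). Below is a minimal cross-check using astropy alone; it is a sketch that assumes cosm_xz(z, flg_return=1) returns exactly this quantity, and small offsets (at the ~0.1% level) can arise if the package's default cosmology includes radiation or neutrino terms.

import numpy as np
from astropy.cosmology import FlatLambdaCDM

cosmo = FlatLambdaCDM(H0=70., Om0=0.3)              # the "Vanilla" cosmology above
z = 3.
ratio = (cosmo.H0 / cosmo.H(z)).decompose().value   # H0 / H(z), dimensionless
dxdz = (1. + z)**2 * ratio
print(dxdz)   # ~3.59, close to the 3.5847 quoted in the test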
Example #2
File: dla.py  Project: yzhenggit/pyigm
def lX(z, extrap=False, calc_lz=False):
    """ Returns l(X) from a preferred data source
    Currently Sanchez-Ramirez+16 with smoothing and interpolation

    Parameters
    ----------
    z : float or ndarray
      Redshift
    extrap : bool, optional
      If True, take closest value (i.e. extrapolate constant from each edge)
    calc_lz : bool, optional
      Calculate l(z) instead

    Returns
    -------
    lX : float or ndarray
      Float if z is a float
      Returns 0 if beyond the interpolated range
    """
    # Input
    if isinstance(z, float):
        z = np.array([z])
        flg_float = True
    else:
        flg_float = False

    # Sanchez-Ramirez et al. 2016 (Vanilla cosmology:  0.3, 0.7, 70)
    cosmo = cosmology.core.FlatLambdaCDM(70., 0.3)
    tab7_fil = pyigm_path + '/data/DLA/XQ-100/sramirez16_table7.dat'
    sr16_tab7 = Table.read(tab7_fil, format='ascii')

    # Smooth
    lxmed = medfilt(sr16_tab7['lx'], 21)

    # Interpolate and evaluate
    flX = interpolate.interp1d(sr16_tab7['z'],
                               lxmed,
                               bounds_error=False,
                               fill_value=0.)
    eval = flX(z)

    # Extrapolate?
    if extrap:
        # Low
        zmin = np.min(sr16_tab7['z'])
        lowz = z <= zmin
        eval[lowz] = lxmed[0]
        # High
        zmax = np.max(sr16_tab7['z'])
        hiz = z >= zmax
        eval[hiz] = lxmed[-1]
    # l(z)?
    if calc_lz:
        dXdz = pyigmu.cosm_xz(z, cosmo=cosmo, flg_return=1)
        eval = eval * dXdz
    # Return
    if flg_float:
        return eval[0]
    else:
        return eval
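A hypothetical call pattern for this helper (a sketch only; it assumes pyigm and its bundled XQ-100 table are installed and that the module-level imports of dla.py are in scope):

import numpy as np

lX_single = lX(3.0)                               # l(X) at one redshift
zgrid = np.linspace(1.5, 5.0, 8)
lz_grid = lX(zgrid, extrap=True, calc_lz=True)    # l(z) = l(X) * dX/dz, edges extrapolated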
Example #3
    def __find_dXtot__(self, zbins, calc_Dz=False):
        """ Calculate DX in zbins
        Parameters
        ----------
        zbins : list
        calc_Dz : bool, optional
          Return Dztot instead of DXtot

        Returns
        -------
        dXtot : ndarray
          dX for the full survey

        """
        # get z, g(z)
        z, gz = self.calculate_gz()
        dz = z[1] - z[0]
        #
        if not calc_Dz:
            dXdz = pyigmu.cosm_xz(z, cosmo=self.cosmo, flg_return=1)
        else:
            dXdz = 1.

        dXtot = np.zeros(len(zbins) - 1)
        for kk in range(len(zbins) - 1):
            # the indices of values within the redshift range
            idx = np.where((z >= zbins[kk]) & (z < zbins[kk + 1]))
            dXtot[kk] = np.sum((gz * dz * dXdz)[idx])

        return dXtot
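In effect this discretizes the survey path per redshift bin, dXtot_k ~ sum_i g(z_i) * (dX/dz)|_{z_i} * dz for z_k <= z_i < z_{k+1} (with dX/dz replaced by 1 when calc_Dz=True), where g(z) is the sightline coverage returned by calculate_gz(). A hypothetical call, assuming survey is an instance of the class this method belongs to:

zbins = [2.0, 2.5, 3.0, 3.5]
dXtot = survey.__find_dXtot__(zbins)                  # absorption path DX per bin
dztot = survey.__find_dXtot__(zbins, calc_Dz=True)    # redshift path Dz per bin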
Example #4
def dXdz(outfil='Figures/dXdz.pdf'):
    """ Plot dXdz vs. z
    """
    # z
    zval = np.linspace(1., 5, 100)

    # dX/dz
    dXdz = pyiu.cosm_xz(zval, cosmo=Planck15, flg_return=1)

    # Start the plot
    xmnx = (1., 5)
    pp = PdfPages(outfil)
    fig = plt.figure(figsize=(8.0, 5.0))

    plt.clf()
    gs = gridspec.GridSpec(1, 1)

    # Lya line
    ax = plt.subplot(gs[0])
    #ax.xaxis.set_minor_locator(plt.MultipleLocator(0.5))
    #ax.xaxis.set_major_locator(plt.MultipleLocator(20.))
    #ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1))
    #ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
    ax.set_xlim(xmnx)
    ax.set_ylim(0., 5)
    ax.set_ylabel('dX/dz')
    ax.set_xlabel('z')

    lw = 2.
    # Data
    ax.plot(zval, dXdz, 'k', linewidth=lw)  #, label='SDSS QSOs (z=4)')

    # Label
    csz = 17
    #ax.text(0.10, 0.80, 'HST/WFC3: QSO spectrum (z~2)', color='black',
    #        transform=ax.transAxes, size=csz, ha='left')
    xputils.set_fontsize(ax, 17.)
    # Layout and save
    print('Writing {:s}'.format(outfil))
    plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.4)
    plt.subplots_adjust(hspace=0)
    pp.savefig(bbox_inches='tight')
    plt.close()
    # Finish
    pp.close()
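A quick numerical check of the curve at the plot limits (hypothetical snippet; pyiu is assumed to alias pyigm.utils, the module that provides cosm_xz):

import numpy as np
from astropy.cosmology import Planck15
import pyigm.utils as pyiu    # assumed to be the module aliased as pyiu above

print(pyiu.cosm_xz(np.array([1., 5.]), cosmo=Planck15, flg_return=1))  # dX/dz at z=1 and z=5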
Example #5
def dXdz(outfil='Figures/dXdz.pdf'):
    """ Plot dXdz vs. z
    """
    # z
    zval = np.linspace(1., 5, 100)

    # dX/dz
    dXdz = pyiu.cosm_xz(zval, cosmo=Planck15, flg_return=1)

    # Start the plot
    xmnx = (1., 5)
    pp = PdfPages(outfil)
    fig = plt.figure(figsize=(8.0, 5.0))

    plt.clf()
    gs = gridspec.GridSpec(1,1)

    # Lya line
    ax = plt.subplot(gs[0])
    #ax.xaxis.set_minor_locator(plt.MultipleLocator(0.5))
    #ax.xaxis.set_major_locator(plt.MultipleLocator(20.))
    #ax.yaxis.set_minor_locator(plt.MultipleLocator(0.1))
    #ax.yaxis.set_major_locator(plt.MultipleLocator(0.2))
    ax.set_xlim(xmnx)
    ax.set_ylim(0., 5)
    ax.set_ylabel('dX/dz')
    ax.set_xlabel('z')

    lw = 2.
    # Data
    ax.plot(zval, dXdz, 'k', linewidth=lw)#, label='SDSS QSOs (z=4)')

    # Label
    csz = 17
    #ax.text(0.10, 0.80, 'HST/WFC3: QSO spectrum (z~2)', color='black',
    #        transform=ax.transAxes, size=csz, ha='left') 
    xputils.set_fontsize(ax, 17.)
    # Layout and save
    print('Writing {:s}'.format(outfil))
    plt.tight_layout(pad=0.2,h_pad=0.0,w_pad=0.4)
    plt.subplots_adjust(hspace=0)
    pp.savefig(bbox_inches='tight')
    plt.close()
    # Finish
    pp.close()
Example #6
    def evaluate(self, NHI, z, vel_array=None, cosmo=None):
        """ Evaluate the f(N,X) model at a set of NHI values

        Parameters
        ----------
        NHI : array
          log NHI values
        z : float or array
          Redshift for evaluation
        vel_array : ndarray, optional
          Velocities relative to z
        cosmo : astropy.cosmology, optional


        Returns
        -------
        log_fNX : float, array, or 2D array
          f(NHI,X)[z] values
          Float if given one NHI,z value each. Otherwise 2D array
          If 2D, it is [NHI,z] on the axes

        """
        # Tuple?
        if isinstance(NHI, tuple):  # All values packed into NHI parameter
            z = NHI[1]
            NHI = NHI[0]
            flg_1D = 1
        else:  # NHI and z separate
            flg_1D = 0

        # NHI
        if isiterable(NHI):
            NHI = np.array(NHI)  # Insist on array
        else:
            NHI = np.array([NHI])
        lenNHI = len(NHI)

        # Redshift
        if vel_array is not None:
            z_val = z + (1 + z) * vel_array / (const.c.to('km/s').value)
        else:
            z_val = z
        if isiterable(z_val):
            z_val = np.array(z_val)
        else:
            z_val = np.array([z_val])
        lenz = len(z_val)

        # Check on zmnx
        bad = np.where((z_val < self.zmnx[0]) | (z_val > self.zmnx[1]))[0]
        if len(bad) > 0:
            raise ValueError(
                'fN.model.eval: z={:g} not within self.zmnx={:g},{:g}'.format(
                    z_val[bad[0]], *(self.zmnx)))

        if self.mtype == 'Hspline':
            # Evaluate without z dependence
            log_fNX = self.model.__call__(NHI)

            # Evaluate
            if (not isiterable(z_val)) | (flg_1D == 1):  # scalar or 1D array wanted
                log_fNX += self.gamma * np.log10(
                    (1 + z_val) / (1 + self.zpivot))
            else:
                # Matrix algebra to speed things up
                lgNHI_grid = np.outer(log_fNX, np.ones(len(z_val)))
                lenfX = len(log_fNX)
                #
                z_grid1 = 10**(np.outer(
                    np.ones(lenfX) * self.gamma,
                    np.log10(1 + z_val)))  #; (1+z)^gamma
                z_grid2 = np.outer(
                    np.ones(lenfX) * ((1. / (1 + self.zpivot))**self.gamma),
                    np.ones(len(z_val)))
                log_fNX = lgNHI_grid + np.log10(z_grid1 * z_grid2)

        # Gamma function (e.g. Inoue+14)
        elif self.mtype == 'Gamma':
            # Setup the parameters
            Nl, Nu, Nc, bval = [
                self.param['common'][key]
                for key in ['Nl', 'Nu', 'Nc', 'bval']
            ]
            # gNHI
            Bi = self.param['Bi']
            ncomp = len(Bi)
            log_gN = np.zeros((lenNHI, ncomp))
            beta = [self.param[itype]['beta'] for itype in ['LAF', 'DLA']]
            for kk in range(ncomp):
                log_gN[:, kk] += (np.log10(Bi[kk]) + NHI * (-1 * beta[kk]) +
                                  (-1. * 10.**(NHI - Nc) / np.log(10))
                                  )  # log10 [ exp(-NHI/Nc) ]
            # f(z)
            fz = np.zeros((lenz, 2))
            # Loop on NHI
            itypes = ['LAF', 'DLA']
            for kk in range(ncomp):
                if kk == 0:  # LyaF
                    zcuts = self.param['LAF']['zcuts']
                    gamma = self.param['LAF']['gamma']
                else:  # DLA
                    zcuts = self.param['DLA']['zcuts']
                    gamma = self.param['DLA']['gamma']
                zcuts = [0] + zcuts + [999.]
                Aval = self.param[itypes[kk]]['Aval']
                # Cut on z
                for ii in range(1, len(zcuts)):
                    izcut = np.where((z_val < zcuts[ii])
                                     & (z_val > zcuts[ii - 1]))[0]
                    liz = len(izcut)
                    # Evaluate (at last!)
                    if (ii <= 2) & (liz > 0):
                        fz[izcut, kk] = Aval * ((1 + z_val[izcut]) /
                                                (1 + zcuts[1]))**gamma[ii - 1]
                    elif (ii == 3) & (liz > 0):
                        fz[izcut, kk] = Aval * (((1 + zcuts[2]) /
                                                 (1 + zcuts[1]))**gamma[ii - 2] *
                                                ((1 + z_val[izcut]) /
                                                 (1 + zcuts[2]))**gamma[ii - 1])
            # dX/dz
            if cosmo is None:
                cosmo = self.cosmo
            dXdz = pyigmu.cosm_xz(z_val, cosmo=cosmo, flg_return=1)

            # Final steps
            if flg_1D == 1:
                fnX = np.sum(fz * 10.**log_gN, 1) / dXdz
                log_fNX = np.log10(fnX)
            else:
                # Generate the matrix
                fnz = np.zeros((lenNHI, lenz))
                for kk in range(ncomp):
                    fnz += np.outer(10.**log_gN[:, kk], fz[:, kk])
                # Finish up
                log_fNX = np.log10(fnz) - np.log10(
                    np.outer(np.ones(lenNHI), dXdz))
        elif self.mtype == 'PowerLaw':
            log_fNX = self.param['B'] + self.param['beta'] * NHI
            #
            if (not isiterable(z_val)) | (flg_1D == 1):  # scalar or 1D array wanted
                log_fNX += self.gamma * np.log10(
                    (1 + z_val) / (1 + self.zpivot))
            else:
                lgNHI_grid = np.outer(log_fNX, np.ones(len(z_val)))
                lenfX = len(log_fNX)
                #
                z_grid1 = 10**(np.outer(
                    np.ones(lenfX) * self.gamma,
                    np.log10(1 + z_val)))  #; (1+z)^gamma
                z_grid2 = np.outer(
                    np.ones(lenfX) * ((1. / (1 + self.zpivot))**self.gamma),
                    np.ones(len(z_val)))
                log_fNX = lgNHI_grid + np.log10(z_grid1 * z_grid2)
        else:
            raise ValueError(
                'fN.model: Not ready for this model type {:s}'.format(
                    self.mtype))

        # Return
        if (lenNHI + lenz) == 2:
            return log_fNX.flatten()[0]  # scalar
        else:
            return log_fNX
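A hypothetical evaluation, assuming fN_model is an initialized FNModel (Hspline or Gamma type) whose zmnx range covers the requested redshifts:

import numpy as np

NHI_grid = np.linspace(12., 22., 51)                 # log NHI values
log_fNX = fN_model.evaluate(NHI_grid, 2.5)           # 2D array, shape (len(NHI_grid), 1)
pairs_1d = fN_model.evaluate((NHI_grid, 2.5 * np.ones(51)))  # packed (NHI, z) pairs -> 1D array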
Example #7
def lyman_limit(fN_model, z912, zem, N_eval=5000, cosmo=None, debug=False):
    """ Calculate teff_LL

    Effective opacity from LL absorption at z912 from zem

    Parameters
    ----------
    fN_model : FNModel
      f(N) model
    z912 : float
      Redshift for evaluation
    zem : float
      Redshift of source
    cosmo : astropy.cosmology, optional
      Cosmological model to adopt (as needed)
    N_eval : int, optional
      Discretization parameter (5000)
    debug : bool, optional

    Returns
    -------
    zval : array
    teff_LL : array
      z values and Effective opacity from LL absorption from z912 to zem
    """
    if not isinstance(fN_model,FNModel):
        raise IOError("Improper model")
    # NHI array
    lgNval = 11.5 + 10.5*np.arange(N_eval)/(N_eval-1.)  #; This is base 10 [Max at 22]
    dlgN = lgNval[1]-lgNval[0]
    Nval = 10.**lgNval

    # z array
    zval = z912 + (zem-z912)*np.arange(N_eval)/(N_eval-1.)
    dz = np.fabs(zval[1]-zval[0])

    teff_LL = np.zeros(N_eval)

    # dXdz
    dXdz = pyigmu.cosm_xz(zval, cosmo=cosmo, flg_return=1)

    # Evaluate f(N,X)
    velo = (zval-zem)/(1+zem) * (const.c.cgs.value/1e5)  # Kludge for eval [km/s]

    log_fnX = fN_model.evaluate(lgNval, zem, cosmo=cosmo, vel_array=velo)
    log_fnz = log_fnX + np.outer(np.ones(N_eval), np.log10(dXdz))

    # Evaluate tau(z,N)
    teff_engy = (const.Ryd.to(u.eV, equivalencies=u.spectral()) /
                 ((1+zval)/(1+zem)))
    sigma_z = ltaa.photo_cross(1, 1, teff_engy)
    tau_zN = np.outer(Nval, sigma_z)

    # Integrand
    intg = 10.**(log_fnz) * (1. - np.exp(-1.*tau_zN))

    # Sum
    sumz_first = False
    if sumz_first is False:
        #; Sum in N first
        N_summed = np.sum(intg * np.outer(Nval, np.ones(N_eval)),  0) * dlgN * np.log(10.)
        # Sum in z
        teff_LL = (np.cumsum(N_summed[::-1]))[::-1] * dz

    # Debug
    if debug is True:
        pdb.set_trace()
    # Return
    return zval, teff_LL
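The nested sums are a discrete version of the usual Lyman-limit effective opacity, roughly tau_eff_LL(z) ~ integral from z to zem of dz' integral dNHI f(NHI, X) (dX/dz') [1 - exp(-NHI * sigma_ph)], with sigma_ph the photoionization cross-section at the redshifted Lyman-limit energy. A hypothetical call (sketch; fN_model is assumed to be a valid FNModel instance):

zval, teff_LL = lyman_limit(fN_model, 3.0, 3.5)    # opacity from z912=3.0 up to zem=3.5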
Example #8
def lyman_ew(ilambda, zem, fN_model, NHI_MIN=11.5, NHI_MAX=22.0, N_eval=5000,
             bval=24., cosmo=None, debug=False, cumul=None,
             verbose=False, EW_spline=None, wrest=None):
    """ tau effective from HI Lyman series absorption

    Parameters
    ----------
    ilambda : float
        Observed wavelength (Ang)
    zem : float
        Emission redshift of the source [sets which Lyman lines are included]
    fN_model : FNModel
    NHI_MIN : float, optional
         -- Minimum log HI column for integration [default = 11.5]
    NHI_MAX : float, optional
         -- Maximum log HI column for integration [default = 22.0]
    N_eval : int, optional
      Number of NHI evaluations
    bval : float
         -- Characteristic Doppler parameter for the Lya forest
         -- [Options: 24, 35 km/s]
    cosmo : astropy.cosmology (None)
         -- Cosmological model to adopt (as needed)
    cumul : List of cumulative sums
         -- Recorded only if cumul is not None
    EW_spline : spline, optional
      Speeds up execution if input
    wrest : Quantity array, optional
      Rest wavelengths of the HI Lyman series.  Speeds up execution if input

    Returns
    -------
    teff : float
      Total effective opacity of all lines contributing

    ToDo:
      1. Parallelize the Lyman loop
    """
    # Cosmology
    if cosmo is None:
        cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    # Lambda
    if not isinstance(ilambda, float):
        raise ValueError('tau_eff: ilambda must be a float for now')
    Lambda = ilambda
    if not isinstance(Lambda,u.quantity.Quantity):
        Lambda = Lambda * u.AA # Ang

    # Read in EW spline (if needed)
    if EW_spline is None:
        if int(bval) == 24:
            EW_FIL = pyigm_path+'/data/fN/EW_SPLINE_b24.yml'
            with open(EW_FIL, 'r') as infile:
                EW_spline = yaml.load(infile, Loader=yaml.Loader)  # dict from mk_ew_lyman_spline
        else:
            raise ValueError('tau_eff: Not ready for this bvalue %g' % bval)

    # Lines
    if wrest is None:
        HI = LineList('HI')
        wrest = HI._data['wrest']

    # Find the lines
    gd_Lyman = wrest[(Lambda/(1+zem)) < wrest]
    nlyman = len(gd_Lyman)
    if nlyman == 0:
        if verbose:
            print('igm.tau_eff: No Lyman lines covered at this wavelength')
        return 0

    # N_HI grid
    lgNval = NHI_MIN + (NHI_MAX-NHI_MIN)*np.arange(N_eval)/(N_eval-1) # Base 10
    dlgN = lgNval[1]-lgNval[0]
    Nval = 10.**lgNval
    teff_lyman = np.zeros(nlyman)

    # For cumulative
    if cumul is not None:
        cumul.append(lgNval)

    # Loop on the lines
    for qq, line in enumerate(gd_Lyman): # Would be great to do this in parallel...
                             # (Can pack together and should)
        # Redshift
        zeval = ((Lambda / line) - 1).value
        if zeval < 0.:
            teff_lyman[qq] = 0.
            continue
        # dxdz
        dxdz = pyigmu.cosm_xz(zeval, cosmo=cosmo, flg_return=1)

        # Get EW values (could pack these all together)
        idx = np.where(EW_spline['wrest']*u.AA == line)[0]
        if len(idx) != 1:
            raise ValueError('tau_eff: Line %g not included or over included?!' % line)
        restEW = interpolate.splev(lgNval, EW_spline['tck'][idx[0]], der=0)

        # dz
        dz = ((restEW*u.AA) * (1+zeval) / line).value

        # Evaluate f(N,X) at zeval
        log_fnX = fN_model.evaluate(lgNval, zeval, cosmo=cosmo).flatten()

        # Sum
        intgrnd = 10.**(log_fnX) * dxdz * dz * Nval
        teff_lyman[qq] = np.sum(intgrnd) * dlgN * np.log(10.)
        if cumul is not None:
            cumul.append(np.cumsum(intgrnd) * dlgN * np.log(10.))

        # Debug
        if debug:
            try:
                from xastropy.xutils import xdebug as xdb
            except ImportError:
                break
            xdb.xplot(lgNval, np.log10(10.**(log_fnX) * dxdz * dz * Nval))
            #x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            #printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            #writecol, 'debug_file'+strtrim(qq,2)+'.dat',  lgNval, restEW, log_fnX
            xdb.set_trace()

    # Return
    return np.sum(teff_lyman)
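Per covered Lyman line, the quantity summed is roughly integral dNHI f(NHI, X) (dX/dz) (1 + z_line) W_r(NHI) / lambda_rest, with z_line = lambda_obs/lambda_rest - 1 and W_r(NHI) the rest equivalent width from the spline. A hypothetical call (sketch; fN_model is assumed to be a valid FNModel instance):

teff = lyman_ew(4000., 3.0, fN_model)    # observed wavelength in Ang, zem, f(N) model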
Example #9
File: mockforest.py  Project: ninoc/pyigm
def monte_HIcomp( zmnx, fN_model, NHI_mnx=None, dz=0.001, cosmo=None,
    rstate=None, seed=None):
    """ Generate a Monte Carlo draw of HI components (z,N,b)

    Parameters
    ----------
    zmnx : tuple (float,float)
      Redshift range for analysis.
      Should correspond to Lya
    fN_model : fN_Model class
    NHI_mnx : tuple, optional (float,float)
      Range of logNHI for linelist
    dz : float, optional
      Step size for z discretization
    cosmo : astropy Cosmology, optional
    rstate : RandomState, optional
      For random number generation
    seed : int, optional
      Seed for random numbers

    Returns
    -------
    HI_comps : list
      List of HI components drawn for the given sightline
    """
    # Init
    # NHI range
    if NHI_mnx is None:
        NHI_mnx = (12., 22.)
    # seed
    if rstate is None:
        if seed is None:
            seed = 12345
        rstate = np.random.RandomState(seed)

    # Check fN_model type
    if fN_model.fN_mtype != 'Hspline':
        raise ValueError('monte_HIlines: Can only handle Hspline so far.')

    # Calculate lX at pivot
    lX, cum_lX, lX_NHI = fN_model.calculate_lox(fN_model.zpivot,
        NHI_mnx[0],NHI_max=NHI_mnx[1], cumul=True)

    # Interpolator for NHI distribution (assumed independent of redshift)
    #   Uses lowest NHI value for the first bit (kludgy but ok)
    invfN = interpolate.interp1d(cum_lX/lX,lX_NHI,bounds_error=False,fill_value=lX_NHI[0])#, kind='quadratic')

    # z evaluation
    zeval = np.arange(zmnx[0], zmnx[1]+dz, dz)

    # Cosmology
    if cosmo is None:
        print('Using a Flat LCDM cosmology: h=0.7, Om=0.3')
        cosmo = cosmology.core.FlatLambdaCDM(70., 0.3)

    # dXdz
    dXdz = pyigmu.cosm_xz(zeval, cosmo=cosmo, flg_return=1)

    # Generate loz
    loz = lX * dXdz * ( (1+zeval)/(1+fN_model.zpivot) )**fN_model.gamma

    # Calculate average number of lines for analysis
    sum_loz = np.cumsum(loz*dz)

    # Interpolator
    #   Uses lowest NHI value for the first bit (kludgy but ok)
    invz = interpolate.interp1d(sum_loz/sum_loz[-1],zeval, bounds_error=False, fill_value=zeval[0])

    # Assume Gaussian stats for number of lines
    nlines = int(np.round(sum_loz[-1] + np.sqrt(sum_loz[-1])*rstate.randn(1)))

    # NHI values
    randNHI = rstate.random_sample(nlines)
    lgNHI = invfN(randNHI)

    # z values
    randz = rstate.random_sample(nlines)
    zval = invz(randz)

    # b values
    randb = rstate.random_sample(nlines)
    bval = dopp_val(randb)

    # Pack em up as a QTable
    HI_comps = QTable([zval, lgNHI, bval], names=('z','lgNHI','bval'))
    return HI_comps
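A hypothetical draw (sketch; fN_model is assumed to be an Hspline-type FNModel, since that is the only fN_mtype handled here):

HI_comps = monte_HIcomp((2.0, 2.5), fN_model, seed=12345)
print(len(HI_comps), HI_comps.colnames)    # number of drawn components; columns 'z', 'lgNHI', 'bval'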
Example #10
File: fnmodel.py  Project: pyigm/pyigm
    def evaluate(self, NHI, z, vel_array=None, cosmo=None):
        """ Evaluate the f(N,X) model at a set of NHI values

        Parameters
        ----------
        NHI : array
          log NHI values
        z : float or array
          Redshift for evaluation
        vel_array : ndarray, optional
          Velocities relative to z
        cosmo : astropy.cosmology, optional


        Returns
        -------
        log_fNX : float, array, or 2D array
          f(NHI,X)[z] values
          Float if given one NHI,z value each. Otherwise 2D array
          If 2D, it is [NHI,z] on the axes

        """
        # Tuple?
        if isinstance(NHI, tuple):  # All values packed into NHI parameter
            z = NHI[1]
            NHI = NHI[0]
            flg_1D = 1
        else:  # NHI and z separate
            flg_1D = 0

        # NHI
        if isiterable(NHI):
            NHI = np.array(NHI)  # Insist on array
        else:
            NHI = np.array([NHI])
        lenNHI = len(NHI)

        # Redshift 
        if vel_array is not None:
            z_val = z + (1+z) * vel_array/(const.c.to('km/s').value)
        else:
            z_val = z
        if isiterable(z_val):
            z_val = np.array(z_val)
        else:
            z_val = np.array([z_val])
        lenz = len(z_val)

        # Check on zmnx
        bad = np.where((z_val < self.zmnx[0]) | (z_val > self.zmnx[1]))[0]
        if len(bad) > 0:
            raise ValueError(
                'fN.model.eval: z={:g} not within self.zmnx={:g},{:g}'.format(
                    z_val[bad[0]], *(self.zmnx)))

        if self.mtype == 'Hspline':
            # Evaluate without z dependence
            log_fNX = self.model.__call__(NHI)

            # Evaluate
            if (not isiterable(z_val)) | (flg_1D == 1):  # scalar or 1D array wanted
                log_fNX += self.gamma * np.log10((1+z_val)/(1+self.zpivot))
            else:
                # Matrix algebra to speed things up
                lgNHI_grid = np.outer(log_fNX, np.ones(len(z_val)))
                lenfX = len(log_fNX)
                #
                z_grid1 = 10**(np.outer(np.ones(lenfX)*self.gamma,
                                        np.log10(1+z_val)))  #; (1+z)^gamma
                z_grid2 = np.outer( np.ones(lenfX)*((1./(1+self.zpivot))**self.gamma), 
                            np.ones(len(z_val)))
                log_fNX = lgNHI_grid + np.log10(z_grid1*z_grid2) 

        # Gamma function (e.g. Inoue+14)
        elif self.mtype == 'Gamma':
            # Setup the parameters
            Nl, Nu, Nc, bval = [self.param['common'][key]
                                for key in ['Nl', 'Nu', 'Nc', 'bval']]
            # gNHI
            Bi = self.param['Bi']
            ncomp = len(Bi)
            log_gN = np.zeros((lenNHI, ncomp))
            beta = [self.param[itype]['beta'] for itype in ['LAF', 'DLA']]
            for kk in range(ncomp):
                log_gN[:, kk] += (np.log10(Bi[kk]) + NHI*(-1 * beta[kk])
                                + (-1. * 10.**(NHI-Nc) / np.log(10)))  # log10 [ exp(-NHI/Nc) ]
            # f(z)
            fz = np.zeros((lenz, 2))
            # Loop on NHI
            itypes = ['LAF', 'DLA']
            for kk in range(ncomp):
                if kk == 0:  # LyaF
                    zcuts = self.param['LAF']['zcuts']
                    gamma = self.param['LAF']['gamma']
                else:        # DLA
                    zcuts = self.param['DLA']['zcuts']
                    gamma = self.param['DLA']['gamma']
                zcuts = [0] + zcuts + [999.]
                Aval = self.param[itypes[kk]]['Aval']
                # Cut on z
                for ii in range(1,len(zcuts)):
                    izcut = np.where( (z_val < zcuts[ii]) &
                                      (z_val > zcuts[ii-1]) )[0]
                    liz = len(izcut)
                    # Evaluate (at last!)
                    if (ii <=2) & (liz > 0):
                        fz[izcut, kk] = Aval * ( (1+z_val[izcut]) /
                                                 (1+zcuts[1]) )**gamma[ii-1]
                    elif (ii == 3) & (liz > 0):
                        fz[izcut, kk] = Aval * ( ( (1+zcuts[2]) /
                                                   (1+zcuts[1]) )**gamma[ii-2] *
                                                    ((1+z_val[izcut]) / (1+zcuts[2]) )**gamma[ii-1] )
            # dX/dz
            if cosmo is None:
                cosmo = self.cosmo
            dXdz = pyigmu.cosm_xz(z_val, cosmo=cosmo, flg_return=1)

            # Final steps
            if flg_1D == 1:
                fnX = np.sum(fz * 10.**log_gN, 1) / dXdz
                log_fNX = np.log10(fnX)
            else: 
                # Generate the matrix
                fnz = np.zeros((lenNHI, lenz))
                for kk in range(ncomp):
                    fnz += np.outer(10.**log_gN[:, kk], fz[:, kk])
                # Finish up
                log_fNX = np.log10(fnz) - np.log10( np.outer(np.ones(lenNHI), dXdz))
        elif self.mtype == 'PowerLaw':
            log_fNX = self.param['B'] + self.param['beta'] * NHI
            #
            if (not isiterable(z_val)) | (flg_1D == 1):  # scalar or 1D array wanted
                log_fNX += self.gamma * np.log10((1+z_val)/(1+self.zpivot))
            else:
                lgNHI_grid = np.outer(log_fNX, np.ones(len(z_val)))
                lenfX = len(log_fNX)
                #
                z_grid1 = 10**(np.outer(np.ones(lenfX)*self.gamma,
                                        np.log10(1+z_val)))  #; (1+z)^gamma
                z_grid2 = np.outer( np.ones(lenfX)*((1./(1+self.zpivot))**self.gamma),
                                    np.ones(len(z_val)))
                log_fNX = lgNHI_grid + np.log10(z_grid1*z_grid2)
        else:
            raise ValueError('fN.model: Not ready for this model type {:s}'.format(self.mtype))

        # Return
        if (lenNHI + lenz) == 2:
            return log_fNX.flatten()[0]  # scalar
        else:
            return log_fNX
Example #11
def monte_HIcomp( zmnx, fN_model, NHI_mnx=None, dz=0.001, cosmo=None,
    rstate=None, seed=None):
    """ Generate a Monte Carlo draw of HI components (z,N,b)

    Parameters
    ----------
    zmnx : tuple (float,float)
      Redshift range for analysis.
      Should correspond to Lya
    fN_model : fN_Model class
    NHI_mnx : tuple, optional (float,float)
      Range of logNHI for linelist
    dz : float, optional
      Step size for z discretization
    cosmo : astropy Cosmology, optional
    rstate : RandomState, optional
      For random number generation
    seed : int, optional
      Seed for random numbers

    Returns
    -------
    HI_comps : list
      List of HI components drawn for the given sightline
    """
    # Init
    # NHI range
    if NHI_mnx is None:
        NHI_mnx = (12., 22.)
    # seed
    if rstate is None:
        if seed is None:
            seed = 12345
        rstate = np.random.RandomState(seed)

    # Check fN_model type
    if fN_model.fN_mtype != 'Hspline':
        raise ValueError('monte_HIlines: Can only handle Hspline so far.')

    # Calculate lX at pivot
    lX, cum_lX, lX_NHI = fN_model.calculate_lox(fN_model.zpivot,
        NHI_mnx[0],NHI_max=NHI_mnx[1], cumul=True)

    # Interpolator for NHI distribution (assumed independent of redshift)
    #   Uses lowest NHI value for the first bit (kludgy but ok)
    invfN = interpolate.interp1d(cum_lX/lX,lX_NHI,bounds_error=False,fill_value=lX_NHI[0])#, kind='quadratic')

    # z evaluation
    zeval = np.arange(zmnx[0], zmnx[1]+dz, dz)

    # Cosmology
    if cosmo is None:
        print('Using a Flat LCDM cosmology: h=0.7, Om=0.3')
        cosmo = cosmology.core.FlatLambdaCDM(70., 0.3)

    # dXdz
    dXdz = pyigmu.cosm_xz(zeval, cosmo=cosmo, flg_return=1)

    # Generate loz
    loz = lX * dXdz * ( (1+zeval)/(1+fN_model.zpivot) )**fN_model.gamma

    # Calculate average number of lines for analysis
    sum_loz = np.cumsum(loz*dz)

    # Interpolator
    #   Uses lowest NHI value for the first bit (kludgy but ok)
    invz = interpolate.interp1d(sum_loz/sum_loz[-1],zeval, bounds_error=False, fill_value=zeval[0])

    # Assume Gaussian stats for number of lines
    nlines = int(np.round(sum_loz[-1] + np.sqrt(sum_loz[-1])*rstate.randn(1)))

    # NHI values
    randNHI = rstate.random_sample(nlines)
    lgNHI = invfN(randNHI)

    # z values
    randz = rstate.random_sample(nlines)
    zval = invz(randz)

    # b values
    randb = rstate.random_sample(nlines)
    bval = dopp_val(randb)

    # Pack em up as a QTable
    HI_comps = QTable([zval, lgNHI, bval], names=('z','lgNHI','bval'))
    return HI_comps
Example #12
File: tau_eff.py  Project: ninoc/pyigm
def lyman_limit(fN_model, z912, zem, N_eval=5000, cosmo=None, debug=False):
    """ Calculate teff_LL

    Effective opacity from LL absorption at z912 from zem

    Parameters
    ----------
    fN_model : FNModel
      f(N) model
    z912 : float
      Redshift for evaluation
    zem : float
      Redshift of source
    cosmo : astropy.cosmology, optional
      Cosmological model to adopt (as needed)
    N_eval : int, optional
      Discretization parameter (5000)
    debug : bool, optional

    Returns
    -------
    zval : array
    teff_LL : array
      z values and Effective opacity from LL absorption from z912 to zem
    """
    if not isinstance(fN_model,FNModel):
        raise IOError("Improper model")
    # NHI array
    lgNval = 11.5 + 10.5*np.arange(N_eval)/(N_eval-1.)  #; This is base 10 [Max at 22]
    dlgN = lgNval[1]-lgNval[0]
    Nval = 10.**lgNval

    # z array
    zval = z912 + (zem-z912)*np.arange(N_eval)/(N_eval-1.)
    dz = np.fabs(zval[1]-zval[0])

    teff_LL = np.zeros(N_eval)

    # dXdz
    dXdz = pyigmu.cosm_xz(zval, cosmo=cosmo, flg_return=1)

    # Evaluate f(N,X)
    velo = (zval-zem)/(1+zem) * (const.c.cgs.value/1e5)  # Kludge for eval [km/s]

    log_fnX = fN_model.evaluate(lgNval, zem, cosmo=cosmo, vel_array=velo)
    log_fnz = log_fnX + np.outer(np.ones(N_eval), np.log10(dXdz))

    # Evaluate tau(z,N)
    teff_engy = (const.Ryd.to(u.eV, equivalencies=u.spectral()) /
                 ((1+zval)/(1+zem)))
    sigma_z = ltaa.photo_cross(1, 1, teff_engy)
    tau_zN = np.outer(Nval, sigma_z)

    # Integrand
    intg = 10.**(log_fnz) * (1. - np.exp(-1.*tau_zN))

    # Sum
    sumz_first = False
    if sumz_first is False:
        #; Sum in N first
        N_summed = np.sum(intg * np.outer(Nval, np.ones(N_eval)),  0) * dlgN * np.log(10.)
        # Sum in z
        teff_LL = (np.cumsum(N_summed[::-1]))[::-1] * dz

    # Debug
    if debug is True:
        pdb.set_trace()
    # Return
    return zval, teff_LL
Example #13
File: tau_eff.py  Project: ninoc/pyigm
def lyman_ew(ilambda, zem, fN_model, NHI_MIN=11.5, NHI_MAX=22.0, N_eval=5000,
             bval=24., cosmo=None, debug=False, cumul=None,
             verbose=False, EW_spline=None, wrest=None):
    """ tau effective from HI Lyman series absorption

    Parameters
    ----------
    ilambda : float
        Observed wavelength (Ang)
    zem : float
        Emission redshift of the source [sets which Lyman lines are included]
    fN_model : FNModel
    NHI_MIN : float, optional
         -- Minimum log HI column for integration [default = 11.5]
    NHI_MAX : float, optional
         -- Maximum log HI column for integration [default = 22.0]
    N_eval : int, optional
      Number of NHI evaluations
    bval : float
         -- Characteristic Doppler parameter for the Lya forest
         -- [Options: 24, 35 km/s]
    cosmo : astropy.cosmology (None)
         -- Cosmological model to adopt (as needed)
    cumul : List of cumulative sums
         -- Recorded only if cumul is not None
    EW_spline : spline, optional
      Speeds up execution if input
    wrest : Quantity array, optional
      Rest wavelengths of the HI Lyman series.  Speeds up execution if input

    Returns
    -------
    teff : float
      Total effective opacity of all lines contributing

    ToDo:
      1. Parallelize the Lyman loop
    """
    # Cosmology
    if cosmo is None:
        cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
    # Lambda
    if not isinstance(ilambda, float):
        raise ValueError('tau_eff: ilambda must be a float for now')
    Lambda = ilambda
    if not isinstance(Lambda,u.quantity.Quantity):
        Lambda = Lambda * u.AA # Ang

    # Read in EW spline (if needed)
    if EW_spline is None:
        if int(bval) == 24:
            EW_FIL = pyigm_path+'/data/fN/EW_SPLINE_b24.yml'
            with open(EW_FIL, 'r') as infile:
                EW_spline = yaml.load(infile, Loader=yaml.Loader)  # dict from mk_ew_lyman_spline
        else:
            raise ValueError('tau_eff: Not ready for this bvalue %g' % bval)

    # Lines
    if wrest is None:
        HI = LineList('HI')
        wrest = HI._data['wrest']

    # Find the lines
    gd_Lyman = wrest[(Lambda/(1+zem)) < wrest]
    nlyman = len(gd_Lyman)
    if nlyman == 0:
        if verbose:
            print('igm.tau_eff: No Lyman lines covered at this wavelength')
        return 0

    # N_HI grid
    lgNval = NHI_MIN + (NHI_MAX-NHI_MIN)*np.arange(N_eval)/(N_eval-1) # Base 10
    dlgN = lgNval[1]-lgNval[0]
    Nval = 10.**lgNval
    teff_lyman = np.zeros(nlyman)

    # For cumulative
    if cumul is not None:
        cumul.append(lgNval)

    # Loop on the lines
    for qq, line in enumerate(gd_Lyman): # Would be great to do this in parallel...
                             # (Can pack together and should)
        # Redshift
        zeval = ((Lambda / line) - 1).value
        if zeval < 0.:
            teff_lyman[qq] = 0.
            continue
        # dxdz
        dxdz = pyigmu.cosm_xz(zeval, cosmo=cosmo, flg_return=1)

        # Get EW values (could pack these all together)
        idx = np.where(EW_spline['wrest']*u.AA == line)[0]
        if len(idx) != 1:
            raise ValueError('tau_eff: Line %g not included or over included?!' % line)
        restEW = interpolate.splev(lgNval, EW_spline['tck'][idx[0]], der=0)

        # dz
        dz = ((restEW*u.AA) * (1+zeval) / line).value

        # Evaluate f(N,X) at zeval
        log_fnX = fN_model.evaluate(lgNval, zeval, cosmo=cosmo).flatten()

        # Sum
        intgrnd = 10.**(log_fnX) * dxdz * dz * Nval
        teff_lyman[qq] = np.sum(intgrnd) * dlgN * np.log(10.)
        if cumul is not None:
            cumul.append(np.cumsum(intgrnd) * dlgN * np.log(10.))

        # Debug
        if debug:
            try:
                from xastropy.xutils import xdebug as xdb
            except ImportError:
                break
            xdb.xplot(lgNval, np.log10(10.**(log_fnX) * dxdz * dz * Nval))
            #x_splot, lgNval, total(10.d^(log_fnX) * dxdz * dz * Nval,/cumul) * dlgN * alog(10.) / teff_lyman[qq], /bloc
            #printcol, lgnval, log_fnx, dz,  alog10(10.d^(log_fnX) * dxdz * dz * Nval)
            #writecol, 'debug_file'+strtrim(qq,2)+'.dat',  lgNval, restEW, log_fnX
            xdb.set_trace()

    # Return
    return np.sum(teff_lyman)