Example #1
def Csp(z0, sigma, zlo, zhi, b1, b2, alpha=1.0, disp=4.0, nowiggle=False, nobroadband=True, noks=False):
  '''
  Cross-spectrum of the photometric and spectroscopic samples for a slice
  [zlo, zhi], evaluated in the z_eff approximation;  optionally damps the BAO
  wiggles (nowiggle='damped', damping scale disp) and dilates wavenumbers by alpha.
  '''
  lochi    = comoving_distance(zlo)
  hichi    = comoving_distance(zhi)

  midz     = np.mean([zlo, zhi])
  
  if nowiggle == 'damped':
    ##  BAO work.
    ks, Ps = Psp(midz, b1, b2)
    ks, Ns = pnw(midz, b1, b2)

    Ps     = interp1d(ks, Ps, kind='linear', copy=True, bounds_error=False, fill_value=0.0)
    Ns     = interp1d(ks, Ns, kind='linear', copy=True, bounds_error=False, fill_value=0.0)

    kp     = np.copy(ks)  ##  Wavenumber in fiducial cosmology.
    ks     = alpha * kp   ##  Wavenumber in true cosmology, kp = (k / alpha).

    if nobroadband:
      Ps     = (Ps(kp) - Ns(kp)) * np.exp(- ks * ks * disp * disp / 2.) + Ns(ks)
      
    else:
      ##  Argument change to broadband term. 
      Ps     = (Ps(kp) - Ns(kp)) * np.exp(- ks * ks * disp * disp / 2.) + Ns(kp)

    ##  Rename to match rest of script.
    ks     = kp

  elif nowiggle:
    ks, Ps = pnw(midz, b1, b2) 

  else:
    ##  Full wiggle.
    ks, Ps = Psp(midz, b1, b2)

  dz      = 0.01
  zs      = np.arange(0.0, 10.0, dz)

  nbar    = dz * np.sum(pz_photo(zs, sigma, z0, zlo, zhi))
  
  zs      = np.arange(zlo, zhi + dz, dz)
  frac    = dz * np.sum(pz_photo(zs, sigma, z0, zlo, zhi))

  Cs      = frac * Ps / (hichi - lochi)

  if noks:
    return  Cs

  else:
    return  ks, Cs
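
##  A toy sketch of the wiggle / no-wiggle split used in the 'damped' branch of
##  Csp above:  the BAO part of the spectrum is suppressed by exp(-k^2 Sigma^2 / 2)
##  and the wavenumbers are dilated by alpha.  The analytic P(k) and Pnw(k) below
##  are assumptions for illustration only, standing in for Psp and pnw.
import numpy as np

kp     = np.logspace(-3., 0., 500)                  ##  Fiducial wavenumbers [h/Mpc].
alpha  = 1.02                                       ##  Dilation:  k_true = alpha * k_fid.
Sigma  = 4.0                                        ##  Damping scale, the role played by disp above.

Pnw    = 2.e4 * kp / (1. + (kp / 0.02)**2.)**1.5    ##  Smooth (no-wiggle) toy spectrum.
Pw     = Pnw * (1. + 0.05 * np.sin(kp / 0.01))      ##  Toy BAO wiggles on top of it.

ks     = alpha * kp                                 ##  Wavenumber in the 'true' cosmology.
Pdamp  = (Pw - Pnw) * np.exp(- ks * ks * Sigma * Sigma / 2.) + Pnw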
Example #2
def Cab(Llls,
        spec_z,
        surveya='LSST',
        surveyb='QSO',
        alpha=1.0,
        sigma=2.,
        zeff=True,
        nowiggle=False):
    '''
    Angular cross power spectrum, i.e. C_ab(L) between surveya and surveyb
    (e.g. C_sp(L)) for a spectroscopic slice at spec_z.
    '''

    z = np.arange(0.01, 5.0, 0.01)
    chis = comoving_distance(z)

    ba = bzs(spec_z, surveya)
    bb = bzs(spec_z, surveyb)

    if zeff:
        ##  Compute Cgg in the z_eff and first-order Limber approximations for
        ##  a slice of galaxies at spec_z of width dz;
        ##  eqn. (5) of https://arxiv.org/pdf/1511.04457.pdf
        ##
        ##  For the Phh evaluation in the integral we take a z_eff approximation,
        ##  i.e. \int dz ... Phh(zeff), with k = (L + 0.5) / chi(spec_z).
        ks = (Llls + 0.5) / comoving_distance(spec_z)

        ##  Broadcast to each redshift.
        result = np.broadcast_to(Pab(ks, ba, bb, alpha, sigma, nowiggle),
                                 (len(z), len(Llls)))
    else:
        result = np.zeros((len(z), len(Llls)))

        for i, redshift in enumerate(z):
            ##  Evaluate Phh for each z and k(ell, z);  a given L mixes a range
            ##  of k, which washes out the BAO.  This accounts for the spatial-to-
            ##  angular mapping as a function of z, i.e. k = (l + 0.5) / chi(z),
            ##  but neglects the redshift evolution of Pab(k).
            ks = (Llls + 0.5) / chis[i]

            result[i, :] = Pab(ks, ba, bb, alpha, sigma, nowiggle)

    prefactor   = (cosmo.H(z).value/const.c.to('km/s').value)*(sliced_pz(z, spec_z, survey_dzs[surveya], surveya, True)/chis) \
                                                             *(sliced_pz(z, spec_z, survey_dzs[surveyb], surveyb, True)/chis)

    integrand = prefactor[:, None] * result
    integrand /= params['h_100']  ## account for [h^-1 Mpc]^3 of P(k) and h^-1 Mpc of chi_g.

    return simps(integrand, dx=z[1] - z[0], axis=0)  ## integral over z.
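
##  A minimal sketch of the z_eff broadcast pattern used in Cab above:  evaluate
##  the 3D spectrum once at k = (L + 1/2) / chi(z_eff) and reuse it at every
##  redshift of the line-of-sight integral.  chi_of and P_of are toy stand-ins
##  (assumptions for illustration only) for comoving_distance and Pab.
import numpy as np

chi_of = lambda z: 3000. * z / (1. + z)                     ##  Toy chi(z) [Mpc/h].
P_of   = lambda k: 2.e4 * k / (1. + (k / 0.02)**2.)**1.5    ##  Toy P(k).

z      = np.arange(0.01, 5.0, 0.01)
Llls   = np.arange(10, 2000, 10)
spec_z = 1.0

ks     = (Llls + 0.5) / chi_of(spec_z)
result = np.broadcast_to(P_of(ks), (len(z), len(Llls)))     ##  Shape (len(z), len(Llls)).

print(result.shape)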
Example #3
def mlimitedM(z, mlim, M_standard=None, kcorr=True):
  ''' 
  Return M_lim (L_lim) in units of M_standard (L_standard) for given redshift 
  and apparent mag. limit.  Here, M_standard is e.g. M* for the Schechter fn. 
  '''

  from  utils  import  comoving_distance


  z     = np.asarray(z) 
  
  aa    = 1. / (1. + z)                                            ## scale factor at z.                                                                
  chi   = comoving_distance(z)                                     ## [Mpc/h]                                                                            

  dmod  = 25. + 5. * np.log10(chi / aa / params['h_100'])          ## \mu = 5.*log_10(D_L/10pc) = 25. + 5 log_10(D_L) for [D_L] = Mpc.                 
                                                                   ## D_L = (1. + z)*chi.                                                                 
  if kcorr:
    kcorr =  -2.5 * np.log10(1.0 / aa)                             ## Eq. (14) of Reddy++ and preceding text; assumes flat Fv.                        

  else:
    kcorr =   0.0                                                  ## Assumed zero k-correction.
                                                                                                                   
  Mlim  = mlim - dmod - kcorr                                      ## Apparent mag. limited (~5 sig. source detected) in R.                      
                                                                   ## Gives an M_AB at 1700 \AA for the mean redshift z \tilde 3.05                     
                                                                   ## if NOT k-corrected; otherwise M_AB(v_obs) i.e. median frequency of R.             

  if M_standard is None:
    return  Mlim

  else:
    Llim  = 10.0 ** (-0.4 * (Mlim - M_standard))                   ## Units are luminosity equivalent of 'M_standard'; e.g. M_* gives [L_*].        
                                                                   ## Lv dv = 4 pi D_L^2 F_obs dv_obs 10**(-m/2.5); similary for L* and m*.              
                                                                   ## Gives (Lv/L) = 10**-0.4(m - m*) = 10**-0.4(M - M*) 
    ##  Rest absolute magnitude.
    return  Mlim, Llim
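
##  A short, self-contained check of the distance-modulus step in mlimitedM,
##  using astropy's Planck15 cosmology in place of the repo's comoving_distance
##  and h_100 convention (an assumption for illustration only;  distances here
##  are in Mpc, not h^-1 Mpc).  mlim is a hypothetical apparent magnitude limit.
import numpy as np

from astropy.cosmology import Planck15

z     = 3.0
mlim  = 25.5

DL    = Planck15.luminosity_distance(z).value   ##  [Mpc];  D_L = (1 + z) * chi.
dmod  = 25. + 5. * np.log10(DL)                 ##  mu = 5 log10(D_L / 10 pc).
kcorr = -2.5 * np.log10(1. + z)                 ##  Flat F_nu assumption, as above.

Mlim  = mlim - dmod - kcorr

print('M_lim(z = %.1lf) = %.2lf' % (z, Mlim))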
Example #4
def Ckk(Pk_interps, Llls, pickle=False, zmax=params['zscatter']):
    '''
 Given a matter power spectrum interpolator and an array of L values, 
 return Ckk(L) in the extended Limber approximation.
 '''

    z = np.linspace(0.001, zmax, 2000)
    chis = comoving_distance(z)

    ## Stores the matter power spectrum for each of (L, z) and therefore k
    ## in the extended Limber approximation.
    result = np.zeros((len(z), len(Llls)))

    for i, redshift in enumerate(z):
        ks = (Llls + 0.5) / chis[i]
        result[i, :] = Pmm(Pk_interps, ks, redshift)

    prefactor = (const.c.to('km/s').value /
                 cosmo.H(z).value) * (lensing_kernel(z) / chis)**2.

    integrand = prefactor[:, None] * result
    integrand /= params['h_100']**3.  ## account for [h^-1 Mpc]^3 of Pmm.

    result = simps(integrand, dx=z[1] - z[0], axis=0)

    if pickle:
        from pickle import dump

        ## New beam/noise configurations require pickle files to be removed.
        dump(result, open("pickle/kk.p", "wb"))

    ## Ckk at each L, after integrating the lensing kernel over redshift.
    return result
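
##  The structure of the extended-Limber integral in Ckk is easy to see with toy
##  ingredients:  the chi(z), H(z), lensing kernel and P(k) below are assumptions
##  for illustration only, standing in for comoving_distance, cosmo.H,
##  lensing_kernel and Pmm.
import numpy as np

try:
    from scipy.integrate import simps                 ##  As used above (older scipy).
except ImportError:
    from scipy.integrate import simpson as simps

z      = np.linspace(0.001, 2.0, 500)
chis   = 3000. * z / (1. + z)                         ##  Toy comoving distance [Mpc/h].
Hz     = 100. * np.sqrt(0.3 * (1. + z)**3. + 0.7)     ##  Toy H(z) [km/s/Mpc].
kernel = np.exp(-0.5 * ((z - 1.0) / 0.5)**2.)         ##  Toy lensing kernel.

Llls   = np.arange(10, 2000, 10)
result = np.zeros((len(z), len(Llls)))

for i in range(len(z)):
    ks           = (Llls + 0.5) / chis[i]             ##  Extended Limber:  k = (L + 1/2) / chi.
    result[i, :] = 2.e4 * ks / (1. + (ks / 0.02)**2.)**1.5

prefactor = (299792.458 / Hz) * (kernel / chis)**2.   ##  (c / H) * (W_kappa / chi)^2.
ckk       = simps(prefactor[:, None] * result, dx=z[1] - z[0], axis=0)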
Example #5
def angular2spatial(ell, redshift):
    K = (ell + 1. / 2.) / comoving_distance(redshift)

    kz = 0.0             ##  Purely transverse mode:  only K = (ell + 1/2) / chi contributes.
    k2 = K**2. + kz**2.

    return k2**0.5
Example #6
def Css(zlo, zhi, b1, noks=False):
  ##  In the zeff approx.
  lochi    = comoving_distance(zlo)
  hichi    = comoving_distance(zhi)

  midz     = np.mean([zlo, zhi])
  ks, Ps   = Psp(midz, b1, b1)

  ##  Fraction of spectroscopic galaxies that reside in the 
  ##  bin is unity by definition. 
  Cs       = Ps / (hichi - lochi)

  if noks:
    return Cs

  else:
    return ks, Cs
Example #7
def Ckg(Pk_interps, Llls, zmin, zmax, survey_pz, bz, zeff=True):
    dz = 0.01
    zs = np.arange(zmin, zmax, dz)

    ##  Catch normalisation of p(z).  Added 03/01/19.
    ps = survey_pz(zs)
    norm = np.sum(ps) * dz
    ps /= norm

    if zeff:
        ##  Calculate the mean redshift.
        zg = np.sum(ps * zs) * dz / np.sum(dz * ps)

        chi_g = comoving_distance(zg)

        k = (Llls + 0.5) / chi_g

        ##  Limber and thin slice approximation.
        result  = Pmm(Pk_interps, k, zg) * bz(zg) * lensing_kernel(zg) / chi_g**2.
        result /= params['h_100']**2.  ##  Account for [h^-1 Mpc]^3 of Pmm and h^-1 Mpc of chi_g.

        return result  ##  Dimensionless.

    else:
        ##  Assumes integral over dz slice.
        chis = comoving_distance(zs)
        result = np.zeros((len(zs), len(Llls)))

        for i, redshift in enumerate(zs):
            ks = (Llls + 0.5) / chis[i]
            result[i, :] = Pmm(Pk_interps, ks, redshift) * bz(redshift)

        prefactor = lensing_kernel(zs) * sliced_pz(zs, zmin, zmax,
                                                   survey_pz) / chis**2.
        integrand = prefactor[:, None] * result

        integrand /= params['h_100']**2.  ## Account for [h^-1 Mpc]^3 of Pmm and [h^-1 Mpc] of chi.

        ## Integrated over z, Ckg(L).
        result = simps(integrand, dx=zs[1] - zs[0], axis=0)

        return result
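
##  The normalisation and mean-redshift steps at the top of Ckg amount to the
##  following, with a toy Gaussian standing in for survey_pz (an assumption for
##  illustration only).
import numpy as np

dz  = 0.01
zs  = np.arange(0.5, 1.5, dz)

ps  = np.exp(-0.5 * ((zs - 1.0) / 0.2)**2.)     ##  Toy, unnormalised p(z).
ps /= np.sum(ps) * dz                           ##  Enforce  sum_i p(z_i) dz = 1.

zg  = np.sum(ps * zs) * dz                      ##  Mean redshift of the slice.

print('<z> = %.3lf' % zg)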
Example #8
def cov_Csp(z0, sigma, zlo, zhi, b1, b2, ns):
  ##  Currently assume dominated by cross-correlation term.
  ##  See eqn. (14) of 1302.6015
  ks, Cxy =  Csp(z0, sigma, zlo, zhi, b1, b2, nowiggle=True)

  ks, Cxx =  Css(zlo, zhi, b1, noks=False)
  Cyy     =  np.zeros_like(Cxx)

  lochi   =  comoving_distance(zlo)
  hichi   =  comoving_distance(zhi)
  
  pdist   =  hichi - lochi
  ns     *=  pdist

  ##  \bar n_p from the photometric p(z);  see eqn. (14).
  dz      = 0.01
  zs      = np.arange(0.0, 10.0, dz)

  _np     = dz * np.sum(pz_photo(zs, sigma, z0, zlo, zhi))

  return  Cxy ** 2. + (Cxx + 1. / ns) * (Cyy + 1. / _np) 
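
For reference, the quantity returned above is the per-mode covariance of the cross-spectrum cited in the comment (eqn. (14) of arXiv:1302.6015), with the photometric auto term set to zero here (Cyy = 0):

\[
{\rm Cov}\big[\widehat{C}_{sp}\big] \;\propto\; C_{sp}^{2} \;+\; \Big(C_{ss} + \frac{1}{\bar n_{s}}\Big)\Big(C_{pp} + \frac{1}{\bar n_{p}}\Big),
\]

where \bar n_s and \bar n_p are the effective spectroscopic and photometric densities (ns and _np) built above.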
Example #9
def WCIB(z, nu=353., zc=2., sigmaz=2.):
    '''
    Input:
        nu in GHz; [545, 857, 217, 353] GHz.
    '''

    result = fv(nu * (1. + z), beta=2., T=34., nup=4955, alpha=0.0)
    result *= np.exp(-0.5 * (z - zc)**2. / sigmaz**2.)
    result *= comoving_distance(z)**2.
    result /= (cosmo.H(z).value * (1. + z)**2.)
    result /= result.max()

    return result
Example #10
import  numpy              as      np

from    utils              import  latexify
from    utils              import  comoving_distance
from    params             import  get_params


latexify(fig_width=None, fig_height=None, columns=1, equal=True, fontsize=12)

params   = get_params()

############################
ilist    = [10, 11, 12, 13, 14]

drops    = {3.0: [12.33, 4.0, 0.40], 4.0: [12.83, 6.15, 0.45]}

zee      = 4.0                    ##  [3.0, 4.0]
chi_star = comoving_distance(zee)

a        = 1. / (1. + zee) 

mass     = drops[zee][0]
bias     = drops[zee][1]
unknown  = drops[zee][2]

dm       = np.loadtxt("../dat/dm/dm_%.4lf_10.pkr" % a)

for irun in ilist[1:]:
  dm += np.loadtxt(("../dat/dm/dm_%.4lf_{:d}.pkr" % a).format(irun))

dm      /= float(len(ilist))
dm[:,1] *= 2*np.pi**2/dm[:,0]**3  ##  Converts column 1 to P(k) = 2 pi^2 Delta^2(k) / k^3, assuming it stores the dimensionless Delta^2(k).
Example #11
import  numpy   as      np
 
from    cosmo   import  cosmo
from    utils   import  comoving_distance
from    params  import  get_params


params = get_params()

if __name__ == '__main__':
    print('\n\nWelcome.\n\n')

    for zee in np.arange(0., 7., 1.):
        print('%.3lf \t %.3le \t %.3le' % (zee, comoving_distance(zee), params['h_100'] ** 3. * cosmo.comoving_volume(zee).value / 1.e9))

    print('\n\nDone.\n\n')
Example #12
def Fisher(Pk_interps,
           Llls,
           zs,
           tNs,
           tNp,
           pz,
           bz,
           fsky=0.1,
           fover=0.0,
           sources=False,
           printit=True):
    '''    
    Returns the variance on \hat Np = F^{-1}_ii.
    '''

    result = OrderedDict()

    for i, zee in enumerate(zs):
        LMAX = np.ceil(KMAX * comoving_distance(zee) - 0.5).astype('int')
        LCUT = np.where(Llls <= LMAX)

        ##  Returns linear bias of spectroscopic galaxies at each redshift.
        bs = bz(zee)

        ##  Returns number of spectroscopic redshifts in each shell.
        Ns = nbar_convert(tNs, unit='str') * pz(zee)
        Ns *= dz

        if sources:
            ##  Assumed LSST Whitebook for sources, b(z) = 1. + z.
            bp = 1. + zee

        else:
            ## Lenses, pg. 47 of latest whitebook.
            bp = lenses_bz(zee)

        ##  Returns number of photometric redshifts in each shell.
        Np = nbar_convert(tNp, unit='str') * lsst_chang_pz(
            zee, ilim=25.3, source=False)
        Np *= dz

        ## Returns shot noise: wp = Np for fsat = 0.0; eqn. (11) of MQW16.
        wp = Np
        ws = Ns

        wps = fover * np.min([wp, ws])

        cgg = Cij(Pk_interps, Llls, zee - dz / 2., zee + dz / 2.)
        cgg  =   Cgg(Pk_interps, Llls, zee - dz / 2., zee + dz / 2., pz, bz,\
                     survey_pz2 = lambda z: lsst_chang_pz(z, ilim=25.3, source=sources), bz2=lenses_bz, zeff=True)

        ## eqn. (44); sum over k has implicit sum over ell and m.
        result[zee] = {
            'Ns': Ns,
            'Np': Np,
            'bp': bp,
            'bs': bs,
            'cgg': cgg,
            'wp': wp,
            'num': (bp * bs * cgg)**2.
        }

        result[zee]['A00z']  = cgg * (bp * Np)**2. + wp       ## eqn. (22),  < p   p   >_L
        result[zee]['A0i']   = bp * Np * bs * Ns * cgg + wps  ## eqn. (23),  < p   s_i >_L
        result[zee]['Aii']   = (bs * Ns)**2. * cgg + ws       ## eqn. (24),  < s_i s_j >_L
        result[zee]['A0i_j'] = bp * bs * Ns * cgg             ## eqn. (25),  < p   s_i >_L, j

        ## eqn. (43); to be later normalised.
        result[zee]['beta'] = (Np * bp)**2. * cgg

    ## Total spec. zs over the whole redshift range.
    nspec = 0.0

    ## ... and normalisation over redshift for beta.
    bnorm = np.zeros_like(result[zee]['beta'])
    A00 = np.zeros_like(result[zee]['A00z'])  ## < p * p >_L
    sshift = np.zeros_like(result[zee]['A0i'])

    for zee in result:
        nspec += result[zee]['Ns']

        ## Array for each L value.
        A00 += result[zee]['A00z']
        bnorm += result[zee]['beta']

        ## Sum to S, eqn. (29);  Together with A00, yields S.
        sshift += result[zee]['A0i']**2. / result[zee]['Aii']

    for zee in result:
        ## Schur-Limber limit, eqn. (44).
        result[zee]['beta'] /= bnorm

        ## Fractional error from eqn. (44);  Schur-Limber limit of small r (due to shotnoise, or redshift overlap).
        result[zee]['ratio'] = result[zee]['num'] / A00

        ## Diagonal by definition in this limit;  Impose LMAX cut.
        result[zee]['Fii'] = result[zee]['Ns'] * fsky * np.sum(
            (2. * Llls[LCUT] + 1.) * result[zee]['ratio'][LCUT])
        result[zee]['var_ii'] = 1. / result[zee]['Fii']

        ## NOTE:  the error is for b * N(z), this fractional error is screwey unless renormalised almost immediately.
        result[zee]['ferr_ii'] = np.sqrt(
            result[zee]['var_ii']) / result[zee]['Np']

    ## Limber approximation, but not in the Schur limit (small cross-correlation).
    for zi in result:
        ## Schur(L)
        result[zi]['S'] = A00 / (A00 - sshift)
        result[zi]['r'] = result[zi]['A0i'] / np.sqrt(A00 * result[zi]['Aii'])

    ## Non-diagonal Fisher matrix outwith the Schur limit.
    Fisher = np.zeros((len(zs), len(zs)))

    for i, zi in enumerate(result):
        Interim = (2. * Llls + 1) * result[zi]['S'] * result[zi]['A0i_j'] * result[zi]['A0i_j'] / result[zi]['Aii'] / A00
        Fisher[i, i] = fsky * np.sum(Interim[LCUT])

        for j, zj in enumerate(result):
            Interim                     +=   (2. * Llls + 1) * fsky * 2. * result[zi]['S'] ** 2. * result[zi]['r'] * result[zj]['r'] \
                                                 * np.sqrt(1. / result[zi]['Aii'] / result[zj]['Aii']) * result[zi]['A0i_j'] * result[zj]['A0i_j'] / A00

            Fisher[i, j] += np.sum(Interim[LCUT])

    iFish = np.linalg.inv(Fisher)
    diFish = np.diag(iFish)

    for i, zz in enumerate(result):
        dstr = "\tz:  %.2lf \t\t Schur-Limber:  %.2lf \t\t Limber:  %.2lf" % (
            zz, 100. * result[zz]['ferr_ii'],
            100. * np.sqrt(diFish[i]) / result[zz]['Np'])
        dstr += "\t\t Schur at Lmin: %.2lf, and Lmax: %.2lf" % (
            result[zi]['S'][0], result[zi]['S'][-1])

        if printit:
            print(dstr)

    ##  Define output.
    output = []

    for outz in zs:
        index = np.where(np.abs((zs - outz)) == np.min(np.abs(zs - outz)))[0]

        ## Save
        output.append([outz, result[outz]['Ns'], result[outz]['Np'], 100. * result[zs[index][0]]['ferr_ii'],\
                                                                     100. * np.sqrt(diFish[index]) / result[zs[index][0]]['Np']])

    output = np.array(output)

    if printit:
        print(output)

    return output
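
In the Schur-Limber (diagonal) limit, the per-bin quantities stored above ('num', 'ratio', 'Fii', 'var_ii') correspond to

\[
F_{ii} \;=\; N_{s,i}\, f_{\rm sky} \sum_{L \,\le\, L_{\rm max}} (2L + 1)\, \frac{\big(b_{p}\, b_{s}\, C_{gg}\big)^{2}}{A_{00}},
\qquad
\sigma^{2}\big(\widehat{N}_{p,i}\big) \;=\; F_{ii}^{-1},
\]

i.e. eqn. (44) in the code's numbering; the non-diagonal Fisher matrix computed afterwards drops the Schur (small-r) assumption.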
Example #13
def nmodes2D(ks, dk, zlo, fsky):
  '''
  Number of 2D modes in a shell of width dk at wavenumber ks, for sky
  fraction fsky and redshift zlo.
  '''
  return  2. * fsky * ks * dk * (1. + zlo) ** 2. * comoving_distance(zlo) ** 2.