def check_massfunc(cosmo):
    """ Check that mass function and supporting functions can be run. """
    z = 0.
    z_arr = np.linspace(0., 2., 10)
    a = 1.
    a_arr = 1. / (1. + z_arr)
    mhalo_scl = 1e13
    mhalo_lst = [1e11, 1e12, 1e13, 1e14, 1e15, 1e16]
    mhalo_arr = np.array([1e11, 1e12, 1e13, 1e14, 1e15, 1e16])
    odelta = 200.

    # massfunc
    assert_(all_finite(ccl.massfunc(cosmo, mhalo_scl, a, odelta)))
    assert_(all_finite(ccl.massfunc(cosmo, mhalo_lst, a, odelta)))
    assert_(all_finite(ccl.massfunc(cosmo, mhalo_arr, a, odelta)))

    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_scl, a_arr, odelta)
    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_lst, a_arr, odelta)
    assert_raises(TypeError, ccl.massfunc, cosmo, mhalo_arr, a_arr, odelta)

    # Check whether odelta out of bounds raises an error
    assert_raises(CCLError, ccl.massfunc, cosmo, mhalo_scl, a, 199.)
    assert_raises(CCLError, ccl.massfunc, cosmo, mhalo_scl, a, 5000.)

    # massfunc_m2r
    assert_(all_finite(ccl.massfunc_m2r(cosmo, mhalo_scl)))
    assert_(all_finite(ccl.massfunc_m2r(cosmo, mhalo_lst)))
    assert_(all_finite(ccl.massfunc_m2r(cosmo, mhalo_arr)))

    # sigmaM
    assert_(all_finite(ccl.sigmaM(cosmo, mhalo_scl, a)))
    assert_(all_finite(ccl.sigmaM(cosmo, mhalo_lst, a)))
    assert_(all_finite(ccl.sigmaM(cosmo, mhalo_arr, a)))

    assert_raises(TypeError, ccl.sigmaM, cosmo, mhalo_scl, a_arr)
    assert_raises(TypeError, ccl.sigmaM, cosmo, mhalo_lst, a_arr)
    assert_raises(TypeError, ccl.sigmaM, cosmo, mhalo_arr, a_arr)

    # halo_bias
    assert_(all_finite(ccl.halo_bias(cosmo, mhalo_scl, a)))
    assert_(all_finite(ccl.halo_bias(cosmo, mhalo_lst, a)))
    assert_(all_finite(ccl.halo_bias(cosmo, mhalo_arr, a)))
def get_bpe(z, n_r, delta, nmass=256):
    a = 1. / (1 + z)
    lmarr = np.linspace(8., 16., nmass)
    marr = 10.**lmarr
    Dm = delta / ccl.omega_x(cosmo, a, "matter")  # CCL uses Delta_m
    mfunc = ccl.massfunc(cosmo, marr, a, Dm)
    bh = ccl.halo_bias(cosmo, marr, a, Dm)
    et = np.array([integrated_profile(get_battaglia(m, z, delta), n_r)
                   for m in marr])
    return itg.simps(et * bh * mfunc, x=lmarr)
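# A minimal, standalone sketch (not part of the snippet above) of the
# Delta -> Delta_m conversion used in `get_bpe`: CCL's massfunc and
# halo_bias take an overdensity defined with respect to the matter density,
# so a threshold defined with respect to the critical density is divided by
# Omega_m(a). The cosmological parameters below are illustrative assumptions.
import numpy as np
import pyccl as ccl

cosmo_demo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67,
                           sigma8=0.8, n_s=0.96)
delta_c = 500.            # overdensity w.r.t. the critical density
a_demo = 1. / (1. + 0.5)  # scale factor at z = 0.5
# Delta_c * rho_crit = Delta_m * rho_m  =>  Delta_m = Delta_c / Omega_m(a)
delta_m = delta_c / ccl.omega_x(cosmo_demo, a_demo, "matter")
print(delta_m)  # larger than 500, since Omega_m(a) < 1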
def pk(self, k, a, lmmin=6., lmmax=17., nlm=256, return_decomposed=False):
    """ Returns the galaxy power spectrum at scale factor `a`, sampled at
    all values of k in `k`.

    k : array of wavenumbers in CCL units
    a : scale factor
    lmmin, lmmax, nlm : edges and sampling rate for the mass integral
    return_decomposed : if True, returns the total, 1-halo and 2-halo power
        spectra, shot noise and bias separately (in that order).
    """
    z = 1. / a - 1.
    marr = np.logspace(lmmin, lmmax, nlm)
    dlm = np.log10(marr[1] / marr[0])
    u_s = self.u_sat(z, marr, k)
    hmf = ccl.massfunc(self.cosmo, marr, a)
    hbf = ccl.halo_bias(self.cosmo, marr, a)
    rhoM = ccl.rho_x(self.cosmo, a, "matter", is_comoving=True)
    # Mass below the integration range, assigned to the lowest mass sampled
    n0_1h = (rhoM - np.sum(hmf * marr) * dlm) / marr[0]
    n0_2h = (rhoM - np.sum(hmf * hbf * marr) * dlm) / marr[0]

    # Number of galaxies
    fc = self.fc_f(z)
    ngm = self.n_tot(z, marr)
    ncm = self.n_cent(z, marr)
    nsm = self.n_sat(z, marr)

    # Number density
    ng = np.sum(hmf * ngm) * dlm + n0_1h * ngm[0]
    if ng <= 1E-16:  # Make sure we won't divide by 0
        return None

    # Bias
    b_hod = np.sum((hmf * hbf * ncm)[None, :] *
                   (fc + nsm[None, :] * u_s[:, :]),
                   axis=1) * dlm + n0_2h * ncm[0] * (fc + nsm[0] * u_s[:, 0])
    b_hod /= ng

    # 1-halo
    # p1h = np.sum((hmf*ncm**2)[None,:]*(fc+nsm[None,:]*u_s[:,:])**2, axis=1)*dlm + n0_1h*(ncm[0]*(fc+nsm[0]*u_s[:,0]))**2
    p1h = np.sum((hmf * ncm)[None, :] *
                 (2 * fc * nsm[None, :] * u_s[:, :] +
                  (nsm[None, :] * u_s[:, :])**2),
                 axis=1) * dlm \
        + n0_1h * ncm[0] * (2 * fc * nsm[0] * u_s[:, 0] +
                            (nsm[0] * u_s[:, 0])**2)
    p1h /= ng**2

    # 2-halo
    p2h = b_hod**2 * ccl.linear_matter_power(self.cosmo, k, a)

    if return_decomposed:
        return p1h + p2h, p1h, p2h, np.ones_like(k) / ng, b_hod
    else:
        return p1h + p2h
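# A minimal, standalone sketch (separate from the class method above) of the
# consistency condition behind the `n0_1h` / `n0_2h` terms: integrating
# M * dn/dlog10M over all masses should recover the comoving matter density,
# so the mass missed by a finite integration range is re-assigned to the
# lowest mass sampled. Cosmological parameters and the mass range are
# illustrative assumptions, using the same pre-v2 pyccl API as above.
import numpy as np
import pyccl as ccl

cosmo_demo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67,
                           sigma8=0.8, n_s=0.96)
a_demo = 1.0
marr = np.logspace(6., 17., 256)                  # M in Msun
dlm = np.log10(marr[1] / marr[0])
hmf = ccl.massfunc(cosmo_demo, marr, a_demo)      # dn/dlog10M
rhoM = ccl.rho_x(cosmo_demo, a_demo, "matter", is_comoving=True)
mass_in_haloes = np.sum(hmf * marr) * dlm         # close to rhoM, not exact
n0_1h = (rhoM - mass_in_haloes) / marr[0]         # leftover, as in pk()
print(mass_in_haloes / rhoM, n0_1h)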
def test_halo_bias_models_smoke(mf_type):
    cosmo = ccl.Cosmology(Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8,
                          n_s=0.96, transfer_function='bbks',
                          matter_power_spectrum='linear',
                          mass_function=mf_type)
    hbf_cls = ccl.halos.halo_bias_from_name(MF_EQUIV[mf_type])
    hbf = hbf_cls(cosmo)
    for m in MS:
        bm_old = ccl.halo_bias(cosmo, m, 1.)
        bm_new = hbf.get_halo_bias(cosmo, m, 1.)
        assert np.all(np.isfinite(bm_old))
        assert np.shape(bm_old) == np.shape(m)
        assert np.all(np.array(bm_old) == np.array(bm_new))
def construct_bins(self, z):
    """
    Construct a binned halo mass function and bias function. This function
    sets the ``self.dndlog10M`` and ``self.bias`` arrays.

    Parameters:
        z (float): Redshift at which to construct the mass function and bias.
    """
    a = 1. / (1. + z)

    # Define mass bins
    Mh_edges = np.logspace(np.log10(self.Mmin), np.log10(self.Mmax),
                           int(self.mass_bins) + 1)
    Mh_centres = 0.5 * (Mh_edges[1:] + Mh_edges[:-1])

    # Get mass function and bias at mass bin centres
    self.dndlog10M = ccl.massfunction.massfunc(self.box.cosmo, Mh_centres, a,
                                               overdensity=200)
    self.bias = ccl.halo_bias(self.box.cosmo, Mh_centres, a, overdensity=200)
def hm_bias(cosmo, a, profile, logMrange=(6, 17), mpoints=128,
            selection=None, **kwargs):
    """Computes the halo model prediction for the bias of a given tracer.

    Args:
        cosmo (:obj:`ccl.Cosmology`): cosmology.
        a (array): array of scale factor values.
        profile (`Profile`): a profile. Only Arnaud and HOD are implemented.
        logMrange (tuple): limits of integration in log10(M/Msun).
        mpoints (int): number of mass samples.
        selection (function): selection function in (M,z) to include in the
            calculation. Pass None if you don't want to select a subset of
            the M-z plane.
        **kwargs: parameters used internally by the profiles.
    """
    # Input handling
    a = np.atleast_1d(a)

    # Profile normalisations
    Unorm = profile.profnorm(cosmo, a, squeeze=False, **kwargs)
    Unorm = Unorm[..., None]

    # Set up integration boundaries
    logMmin, logMmax = logMrange  # log of min and max halo mass [Msun]
    mpoints = int(mpoints)        # number of integration points
    M = np.logspace(logMmin, logMmax, mpoints)  # masses sampled

    # Out-of-loop optimisations
    Dm = profile.Delta / ccl.omega_x(cosmo, a, "matter")  # CCL uses Delta_m
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mfunc = np.array(
            [ccl.massfunc(cosmo, M, A1, A2) for A1, A2 in zip(a, Dm)])
        bh = np.array(
            [ccl.halo_bias(cosmo, M, A1, A2) for A1, A2 in zip(a, Dm)])
    # shape transformations
    mfunc, bh = mfunc.T[..., None], bh.T[..., None]
    if selection is not None:
        select = np.array([selection(M, 1. / aa - 1) for aa in a])
        select = select.T[..., None]
    else:
        select = 1

    U, _ = profile.fourier_profiles(cosmo, np.array([0.001]), M, a,
                                    squeeze=False, **kwargs)

    # Tinker mass function is given in dn/dlog10M, so integrate over d(log10M)
    b2h = simps(bh * mfunc * select * U, x=np.log10(M), axis=0).squeeze()

    # Contribution from small masses (added in the beginning)
    rhoM = ccl.rho_x(cosmo, a, "matter", is_comoving=True)
    dlM = (logMmax - logMmin) / (mpoints - 1)
    mfunc, bh = mfunc.squeeze(), bh.squeeze()  # squeeze extra dimensions

    n0_2h = np.array((rhoM - np.dot(M, mfunc * bh) * dlM) /
                     M[0])[None, ..., None]

    b2h += (n0_2h * U[0]).squeeze()
    b2h /= Unorm.squeeze()

    return b2h.squeeze()
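# For reference, a sketch of the quantity hm_bias() evaluates, written in the
# notation of the code above (a reading aid, not an authoritative formula):
# the halo-model effective bias of the tracer described by `profile`,
#
#     b(a) = (1 / Unorm) * [ Int dlog10M  n(M, a) b_h(M, a) u(k->0 | M, a)
#                            + n0_2h * u(k->0 | M_min, a) ],
#
# where n(M, a) is the mass function dn/dlog10M, b_h the halo bias, u the
# Fourier-space profile evaluated near k = 0 (here k = 0.001), and the n0_2h
# term re-assigns the mass (and bias) below the integration range to the
# lowest mass sampled.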
def hm_power_spectrum(cosmo, k, a, profiles, logMrange=(6, 17), mpoints=128,
                      include_1h=True, include_2h=True, squeeze=True,
                      hm_correction=None, selection=None, **kwargs):
    """Computes the halo model prediction for the 3D cross-power spectrum
    of two quantities.

    Args:
        cosmo (:obj:`ccl.Cosmology`): cosmology.
        k (array): array of wavenumbers in units of Mpc^-1.
        a (array): array of scale factor values.
        profiles (tuple): tuple of two profile objects (currently only
            Arnaud and HOD are implemented) corresponding to the two
            quantities being correlated.
        logMrange (tuple): limits of integration in log10(M/Msun).
        mpoints (int): number of mass samples.
        include_1h (bool): whether to include the 1-halo term.
        include_2h (bool): whether to include the 2-halo term.
        hm_correction (:obj:`HalomodCorrection` or None): correction to the
            halo model in the transition regime. If `None`, no correction
            is applied.
        selection (function): selection function in (M,z) to include in the
            calculation. Pass None if you don't want to select a subset of
            the M-z plane.
        **kwargs: parameters used internally by the profiles.
    """
    # Input handling
    a, k = np.atleast_1d(a), np.atleast_2d(k)

    # Profile normalisations
    p1, p2 = profiles
    Unorm = p1.profnorm(cosmo, a, squeeze=False, **kwargs)
    if p1.name == p2.name:
        Vnorm = Unorm
    else:
        Vnorm = p2.profnorm(cosmo, a, squeeze=False, **kwargs)
    if (Vnorm < 1e-16).any() or (Unorm < 1e-16).any():
        return None  # zero division
    Unorm, Vnorm = Unorm[..., None], Vnorm[..., None]  # transform axes

    # Set up integration boundaries
    logMmin, logMmax = logMrange  # log of min and max halo mass [Msun]
    mpoints = int(mpoints)        # number of integration points
    M = np.logspace(logMmin, logMmax, mpoints)  # masses sampled

    # Out-of-loop optimisations
    Pl = np.array([ccl.linear_matter_power(cosmo, k[i], aa)
                   for i, aa in enumerate(a)])
    Dm = p1.Delta / ccl.omega_x(cosmo, a, "matter")  # CCL uses Delta_m
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        mfunc = np.array(
            [ccl.massfunc(cosmo, M, A1, A2) for A1, A2 in zip(a, Dm)])

    # tinker10 halo bias
    csm = ccl.Cosmology(Omega_c=cosmo["Omega_c"], Omega_b=cosmo["Omega_b"],
                        h=cosmo["h"], sigma8=cosmo["sigma8"],
                        n_s=cosmo["n_s"],
                        mass_function="tinker10")
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        bh = np.array([ccl.halo_bias(csm, M, A1, A2)
                       for A1, A2 in zip(a, Dm)])

    # shape transformations
    mfunc, bh = mfunc.T[..., None], bh.T[..., None]
    # selection function applied once, as a weight in the mass integrals below
    if selection is not None:
        select = np.array([selection(M, 1. / aa - 1) for aa in a])
        select = select.T[..., None]
    else:
        select = 1

    U, UU = p1.fourier_profiles(cosmo, k, M, a, squeeze=False, **kwargs)
    # optimise for autocorrelation (no need to recompute)
    if p1.name == p2.name:
        V = U
        UV = UU
    else:
        V, VV = p2.fourier_profiles(cosmo, k, M, a, squeeze=False, **kwargs)
        r = kwargs["r_corr"] if "r_corr" in kwargs else 0
        UV = U * V * (1 + r)

    # Tinker mass function is given in dn/dlog10M, so integrate over d(log10M)
    P1h = simps(mfunc * select * UV, x=np.log10(M), axis=0)
    b2h_1 = simps(bh * mfunc * select * U, x=np.log10(M), axis=0)
    b2h_2 = simps(bh * mfunc * select * V, x=np.log10(M), axis=0)

    # Contribution from small masses (added in the beginning)
    rhoM = ccl.rho_x(cosmo, a, "matter", is_comoving=True)
    dlM = (logMmax - logMmin) / (mpoints - 1)
    mfunc, bh = mfunc.squeeze(), bh.squeeze()  # squeeze extra dimensions

    n0_1h = np.array((rhoM - np.dot(M, mfunc) * dlM) / M[0])[None, ..., None]
    n0_2h = np.array((rhoM - np.dot(M, mfunc * bh) * dlM) /
                     M[0])[None, ..., None]

    P1h += (n0_1h * U[0] * V[0]).squeeze()
    b2h_1 += (n0_2h * U[0]).squeeze()
    b2h_2 += (n0_2h * V[0]).squeeze()

    F = (include_1h * P1h + include_2h * (Pl * b2h_1 * b2h_2)) / (Unorm * Vnorm)
    if hm_correction is not None:
        for ia, (aa, kk) in enumerate(zip(a, k)):
            R = hm_correction.rk_interp(kk, aa)
            F[ia, :] *= R

    return F.squeeze() if squeeze else F
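# For reference, a sketch of the expressions hm_power_spectrum() evaluates,
# written in the notation of the code (a reading aid, not an authoritative
# definition). With n(M) = dn/dlog10M, b_h(M) the halo bias and u, v the
# Fourier-space profiles of the two tracers:
#
#     P_1h(k) = Int dlog10M  n(M) <u v>(k | M)
#     P_2h(k) = P_lin(k) * [Int dlog10M n(M) b_h(M) u(k | M)]
#                        * [Int dlog10M n(M) b_h(M) v(k | M)]
#
# The n0_1h / n0_2h terms add the contribution of masses below logMmin,
# assuming the remaining matter sits in haloes of the lowest sampled mass,
# and the result is divided by the profile normalisations Unorm * Vnorm.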
# Specify cosmology
cosmo = ccl.Cosmology(h=0.67, Omega_c=0.25, Omega_b=0.045, n_s=0.965,
                      sigma8=0.834)
z = 0.
a = 1. / (1. + z)

# Halo mass function and halo bias
Mh = np.logspace(np.log10(MH_MIN), np.log10(MH_MAX), MH_BINS)
dndlog10m = ccl.massfunc(cosmo, Mh, a)
bh = ccl.halo_bias(cosmo, Mh, a)

# Cumulative integral of halo mass function
nm = integrate.cumtrapz(dndlog10m[::-1], -np.log10(Mh)[::-1],
                        initial=0.)[::-1]

# Rescale nm to get cdf
cdf = nm / np.max(nm)

# Build interpolator
Mh_interp = interpolate.interp1d(cdf, np.log10(Mh), kind='linear')
np.random.seed(10)
u = np.random.uniform(size=int(1e6))

# Realise halo mass distribution
mh_real = 10.**(Mh_interp(u))
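# A minimal check (continuing the snippet above, so it reuses `mh_real`,
# `Mh` and `dndlog10m`) that the inverse-CDF sampling reproduces the shape
# of the input mass function: the histogram of the sampled log10(M) values
# should be proportional to dn/dlog10M. The bin choice here is an
# illustrative assumption.
counts, _ = np.histogram(np.log10(mh_real), bins=np.log10(Mh))
centres = 0.5 * (np.log10(Mh)[1:] + np.log10(Mh)[:-1])
expected = np.interp(centres, np.log10(Mh), dndlog10m)

# Normalise both to unit maximum before comparing shapes; the two curves
# should track each other wherever the counts are not noise-dominated.
print(counts / counts.max())
print(expected / expected.max())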
def test_halo_bias_smoke(m):
    a = 0.8
    b = ccl.halo_bias(COSMO, m, a)
    assert np.all(np.isfinite(b))
    assert np.shape(b) == np.shape(m)
def pk_gm(self, k, a, lmmin=6., lmmax=17., nlm=256, return_decomposed=False):
    """
    Returns the galaxy-matter power spectrum at a single scale factor a for
    an array of wave vectors k.

    :param k: wave vector array
    :param a: single scale factor value
    :param lmmin: log10 of the minimum halo mass for the HOD integrals
    :param lmmax: log10 of the maximum halo mass for the HOD integrals
    :param nlm: sampling rate for the mass integral
    :param return_decomposed: if True, also return the 1-halo and 2-halo
        power spectra, shot noise and bias separately
    :return:
    """
    z = 1. / a - 1.
    marr = np.logspace(lmmin, lmmax, nlm)
    dlm = np.log10(marr[1] / marr[0])
    u_nfw = self.u_sat(z, marr, k)
    hmf = ccl.massfunc(self.cosmo, marr, a)
    hbf = ccl.halo_bias(self.cosmo, marr, a)
    rhoM = ccl.rho_x(self.cosmo, a, "matter", is_comoving=True)
    # Mass below the integration range, assigned to the lowest mass sampled
    n0_1h = (rhoM - np.sum(hmf * marr) * dlm) / marr[0]
    n0_2h = (rhoM - np.sum(hmf * hbf * marr) * dlm) / marr[0]
    # n0_2h = (rhoM - np.sum(hmf*hbf*marr)*dlm)

    # Number of galaxies
    fc = self.fc_f(z)
    ngm = self.n_tot(z, marr)
    ncm = self.n_cent(z, marr)
    nsm = self.n_sat(z, marr)

    # Number density
    ng = np.sum(hmf * ngm) * dlm + n0_1h * ngm[0]
    if ng <= 1E-16:  # Make sure we won't divide by 0
        return None

    # Bias
    b_hod = np.sum((hmf * hbf * ncm)[None, :] *
                   (fc + nsm[None, :] * u_nfw[:, :]), axis=1) * dlm \
        + n0_2h * ncm[0] * (fc + nsm[0] * u_nfw[:, 0])
    b_m = np.sum((hmf * hbf)[None, :] * marr[None, :] * u_nfw[:, :],
                 axis=1) * dlm + n0_2h * u_nfw[:, 0]
    b_hod /= ng
    b_m /= rhoM

    # 1-halo
    # p1h = np.sum((hmf*ncm**2)[None,:]*(fc+nsm[None,:]*u_s[:,:])**2, axis=1)*dlm + n0_1h*(ncm[0]*(fc+nsm[0]*u_s[:,0]))**2
    p1h = np.sum((hmf * ncm)[None, :] * (fc + nsm[None, :] * u_nfw[:, :]) *
                 marr * u_nfw[:, :], axis=1) * dlm \
        + n0_1h * ncm[0] * (fc + nsm[0] * u_nfw[:, 0]) * u_nfw[:, 0]
    p1h /= ng * rhoM

    # 2-halo
    p2h = b_hod * b_m * ccl.linear_matter_power(self.cosmo, k, a)

    if return_decomposed:
        return p1h + p2h, p1h, p2h, np.ones_like(k) / ng, b_hod
    else:
        return p1h + p2h