def test_profile_args(self):
    """Check that each halo's NFW concentration matches the colossus
    'diemer19' prediction at the 1% level (2 decimal places on the ratio)."""
    # (halo object, mass fed to colossus in Msun/h after the h factor);
    # the last two entries repeat the field-halo checks exactly as before.
    cases = [
        (self.subhalo, self.mass_subhalo),
        (self.field_halo, self.mass_field_halo),
        (self.field_halo, self.mass_subhalo),
        (self.field_halo, self.mass_field_halo),
    ]
    for halo, mass in cases:
        (c) = halo.profile_args
        reference = concentration(self.lens_cosmo.cosmo.h * mass,
                                  '200c', self.z, model='diemer19')
        npt.assert_almost_equal(c / reference, 1, 2)
def test_profile_args(self):
    """Validate (concentration, truncation radius, central density) for a
    truncated subhalo and a line-of-sight field halo.

    NOTE(review): assumes the test fixture defines self.profile_args with
    'RocheNorm'/'RocheNu'/'LOS_truncation_factor' keys and that
    self._cross_norm is the expected central density -- confirm.
    """
    # --- subhalo: concentration against colossus 'diemer19' (Msun/h mass)
    profile_args = self.subhalo.profile_args
    (c, rt, rho0) = profile_args
    con = concentration(self.lens_cosmo.cosmo.h * self.mass, '200c',
                        self.z, model='diemer19')
    npt.assert_almost_equal(c / con, 1, 2)
    # Roche-style truncation: norm * (M/1e7)^(1/3) * (r3d/50)^nu
    trunc = self.profile_args['RocheNorm'] * (10 ** 8 / 10 ** 7) ** (1. / 3) * \
        (self.r3d / 50) ** self.profile_args['RocheNu']
    npt.assert_almost_equal(trunc, rt, 3)
    rho_central = self._cross_norm
    npt.assert_almost_equal(rho0, rho_central)
    # --- field halo: same concentration check ...
    profile_args = self.field_halo.profile_args
    (c, rt, rho0) = profile_args
    con = concentration(self.lens_cosmo.cosmo.h * self.mass, '200c',
                        self.z, model='diemer19')
    npt.assert_almost_equal(c / con, 1, 2)
    # ... but rt is rN (N = LOS_truncation_factor), converted from
    # comoving Mpc/h to physical kpc.
    m_h = self.mass * self.lens_cosmo.cosmo.h
    r50_comoving = self.lens_cosmo.rN_M_nfw_comoving(
        m_h, self.profile_args['LOS_truncation_factor'], self.z)
    r50_physical = r50_comoving * self.lens_cosmo.cosmo.scale_factor(
        self.z) / self.lens_cosmo.cosmo.h
    r50_physical_kpc = r50_physical * 1000
    npt.assert_almost_equal(r50_physical_kpc, rt)
    rho_central = self._cross_norm
    npt.assert_almost_equal(rho0, rho_central)
def create_concentration_spline(self):
    """Build an interpolated c(M) relation at the working redshift with
    colossus ('diemer15', '200m' masses) and store it in self.args['cspline'].

    Prints a warning and returns silently when colossus is not installed.
    """
    try:
        from colossus.halo import concentration
        from colossus.cosmology import cosmology
    except ImportError:
        print("colossus not installed. No concentration spline available.")
        return
    cosmo = self.args['cosmology']
    colossus_params = {
        'flat': True,
        'H0': cosmo['h'] * 100.,
        'Om0': cosmo['Omega_m'],
        'Ob0': cosmo['Omega_b'],
        'sigma8': cosmo['sigma8'],
        'ns': cosmo['ns'],
    }
    cosmology.addCosmology('fiducial', colossus_params)
    cosmology.setCosmology('fiducial')
    z = self.args['z']
    masses = np.logspace(12, 17, 50)
    concentrations = np.array([
        concentration.concentration(mass, '200m', z=z, model='diemer15')
        for mass in masses
    ])
    self.args['cspline'] = interp.interp1d(masses, concentrations)
    return
def mc(self, m, z):
    """Return the c200c concentration for mass m at redshift z using the
    configured colossus model (self.modelname)."""
    # keep colossus' internal cosmology in sync before evaluating
    matchCosmo()
    return chc.concentration(m, '200c', z, self.modelname)
def get_concentration_spline(cal=False):
    """Return a 2-D interpolator c(M, z) over M in [1e12, 1e17] and
    z in [0.2, 0.65], tabulated from colossus 'diemer15' ('200m')."""
    from colossus.halo import concentration
    from colossus.cosmology import cosmology

    cosmo = get_cosmo_default(cal)
    cosmology.addCosmology('fiducial', {
        'flat': True,
        'H0': cosmo['h'] * 100.,
        'Om0': cosmo['om'],
        'Ob0': cosmo['ob'],
        'sigma8': cosmo['sigma8'],
        'ns': cosmo['ns'],
    })
    cosmology.setCosmology('fiducial')
    n_grid = 20
    masses = np.logspace(12, 17, n_grid)
    redshifts = np.linspace(0.2, 0.65, n_grid)
    # rows index redshift, columns index mass (interp2d convention)
    c_table = np.ones((n_grid, n_grid))
    for row, z in enumerate(redshifts):
        for col, mass in enumerate(masses):
            c_table[row, col] = concentration.concentration(
                mass, '200m', z=z, model='diemer15')
    return interp2d(masses, redshifts, c_table)
def compute3DProfile(halobase, mcmodel='diemer15'):
    """Compare a simulated MXXL 3D halo density profile with the
    Diemer & Kravtsov 2014 (DK14) model prediction.

    Returns a dict with radii [Mpc], the simulation density and the DK14
    density, both in Msun/Mpc^3.
    """
    #read 3D profile
    simprofile = readMXXLProfile.MXXLProfile('{}.radial_profile_3D.txt'.format(halobase))
    # shell volumes -> differential density in Msun / Mpc^3
    simvolume = (4./3.)*np.pi*(simprofile.outer_radius**3 - simprofile.inner_radius**3)
    simdensity = simprofile.diff_mass / simvolume  #M_sol / Mpc**3
    r_mpc = simprofile.median_radius

    #read halo mass; basename is "<sim>_<run>_<halo>..."
    sim_and_haloid = os.path.basename(halobase)
    tokens = sim_and_haloid.split('_')
    simid = '_'.join(tokens[:2])
    # NOTE(review): the 2D twin of this function builds haloid as
    # '_'.join(tokens[2:]); here a '_0' suffix is hard-coded -- confirm
    # which is intended.
    haloid = '{}_0'.format(tokens[2])

    #make sure cosmology always matches
    curcosmo = readMXXLProfile.cosmo
    nfwutils.global_cosmology.set_cosmology(curcosmo)
    cmc.matchCosmo()

    #compute Diemer SD prediction; colossus works in kpc/h and Msun/h
    r_kpch = (r_mpc*1000*curcosmo.h)
    m200 = answers[simid][haloid]['m200']  #M_sol/h
    zcluster = answers[simid][haloid]['redshift']
    c200 = chc.concentration(m200, '200c', zcluster, model=mcmodel)
    diemer_profile = dk14prof.getDK14ProfileWithOuterTerms(M = m200, c = c200, z = zcluster, mdef = '200c')

    #density is returned with units M_solh^2/kpc^3. convert to Msun/Mpc^3
    convert_units = 1e9*curcosmo.h**2  # converts to M_sol/Mpc^3
    diemer_density = diemer_profile.density(r_kpch)*convert_units

    return dict(radius=r_mpc, simprofile=simdensity, diemerprofile=diemer_density)
def xihm_model(r, rt, m, c, alpha, bias, ma, mb, xi2h, Om):
    """Our model for the halo-mass correlation function.

    Args:
        r (float or array like): 3d distances from halo center in Mpc/h comoving.
        rt (float): Truncation radius. The boundary of the halo.
        m (float): Mass in Msun/h.
        c (float): Concentration.
        alpha (float): Width parameter of the exclusion function.
        bias (float): Large scale halo bias.
        ma (float): Mass of the first correction term in Msun/h.
        mb (float): Mass of the second correction term in Msun/h.
        xi2h (float or array like): 2-halo term at r.
        Om (float): Omega_matter.

    Returns:
        float or arraylike: Halo-mass correlation function.

    """
    rhom = Om * rhocrit  # mean matter density

    # 1-halo term: (1 + NFW correlation), tapered at the truncation
    # radius rt by the exclusion function thetat.
    xi1h = 1. + cluster_toolkit.xi.xi_nfw_at_R(r, m, c, Om)
    xi1h *= thetat(r, rt, alpha)

    # Concentrations of the two correction-term masses (z = 0, '200m').
    ca = concentration(M=ma, mdef='200m', z=0.)
    cb = concentration(M=mb, mdef='200m', z=0.)

    # Subtract the mb-halo contribution from the raw 2-halo term; rb is
    # rescaled by rt/r1 so the correction halo is truncated consistently.
    r1 = M_to_R(m, z=0., mdef='200m')
    rb = M_to_R(mb, z=0., mdef='200m')
    rb = rt / r1 * rb
    xi2h = xi2h - mb / rhom * utconvut(r, rb, cb)

    # Biased 2-halo term.
    xi2h = bias * xi2h

    # Exclusion corrections.
    # NOTE(review): the first utconvthetae term is multiplied by xi2h while
    # the second (mb, cb) term is not -- confirm this asymmetry is intended.
    C = -utconvthetae(r, rt, m, alpha, ma, ca, Om) * xi2h - utconvthetae(
        r, rt, m, alpha, mb, cb, Om)

    # Full xi
    xi = xi1h + xi2h + C
    return xi
def __init__(self, parameters, zL, mdef, chooseCosmology, part = None, \
             se = None, be = None, cM_relation = None):
    """Initialize a Diemer & Kravtsov 2014 (DK14) halo density profile.

    Parameters
    ----------
    parameters : dict-like of fit parameters; 'M' (value in Msun/h) is
        required, and 'c' is required unless cM_relation is True.
    zL : float, lens redshift.
    mdef : str, colossus mass definition (e.g. '200m').
    chooseCosmology : cosmology identifier forwarded to `profile.__init__`.
    part : str or None, which DK14 part to use; defaults to 'both'.
    se, be : float or None, outer power-law slope/normalization
        (defaults 1.5 and 1.0).
    cM_relation : if True, take c from the colossus c-M relation instead
        of parameters['c'].
    """
    profile.__init__(self, zL, mdef, chooseCosmology)
    self.M_mdef = parameters['M'].value #M200 in M_dot/h
    if cM_relation == True:
        # BUG FIX: the mass attribute set above is `M_mdef`; `self.M` was
        # never defined and raised AttributeError on this path.
        self.c = hc.concentration(self.M_mdef, self.mdef, self.zL)
        #self.c = 3.614*((1+self.zL)**(-0.424))*(self.M/self.cosmo.h/1E14)**(-0.105)
    else:
        self.c = parameters['c'].value
    if se is None:
        self.se = 1.5
    else:
        self.se = se
    if be is None:
        self.be = 1.0
    else:
        self.be = be
    self.zL = zL
    self.mdef = mdef
    if part is not None:
        self.part = part
    else:
        self.part = 'both'
    #[rs] = Mpc/h
    self.r_mdef = Halo.mass_so.M_to_R(self.M_mdef, self.zL, self.mdef) / 1E3 #Mpc/h
    self.rs = self.r_mdef / self.c #Mpc/h
    self.Delta = int(mdef[:-1])
    #[rho_mdef] = M_dot Mpc^3 from M_{\odot}h^2/kpc^3
    self.rho_mdef = (Halo.mass_so.densityThreshold(self.zL, self.mdef) *
                     1E9 * (self.cosmo.h)**2.) / self.Delta
    '''
    self.dk14Prof = HaloDensityProfile.DK14Profile(M = self.M_mdef, c = self.c, z = \
                    self.zL, mdef = self.mdef, \
                    be = self.be, se = self.se, \
                    part = self.part)
    '''
    self.dk14Prof = profile_dk14.getDK14ProfileWithOuterTerms(
        M=self.M_mdef, c=self.c, z=self.zL, mdef=self.mdef,
        outer_term_names=['pl'])
    #self.dk14Prof.par.se = self.se
    #self.dk14Prof.par.be = self.be
    self.rmaxMult = 2.
    #self.dk14Prof.par.rs = self.rs*1E3 #[rs] = kpc/h from Mpc/h
    #self.dk14Prof.selected = 'by_accretion_rate' #beta = 6 gamma = 4; more accurate results
    self.dk14Prof.selected = 'by_mass'
    self.profile = 'dk'
    #super(dkProfile, self).__init__()
    return
def calc_stat(mass, redshift, mass_definition="200m"):
    """Return (M200m, M500c, R200m, Rsp) for a halo of the given mass.

    Parameters
    ----------
    mass : float, halo mass in the definition given by `mass_definition`.
    redshift : float.
    mass_definition : '200m' or '500c'.

    Raises
    ------
    ValueError for any other mass definition.
    """
    # local import: mass_so is needed to compute R200m in the '200m' branch
    from colossus.halo import mass_so

    if mass_definition == "200m":
        M200m = mass
        c200m = concentration.concentration(M200m, mass_definition, redshift)
        M500c, R500c, c500c = changeMassDefinition(M200m, c200m, redshift,
                                                   '200m', '500c', profile='nfw')
        # BUG FIX: R200m was returned without ever being computed in this
        # branch (NameError); derive it directly from M200m.
        R200m = mass_so.M_to_R(M200m, redshift, '200m')
    elif mass_definition == "500c":
        M500c = mass
        c500c = concentration.concentration(M500c, '500c', redshift)
        M200m, R200m, c200m = changeMassDefinition(M500c, c500c, redshift,
                                                   '500c', '200m')
    else:
        # previously this fell through and raised a NameError on return
        raise ValueError("unsupported mass_definition: %s" % mass_definition)

    # splashback radius; convert physical -> comoving with (1 + z)
    Rsp = sb.splashbackRadius(redshift, '200m', M=M200m)[0] * (1 + redshift)
    return M200m, M500c, R200m, Rsp
def convert_mass(m, z, mdef_in='200c', mdef_out='200m',
                 concentration_model='diemer19', profile='nfw'):
    '''
    Converts between mass definitions.

    A concentration in the input definition is computed from the chosen
    c-M model, then used to translate the mass to the output definition.
    '''
    c_in = concentration.concentration(m, mdef_in, z,
                                       model=concentration_model,
                                       conversion_profile=profile)
    m_out, _, _ = mass_defs.changeMassDefinition(m, c_in, z, mdef_in,
                                                 mdef_out, profile=profile)
    return m_out
def lnlike(params, R, ds, icov, flags, z, extras):
    """Gaussian log-likelihood of the DeltaSigma data given log10(mass)."""
    lM = params
    k, Plin, Pnl, cosmo, inparams = extras
    mass = 10 ** lM
    inparams['Mass'] = mass  #Mpc/h
    inparams["concentration"] = conc.concentration(mass, '200m', z,
                                                   model='diemer15')
    result = pyDS.calc_Delta_Sigma(k, Plin, k, Pnl, cosmo, inparams)
    model = result['ave_delta_sigma'] * h * (1. + z) ** 2  #Msun/pc^2 physical
    # chi^2 on the flagged (kept) radial bins only
    residual = (ds - model)[flags]
    return -0.5 * np.dot(residual, np.dot(icov, residual))
def calculate_concentration(redshift, M200m, cosmology):
    """Set up a colossus cosmology and compute the 'diemer15' c200m.

    NOTE(review): currently returns a hard-coded 2.0 (debugging stub kept
    to preserve existing behavior); the computed value is only printed.
    NOTE(review): 'H0' is fed cosmology['h'] while other functions in this
    file use h * 100, and Ob0 = 1 - Om is not a physical baryon density --
    confirm both against the intended cosmology.
    """
    from colossus.halo import concentration
    from colossus.cosmology import cosmology as col_cosmology
    params = {'flat': True,
              'H0': cosmology['h'],
              'Om0': cosmology['om'],
              'Ob0': 1.0 - cosmology['om'],
              'sigma8': cosmology['sigma8'],
              'ns': cosmology['ns']}
    col_cosmology.addCosmology('fiducial_cosmology', params)
    col_cosmology.setCosmology('fiducial_cosmology')
    # BUG FIX: the local no longer shadows the `concentration` module, and
    # the Python 2 print statement (a syntax error under Python 3) is now a
    # print call.
    c200m = concentration.concentration(M200m, '200m', redshift,
                                        model='diemer15')
    print("concentration = {} {}".format(c200m, 2.0))
    return 2.0  # c200m deliberately unused -- see NOTE above
def r50(self, t):
    """Disk half-mass radius from the Mo, Mao & White (1998) formalism."""
    # Rdisk using formula from Mo, Mao & White 1998
    h = self.cosmo.h
    mvir_h = self.Mh * h  # colossus expects masses in Msun/h
    cvir = concentration(mvir_h, 'vir', self.z, model='diemer15')
    M200c, R200c, c200c = changeMassDefinition(mvir_h, cvir, self.z,
                                               'vir', '200c')
    disk_fraction = self.Ms / (M200c / h)
    # assume etar fraction of the angular momentum lost
    jmlam = self.etar * 0.045
    eta50 = self.eta50_MMW98(c200c, jmlam, disk_fraction)
    return eta50 * R200c / h
def test_profile_args(self):
    """Concentrations: colossus 'diemer19' for the default halos, and the
    custom c(M, z) = c0 (1+z)^zeta (nu/nu_ref)^-beta peak-height model for
    the custom halos (normalized at 1e8 Msun, z = 0)."""
    h = self.lens_cosmo.cosmo.h

    # default halos against colossus
    for halo, mass in [(self.subhalo, self.mass_subhalo),
                       (self.field_halo, self.mass_field_halo)]:
        (c) = halo.profile_args
        reference = concentration(h * mass, '200c', self.z, model='diemer19')
        npt.assert_almost_equal(c / reference, 1, 2)

    # custom halos against the analytic peak-height parameterization
    mc_model = self.profile_args_custom['mc_model']
    c0 = mc_model['c0']
    beta = mc_model['beta']
    zeta = mc_model['zeta']
    nu_ref = peaks.peakHeight(h * 10 ** 8, 0.)
    for halo, mass in [(self.subhalo_custom, self.mass_subhalo),
                       (self.field_halo_custom, self.mass_field_halo)]:
        (c) = halo.profile_args
        nu = peaks.peakHeight(mass * h, self.z)
        expected = c0 * (1 + self.z) ** zeta * (nu / nu_ref) ** -beta
        npt.assert_almost_equal(expected / c, 1, 2)
def hmc(model, values, cosmo=cosmo, mdef='200c', relation='diemer15', z=0, sigma=0.16):
    """Joint prior from the halo mass--concentration mass relation.
    Relation is from Diemer & Kravtsov 2015

    Parameters
    ----------
    model : DynamicalModel instance
    values : array_like
        values in parameter space at which to evaluate the prior
    cosmo : colossus.Cosmology instance
    mdef : string
        Colossus mass definition string for input halo parameters
        e.g., 'vir', '200m', '200c'
    relation : string
        See the list here for the options.
        https://bdiemer.bitbucket.io/colossus/halo_concentration.html
        Defaults to the mass-concentration model of Diemer & Kravtsov 2015.
    z : float
        redshift
    sigma : float
        scatter in M-c relation, default from Diemer & Kravtsov
    """
    h = cosmo.h
    rho_crit = cosmo.rho_c(z) * h**2  # Msun / kpc3
    kwargs = model.construct_kwargs(values)

    def Mh_function(r):
        # enclosed dark-matter mass profile at radius r
        return model.mass_model['dm'](r, **kwargs)

    r200 = None
    if 'M200' in kwargs:
        M200 = kwargs['M200']
    else:
        r200 = mass._rvir(Mh_function, rhigh=1e8, delta_c=200,
                          rho_crit=rho_crit)
        M200 = Mh_function(r200)
    if 'c200' in kwargs:
        log_c200 = np.log10(kwargs['c200'])
    else:
        try:
            r_s = kwargs['r_s']
            # BUG FIX: r200 was unbound here whenever M200 was supplied
            # directly in kwargs; compute it on demand.
            if r200 is None:
                r200 = mass._rvir(Mh_function, rhigh=1e8, delta_c=200,
                                  rho_crit=rho_crit)
            log_c200 = np.log10(r200 / r_s)
        except KeyError:
            raise ValueError('Need to have a defined halo concentration!')
    # colossus uses halo mass in units of Msun / h
    c200_model = concentration(M200 * h, mdef=mdef, z=z, model=relation)
    # Gaussian prior in log10(c) around the model prediction
    return pdf.lngauss(x=log_c200, mu=np.log10(c200_model), sigma=sigma)
def log_likelihood_DS(lM200, R, ds, iCds):
    """Gaussian log-likelihood of the Delta Sigma profile `ds` given
    log10(M200); concentration comes from the module-level c-M model."""
    mass = 10 ** lM200
    c200 = concentration.concentration(mass, '200c', zmean, model=cmodel)
    prediction = Delta_Sigma_NFW_2h(R, zmean, M200=mass, c200=c200,
                                    cosmo_params=params, terms='1h')
    residual = ds - prediction
    return -np.dot(residual, np.dot(iCds, residual)) / 2.0
def M200(Lambda, z):
    """Convert cluster richness to M200c.

    Applies the mass-richness relation M200m = M0 * (Lambda/40)^alpha and
    then converts 200m -> 200c through a 'duffy08' concentration.

    Parameters
    ----------
    Lambda : float or array, cluster richness.
    z : float, redshift.

    Returns
    -------
    M200c in the same mass units as M0 (presumably Msun/h -- confirm with
    the calibration of the relation).
    """
    # removed an unused `from colossus.cosmology import cosmology` import
    from colossus.halo import mass_defs
    from colossus.halo import concentration

    M0 = 2.21e14    # normalization of the mass-richness relation
    alpha = 1.33    # slope of the mass-richness relation
    M200m = M0 * ((Lambda / 40.) ** alpha)
    c200m = concentration.concentration(M200m, '200m', z, model='duffy08')
    M200c, R200c, c200c = mass_defs.changeMassDefinition(M200m, c200m, z,
                                                         '200m', '200c')
    return M200c
def get_conc(M, z):
    """c200m from the 'diemer15' model under the fixed Fox-simulation
    cosmology (registered with colossus on every call)."""
    fox_params = {
        "H0": cosmo['h'] * 100.,
        "Om0": cosmo['om'],
        'Ob0': 0.049017,
        'sigma8': 0.83495,
        'ns': 0.96191,
        'flat': True,
    }
    col_cosmology.addCosmology('fox_cosmology', fox_params)
    col_cosmology.setCosmology('fox_cosmology')
    om = cosmo['om']  # kept for parity with the original (unused)
    return conc.concentration(M, '200m', z, model='diemer15')
def delta_z(z_c, M_200, R, mean_z):
    """Fractional shift of the magnification-weighted mean source redshift
    behind a cluster of mass M_200 at z_c, at projected radius R, relative
    to the unlensed mean `mean_z`.

    NOTE(review): relies on module-level globals -- z_step_g / dz_g / p_g
    (source redshift grid and distribution), beta (lensing efficiency),
    cosmos, Sig_NFW and simp_integral -- confirm their definitions.
    """
    # NFW surface mass density at R from a 'diemer15' concentration
    c_200 = concen.concentration(M_200, '200c', z_c, model='diemer15')
    Sigma = Sig_NFW(M_200, c_200, R)
    # magnification approximation: mu = (1 - Sigma/Sigma_cr)^2
    mu = np.power(1. - Sigma / cosmos.Sig_cr(z_c, z_step_g), 2)
    # magnification-weighted moments of the source distribution
    top_integrand = np.power(mu, (beta(z_step_g) - 1.)) * p_g * z_step_g
    bottom_integrand = np.power(mu, (beta(z_step_g) - 1.)) * p_g
    #print(top_integrand, bottom_integrand)
    if np.any(np.isnan(top_integrand)):
        # NaNs indicate Sigma_cr or beta misbehaved: dump diagnostics, abort
        print(
            "M_200 = {3} \nz_c = {4} \nSigma = {0} \nSigma_cr = {1} \nbeta = {2}"
            .format(Sigma, cosmos.Sig_cr(z_c, z_step_g), beta(z_step_g),
                    M_200, z_c))
        exit()
    return simp_integral(dz_g, top_integrand) / (
        mean_z * simp_integral(dz_g, bottom_integrand)) - 1.
def createSims(bins, nperbin, zcluster, basedir, simgroup, idstart, cosmology):
    """Draw nperbin log-uniform random masses per bin, assign 'diemer15'
    c200c concentrations, write one analytic-cluster YAML config per halo,
    and pickle an answer key.

    NOTE: uses cPickle -- Python 2 era code.
    """
    outputdir = '{}/{}'.format(basedir, simgroup)
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)

    # keep colossus consistent with the requested cosmology
    nfwutils.global_cosmology.set_cosmology(cosmology)
    cmc.matchCosmo()

    #bins in delta=200
    #masses are M_sol*h
    m200s = []
    c200s = []
    answers = {}
    for mlow, mhigh in bins:
        # log-uniform draw within the mass bin
        curmasses = np.exp(np.random.uniform(np.log(mlow), np.log(mhigh), size=nperbin))
        m200s.append(curmasses)
        curconcens = chc.concentration(curmasses, '200c', zcluster, model='diemer15')
        c200s.append(curconcens)
    m200s = np.hstack(m200s)
    c200s = np.hstack(c200s)

    # one YAML config and one answer-key entry per halo id
    for i, id in enumerate(range(idstart, idstart+len(m200s))):
        config = dict(max_dist = 3.,
                      gridlength = 1024.,
                      zcluster = zcluster,
                      m200 = float(m200s[i]),
                      c200 = float(c200s[i]))
        with open('{}/analytic_{}.yaml'.format(outputdir, id), 'w') as output:
            yaml.dump(config, output)
        answers[id] = dict(m200 = m200s[i],
                           concen = c200s[i],
                           redshift = zcluster)

    with open('analytic_{}_answers.pkl'.format(simgroup), 'wb') as output:
        cPickle.dump(answers, output)
def computeSDProfiles(halobase, mcmodel='diemer15'):
    """Compare a simulated MXXL projected surface-density profile with the
    DK14 prediction plus the mean background column density.

    NOTE: Python 2 code (bare `print` statements below).
    """
    #read 2D profile
    simprofile = readMXXLProfile.MXXLProfile('{}.radial_profile.txt'.format(halobase))
    # annulus areas -> surface density in Msun / Mpc^2
    simarea = np.pi*(simprofile.outer_radius**2 - simprofile.inner_radius**2)
    simdensity = simprofile.diff_mass / simarea  #M_sol / Mpc**2
    r_mpc = simprofile.median_radius

    #read halo mass; basename is "<sim>_<run>_<halo...>"
    sim_and_haloid = os.path.basename(halobase)
    tokens = sim_and_haloid.split('_')
    simid = '_'.join(tokens[:2])
    haloid = '_'.join(tokens[2:])

    #make sure cosmology always matches
    curcosmo = readMXXLProfile.cosmo
    nfwutils.global_cosmology.set_cosmology(curcosmo)
    cmc.matchCosmo()

    #compute Diemer SD prediction; colossus works in kpc/h and Msun/h
    r_kpch = (r_mpc*1000*curcosmo.h)
    m200 = answers[simid][haloid]['m200']  #M_sol/h
    zcluster = answers[simid][haloid]['redshift']
    c200 = chc.concentration(m200, '200c', zcluster, model=mcmodel)
    diemer_profile = dk14prof.getDK14ProfileWithOuterTerms(M = m200, c = c200, z = zcluster, mdef = '200c')
    surfacedensity_func, deltaSigma_func = readAnalytic.calcLensingTerms(diemer_profile, np.max(r_kpch))
    convert_units = 1./(curcosmo.h*1e6)  #M_sol / Mpc^2 -> diemer units
    diemer_surfacedensity = surfacedensity_func(r_kpch)/convert_units

    #add mean background density to Diemer prediction
    acosmo = astrocosmo.FlatLambdaCDM(curcosmo.H0, curcosmo.omega_m)
    density_units_3d = units.solMass / units.Mpc**3
    density_units_2d = units.solMass / units.Mpc**2
    back_density = (acosmo.Om(zcluster)*acosmo.critical_density(zcluster)).to(density_units_3d)
    print back_density
    # project the mean density over a 200 Mpc/h line of sight
    back_sd_contrib = back_density*(200.*units.Mpc/curcosmo.h)
    print back_sd_contrib
    # alternative: subtract the background from the simulation instead
    # simdensity = simdensity*density_units_2d - back_sd_contrib
    diemer_surfacedensity += back_sd_contrib

    return dict(radius=r_mpc, simprofile=simdensity, diemerprofile=diemer_surfacedensity)
def __init__(self, parameters, zL, n, mdef, chooseCosmology, Tau=None,
             cM_relation=None, esp=None):
    """Initialize a truncated (BMO) NFW profile.

    Parameters
    ----------
    parameters : dict-like; 'M' (value, Msun/h) required, 'c' required
        unless cM_relation is True.
    zL : float, lens redshift.
    n : int, sharpness of the truncation (1 or 2).
    mdef : str, colossus mass definition (e.g. '200m').
    chooseCosmology : cosmology identifier.
    Tau : float or None, dimensionless truncation radius rt/rvir
        (default 2.6 from fits).
    cM_relation : if True, take c from the colossus c-M relation.
    esp : float or None, numerical tolerance (default 1e-5).
    """
    profile.__init__(self, zL, mdef, chooseCosmology)
    cosmo = Cosmology.setCosmology(chooseCosmology)
    self.parameters = parameters
    self.M_mdef = parameters['M'].value #M200 input in M_dot/h
    if cM_relation == True:
        self.c = hc.concentration(self.M_mdef * cosmo.h, self.mdef, self.zL)
    else:
        self.c = parameters['c'].value
    self.zL = zL
    self.n = n #sharpness of truncation (n = 1 or 2)
    if Tau == None:
        #dimensionless truncation radius (T = rt/rvir => fit =2.6)
        self.T = 2.6
    else:
        self.T = Tau
    # BUG FIX: `self.M` was never defined; the mass attribute is M_mdef.
    self.r_mdef = Halo.mass_so.M_to_R(self.M_mdef, self.zL, self.mdef) / 1E3 #Mpc/h
    self.rs = self.r_mdef / self.c
    self.rt = self.T * self.rs
    self.mdef = mdef
    self.chooseCosmology = chooseCosmology
    self.G, self.v_c, self.H2, self.cosmo = self.calcConstants()
    self.Delta = int(mdef[:-1])
    #[rho_mdef] = M_dot Mpc^3 from M_{\odot}h^2/kpc^3
    self.rho_mdef = (Halo.mass_so.densityThreshold(self.zL, self.mdef) *
                     1E9 * (self.cosmo.h)**2.) / self.Delta
    self.profile = 'nfwBMO'
    if esp == None:
        self.esp = 1E-5
    else:
        self.esp = esp
    return
def mass_conversion(m200m, redshift, cosmology, mass_is_log=True):
    """Convert m200m to m500c.

    Parameters
    ----------
    m200m : array_like
        Halo mass(es) in a radius enclosing 200x the mean density of the
        universe; interpreted as ln(M) when `mass_is_log` is True.
    redshift : float
        Common redshift of all halos (used for the concentration and the
        definition change).
    cosmology : dict
        Cosmology parameters registered with colossus.
    mass_is_log : bool
        When True, both input and output are natural-log masses.

    Returns
    -------
    array_like
        Mass(es) in a radius enclosing 500x the critical density.
    """
    setCosmology('myCosmo', cosmology)
    masses = np.exp(m200m) if mass_is_log else m200m
    c200m = concentration(masses, '200m', redshift)
    m500c = changeMassDefinition(masses, c200m, redshift, '200m', '500c')[0]
    return np.log(m500c) if mass_is_log else m500c
def log_likelihood(data_model, R, profiles, iCOV):
    """Joint Gaussian log-likelihood of the (DS, GT, GX) profiles for an
    elliptical NFW halo parametrized by (log10 M200, axis ratio q)."""
    lM200, q = data_model
    mass = 10 ** lM200
    c200 = concentration.concentration(mass, '200c', zmean, model=cmodel)
    ellipticity = (1. - q) / (1. + q)
    ds, gt, gx = profiles
    iCds, iCgt, iCgx = iCOV
    DS = Delta_Sigma_NFW(R, zmean, M200=mass, c200=c200, cosmo=cosmo_as)
    GT, GX = GAMMA_components(R, zmean, ellip=ellipticity, M200=mass,
                              c200=c200, cosmo=cosmo_as)
    # accumulate -chi^2/2 for each profile component
    total = 0.0
    for data, model_vec, icov in ((ds, DS, iCds), (gt, GT, iCgt),
                                  (gx, GX, iCgx)):
        diff = data - model_vec
        total += -np.dot(diff, np.dot(icov, diff)) / 2.0
    return total
def cmb_test_data(nber_maps, validation_analyis=False, clus_position_analysis=False, extragal_bias_analysis=False):
    """Generate simulated lensed CMB cluster cutouts for one of three
    analyses.

    Parameters
    ----------
    nber_maps : int, number of simulated maps per variant.
    validation_analyis : bool, lens by 2e14/6e14/10e14 Msun NFW halos.
    clus_position_analysis : bool, baseline vs. randomly centroid-shifted
        cluster maps.
    extragal_bias_analysis : bool, add tSZ/kSZ foreground cutouts to the
        lensed maps.

    Returns
    -------
    Lists of simulated maps; which lists depends on the flag that is set.
    """
    nx, dx, ny, dy = 240, 0.25, 240, 0.25
    map_params = [nx, dx, ny, dy]
    l, cl = CosmoCalc().cmb_power_spectrum()
    l, bl = exp.beam_power_spectrum(1.4)
    l, nl = exp.white_noise_power_spectrum(2.0)

    if validation_analyis is True:
        sims_clus_2e14, sims_clus_6e14, sims_clus_10e14 = [], [], []
        # one convergence/deflection field per cluster mass
        kappa_map_2e14 = lensing.NFW(2e14, 3, 1, 1100).convergence_map(map_params)
        kappa_map_6e14 = lensing.NFW(6e14, 3, 1, 1100).convergence_map(map_params)
        kappa_map_10e14 = lensing.NFW(10e14, 3, 1, 1100).convergence_map(map_params)
        alpha_vec_2e14 = lensing.deflection_from_convergence(
            map_params, kappa_map_2e14)
        alpha_vec_6e14 = lensing.deflection_from_convergence(
            map_params, kappa_map_6e14)
        alpha_vec_10e14 = lensing.deflection_from_convergence(
            map_params, kappa_map_10e14)
        for i in range(nber_maps):
            # same CMB realization and noise for all three masses
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_clus_2e14 = lensing.lens_map(map_params, sim, alpha_vec_2e14)
            sim_clus_6e14 = lensing.lens_map(map_params, sim, alpha_vec_6e14)
            sim_clus_10e14 = lensing.lens_map(map_params, sim, alpha_vec_10e14)
            sim_clus_2e14 = tools.convolve(sim_clus_2e14, l, np.sqrt(bl),
                                           map_params=map_params)
            sim_clus_6e14 = tools.convolve(sim_clus_6e14, l, np.sqrt(bl),
                                           map_params=map_params)
            sim_clus_10e14 = tools.convolve(sim_clus_10e14, l, np.sqrt(bl),
                                            map_params=map_params)
            noise_map = tools.make_gaussian_realization(map_params, l, nl)
            sim_clus_2e14 += noise_map
            sim_clus_6e14 += noise_map
            sim_clus_10e14 += noise_map
            sims_clus_2e14.append(sim_clus_2e14)
            sims_clus_6e14.append(sim_clus_6e14)
            sims_clus_10e14.append(sim_clus_10e14)
        return sims_clus_2e14, sims_clus_6e14, sims_clus_10e14

    if clus_position_analysis is True:
        # BUG FIX: the accumulator was initialized as `sims_centorid_shift`
        # (typo) but appended to and returned as `sims_centroid_shift`,
        # raising NameError on first use.
        sims_baseline, sims_centroid_shift = [], []
        # NOTE(review): the baseline uses mass 2e14 although the variable is
        # named ..._6e14_... and the shifted maps use 6e14 -- confirm which
        # mass is intended.
        kappa_map_6e14_baseline = lensing.NFW(2e14, 3, 1, 1100).convergence_map(map_params)
        alpha_vec_6e14_baseline = lensing.deflection_from_convergence(
            map_params, kappa_map_6e14_baseline)
        for i in range(nber_maps):
            # random Gaussian centroid offset per realization
            x_shift, y_shift = np.random.normal(
                loc=0.0, scale=0.5), np.random.normal(loc=0.0, scale=0.5)
            centroid_shift = [x_shift, y_shift]
            kappa_map_6e14_centroid_shift = lensing.NFW(
                6e14, 3, 1, 1100).convergence_map(map_params, centroid_shift)
            alpha_vec_6e14_centroid_shift = lensing.deflection_from_convergence(
                map_params, kappa_map_6e14_centroid_shift)
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_baseline = lensing.lens_map(map_params, sim,
                                            alpha_vec_6e14_baseline)
            sim_centroid_shift = lensing.lens_map(
                map_params, sim, alpha_vec_6e14_centroid_shift)
            sim_baseline = tools.convolve(sim_baseline, l, np.sqrt(bl),
                                          map_params=map_params)
            sim_centroid_shift = tools.convolve(sim_centroid_shift, l,
                                                np.sqrt(bl),
                                                map_params=map_params)
            noise_map = tools.make_gaussian_realization(map_params, l, nl)
            sim_baseline += noise_map
            sim_centroid_shift += noise_map
            sims_baseline.append(sim_baseline)
            sims_centroid_shift.append(sim_centroid_shift)
        return sims_baseline, sims_centroid_shift

    if extragal_bias_analysis is True:
        sims_baseline, sims_tsz, sims_ksz, sims_tsz_ksz = [], [], [], []
        # convert M500c = 2e14 at z = 0.7 to an M200c NFW lens
        c500 = concentration.concentration(2e14, '500c', 0.7)
        M200c, _, c200c = mass_defs.changeMassDefinition(
            2e14, c500, 0.7, '500c', '200c', profile='nfw')
        kappa_map_M200c = lensing.NFW(M200c, c200c, 0.7,
                                      1100).convergence_map(map_params)
        alpha_vec_M200c = lensing.deflection_from_convergence(
            map_params, kappa_map_M200c)
        # load simulated tSZ/kSZ cluster cutouts (hard-coded local path)
        fname = '/Volumes/Extreme_SSD/codes/master_thesis/code/data/mdpl2_cutouts_for_tszksz_clus_detection_M1.7e+14to2.3e+14_z0.6to0.8_15320haloes_boxsize20.0am.npz'
        cutouts_dic = np.load(fname, allow_pickle=1,
                              encoding='latin1')['arr_0'].item()
        mass_z_key = list(cutouts_dic.keys())[0]
        cutouts = cutouts_dic[mass_z_key]
        scale_fac = fg.compton_y_to_delta_Tcmb(145, uK=True)
        tsz_cutouts, ksz_cutouts = [], []
        for kcntr, keyname in enumerate(cutouts):
            tsz_cutout = cutouts[keyname]['y'] * scale_fac
            tsz_cutouts.append(tsz_cutout)
            # random +/-1 sign flip for each kSZ cutout
            ksz_cutout = cutouts[keyname]['ksz'] * random.randrange(-1, 2, 2)
            ksz_cutouts.append(ksz_cutout)
        # central 40-pixel stamp where the foregrounds are pasted
        s, e = int((nx - 40) / 2), int((ny + 40) / 2)
        for i in range(nber_maps):
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_M200c = lensing.lens_map(map_params, sim, alpha_vec_M200c)
            sim_baseline, sim_tsz, sim_ksz, sim_tsz_ksz = np.copy(
                sim_M200c), np.copy(sim_M200c), np.copy(sim_M200c), np.copy(
                    sim_M200c)
            # randomly chosen, randomly rotated foreground cutouts
            tsz_cutout = tools.rotate(
                tsz_cutouts[random.randint(0, len(tsz_cutouts) - 1)],
                random.randint(-180, 180))
            ksz_cutout = tools.rotate(
                ksz_cutouts[random.randint(0, len(ksz_cutouts) - 1)],
                random.randint(-180, 180))
            tsz_ksz_cutout = tsz_cutout + ksz_cutout
            sim_tsz[s:e, s:e] = sim_tsz[s:e, s:e] + tsz_cutout
            sim_ksz[s:e, s:e] = sim_ksz[s:e, s:e] + ksz_cutout
            sim_tsz_ksz[s:e, s:e] = sim_tsz_ksz[s:e, s:e] + tsz_ksz_cutout
            sim_baseline = tools.convolve(sim_baseline, l, np.sqrt(bl),
                                          map_params=map_params)
            sim_tsz = tools.convolve(sim_tsz, l, np.sqrt(bl),
                                     map_params=map_params)
            sim_ksz = tools.convolve(sim_ksz, l, np.sqrt(bl),
                                     map_params=map_params)
            sim_tsz_ksz = tools.convolve(sim_tsz_ksz, l, np.sqrt(bl),
                                         map_params=map_params)
            noise_map = tools.make_gaussian_realization(map_params, l, nl)
            sim_baseline += noise_map
            sim_tsz += noise_map
            sim_ksz += noise_map
            sim_tsz_ksz += noise_map
            sims_baseline.append(sim_baseline)
            sims_tsz.append(sim_tsz)
            sims_ksz.append(sim_ksz)
            sims_tsz_ksz.append(sim_tsz_ksz)
        return sims_baseline, sims_tsz, sims_ksz, sims_tsz_ksz
def con(M, z, model="ishiyama21"):
    """Halo concentration c_vir(M, z).

    When model == "Bullock2001", uses the analytic fit
    c = 9/(1+z) * (M / 1.5e13)^(-0.13) (normalized to match at z=0);
    otherwise the model name is forwarded to colossus' `concentration`
    (default "ishiyama21").
    """
    if model == "Bullock2001":
        # Employs fit from Bullock et al. 2001, to match at z=0
        return 9. / (1. + z) * (M / 1.5e13) ** (-0.13)
    # Employs fit from colossus. Default here is taken as "ishiyama21"
    return concentration(M, "vir", z=z, model=model)
# NOTE(review): fragment of a larger Python 2 plotting script (bare
# `print`); the loop extents below are inferred from variable usage.
# `i`, `cmaps`, `inds`, `zs`, `masses`, `linds`, `datapath`, `k`, `cosmo`,
# `params`, `c` and `h` are defined earlier in the file -- confirm.
cmap = plt.get_cmap(cmaps[i])
index = inds[i]
z = zs[i]
#Get power spectra
Plin = np.loadtxt("txt_files/P_files/Plin_z%.2f.txt" % z)
Pnl = np.loadtxt("txt_files/P_files/Pnl_z%.2f.txt" % z)
#Give the bin edges in comoving units; Mpc/h
params["R_bin_min"] = 0.0323 * h * (1 + z)
params["R_bin_max"] = 30.0 * h * (1 + z)
for j in linds:
    # data, best-fit mass and model prediction for each richness bin j
    R, DS, err, flag = np.loadtxt(datapath % (z, j), unpack=True)
    lM = masses[i, j]
    print 10**lM
    params['Mass'] = 10**lM
    params["concentration"] = conc.concentration(10**lM, '200m', z, model='diemer15')
    result = pyDS.calc_Delta_Sigma(k, Plin, k, Pnl, cosmo, params)
    model = result['ave_delta_sigma'] * h * (1. + z)**2  #Msun/pc^2 physical
    plt.loglog(R, model, c=cmap(c[j]), ls='-')
    plt.errorbar(R, DS, err, c=cmap(c[j]), marker='o', ls='')
# finalize and save one figure per redshift
plt.ylim(.1, 1e3)
plt.xlabel(r"$R\ [{\rm Mpc}]$")
plt.ylabel(r"$\Delta\Sigma\ [{\rm M_\odot/pc^2}]$")
plt.title("z=%.2f" % z)
plt.subplots_adjust(bottom=0.17, left=0.2)
plt.gcf().savefig("BFs_z%.2f.png" % z)
plt.show()
# catalog is updated to that of SPT-SZ if sptsz_sim: from astropy.io import fits data = fits.open('SPT2500d.fits') cluslist = data[1].data M500c = cluslist['M500'] selectedinds = np.where(M500c >0)[0] M500list = M500c[selectedinds]*1e14 zlist = cluslist['redshift'][selectedinds] z_L_list = np.ones(len(M500list))*0.7 M_200_list = np.zeros(len(M500list)) mdef = '500c' mdefout = '200m' for i,mm in enumerate(M500list): cval = concentration.concentration(mm, mdef, zlist[i]) Mval, r200val, c200val = mass_defs.changeMassDefinition(mm, cval, zlist[i], mdef, mdefout, profile='nfw') M_200_list[i] = Mval totalclus = len(M_200_list) np.random.seed(cmbrandomseedval) randomseeds = np.unique(np.random.randint(1e6,size= 2 * totalclus))[0:totalclus] # add tSZ either Arnaud profile, Sehgal simulations, or Takahashi simulations # to be done -get Arnaud from the for loop as well" if sehgal_sims: tSZ_emission, tSZ_emission_90ghz = cluster_stuff.fn_pick_add_sehgal_sims(M_200_list) tSZ_emission = tSZ_emission/1e6 tSZ_emission_90ghz = tSZ_emission_90ghz/1e6 if takahashi_sims: tSZ_emission, tSZ_emission_90ghz = cluster_stuff.fn_add_daisuke_sims(M_200_list,z_L_list)
def predict(self, model, separate_gal_type=False, baryon_kwargs={}, **occ_kwargs):
    """
    Predicts the number density and correlation function for a certain
    model.

    Parameters
    ----------
    model : HodModelFactory
        Instance of ``halotools.empirical_models.HodModelFactory``
        describing the model for which predictions are made.

    separate_gal_type : boolean, optional
        If True, the return values are dictionaries divided by each galaxy
        types contribution to the output result.

    **occ_kwargs : dict, optional
        Keyword arguments passed to the ``mean_occupation`` functions of
        the model.

    Returns
    -------
    ngal : numpy.array or dict
        Array or dictionary of arrays containing the number densities for
        each galaxy type stored in self.gal_type. The total galaxy number
        density is the sum of all elements of this array.

    xi : numpy.array or dict
        Array or dictionary of arrays storing the prediction for the
        correlation function.

    NOTE(review): `baryon_kwargs={}` is a mutable default, and the
    `.pop(...)` calls below mutate the caller's dict -- flagged for a
    follow-up fix.
    """
    # --- consistency checks between the model and this TabCorr instance
    try:
        assert (sorted(model.gal_types) == sorted(
            ['centrals', 'satellites']))
    except AssertionError:
        raise RuntimeError('The model instance must only have centrals ' +
                           'and satellites as galaxy types. Check the ' +
                           'gal_types attribute of the model instance.')
    try:
        assert (model._input_model_dictionary['centrals_occupation'].
                prim_haloprop_key == self.attrs['prim_haloprop_key'])
        assert (model._input_model_dictionary['satellites_occupation'].
                prim_haloprop_key == self.attrs['prim_haloprop_key'])
    except AssertionError:
        raise RuntimeError('Mismatch in the primary halo properties of ' +
                           'the model and the TabCorr instance.')
    try:
        if hasattr(model._input_model_dictionary['centrals_occupation'],
                   'sec_haloprop_key'):
            assert (model._input_model_dictionary['centrals_occupation'].
                    sec_haloprop_key == self.attrs['sec_haloprop_key'])
        if hasattr(model._input_model_dictionary['satellites_occupation'],
                   'sec_haloprop_key'):
            assert (model._input_model_dictionary['satellites_occupation'].
                    sec_haloprop_key == self.attrs['sec_haloprop_key'])
    except AssertionError:
        raise RuntimeError('Mismatch in the secondary halo properties ' +
                           'of the model and the TabCorr instance.')
    try:
        assert np.abs(model.redshift - self.attrs['redshift']) < 0.05
    except AssertionError:
        raise RuntimeError('Mismatch in the redshift of the model and ' +
                           'the TabCorr instance.')

    # --- mean occupation per tabulated halo bin, centrals and satellites
    mean_occupation = np.zeros(len(self.gal_type))
    mask = self.gal_type['gal_type'] == 'centrals'
    mean_occupation[mask] = model.mean_occupation_centrals(
        prim_haloprop=self.gal_type['prim_haloprop'][mask],
        sec_haloprop_percentile=(
            self.gal_type['sec_haloprop_percentile'][mask]),
        **occ_kwargs)
    mean_occupation[~mask] = model.mean_occupation_satellites(
        prim_haloprop=self.gal_type['prim_haloprop'][~mask],
        sec_haloprop_percentile=(
            self.gal_type['sec_haloprop_percentile'][~mask]),
        **occ_kwargs)

    ngal = mean_occupation * self.gal_type['n_h'].data

    # weight the tabulated 2-pt function by the occupation numbers
    if self.attrs['mode'] == 'auto':
        ngal_sq = np.outer(ngal, ngal)
        ngal_sq = 2 * ngal_sq - np.diag(np.diag(ngal_sq))
        ngal_sq = symmetric_matrix_to_array(ngal_sq)
        xi = self.tpcf_matrix * ngal_sq / np.sum(ngal_sq)
    elif self.attrs['mode'] == 'cross':
        xi = self.tpcf_matrix * ngal / np.sum(ngal)

    # baryonification
    if (len(baryon_kwargs) > 0) & (has_bcm):
        #params = {'flat': True, 'H0': 67.2, 'Om0': 0.31, 'Ob0': 0.049, 'sigma8': 0.81, 'ns': 0.95}
        params = baryon_kwargs['cosmo_params']
        params['flat'] = True
        # NOTE(review): pop() mutates the caller's baryon_kwargs dict
        use_2h = baryon_kwargs.pop('use_2h', True)
        use_clf_fsat = baryon_kwargs.pop('use_clf_fsat', False)
        print(use_clf_fsat)
        cosmology.addCosmology('myCosmo', params)
        cosmology.setCosmology('myCosmo')
        halo_mass = np.array(self.gal_type['prim_haloprop'])
        par = bfc.par()
        par.baryon.eta_tot = baryon_kwargs['eta_tot']
        par.baryon.eta_cga = baryon_kwargs['eta_cga']
        if 'transfct' in baryon_kwargs.keys():
            par.files.transfct = baryon_kwargs['transfct']
        else:
            # fall back to the transfer function shipped with the package
            dirname = os.path.dirname(os.path.abspath(__file__))
            par.files.transfct = '{}/files/CDM_PLANCK_tk.dat'.format(
                dirname)
        rbin = annular_area_weighted_midpoints(self.tpcf_args[1])
        rho_r = annular_area_weighted_midpoints(np.logspace(-2, 2, 100))
        # coarse grid in halo mass for the baryon correction factors
        n_mhalo = 10
        mhalo_min = np.min(np.log10(halo_mass))
        mhalo_max = np.max(np.log10(halo_mass))
        mhalo_grid = np.logspace(mhalo_min, mhalo_max, n_mhalo)
        halo_conc_grid = concentration.concentration(
            mhalo_grid, 'vir', self.attrs['redshift'], model='diemer19')
        # fudge factor accounting for scatter in mvir-c relation
        halo_conc_grid = halo_conc_grid * 0.93
        if use_clf_fsat:
            # stellar fractions from the model's occupation components
            f_sat = np.array([
                model.model_dictionary['satellites_occupation'].
                stellar_mass_fraction(m) for m in mhalo_grid
            ])
            f_cen = np.array([
                model.model_dictionary['centrals_occupation'].
                stellar_mass_fraction(m) for m in mhalo_grid
            ])
            f_star = f_sat + f_cen
            print(f_star)
            sys.stdout.flush()
        else:
            f_star = [None] * n_mhalo
            f_cen = [None] * n_mhalo
        # baryon params
        par.baryon.Mc = baryon_kwargs['Mc']
        par.baryon.mu = baryon_kwargs['mu']
        par.baryon.thej = baryon_kwargs['thej']
        if use_2h:
            # 2h term
            vc_r, vc_m, vc_bias, vc_corr = bfc.cosmo(par)
            # print('2h term took {}s'.format(end - start))
            bias_tck = splrep(vc_m, vc_bias, s=0)
            corr_tck = splrep(vc_r, vc_corr, s=0)
            cosmo_bias_grid = splev(mhalo_grid, bias_tck)
            cosmo_corr = splev(rho_r, corr_tck)
            profs = [
                bfc.profiles(rho_r,
                             mhalo_grid[i],
                             halo_conc_grid[i],
                             cosmo_corr,
                             cosmo_bias_grid[i],
                             par,
                             fstar=f_star[i],
                             fcga=f_cen[i])[1] for i in range(len(mhalo_grid))
            ]
        else:
            profs = [
                bfc.onehalo_profiles(rho_r,
                                     mhalo_grid[i],
                                     halo_conc_grid[i],
                                     par,
                                     fstar=f_star[i],
                                     fcga=f_cen[i])[1]
                for i in range(len(mhalo_grid))
            ]
        # convert density profiles to DeltaSigma correction factors and
        # interpolate them from the coarse mass grid onto every halo
        correction_factors_grid = [
            dens_to_ds(rbin, rho_r, profs[i], epsabs=1e-1,
                       epsrel=1e-3)[2] for i in range(len(mhalo_grid))
        ]
        correction_factors_grid = np.array(correction_factors_grid)
        correction_factors_spl = interp1d(mhalo_grid,
                                          correction_factors_grid.T,
                                          fill_value='extrapolate',
                                          bounds_error=False)
        correction_factors = correction_factors_spl(halo_mass)
        xi = correction_factors * xi
    elif (len(baryon_kwargs) > 0):
        raise ImportError(
            "You passed me baryon correction module parameters, but I couldn't import the baryonification module"
        )

    # --- collapse over tabulated bins, optionally split by galaxy type
    if not separate_gal_type:
        ngal = np.sum(ngal)
        xi = np.sum(xi, axis=1).reshape(self.tpcf_shape)
        return ngal, xi
    else:
        ngal_dict = {}
        xi_dict = {}
        for gal_type in np.unique(self.gal_type['gal_type']):
            mask = self.gal_type['gal_type'] == gal_type
            ngal_dict[gal_type] = np.sum(ngal[mask])
        if self.attrs['mode'] == 'auto':
            for gal_type_1, gal_type_2 in (
                    itertools.combinations_with_replacement(
                        np.unique(self.gal_type['gal_type']), 2)):
                mask = symmetric_matrix_to_array(
                    np.outer(gal_type_1 == self.gal_type['gal_type'],
                             gal_type_2 == self.gal_type['gal_type']) |
                    np.outer(gal_type_2 == self.gal_type['gal_type'],
                             gal_type_1 == self.gal_type['gal_type']))
                xi_dict['%s-%s' % (gal_type_1, gal_type_2)] = np.sum(
                    xi * mask, axis=1).reshape(self.tpcf_shape)
        elif self.attrs['mode'] == 'cross':
            for gal_type in np.unique(self.gal_type['gal_type']):
                mask = self.gal_type['gal_type'] == gal_type
                xi_dict[gal_type] = np.sum(xi * mask,
                                           axis=1).reshape(self.tpcf_shape)
        return ngal_dict, xi_dict
def __init__(self, z):
    """Tabulate c_vir over log10(Mvir) in [9, 15.1] at redshift z with the
    colossus 'bullock01' model and store the interpolator as self.f."""
    log_mass_grid = np.linspace(9, 15.1, 200)
    cvir_grid = concentration.concentration(
        10 ** log_mass_grid, "vir", z, model="bullock01")
    self.f = interp1d(log_mass_grid, cvir_grid)
#------------------- # saving mcmc out mcmc_out = sampler.get_chain(flat=True).T table = [ fits.Column(name='lM200', format='E', array=mcmc_out[0]), fits.Column(name='q', format='E', array=mcmc_out[1]) ] tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs(table)) lM = np.percentile(mcmc_out[0][1500:], [16, 50, 84]) q = np.percentile(mcmc_out[1][1500:], [16, 50, 84]) c200 = concentration.concentration(10**lM[1], '200c', zmean, model=cmodel) h = fits.Header() h.append(('lM200', np.round(lM[1], 4))) h.append(('elM200M', np.round(np.diff(lM)[0], 4))) h.append(('elM200m', np.round(np.diff(lM)[1], 4))) h.append(('c200', np.round(c200, 4))) h.append(('q', np.round(q[1], 4))) h.append(('eqM', np.round(np.diff(q)[0], 4))) h.append(('eqm', np.round(np.diff(q)[1], 4))) primary_hdu = fits.PrimaryHDU(header=h) hdul = fits.HDUList([primary_hdu, tbhdu])