def _compute_ngals(ngal_density, field_size, cosmo, cluster_z, zsrc, zsrc_min=None, zsrc_max=None):
    """ A private function that computes the number of galaxies to draw given the
    user-defined field size, galaxy density, cosmology, cluster redshift, galaxy
    redshift distribution and requested redshift range.

    For a more detailed description of each of the parameters, see the documentation
    of `generate_galaxy_catalog`.
    """
    field_size_arcmin = convert_units(field_size, 'Mpc', 'arcmin', redshift=cluster_z, cosmo=cosmo)
    ngals = int(ngal_density * field_size_arcmin * field_size_arcmin)

    if isinstance(zsrc, float):
        return ngals
    elif zsrc in ('chang13', 'desc_srd'):
        z_distrib_func = _chang_z_distrib if zsrc == 'chang13' else _srd_z_distrib
        # Normalisation of the redshift distribution function (upper limit of 100
        # used as a proxy for infinity)
        norm, _ = integrate.quad(z_distrib_func, 0., 100)
        # Probability to find a galaxy in the requested redshift range
        prob = integrate.quad(z_distrib_func, zsrc_min, zsrc_max)[0] / norm
        return int(ngals * prob)
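# --- Illustrative sketch (not part of the library) --------------------------------
# A minimal, self-contained toy version of the counting logic in _compute_ngals:
# normalise a toy redshift distribution, compute the probability of a source lying
# in the requested range, and scale the raw galaxy count accordingly. The toy n(z),
# densities and redshift limits below are assumptions chosen for illustration, not
# values used by the library.
def _example_compute_ngals_sketch():
    import numpy as np
    from scipy import integrate

    def toy_z_distrib(z, z0=0.5):
        # toy unnormalised n(z) ~ z^2 exp(-z/z0), roughly Chang et al. (2013)-shaped
        return z**2 * np.exp(-z / z0)

    ngal_density = 10.0               # galaxies per square arcmin (assumed)
    field_size_arcmin = 60.0          # field side in arcmin (assumed)
    ngals_raw = int(ngal_density * field_size_arcmin**2)

    norm = integrate.quad(toy_z_distrib, 0., 100.)[0]           # ~full-range normalisation
    prob = integrate.quad(toy_z_distrib, 0.4, 3.0)[0] / norm    # P(0.4 < z < 3.0)
    return int(ngals_raw * prob)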
def compute_source_number_per_bin(rmin, rmax, radial_unit, lens_redshift, source_pdz,
                                  source_density, nbins=10, method='evenwidth',
                                  cosmo=None, delta_z_cut=0.1):
    """Compute the expected number of source galaxies in each radial bin.

    Bin edges are built between `rmin` and `rmax`, converted to arcmin at the lens
    redshift, and the source surface density is multiplied by each annulus area.
    Only the fraction of the source redshift distribution beyond
    `lens_redshift + delta_z_cut` is counted.
    """
    binedges = utils.make_bins(rmin, rmax, nbins, method=method)
    bin_center = binedges[:-1] + (binedges[1:] - binedges[:-1]) / 2.
    binedges_arcmin = utils.convert_units(binedges, radial_unit, 'arcmin', lens_redshift, cosmo)
    bin_center_arcmin = binedges_arcmin[:-1] + (binedges_arcmin[1:] - binedges_arcmin[:-1]) / 2.
    area = np.pi * (binedges_arcmin[1:]**2 - binedges_arcmin[:-1]**2)

    # Fraction of the source redshift distribution beyond the lens redshift plus the cut
    if isinstance(source_pdz, types.FunctionType):
        norm = integrate.quad(source_pdz, lens_redshift + delta_z_cut, np.inf)[0]
    elif isinstance(source_pdz, itp.interp1d):
        norm = np.sum(source_pdz.y[source_pdz.x > (lens_redshift + delta_z_cut)]) / np.sum(source_pdz.y)

    Ngal = norm * (source_density * area).value
    return bin_center, binedges, Ngal
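# --- Illustrative sketch (not part of the library) --------------------------------
# A self-contained toy version of the per-bin count above: build radial bin edges
# directly in arcmin, compute annulus areas, and scale the source surface density by
# the fraction of a toy p(z) lying beyond lens_redshift + delta_z_cut. The p(z), bin
# edges and densities are assumptions for illustration only.
def _example_source_number_per_bin_sketch():
    import numpy as np
    from scipy import integrate

    lens_redshift, delta_z_cut = 0.3, 0.1
    source_density = 10.0                           # galaxies / arcmin^2 (assumed)
    binedges_arcmin = np.linspace(1.0, 10.0, 11)    # bin edges already in arcmin

    # annulus area of each radial bin
    area = np.pi * (binedges_arcmin[1:]**2 - binedges_arcmin[:-1]**2)

    def toy_pdz(z, z0=0.5):
        return z**2 * np.exp(-z / z0)

    # fraction of the (numerically normalised) toy p(z) beyond the lens + cut
    frac = (integrate.quad(toy_pdz, lens_redshift + delta_z_cut, np.inf)[0]
            / integrate.quad(toy_pdz, 0., np.inf)[0])
    Ngal = frac * source_density * area             # expected counts per radial bin
    return binedges_arcmin, Ngal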
def test_convert_units():
    """ Test the wrapper function to convert units.

    Corner cases should be tested in the individual functions. This function should
    test one case for all supported conversions and the error handling.
    """
    # Make a cosmology object for testing
    # cosmo = FlatLambdaCDM(H0=70., Om0=0.3)
    cosmo = md.Cosmology(H0=70.0, Omega_dm0=0.3 - 0.045, Omega_b0=0.045)

    # Test that each unit is supported
    utils.convert_units(1.0, 'radians', 'degrees')
    utils.convert_units(1.0, 'arcmin', 'arcsec')
    utils.convert_units(1.0, 'Mpc', 'kpc')

    # Error checking
    assert_raises(ValueError, utils.convert_units, 1.0, 'radians', 'CRAZY')
    assert_raises(ValueError, utils.convert_units, 1.0, 'CRAZY', 'radians')
    assert_raises(TypeError, utils.convert_units, 1.0, 'arcsec', 'Mpc')
    assert_raises(TypeError, utils.convert_units, 1.0, 'arcsec', 'Mpc', None, cosmo)
    assert_raises(TypeError, utils.convert_units, 1.0, 'arcsec', 'Mpc', 0.5, None)
    assert_raises(ValueError, utils.convert_units, 1.0, 'arcsec', 'Mpc', -0.5, cosmo)

    # Test cases to make sure angular -> angular conversions are consistent
    assert_allclose(utils.convert_units(np.pi, 'radians', 'degrees'), 180., **TOLERANCE)
    assert_allclose(utils.convert_units(180.0, 'degrees', 'radians'), np.pi, **TOLERANCE)
    assert_allclose(utils.convert_units(1.0, 'degrees', 'arcmin'), 60., **TOLERANCE)
    assert_allclose(utils.convert_units(1.0, 'degrees', 'arcsec'), 3600., **TOLERANCE)

    # Test cases to make sure physical -> physical conversions are consistent
    assert_allclose(utils.convert_units(1.0, 'Mpc', 'kpc'), 1.0e3, **TOLERANCE)
    assert_allclose(utils.convert_units(1000., 'kpc', 'Mpc'), 1.0, **TOLERANCE)
    assert_allclose(utils.convert_units(1.0, 'Mpc', 'pc'), 1.0e6, **TOLERANCE)

    # Test conversion from angular to physical
    # Uses the same cosmology backend as the wrapper, so this is circular for now,
    # but it will be fine once the backend is swapped to CCL
    r_arcmin, redshift = 20.0, 0.5
    d_a = cosmo.eval_da(redshift) * 1.e3  # kpc
    truth = r_arcmin * (1.0 / 60.0) * (np.pi / 180.0) * d_a
    assert_allclose(utils.convert_units(r_arcmin, 'arcmin', 'kpc', redshift, cosmo),
                    truth, **TOLERANCE)

    # Test conversion from physical to angular
    r_kpc, redshift = 20.0, 0.5
    # d_a = cosmo.angular_diameter_distance(redshift).to('kpc').value
    d_a = cosmo.eval_da(redshift) * 1.e3  # kpc
    truth = r_kpc * (1.0 / d_a) * (180. / np.pi) * 60.
    assert_allclose(utils.convert_units(r_kpc, 'kpc', 'arcmin', redshift, cosmo),
                    truth, **TOLERANCE)
def modele_determination(bin_center, radial_unit, lens_redshift, mass, profile_type,
                         dict_profile, clmm_cosmo, conc=3.0, delta_mdef=200,
                         zinf=1e10, verbose=True):
    """Compute the model at the position of `bin_center`.

    This is not precise enough (biased) when there are only a few galaxies per bin;
    the mean radius of the galaxies in the bin should be used instead (not yet
    implemented). `conc`, the concentration, can be a float for a fixed value or an
    array of the same size as `mass`, in which case each concentration applies to a
    different mass."""
    ###########################
    # PATCH TO AVOID CLMM CRASHING
    if np.any(mass < 0) or np.any(conc < 0):
        model_inf = np.ones(bin_center.size) * np.inf
        if verbose:
            print("No negative value allowed")
        return model_inf
    ###########################
    else:
        if profile_type not in dict_profile.keys():
            print("Wrong profile type")
        rad_Mpc = utils.convert_units(bin_center, radial_unit, 'Mpc', lens_redshift, clmm_cosmo)

        # Broadcast mass and concentration to arrays of the same length when only one
        # of them is given as an array
        if isinstance(mass, (list, tuple, np.ndarray)) and not isinstance(conc, (list, tuple, np.ndarray)):
            conc = np.ones(len(mass)) * conc
        elif not isinstance(mass, (list, tuple, np.ndarray)) and isinstance(conc, (list, tuple, np.ndarray)):
            mass = np.ones(len(conc)) * mass

        if profile_type != "redshift depth contrast" and profile_type != "density contrast":
            if isinstance(mass, (list, tuple, np.ndarray)):
                model_inf = np.zeros((rad_Mpc.size, len(mass)))
                for i in range(len(mass)):
                    model_inf[:, i] = dict_profile[profile_type]['model_arg'] * \
                        dict_profile[profile_type]['model_func'](
                            rad_Mpc, mdelta=mass[i], cdelta=conc[i],
                            z_cluster=lens_redshift, z_source=zinf, cosmo=clmm_cosmo,
                            delta_mdef=delta_mdef, halo_profile_model='nfw',
                            z_src_model='single_plane')
            else:
                model_inf = dict_profile[profile_type]['model_arg'] * \
                    dict_profile[profile_type]['model_func'](
                        rad_Mpc, mdelta=mass, cdelta=conc,
                        z_cluster=lens_redshift, z_source=zinf, cosmo=clmm_cosmo,
                        delta_mdef=delta_mdef, halo_profile_model='nfw',
                        z_src_model='single_plane')
            model = compute_Bs_mean(lens_redshift, zinf,
                                    dict_profile[profile_type]['source_pdz'],
                                    clmm_cosmo) * model_inf
            return model

        if profile_type == "density contrast":
            func = dict_profile[profile_type]['source_pdz']
            zmin, zmax, nz = 0.001, 5, 10000
            zint = np.linspace(zmin, zmax, nz)
            # Draw source redshifts from the normalised source p(z)
            zrand = np.random.choice(zint, 1000, p=func(zint) / np.sum(func(zint)))
            if isinstance(mass, (list, tuple, np.ndarray)):
                model = np.zeros((rad_Mpc.size, len(mass)))
                for i in range(rad_Mpc.size):
                    for j in range(len(mass)):
                        model[i, j] = np.mean(dict_profile[profile_type]['model_func'](
                            rad_Mpc[i], mdelta=mass[j], cdelta=conc[j],
                            z_cluster=lens_redshift, z_source=zrand, cosmo=clmm_cosmo,
                            delta_mdef=delta_mdef, halo_profile_model='nfw',
                            z_src_model='single_plane'))**(dict_profile[profile_type]['model_arg'] - 1) - 1
            else:
                model = np.zeros((rad_Mpc.size))
                for i in range(rad_Mpc.size):
                    model[i] = np.mean(dict_profile[profile_type]['model_func'](
                        rad_Mpc[i], mdelta=mass, cdelta=conc,
                        z_cluster=lens_redshift, z_source=zrand, cosmo=clmm_cosmo,
                        delta_mdef=delta_mdef, halo_profile_model='nfw',
                        z_src_model='single_plane'))**(dict_profile[profile_type]['model_arg'] - 1) - 1
            return model

        if profile_type == "redshift depth contrast":
            func = dict_profile[profile_type]['source_pdz']
            zmin, zmax, nz = 0.001, 5, 10000
            z = np.linspace(zmin, zmax, nz)
            if isinstance(mass, (list, tuple, np.ndarray)):
                model_z = np.zeros((rad_Mpc.size, len(mass), z.size))
                for i in range(rad_Mpc.size):
                    for j in range(len(mass)):
                        model_z[i, j, :] = dict_profile[profile_type]['model_func'](
                            rad_Mpc[i], mdelta=mass[j], cdelta=conc[j],
                            z_cluster=lens_redshift, z_source=z, cosmo=clmm_cosmo,
                            delta_mdef=delta_mdef, halo_profile_model='nfw',
                            z_src_model='single_plane')**(dict_profile[profile_type]['model_arg'] - 1)
            else:
                model_z = np.zeros((rad_Mpc.size, z.size))
                for i in range(rad_Mpc.size):
                    model_z[i, :] = dict_profile[profile_type]['model_func'](
                        rad_Mpc[i], mdelta=mass, cdelta=conc,
                        z_cluster=lens_redshift, z_source=z, cosmo=clmm_cosmo,
                        delta_mdef=delta_mdef, halo_profile_model='nfw',
                        z_src_model='single_plane')**(dict_profile[profile_type]['model_arg'] - 1)

            ax = len(np.shape(model_z)) - 1
            z_cut = dict_profile[profile_type]['delta_z_cut'] + lens_redshift
            if len(model_z.shape) > 2:
                model_z_cut = model_z[:, :, np.where(z > z_cut)[0]]
            else:
                model_z_cut = model_z[:, np.where(z > z_cut)[0]]
            # Mean source redshift weighted by the model amplitude vs. the unweighted mean
            zmean_mu = (np.sum(z[z > z_cut] * func(z[z > z_cut]) * model_z_cut, axis=ax)
                        / np.sum(func(z[z > z_cut]) * model_z_cut, axis=ax))
            zmean_tot = np.sum(z[z > z_cut] * func(z[z > z_cut])) / np.sum(func(z[z > z_cut]))
            model = zmean_mu / zmean_tot - 1
            return model, model_z
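# --- Illustrative sketch (not part of the library) --------------------------------
# A hypothetical `dict_profile` showing the keys read by modele_determination:
# 'model_func' (a CLMM-style profile function accepting the keyword arguments used
# above), 'model_arg', 'source_pdz' and, for the contrast profiles, 'delta_z_cut'.
# The stand-in model function, the toy p(z) and the key name "tangential shear" are
# assumptions; they only illustrate the expected structure, not the configuration
# actually used elsewhere in the code.
def _example_dict_profile_sketch():
    import numpy as np

    def toy_model_func(r, mdelta, cdelta, z_cluster, z_source, cosmo,
                       delta_mdef=200, halo_profile_model='nfw',
                       z_src_model='single_plane'):
        # stand-in with the same call signature as the profile functions used above
        return np.atleast_1d(1.0 / (1.0 + np.asarray(r)))

    def toy_pdz(z, z0=0.5):
        return z**2 * np.exp(-z / z0)

    return {
        "tangential shear": {             # hypothetical profile type name
            'model_func': toy_model_func,
            'model_arg': 1.0,             # multiplicative factor in the first branch
            'source_pdz': toy_pdz,
        },
        "redshift depth contrast": {
            'model_func': toy_model_func,
            'model_arg': 2.0,             # used as an exponent (model_arg - 1) here
            'source_pdz': toy_pdz,
            'delta_z_cut': 0.1,
        },
    }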