def get_w_interpolation(self, z_vals=None, recompute=False, doInverse=False, save=True):
    """Return an interpolation of the comoving distance w as a function of z.

    The (z, w) table is cached in memory and on disk under
    ``../InterpolationData/comovingDistance``; the cache file is loaded
    whenever no in-memory interpolation exists and recompute is False.

    The ``save`` flag is new (default True keeps the original always-save
    behaviour) and makes this method consistent with get_t_interpolation
    and get_D_interpolation.

    :param z_vals: redshift grid to evaluate on; defaults to the instance
        grid when recomputing and None is given
    :param recompute: re-evaluate self.w instead of using any cache
    :param doInverse: additionally return the inverse interpolation z(w)
    :param save: write freshly computed values to the cache file
    :return: w(z) interpolation, or the pair (w(z), z(w)) if doInverse
    """
    if recompute:
        if z_vals is None:
            z_vals = self.__z_vals
        w_vals = self.w(z_vals)
        if save:
            # np.save appends the .npy suffix automatically
            np.save("../InterpolationData/comovingDistance", (z_vals, w_vals))
    elif self.__wz_interp is None:
        # no in-memory interpolation yet: fall back to the on-disk cache
        z_vals, w_vals = np.load(
            "../InterpolationData/comovingDistance.npy")
    if self.__wz_interp is None or recompute:
        self.__wz_interp = interpolate(z_vals, w_vals)
        self.__zw_interp = interpolate(w_vals, z_vals)
    if doInverse:
        return self.__wz_interp, self.__zw_interp
    else:
        return self.__wz_interp
def get_t_interpolation(self, z_vals=None, recompute=False, doInverse=False, save=True):
    """Return an interpolation of t as a function of redshift z.

    The (z, t) table is cached in memory and on disk under
    ``../InterpolationData/time``; the cache file is read whenever no
    in-memory interpolation exists and recompute is False.

    :param z_vals: redshift grid; falls back to the instance grid when
        recomputing and None is given
    :param recompute: re-evaluate self.t instead of using any cache
    :param doInverse: additionally return the inverse interpolation z(t)
    :param save: write freshly computed values to the cache file
    :return: t(z) interpolation, or the pair (t(z), z(t)) if doInverse
    """
    rebuild = self.__tz_interp is None or recompute
    if recompute:
        if z_vals is None:
            z_vals = self.__z_vals
        t_vals = self.t(z_vals)
        if save:
            np.save("../InterpolationData/time", (z_vals, t_vals))
    elif self.__tz_interp is None:
        z_vals, t_vals = np.load("../InterpolationData/time.npy")
    if rebuild:
        self.__tz_interp = interpolate(z_vals, t_vals)
        # reverse both arrays so the inverse table is ordered in t
        self.__zt_interp = interpolate(np.flip(t_vals, 0), np.flip(z_vals, 0))
    return (self.__tz_interp, self.__zt_interp) if doInverse else self.__tz_interp
def get_g_camb(self):
    """Return the normalized visibility function g(z) computed with CAMB.

    Builds the background visibility on z in [0, 20] from CAMB, glues a
    low-z asymptote below z = 0.1, and normalizes the line-of-sight
    integral of g dw/dz to (1 - exp(-tau)).

    :return: callable g(z)
    """
    import camb
    # dw/dz = 1 / (a0 H0 E(z)): comoving-distance measure for the
    # normalization integral below
    dw_dz = lambda z: 1 / (self.a0 * self.H0 * self.E(z))
    z_vis = np.linspace(0, 20, 5000)
    pars = camb.set_params(H0=100 * self.h, ombh2=self.Omega_Bh2,
                           omch2=self.Omega0 * self.h**2 - self.Omega_Bh2,
                           ns=self.n, tau=self.tau)
    data = camb.get_background(pars)
    # column 0: x_e, column 1: visibility (order of the requested vars)
    back_ev = data.get_background_redshift_evolution(z_vis,
                                                     ['x_e', 'visibility'],
                                                     format='array')
    # below z = 0.1, presumably glueAsymptode replaces the raw interpolation
    # with a (1+z)^2 asymptote matched at z = 0.1 — TODO confirm its contract
    g_interpolation = glueAsymptode(
        interpolate(z_vis, back_ev[:, 1]), min=0.1,
        minAsym=lambda z: interpolate(z_vis, back_ev[:, 1])(0.1) * (1 + z)**2)
    # normalize so that the integral of g dw/dz equals 1 - exp(-tau)
    norm = self.__num.integrate(lambda z: g_interpolation(z) * dw_dz(z),
                                min(z_vis), max(z_vis))
    g_func = lambda z: (1 - np.exp(-self.tau)) / norm * g_interpolation(z)
    return g_func
def get_D_interpolation(self, z_vals=None, recompute=False, save=True):
    """Return an interpolation of the growth factor D as a function of z.

    The (z, D) table is cached in memory and on disk under
    ``../InterpolationData/growthFactor``; the cache file is read whenever
    no in-memory interpolation exists and recompute is False.

    :param z_vals: redshift grid; falls back to the instance grid when
        recomputing and None is given
    :param recompute: re-evaluate self.D instead of using any cache
    :param save: write freshly computed values to the cache file
    :return: D(z) interpolation
    """
    rebuild = self.__dz_interp is None or recompute
    if recompute:
        if z_vals is None:
            z_vals = self.__z_vals
        d_vals = self.D(z_vals)
        if save:
            np.save("../InterpolationData/growthFactor", (z_vals, d_vals))
    elif rebuild:
        z_vals, d_vals = np.load("../InterpolationData/growthFactor.npy")
    if rebuild:
        self.__dz_interp = interpolate(z_vals, d_vals)
    return self.__dz_interp
def run_mpv(log_path, result_path, file_root, physics_params, window, minMass, z_vals, doUnbiased=True, high_res=True, high_res_multi=5):
    """Compute mean pairwise velocities over a list of redshifts and save them.

    Loads the CAMB matter power spectrum from ``file_root``, builds a
    mean_pairwise_velocity model, evaluates it at every z in z_vals, saves
    the result to result_path with np.save, and logs all output to log_path.

    :param log_path: file appended to with all stdout from the run
    :param result_path: destination for np.save of the results
    :param file_root: CAMB output prefix ('<root>_matterpower_out.dat')
    :param physics_params: physics object providing h and P_cdm
    :param window: window function passed through to mean_pairwise_velocity
    :param minMass: lower halo-mass cut (upper cut fixed at 1e16)
    :param z_vals: iterable of redshifts to evaluate
    :param doUnbiased: also compute the unbiased (bias = 1) velocities
    :param high_res, high_res_multi: forwarded to mpv.compute
    :return: (r_vals, v_vals_array[, v_vals_unbiased_array]) on success,
        False when CAMB output is missing or any other error occurs
    """
    with open(log_path, "a+", buffering=1) as log_file:
        with SaveOutput(log_file):
            try:
                startMPV = time.time()
                P_CAMB = np.loadtxt(file_root + '_matterpower_out.dat')
                # convert CAMB columns from h-units: k*h, P/h^3
                p_interp = interpolate(P_CAMB[:, 0] * physics_params.h,
                                       P_CAMB[:, 1] / physics_params.h ** 3,
                                       physics_params.P_cdm,
                                       physics_params.P_cdm)
                gInterpolator = GrowthInterpolation(file_root,
                                                    physics=physics_params)
                mpv = mean_pairwise_velocity(
                    p_interp, gInterpolator,
                    kmin=min(P_CAMB[:, 0] * physics_params.h),
                    kmax=max(P_CAMB[:, 0] * physics_params.h),
                    mMin=minMass, mMax=1e16, physics=physics_params,
                    AXIONS=True, jenkins_mass=True, window_function=window)
                v_vals_array = []
                r_vals = []
                if doUnbiased:
                    v_vals_unbiased_array = []
                for z in z_vals:
                    if doUnbiased:
                        rs, vs, vs_unbiased = mpv.compute(
                            z, do_unbiased=doUnbiased, high_res=high_res,
                            high_res_multi=high_res_multi)
                        v_vals_unbiased_array.append(vs_unbiased)
                    else:
                        rs, vs = mpv.compute(
                            z, do_unbiased=doUnbiased, high_res=high_res,
                            high_res_multi=high_res_multi)
                    v_vals_array.append(vs)
                    # r grid is the same for every z; keep the last one
                    r_vals = rs
                # BUGFIX: these prints used to sit after the return
                # statements and were unreachable
                print("Time taken for mean pairwise velocity computation: {:.2f} s\n".format(time.time() - startMPV))
                print("___\n")
                if doUnbiased:
                    result = (r_vals, v_vals_array, v_vals_unbiased_array)
                else:
                    result = (r_vals, v_vals_array)
                np.save(result_path, result)
                return result
            except OSError as ex:
                # typically: CAMB output file missing
                print(str(ex) + "\n")
                print("Velocity Spectra did not compute because CAMB failed!\n")
                return False
            except Exception as ex:
                # top-level boundary: log and signal failure to the caller
                # BUGFIX: original message had an unfilled '{}' placeholder
                print(str(ex) + "\n")
                print("Something went wrong in run_mpv!\n")
                return False
def read_in_H(self, a_vals, H_vals_normalized):
    """Store an interpolation of the normalized Hubble rate H(a).

    :param a_vals: scale-factor grid
    :param H_vals_normalized: H values normalized so that H(a=1) == 1
    :raises ValueError: if any entry at a == 1.0 differs from 1
    """
    unity_indices = np.where(a_vals == 1.0)[0]
    bad_normalization = np.any(H_vals_normalized[unity_indices] != 1)
    if bad_normalization:
        raise ValueError(
            "The data read in for H does not seem to be normalized!")
    self.__H_interp = interpolate(a_vals, H_vals_normalized)
delta_r = 2.0 r_vals = np.arange(20.0, 180.0, delta_r) / phys.h delta_r /= phys.h zmin = zmin_vals[stage] zmax = zmax_vals[stage] Nz = z_bin_no[stage] z_step = (zmax - zmin) / Nz z_vals = np.linspace(zmin + z_step / 2, zmax - z_step / 2, Nz) print("Getting covariance matrix.") start_cov = time.time() filename = "../../data/axion_frac=0.000_matterpower_out.dat" P_CAMB = np.loadtxt(filename) p_interp = interpolate(P_CAMB[:, 0] * phys.h, P_CAMB[:, 1] / phys.h**3, phys.P_cdm, phys.P_cdm) cov_path = "covariance_matrix_{}_{}.dat".format( ['top-hat', 'gaussian', 'sharp-k', 'no-filter'][window], ["stageII", "stageIII", "stageIV"][stage]) try: cov = Cov.load_covariance(cov_path) except (IOError, OSError) as e: print("Covariance matrix not found. Recomputing...") cov = Cov(p_interp, None, zmin, zmax, Nz, r_vals, delta_r,
def compute(self, z, do_unbiased=False, high_res=False, high_res_multi=2, diagnostic=False):
    """Compute the mean pairwise velocity v(r) at redshift z.

    :type z: float
    :type do_unbiased: bool
    :type high_res: bool
    :type high_res_multi: int
    :param do_unbiased: whether to compute unbiased (halo bias == 1)
        spectra as well
    :param high_res: compute correlation functions with higher resolution
    :param high_res_multi: multiplier by which to increase the number of
        integration points for the correlation functions
    :param diagnostic: return all intermediate quantities as well
    :return: (r_vals, v_vals); (r_vals, v_vals, v_vals_unbiased) when
        do_unbiased; a long diagnostic tuple when diagnostic is True
    """
    a = 1 / (1 + z)  # scale factor
    # mass grid padded by 0.5 dex beyond [mMin, mMax]
    masses = np.logspace(
        np.log10(self.__mMin) - 0.5, np.log10(self.__mMax) + 0.5, 300)
    print("Computing variance of the mass distribution...")
    # sigma^2 of the mass distribution at redshift z ...
    sigma_sq = np.vectorize(
        lambda r: mean_pairwise_velocity.sigma_mass_distribution_sq(
            r, a, self.__power_spectrum, self.__growth, self.__G0,
            kmin_log=np.log(self.__kmin), kmax_log=np.log(self.__kmax),
            window_function=self.__window_function))(
                self.__radius_of_mass(masses))
    # ... and today (a = 1)
    sigma_sq_0 = np.vectorize(
        lambda r: mean_pairwise_velocity.sigma_mass_distribution_sq(
            r, 1.0, self.__power_spectrum, self.__growth, self.__G0,
            kmin_log=np.log(self.__kmin), kmax_log=np.log(self.__kmax),
            window_function=self.__window_function))(
                self.__radius_of_mass(masses))
    # sigma8 (variance at R = 8/h today), printed as a sanity check
    sigma8 = np.sqrt(
        mean_pairwise_velocity.sigma_mass_distribution_sq(
            8 / self.__phys.h, 1, self.__power_spectrum, self.__growth,
            self.__G0, kmin_log=np.log(self.__kmin),
            kmax_log=np.log(self.__kmax),
            window_function=self.__window_function))
    print(sigma8)
    # interpolate sigma^2(M) in log-log space
    sigma_sq_log_interp = interpolate(np.log(masses), np.log(sigma_sq))
    sigma_sq_interp = lambda m: np.exp(sigma_sq_log_interp(np.log(m)))
    sigma_sq_0_log_interp = interpolate(np.log(masses), np.log(sigma_sq_0))
    sigma_sq_0_interp = lambda m: np.exp(sigma_sq_0_log_interp(np.log(m)))
    # choose the halo mass function
    if self.__jenkins_mass:
        mass_function = self.jenkins_mass_function
    else:
        mass_function = self.press_schechter_mass_function
    k_vals = np.logspace(np.log10(self.__kmin), np.log10(self.__kmax), 300)
    print("Computing halo bias moments...")
    # first and second mass-averaged halo bias moments on the k grid
    halo_bias_moments = [np.zeros(len(k_vals)), np.zeros(len(k_vals))]
    halo_bias_moments[0][:] = np.array(
        list(
            map(
                lambda k: self.mass_averaged_halo_bias(
                    k, 1,
                    lambda m: self.halo_bias(m, sigma_sq_interp,
                                             sigma_sq_0_interp),
                    lambda m: mass_function(m, sigma_sq_interp),
                    mMin=self.__mMin, mMax=self.__mMax), k_vals)))
    halo_bias_moments[1][:] = np.array(
        list(
            map(
                lambda k: self.mass_averaged_halo_bias(
                    k, 2,
                    lambda m: self.halo_bias(m, sigma_sq_interp,
                                             sigma_sq_0_interp),
                    lambda m: mass_function(m, sigma_sq_interp),
                    mMin=self.__mMin, mMax=self.__mMax), k_vals)))
    # computation may fail when the numerator is numerically zero;
    # use the last successful value as the limit
    halo_bias_moments[0][np.isnan(halo_bias_moments[0])] = np.nanmin(
        halo_bias_moments[0])
    halo_bias_moments[1][np.isnan(halo_bias_moments[1])] = np.nanmin(
        halo_bias_moments[1])
    # constant extrapolation beyond the k grid (fill_value tuple)
    halo_bias_1_interp = interp1d(k_vals,
                                  halo_bias_moments[0],
                                  fill_value=(halo_bias_moments[0][0],
                                              halo_bias_moments[0][-1]),
                                  bounds_error=False)
    halo_bias_2_interp = interp1d(k_vals,
                                  halo_bias_moments[1],
                                  fill_value=(halo_bias_moments[1][0],
                                              halo_bias_moments[1][-1]),
                                  bounds_error=False)
    print("Computing two-point correlation functions...")
    r_vals = np.linspace(1e-3, 300, 550)
    correlation_func_q2_vals = []
    correlation_func_q1_vals = []
    if do_unbiased:
        correlation_func_q1_vals_unbiased = []
        correlation_func_q2_vals_unbiased = []
    if high_res:
        res_multiplier = high_res_multi
    else:
        res_multiplier = 1.0
    if self.__AXIONS:
        # axion growth function (scale-dependent growth rate)
        f = lambda k: self.__dlogG_dlogA(k, a)
        correlation_func_vals = np.array(
            list(
                map(
                    lambda r: mean_pairwise_velocity.correlation_func(
                        r, a, self.__power_spectrum, self.__growth,
                        self.__G0, halo_bias_1_interp, halo_bias_2_interp,
                        self.__kmin, self.__kmax, f,
                        N=1000 * res_multiplier), r_vals)))
        correlation_func_q1_vals, correlation_func_q2_vals = correlation_func_vals[:, 0], correlation_func_vals[:, 1]
        if do_unbiased:
            # same correlations with both bias moments replaced by unity
            correlation_func_vals_unbiased = np.array(
                list(
                    map(
                        lambda r: mean_pairwise_velocity.correlation_func(
                            r, a, self.__power_spectrum, self.__growth,
                            self.__G0,
                            lambda k: 1, lambda k: 1, self.__kmin,
                            self.__kmax, f,
                            N=1000 * res_multiplier), r_vals)))
            correlation_func_q1_vals_unbiased, correlation_func_q2_vals_unbiased = correlation_func_vals_unbiased[:, 0], correlation_func_vals_unbiased[:, 1]
    else:
        # no axions: correlation_func called without the growth function f
        correlation_func_vals = np.array(
            list(
                map(
                    lambda r: mean_pairwise_velocity.correlation_func(
                        r, a, self.__power_spectrum, self.__growth,
                        self.__G0, halo_bias_1_interp, halo_bias_2_interp,
                        self.__kmin, self.__kmax,
                        N=1000 * res_multiplier), r_vals)))
        correlation_func_q1_vals, correlation_func_q2_vals = correlation_func_vals[:, 0], correlation_func_vals[:, 1]
        if do_unbiased:
            correlation_func_vals_unbiased = np.array(
                list(
                    map(
                        lambda r: mean_pairwise_velocity.correlation_func(
                            r, a, self.__power_spectrum, self.__growth,
                            self.__G0, lambda k: 1, lambda k: 1,
                            self.__kmin, self.__kmax,
                            N=1000 * res_multiplier), r_vals)))
            correlation_func_q1_vals_unbiased, correlation_func_q2_vals_unbiased = correlation_func_vals_unbiased[:, 0], correlation_func_vals_unbiased[:, 1]
    print("Computing volume averaged correlation function ...")
    correlation_func_q1_interp = interp1d(r_vals, correlation_func_q1_vals)
    correlation_func_bar_vals = []
    if do_unbiased:
        correlation_func_q1_interp_unbiased = interp1d(
            r_vals, correlation_func_q1_vals_unbiased)
        correlation_func_bar_vals_unbiased = []
    correlation_func_bar_vals = np.vectorize(
        lambda r: mean_pairwise_velocity.correlation_func_bar(
            r, correlation_func_q1_interp, rmin=min(r_vals)))(r_vals)
    if do_unbiased:
        correlation_func_bar_vals_unbiased = np.vectorize(
            lambda r: mean_pairwise_velocity.correlation_func_bar(
                r, correlation_func_q1_interp_unbiased, rmin=min(r_vals)))(
                    r_vals)
    # v(r) = (2/3) * 100 h E(z) * a * r * xi_bar / (1 + xi_q2);
    # 100 h E(z) is presumably H(z) in km/s/Mpc — TODO confirm units
    if self.__AXIONS:
        # growth-rate factor already folded into the correlation functions
        v_vals = 2 / 3 * 100 * self.__phys.h * self.__phys.E(
            z) * a * r_vals * np.array(correlation_func_bar_vals) / (
                1 + np.array(correlation_func_q2_vals))
        if do_unbiased:
            v_vals_unbiased = 2 / 3 * 100 * self.__phys.h * self.__phys.E(
                z) * a * r_vals * np.array(
                    correlation_func_bar_vals_unbiased) / (
                        1 +
                        np.array(correlation_func_q2_vals_unbiased))
    else:
        # scale-independent growth: dlogG/dlogA evaluated at k = 0
        v_vals = 2 / 3 * self.__dlogG_dlogA(
            0.0, a) * 100 * self.__phys.h * self.__phys.E(
                z) * a * r_vals * np.array(correlation_func_bar_vals) / (
                    1 + np.array(correlation_func_q2_vals))
        if do_unbiased:
            v_vals_unbiased = 2 / 3 * self.__dlogG_dlogA(
                0.0, a) * 100 * self.__phys.h * self.__phys.E(
                    z) * a * r_vals * np.array(
                        correlation_func_bar_vals_unbiased
                    ) / (1 + np.array(correlation_func_q2_vals_unbiased))
    if diagnostic:
        return r_vals, v_vals, correlation_func_q1_vals, correlation_func_q2_vals, correlation_func_bar_vals, k_vals, halo_bias_moments, masses, sigma_sq, sigma_sq_0, mass_function(
            masses, sigma_sq_interp)
    if do_unbiased:
        return r_vals, v_vals, v_vals_unbiased
    else:
        return r_vals, v_vals
def pre_compute(self, z, high_res=False, high_res_multi=2):
    """Precompute and return the interpolations underlying v(r) at redshift z.

    Same pipeline as compute(), but over a fixed wide mass grid and with
    the interpolations themselves returned instead of evaluated arrays.

    :param z: redshift (float)
    :param high_res: compute correlation functions with higher resolution
    :param high_res_multi: multiplier for the number of integration points
    :return: (sigma_sq_interp, sigma_sq_0_interp, halo_bias_1_interp,
        halo_bias_2_interp, correlation_func_q1_interp,
        correlation_func_q2_interp, v_interp)
    """
    a = 1 / (1 + z)  # scale factor
    # fixed wide mass grid, 10^1 .. 10^19
    masses = np.logspace(1, 19, 300)
    print("Computing variance of the mass distribution...")
    # sigma^2 of the mass distribution at redshift z ...
    sigma_sq = np.vectorize(lambda r: self.sigma_mass_distribution_sq(
        r, a, self.__power_spectrum, self.__growth, self.__G0,
        kmin_log=np.log(self.__kmin), kmax_log=np.log(self.__kmax),
        window_function=self.__window_function))(
            self.__radius_of_mass(masses))
    # ... and today (a = 1)
    sigma_sq_0 = np.vectorize(lambda r: self.sigma_mass_distribution_sq(
        r, 1.0, self.__power_spectrum, self.__growth, self.__G0,
        kmin_log=np.log(self.__kmin), kmax_log=np.log(self.__kmax),
        window_function=self.__window_function))(
            self.__radius_of_mass(masses))
    # sigma8 (variance at R = 8/h today), printed as a sanity check
    sigma8 = np.sqrt(
        self.sigma_mass_distribution_sq(
            8 / self.__phys.h, 1, self.__power_spectrum, self.__growth,
            self.__G0, kmin_log=np.log(self.__kmin),
            kmax_log=np.log(self.__kmax),
            window_function=self.__window_function))
    print(sigma8)
    # interpolate sigma^2(M) in log-log space
    sigma_sq_log_interp = interpolate(np.log(masses), np.log(sigma_sq))
    sigma_sq_interp = lambda m: np.exp(sigma_sq_log_interp(np.log(m)))
    sigma_sq_0_log_interp = interpolate(np.log(masses), np.log(sigma_sq_0))
    sigma_sq_0_interp = lambda m: np.exp(sigma_sq_0_log_interp(np.log(m)))
    k_vals = np.logspace(np.log10(self.__kmin), np.log10(self.__kmax), 200)
    # disabled diagnostic plotting block, kept as-is:
    """ plt.figure() m_vals=np.logspace(12, 15, 100) plt.loglog(m_vals, np.array(list(map(lambda m: self.__mass_function(m, sigma_sq_interp), m_vals))*m_vals)) plt.loglog(m_vals, np.array(list(map(lambda m: self.jenkins_mass_function(m, sigma_sq_interp), m_vals))*m_vals)) plt.show() exit()"""
    print("Computing halo bias moments...")
    # first and second mass-averaged halo bias moments on the k grid
    halo_bias_moments = [np.zeros(len(k_vals)), np.zeros(len(k_vals))]
    halo_bias_moments[0] = np.array(
        list(
            map(
                lambda k: self.mass_averaged_halo_bias(
                    k, 1,
                    lambda m: self.halo_bias(m, sigma_sq_interp,
                                             sigma_sq_0_interp),
                    lambda m: self.__mass_function(m, sigma_sq_interp),
                    mMin=self.__mMin, mMax=self.__mMax), k_vals)))
    halo_bias_moments[1] = np.array(
        list(
            map(
                lambda k:
                self.mass_averaged_halo_bias(
                    k, 2,
                    lambda m: self.halo_bias(m, sigma_sq_interp,
                                             sigma_sq_0_interp),
                    lambda m: self.__mass_function(m, sigma_sq_interp),
                    mMin=self.__mMin, mMax=self.__mMax), k_vals)))
    # computation may fail when the numerator is numerically zero;
    # use the last successful value as the limit
    halo_bias_moments[0][np.isnan(halo_bias_moments[0])] = np.nanmin(
        halo_bias_moments[0])
    halo_bias_moments[1][np.isnan(halo_bias_moments[1])] = np.nanmin(
        halo_bias_moments[1])
    # constant extrapolation beyond the k grid (fill_value tuple)
    halo_bias_1_interp = interp1d(k_vals,
                                  halo_bias_moments[0],
                                  fill_value=(halo_bias_moments[0][0],
                                              halo_bias_moments[0][-1]),
                                  bounds_error=False)
    halo_bias_2_interp = interp1d(k_vals,
                                  halo_bias_moments[1],
                                  fill_value=(halo_bias_moments[1][0],
                                              halo_bias_moments[1][-1]),
                                  bounds_error=False)
    # sanity check: bias moments at the k boundaries
    print(halo_bias_1_interp(self.__kmax), halo_bias_1_interp(self.__kmin))
    print("Computing two-point correlation functions...")
    r_vals = np.linspace(1e-3, 300, 400)
    correlation_func_q2_vals = []
    correlation_func_q1_vals = []
    if high_res:
        res_multiplier = high_res_multi
    else:
        res_multiplier = 1.0
    if self.__AXIONS:
        # axion (scale-dependent) growth: pass self.__f into correlation_func
        correlation_func_vals = np.array(
            list(
                map(
                    lambda r: self.correlation_func(r, a,
                                                    self.__power_spectrum,
                                                    self.__growth,
                                                    self.__G0,
                                                    halo_bias_1_interp,
                                                    halo_bias_2_interp,
                                                    min(k_vals),
                                                    max(k_vals),
                                                    self.__f,
                                                    N=500 * res_multiplier),
                    r_vals)))
        correlation_func_q1_vals, correlation_func_q2_vals = correlation_func_vals[:, 0], correlation_func_vals[:, 1]
    else:
        correlation_func_vals = np.array(
            list(
                map(
                    lambda r: self.correlation_func(r, a,
                                                    self.__power_spectrum,
                                                    self.__growth,
                                                    self.__G0,
                                                    halo_bias_1_interp,
                                                    halo_bias_2_interp,
                                                    min(k_vals),
                                                    max(k_vals),
                                                    N=500 * res_multiplier),
                    r_vals)))
        correlation_func_q1_vals, correlation_func_q2_vals = correlation_func_vals[:, 0], correlation_func_vals[:, 1]
    print("Computing volume averaged correlation function ...")
    correlation_func_q1_interp = interp1d(r_vals, correlation_func_q1_vals)
    correlation_func_q2_interp = interp1d(r_vals, correlation_func_q2_vals)
    correlation_func_bar_vals = []
    # volume-averaged correlation function xi_bar(r)
    correlation_func_bar_vals = np.vectorize(
        lambda r: self.correlation_func_bar(
            r, correlation_func_q1_interp, rmin=min(r_vals)))(r_vals)
    # v(r) = (2/3) * 100 h E(z) * a * r * xi_bar / (1 + xi_q2);
    # 100 h E(z) is presumably H(z) in km/s/Mpc — TODO confirm units
    if self.__AXIONS:
        # growth-rate factor already folded into the correlation functions
        v_vals = 2 / 3 * 100 * self.__phys.h * self.__phys.E(
            z) * a * r_vals * np.array(correlation_func_bar_vals) / (
                1 + np.array(correlation_func_q2_vals))
    else:
        # scale-independent growth: factor self.__f(a) pulled out front
        v_vals = 2 / 3 * self.__f(a) * 100 * self.__phys.h * self.__phys.E(
            z) * a * r_vals * np.array(correlation_func_bar_vals) / (
                1 + np.array(correlation_func_q2_vals))
    v_interp = interp1d(r_vals, v_vals)
    return sigma_sq_interp, sigma_sq_0_interp, halo_bias_1_interp, halo_bias_2_interp, correlation_func_q1_interp, correlation_func_q2_interp, v_interp