def test_const_match(w0_test, cosmo_input):
    """test w_matcher gets constant w cosmology correct"""
    cosmo_start = cosmo_input[0]
    wm = cosmo_input[3]
    zs = cosmo_input[5]
    w_use_int = w0_test

    cosmo_match_a = cosmo_start.copy()
    cosmo_match_a['de_model'] = 'jdem'
    cosmo_match_a['w0'] = w_use_int
    cosmo_match_a['wa'] = 0.
    cosmo_match_a['w'] = w_use_int
    for i in range(0, 36):
        cosmo_match_a['ws36_' + str(i).zfill(2)] = w_use_int

    cosmo_match_b = cosmo_match_a.copy()
    cosmo_match_b['de_model'] = 'w0wa'
    cosmo_match_c = cosmo_match_a.copy()
    cosmo_match_c['de_model'] = 'constant_w'

    C_match_a = cp.CosmoPie(cosmology=cosmo_match_a, p_space='jdem')
    C_match_b = cp.CosmoPie(cosmology=cosmo_match_b, p_space='jdem')
    C_match_c = cp.CosmoPie(cosmology=cosmo_match_c, p_space='jdem')

    w_a = wm.match_w(C_match_a, zs)
    w_b = wm.match_w(C_match_b, zs)
    w_c = wm.match_w(C_match_c, zs)
    mult_a = wm.match_growth(C_match_a, zs, w_a)
    mult_b = wm.match_growth(C_match_b, zs, w_b)
    mult_c = wm.match_growth(C_match_c, zs, w_c)

    error_a_1 = np.linalg.norm(w_a - C_match_a.de_object.w_of_z(zs)) / w_a.size
    error_a_2 = np.linalg.norm(w_use_int - w_a) / zs.size
    error_b_1 = np.linalg.norm(w_b - C_match_b.de_object.w_of_z(zs)) / w_b.size
    error_b_2 = np.linalg.norm(w_use_int - w_b) / zs.size
    error_c_1 = np.linalg.norm(w_c - C_match_c.de_object.w_of_z(zs)) / w_c.size
    error_c_2 = np.linalg.norm(w_use_int - w_c) / zs.size

    #should usually do more like 1.e-12 or better if no grid issues
    atol_loc = 1.e-10
    assert error_a_1 < atol_loc
    assert error_a_2 < atol_loc
    assert error_b_1 < atol_loc
    assert error_b_2 < atol_loc
    assert error_c_1 < atol_loc
    assert error_c_2 < atol_loc

    assert np.allclose(w_a, C_match_a.de_object.w_of_z(zs))
    assert np.allclose(w_b, C_match_b.de_object.w_of_z(zs))
    assert np.allclose(w_c, C_match_c.de_object.w_of_z(zs))
    assert np.allclose(w_a, w_use_int)
    assert np.allclose(w_b, w_use_int)
    assert np.allclose(w_c, w_use_int)
    assert np.allclose(mult_a, mult_b)
    assert np.allclose(mult_a, mult_c)
    assert np.allclose(mult_b, mult_c)
def __init__(self, C_fid, wmatcher_params):
    """
    C_fid: the fiducial CosmoPie; its w(z) is irrelevant because it will be ignored
    wmatcher_params:
        w_step: resolution of w grid
        w_min: minimum w to consider
        w_max: maximum w to consider
        a_step: resolution of a grid
        a_min: minimum a to consider
        a_max: maximum a to consider
    """
    #the only use of C_fid appears to be to extract the cosmology
    #the CosmoPie appears to be used to extract G_norm, G, and Ez
    self.C_fid = C_fid
    self.cosmo_fid = self.C_fid.cosmology.copy()
    self.cosmo_fid['w'] = -1.
    self.cosmo_fid['de_model'] = 'constant_w'

    self.w_step = wmatcher_params['w_step']
    self.w_min = wmatcher_params['w_min']
    self.w_max = wmatcher_params['w_max']
    self.ws = np.arange(self.w_min, self.w_max, self.w_step)
    self.n_w = self.ws.size

    self.a_step = wmatcher_params['a_step']
    self.a_min = wmatcher_params['a_min']
    self.a_max = wmatcher_params['a_max']
    self.a_s = np.arange(self.a_max, self.a_min - self.a_step / 10., -self.a_step)
    self.n_a = self.a_s.size
    self.zs = 1. / self.a_s - 1.

    self.cosmos = np.zeros(self.n_w, dtype=object)
    self.integ_Es = np.zeros((self.n_w, self.n_a))
    self.Gs = np.zeros((self.n_w, self.n_a))
    for i in range(0, self.n_w):
        self.cosmos[i] = self.cosmo_fid.copy()
        self.cosmos[i]['w'] = self.ws[i]
        C_i = cp.CosmoPie(cosmology=self.cosmos[i], p_space=self.cosmo_fid['p_space'], silent=True)
        E_as = C_i.Ez(self.zs)
        self.integ_Es[i] = cumtrapz(1. / (self.a_s**2 * E_as)[::-1], self.a_s[::-1], initial=0.)
        self.Gs[i] = C_i.G(self.zs)
    self.G_interp = RectBivariateSpline(self.ws, self.a_s[::-1], self.Gs[:, ::-1], kx=3, ky=3)

    self.ind_switches = np.argmax(np.diff(self.integ_Es, axis=0) < 0, axis=0) + 1
    #a purely numerical issue can make the integral non-monotonic; this loop eliminates the spurious behavior
    for i in range(1, self.n_a):
        if self.ind_switches[i] > 1:
            if self.integ_Es[self.ind_switches[i] - 1, i] - self.integ_Es[0, i] >= 0:
                self.integ_Es[0:(self.ind_switches[i] - 1), i] = self.integ_Es[self.ind_switches[i] - 1, i]
            else:
                raise RuntimeError("Nonmonotonic integral, solution is not unique at " + str(self.a_s[i]))
    self.integ_E_interp = RectBivariateSpline(self.ws, self.a_s[::-1], self.integ_Es, kx=3, ky=3)
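#A minimal usage sketch (added for illustration, not part of the original code).
#It assumes the WMatcher/CosmoPie interfaces exercised by the tests in this
#section: match_w(C, zs) returns the effective constant w at each z and
#match_growth(C, zs, w) returns the corresponding power-spectrum amplitude
#multiplier. Grid parameters and the target w0wa values mirror the cosmo_input
#fixture and test_casarini_match below; module-level np/cp/defaults imports are assumed.
def _example_wmatcher_usage():
    """illustrative only: build a WMatcher and match a w0wa cosmology"""
    params = {'w_step': 0.005, 'w_min': -3.5, 'w_max': 0.1,
              'a_step': 0.001, 'a_min': 0.000916674, 'a_max': 1.00}
    cosmo_fid = defaults.cosmology.copy()
    cosmo_fid['de_model'] = 'constant_w'
    cosmo_fid['w'] = -1.
    C_fid = cp.CosmoPie(cosmology=cosmo_fid, p_space='jdem')
    wm = WMatcher(C_fid, params)

    cosmo_target = cosmo_fid.copy()
    cosmo_target['de_model'] = 'w0wa'
    cosmo_target['w0'] = -1.2
    cosmo_target['wa'] = 0.5
    cosmo_target['w'] = -1.2
    C_target = cp.CosmoPie(cosmology=cosmo_target, p_space='jdem')

    a_grid = np.arange(1.00, 0.001, -0.01)
    zs = 1. / a_grid - 1.
    w_eff = wm.match_w(C_target, zs)                 #effective constant w at each z
    pow_mult = wm.match_growth(C_target, zs, w_eff)  #growth/amplitude multiplier at each z
    return w_eff, pow_mult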
def test_jdem_w0wa_match(w0_test, wa_test, cosmo_input):
    """check w_matcher gets w0wa cosmology compatible with binned w(z) cosmology"""
    cosmo_start = cosmo_input[0]
    wm = cosmo_input[3]
    zs = cosmo_input[5]
    w0_use = w0_test
    wa_use = wa_test

    cosmo_match_w0wa = cosmo_start.copy()
    cosmo_match_jdem = cosmo_start.copy()

    cosmo_match_w0wa['de_model'] = 'w0wa'
    cosmo_match_w0wa['w0'] = w0_use
    cosmo_match_w0wa['wa'] = wa_use
    cosmo_match_w0wa['w'] = w0_use  #+0.9*wa_use

    cosmo_match_jdem['de_model'] = 'jdem'
    cosmo_match_jdem['w'] = w0_use + 0.9 * wa_use
    a_jdem = 1. - 0.025 * np.arange(0, 36)
    for i in range(0, 36):
        cosmo_match_jdem['ws36_' + str(i).zfill(2)] = w0_use + (1. - (a_jdem[i] - 0.025 / 2.)) * wa_use

    C_match_w0wa = cp.CosmoPie(cosmology=cosmo_match_w0wa, p_space='jdem')
    C_match_jdem = cp.CosmoPie(cosmology=cosmo_match_jdem, p_space='jdem')

    w_w0wa = wm.match_w(C_match_w0wa, zs)
    w_jdem = wm.match_w(C_match_jdem, zs)
    mult_w0wa = wm.match_growth(C_match_w0wa, zs, w_w0wa)
    mult_jdem = wm.match_growth(C_match_jdem, zs, w_jdem)

    error_w0wa_jdem = np.linalg.norm(w_w0wa - w_jdem) / w_jdem.size
    error_mult_w0wa_jdem = np.linalg.norm(mult_w0wa - mult_jdem) / w_jdem.size
    assert error_w0wa_jdem < 4.e-4
    assert error_mult_w0wa_jdem < 1.e-3
def test_change_params():
    """test the rotation functions work"""
    C_fid = cp.CosmoPie(defaults.cosmology.copy(), 'jdem')
    f_set_in1 = np.zeros(3, dtype=object)
    for i in range(0, 3):
        f_set1 = np.random.rand(6, 6)
        f_set1 = np.dot(f_set1.T, f_set1)
        f_set1 = f_set1 + np.diag(np.random.rand(6))
        f_set_in1[i] = f_set1
    f_set_in2 = rotate_jdem_to_lihu(f_set_in1, C_fid)
    f_set_in3 = rotate_lihu_to_jdem(f_set_in2, C_fid)
    f_set_in4 = rotate_jdem_to_lihu(f_set_in3, C_fid)
    for i in range(0, 3):
        assert np.allclose(f_set_in1[i], f_set_in3[i])
        assert np.allclose(f_set_in2[i], f_set_in4[i])
def test_vary_1_parameter(param_set, param_vary):
    """test that varying one parameter for halofit and linear matches expectations from camb"""
    atol_rel = 1.e-8
    rtol = 3.e-3
    eps = 0.1

    camb_params = defaults.camb_params.copy()
    camb_params['force_sigma8'] = param_set[0]
    camb_params['leave_h'] = param_set[1]
    power_params = defaults.power_params.copy()
    power_params.camb = camb_params
    cosmo_fid = defaults.cosmology.copy()

    if param_set[2] == 'halofit':
        nonlinear_model = camb.model.NonLinear_both
    else:
        nonlinear_model = camb.model.NonLinear_none

    if isinstance(cosmo_fid[param_vary], float):
        cosmo_pert = cosmo_fid.copy()
        cosmo_pert[param_vary] *= (1. + eps)
        print(cosmo_pert['Omegar'])
        C_pert = cp.CosmoPie(cosmo_pert, p_space='jdem')
        P_pert = mps.MatterPower(C_pert, power_params)
        k_pert = P_pert.k
        C_pert.k = k_pert
        P_res1 = P_pert.get_matter_power(np.array([0.]), pmodel=param_set[2])[:, 0]
        k_res2, P_res2 = camb_pow(C_pert.cosmology, zbar=np.array([0.]), camb_params=camb_params, nonlinear_model=nonlinear_model)
        atol_power = np.max(P_res1) * atol_rel
        atol_k = np.max(k_pert) * atol_rel
        assert np.allclose(k_res2, k_pert, atol=atol_k, rtol=rtol)
        assert np.allclose(P_res1, P_res2, atol=atol_power, rtol=rtol)
def cosmo_input():
    """get cosmology for test"""
    cosmo_start = defaults.cosmology.copy()
    cosmo_start['w'] = -1
    cosmo_start['de_model'] = 'constant_w'
    C_start = cp.CosmoPie(cosmology=cosmo_start, p_space='jdem')
    #base set
    #params = {'w_step':0.005,'w_min':-3.50,'w_max':0.1,'a_step':0.001,'a_min':0.000916674,'a_max':1.00}
    #modified set
    params = {
        'w_step': 0.005,
        'w_min': -3.5,
        'w_max': 0.1,
        'a_step': 0.001,
        'a_min': 0.000916674,
        'a_max': 1.00
    }
    wm = WMatcher(C_start, params)
    a_grid = np.arange(1.00, 0.001, -0.01)
    zs = 1. / a_grid - 1.
    return [cosmo_start, C_start, params, wm, a_grid, zs]
def test_casarini_match(cosmo_input):
    """check code matches results extracted from a figure"""
    cosmo_start = cosmo_input[0]
    wm = cosmo_input[3]
    #zs = cosmo_input[5]

    #should match arXiv:1601.07230v3 figure 2
    cosmo_match_a = cosmo_start.copy()
    cosmo_match_a['de_model'] = 'w0wa'
    cosmo_match_a['w0'] = -1.2
    cosmo_match_a['wa'] = 0.5
    cosmo_match_a['w'] = -1.2

    cosmo_match_b = cosmo_match_a.copy()
    cosmo_match_b['w0'] = -0.6
    cosmo_match_b['wa'] = -1.5
    cosmo_match_b['w'] = -0.6

    C_match_a = cp.CosmoPie(cosmology=cosmo_match_a, p_space='jdem')
    C_match_b = cp.CosmoPie(cosmology=cosmo_match_b, p_space='jdem')

    weff_2 = np.loadtxt('test_inputs/wmatch/weff_2.dat')
    ws_a_pred = weff_2[:, 1]
    zs_a_pred = weff_2[:, 0]
    weff_1 = np.loadtxt('test_inputs/wmatch/weff_1.dat')
    ws_b_pred = weff_1[:, 1]
    zs_b_pred = weff_1[:, 0]

    z_max_a = np.max(zs_a_pred)
    z_max_b = np.max(zs_b_pred)
    z_min_a = np.min(zs_a_pred)
    z_min_b = np.min(zs_b_pred)
    zs_a = np.arange(z_min_a, z_max_a, 0.05)
    zs_b = np.arange(z_min_b, z_max_b, 0.05)

    ws_a_interp = InterpolatedUnivariateSpline(zs_a_pred, ws_a_pred, k=1, ext=2)(zs_a)
    ws_b_interp = InterpolatedUnivariateSpline(zs_b_pred, ws_b_pred, k=1, ext=2)(zs_b)

    ws_a = wm.match_w(C_match_a, zs_a)
    ws_b = wm.match_w(C_match_b, zs_b)

    mse_w_a = np.linalg.norm((ws_a - ws_a_interp) / ws_a_interp) / ws_a_interp.size
    mse_w_b = np.linalg.norm((ws_b - ws_b_interp) / ws_b_interp) / ws_b_interp.size
    print(mse_w_a, mse_w_b)
    assert mse_w_a < 7.e-4
    assert mse_w_b < 7.e-3

    sigma_a_in = np.loadtxt('test_inputs/wmatch/sigma_2.dat')
    sigma_b_in = np.loadtxt('test_inputs/wmatch/sigma_1.dat')
    zs_a_pred_2 = sigma_a_in[:, 0]
    zs_b_pred_2 = sigma_b_in[:, 0]
    z_max_a_2 = np.max(zs_a_pred_2)
    z_max_b_2 = np.max(zs_b_pred_2)
    z_min_a_2 = np.min(zs_a_pred_2)
    z_min_b_2 = np.min(zs_b_pred_2)
    zs_a_2 = np.arange(z_min_a_2, z_max_a_2, 0.05)
    zs_b_2 = np.arange(z_min_b_2, z_max_b_2, 0.05)

    pow_mults_a = wm.match_growth(C_match_a, zs_a_2, ws_a)
    pow_mults_b = wm.match_growth(C_match_b, zs_b_2, ws_b)
    sigma_a_interp = InterpolatedUnivariateSpline(sigma_a_in[:, 0], sigma_a_in[:, 1], k=3, ext=2)(zs_a_2)
    sigma_b_interp = InterpolatedUnivariateSpline(sigma_b_in[:, 0], sigma_b_in[:, 1], k=3, ext=2)(zs_b_2)
    sigma_a = np.sqrt(pow_mults_a) * 0.83
    sigma_b = np.sqrt(pow_mults_b) * 0.83

    mse_sigma_a = np.linalg.norm((sigma_a - sigma_a_interp) / sigma_a_interp) / sigma_a_interp.size
    mse_sigma_b = np.linalg.norm((sigma_b - sigma_b_interp) / sigma_b_interp) / sigma_b_interp.size
    print(mse_sigma_a, mse_sigma_b)
    assert mse_sigma_a < 5.e-4
    assert mse_sigma_b < 5.e-4
if __name__ == '__main__':
    do_test_battery = False
    do_other_tests = True
    if do_test_battery:
        pytest.cmdline.main(['w_matcher_tests.py'])
    if do_other_tests:
        cosmo_start = defaults.cosmology.copy()
        cosmo_start['w'] = -1
        cosmo_start['de_model'] = 'constant_w'
        C_start = cp.CosmoPie(cosmology=cosmo_start, p_space='jdem')
        params = {
            'w_step': 0.01,
            'w_min': -3.50,
            'w_max': 0.1,
            'a_step': 0.001,
            'a_min': 0.000916674,
            'a_max': 1.00
        }
        do_convergence_test_w0wa = False
        do_convergence_test_jdem = False
        do_match_casarini = True
        do_plots = True
        fails = 0
def test_hmf(): """run various hmf tests as a block""" cosmo_fid = defaults.cosmology.copy() cosmo_fid['h'] = 0.65 cosmo_fid['Omegamh2'] = 0.148 cosmo_fid['Omegabh2'] = 0.02 cosmo_fid['OmegaLh2'] = 0.65 * 0.65**2 cosmo_fid['sigma8'] = 0.92 cosmo_fid['ns'] = 1. cosmo_fid = cp.add_derived_pars(cosmo_fid, p_space='basic') power_params = defaults.power_params.copy() power_params.camb['force_sigma8'] = True power_params.camb['npoints'] = 1000 power_params.camb['maxkh'] = 20000. power_params.camb['kmax'] = 100. #0.899999976158 power_params.camb['accuracy'] = 2. C = cp.CosmoPie(cosmo_fid, 'basic') P = mps.MatterPower(C, power_params) C.set_power(P) params = defaults.hmf_params.copy() params['z_min'] = 0.0 params['z_max'] = 5.0 params['log10_min_mass'] = 6 params['log10_max_mass'] = 18.63 params['n_grid'] = 1264 params['n_z'] = 5. / 0.5 hmf = ST_hmf(C, params=params) Ms = hmf.mass_grid do_sanity_checks = True if do_sanity_checks: print("sanity") #some sanity checks zs = np.arange(0.001, 5., 0.1) Gs = C.G_norm(zs) #arbitrary input M(z) cutoff m_z_in = np.exp(np.linspace(np.log(np.min(Ms)), np.log(1.e15), zs.size)) m_z_in[0] = Ms[0] #check normalized to unity (all dark matter is in some halo) # f_norm_residual = trapz(hmf.f_sigma(Ms,Gs).T,np.log(hmf.sigma[:-1:]**-1),axis=1) #assert np.allclose(np.zeros(Gs.size)+1.,norm_residual) _, _, dndM = hmf.mass_func(Ms, Gs) n_avgs_alt = np.zeros(zs.size) bias_n_avgs_alt = np.zeros(zs.size) dndM_G_alt = np.zeros((Ms.size, zs.size)) for i in range(0, zs.size): n_avgs_alt[i] = hmf.n_avg(m_z_in[i], zs[i]) bias_n_avgs_alt[i] = hmf.bias_n_avg(m_z_in[i], zs[i]) dndM_G_alt[:, i] = hmf.dndM_G(Ms, Gs[i]) dndM_G = hmf.dndM_G(Ms, Gs) #consistency checks for vector method assert np.allclose(dndM_G, dndM_G_alt) n_avgs = hmf.n_avg(m_z_in, zs) bias_n_avgs = hmf.bias_n_avg(m_z_in, zs) assert np.allclose(n_avgs, n_avgs_alt) assert np.all(n_avgs >= 0.) assert np.allclose(bias_n_avgs, bias_n_avgs_alt) #check integrating dn/dM over all M actually gives n assert np.allclose(trapz(dndM.T, Ms, axis=1), hmf.n_avg(np.zeros(zs.size) + Ms[0], zs)) test_xs = np.outer(hmf.nu_of_M(Ms), 1. / Gs**2) test_integrand = hmf.f_sigma(Ms, Gs) * hmf.bias_G( Ms, Gs, hmf.bias_norm(Gs)) #not sure why, but this is true (if ignore h) #test_term = np.trapz(test_integrand,test_xs,axis=0)*hmf.f_norm(Gs) #assert np.allclose(1.,test_term,rtol=1e-3) b_norm_residual = np.trapz(test_integrand, test_xs, axis=0) assert np.allclose(np.zeros(zs.size) + 1., b_norm_residual) #b_norm_residual_alt = np.trapz(hmf.f_sigma(Ms,Gs)*hmf.bias_G(Ms,Gs),test_xs,axis=0) b_norm_residual_alt2 = np.trapz(hmf.f_sigma(Ms, Gs) * hmf.bias_G(Ms, Gs, 1.), test_xs, axis=0) #check including norm_in behaves appropriately does not matter #assert np.allclose(b_norm_residual_alt,b_norm_residual_alt2) assert np.allclose(b_norm_residual, b_norm_residual_alt2 / hmf.bias_norm(Gs)) #hcekc all the matter in a halo if include normalization factor assert np.allclose( np.trapz(hmf.f_sigma(Ms, 1., 1.), np.log(1. / hmf.sigma[:-1:])), hmf.f_norm(1.)) #sanity check M_star assert np.round(np.log10(hmf.M_star())) == 13. assert np.isclose(C.sigma_r(0., 8. / C.h), cosmo_fid['sigma8'], rtol=1.e-3) assert np.isclose(C.sigma_r(1., 8. / C.h), C.G_norm(1.) * cosmo_fid['sigma8'], rtol=1.e-3) #M_restrict = 10**np.linspace(np.log10(4.*10**13),16,100) #n_restrict = hmf.n_avg(M_restrict,0.) nu = hmf.nu_of_M(Ms) bias_nu = hmf.bias_nu(nu) bias_G = hmf.bias_G(Ms, 1.) bias_z = hmf.bias(Ms, 0.) 
assert np.allclose(bias_nu, bias_G) assert np.allclose(bias_nu, bias_z) assert np.allclose(bias_G, bias_z) assert np.all(bias_nu >= 0) # dndm = hmf.dndM_G(Ms,1.) # bias_avg = np.trapz(bias_nu*dndm,Ms) n_avg2 = hmf.n_avg(Ms, 0.) assert np.all(n_avg2 >= 0.) # bias_n_avg1 = bias_nu*n_avg2 #bias_n_avg2 = hmf.bias_n_avg(Ms) # integ_pred = np.trapz(hmf.f_sigma(Ms,1.,1.),np.log(1./hmf.sigma[:-1:])) # integ_res = np.trapz(Ms*hmf.dndM_G(Ms,1.),Ms)/C.rho_bar(0.) #assert np.isclose(integ_res,integ_pred,rtol=1.e-2) #xs n#np.linspace(np.log(np.sqrt(nu[0])),np.log(nu[-1]),10000) #xs = np.exp(np.linspace(np.log(np.sqrt(nu[0]),np.log(1.e36),10000)) #F = -cumtrapz(1./np.sqrt(2.*np.pi)*np.exp(-xs[::-1]**2/2.),xs[::-1])[::-1] #cons_res = np.trapz(hmf.dndM_G(Ms,1.)*Ms*hmf.bias(Ms,1.),Ms)/C.rho_bar(0.) assert np.isclose(np.trapz(bias_nu * hmf.f_nu(nu), np.sqrt(nu)), 1., rtol=1.e-1) #assert np.isclose(np.trapz(hmf.f_nu(nu)/np.sqrt(nu),np.sqrt(nu)),1.,rtol=2.e-1) #bias_avg = np.trapz(hmf.dndM_G(Ms,1.)*hmf.bias_G(Ms,1.),Ms)/np.trapz(hmf.dndM_G(Ms,1.),Ms) #check for various edge cases of n_avg and bias_n_avg z2s = np.array([0., 1.]) m2s = np.array([1.e8, 1.e9]) n1 = hmf.n_avg(m2s[0], z2s[0]) n2 = hmf.n_avg(m2s[0], z2s) n3 = hmf.n_avg(m2s, z2s[0]) n4 = hmf.n_avg(m2s, z2s) n5 = hmf.n_avg(m2s[1], z2s[1]) n6 = hmf.n_avg(m2s[1], z2s[0]) n7 = hmf.n_avg(m2s[0], z2s[1]) assert np.isclose(n1, n2[0]) assert np.isclose(n1, n3[0]) assert np.isclose(n1, n4[0]) assert np.isclose(n5, n4[1]) assert np.isclose(n2[1], n7) assert np.isclose(n3[1], n6) bn1 = hmf.bias_n_avg(m2s[0], z2s[0]) bn2 = hmf.bias_n_avg(m2s[0], z2s) bn3 = hmf.bias_n_avg(m2s, z2s[0]) bn4 = hmf.bias_n_avg(m2s, z2s) bn5 = hmf.bias_n_avg(m2s[1], z2s[1]) bn6 = hmf.bias_n_avg(m2s[1], z2s[0]) bn7 = hmf.bias_n_avg(m2s[0], z2s[1]) assert np.isclose(bn1, bn2[0]) assert np.isclose(bn1, bn3[0]) assert np.isclose(bn1, bn4[0]) assert np.isclose(bn5, bn4[1]) assert np.isclose(bn2[1], bn7) assert np.isclose(bn3[1], bn6) n_avgs_0 = hmf.n_avg(hmf.mass_grid, 0.) n_avgs_1 = np.zeros(hmf.mass_grid.size) for itr in range(0, hmf.mass_grid.size): n_avgs_1[itr] = hmf.n_avg(hmf.mass_grid[itr], 0.) assert np.allclose(n_avgs_0, n_avgs_1) bn_avgs_0 = hmf.bias_n_avg(hmf.mass_grid, 0.) bn_avgs_1 = np.zeros(hmf.mass_grid.size) for itr in range(0, hmf.mass_grid.size): bn_avgs_1[itr] = hmf.bias_n_avg(hmf.mass_grid[itr], 0.) assert np.allclose(bn_avgs_0, bn_avgs_1) print("PASS: sanity passed") do_plot_test2 = True if do_plot_test2: #Ms = 10**(np.linspace(11,14,500)) # dndM_G=hmf.dndM_G(Ms,Gs) do_jenkins_comp = True #should look like dotted line in figure 3 of arXiv:astro-ph/0005260 if do_jenkins_comp: print("jenkins_comp") zs = np.array([0.]) Gs = C.G_norm(zs) dndM_G = hmf.f_sigma(Ms, Gs) input_dndm = np.loadtxt('test_inputs/hmf/dig_jenkins_fig3.csv', delimiter=',') res_j = np.exp(input_dndm[:, 1]) res_i = InterpolatedUnivariateSpline(1. 
/ hmf.sigma[:-1:], dndM_G, k=3, ext=2)(np.exp(input_dndm[:, 0])) assert np.all(np.abs((res_j / res_i - 1.)[0:19]) < 0.03) print("PASS: jenkins_comp") do_sheth_bias_comp1 = True #agrees pretty well, maybe as well as it should #compare to rightmost arXiv:astro-ph/9901122 figure 3 if do_sheth_bias_comp1: print("sheth_bias_comp1") cosmo_fid2 = cosmo_fid.copy() cosmo_fid2['Omegamh2'] = 0.3 * 0.7**2 cosmo_fid2['Omegabh2'] = 0.05 * 0.7**2 cosmo_fid2['OmegaLh2'] = 0.7 * 0.7**2 cosmo_fid2['sigma8'] = 0.9 cosmo_fid2['h'] = 0.7 cosmo_fid2['ns'] = 1.0 cosmo_fid2 = cp.add_derived_pars(cosmo_fid2, p_space='basic') C2 = cp.CosmoPie(cosmo_fid2, 'basic') P2 = mps.MatterPower(C2, power_params) C2.set_power(P2) params2 = params.copy() hmf2 = ST_hmf(C2, params=params2) zs = np.array([0., 1., 2., 4.]) Gs = C2.G_norm(zs) input_bias = np.loadtxt('test_inputs/hmf/dig_sheth_fig3.csv', delimiter=',') bias = hmf2.bias_G(10**input_bias[:, 0], Gs) error = np.abs(1. - 10**input_bias[:, 1:5] / bias**2) error_max = np.array([0.06, 0.4, 0.2, 0.4]) assert np.all(np.max(error, axis=0) < error_max) print("PASS: sheth_bias_comp1") do_sheth_bias_comp2 = True #agrees well #compare to bottom arXiv:astro-ph/9901122 figure 4 if do_sheth_bias_comp2: print("sheth_bias_comp2") cosmo_fid2 = cosmo_fid.copy() cosmo_fid2['Omegamh2'] = 0.3 * 0.7**2 cosmo_fid2['Omegabh2'] = 0.05 * 0.7**2 cosmo_fid2['OmegaLh2'] = 0.7 * 0.7**2 cosmo_fid2['sigma8'] = 0.9 cosmo_fid2['h'] = 0.7 cosmo_fid2['ns'] = 1.0 cosmo_fid2 = cp.add_derived_pars(cosmo_fid2, p_space='basic') C2 = cp.CosmoPie(cosmo_fid2, 'basic') P2 = mps.MatterPower(C2, power_params) C2.set_power(P2) C2.k = P2.k params2 = params.copy() hmf2 = ST_hmf(C2, params=params2) input_bias = np.loadtxt('test_inputs/hmf/dig_sheth2.csv', delimiter=',') bias = hmf2.bias_nu(10**input_bias[:, 0]) observable = 1. + (bias - 1.) * hmf2.delta_c error = np.abs(1. - 10**input_bias[:, 1] / observable) error_max = 0.07 assert np.all(np.max(error) < error_max) print("PASS: sheth_bias_comp2") #should agree, does #match fig 2 arXiv:astro-ph/0203169 do_hu_bias_comp2 = True if do_hu_bias_comp2: print("hu_bias_comp2") zs = np.array([0.]) Gs = C.G_norm(zs) bias = hmf.bias_G(Ms, Gs, 1.)[:, 0] #why 1 #maybe should have k pivot 0.01 input_bias = np.loadtxt('test_inputs/hmf/dig_hu_bias2.csv', delimiter=',') # masses = 10**np.linspace(np.log10(10**11),np.log10(10**16),100) bias_hu_i = 10**input_bias[:, 1] #InterpolatedUnivariateSpline(10**input_bias[:,0],10**input_bias[:,1])(masses) bias_i = hmf.bias_G(10**input_bias[:, 0], Gs, 1.)[:, 0] assert np.max(np.abs(bias_hu_i / bias_i - 1.)) < 0.06 print("PASS: hu_bias_comp2 passed") #should agree, does #match fig 1 arXiv:astro-ph/0203169 do_hu_sigma_comp = True if do_hu_sigma_comp: print("hu_sigma_comp") assert np.isclose(hmf.M_star(), 1.2 * 10**13, rtol=1.e-1) input_sigma = np.loadtxt('test_inputs/hmf/dig_hu_sigma.csv', delimiter=',') res_sigma = InterpolatedUnivariateSpline(hmf.R, hmf.sigma, k=3, ext=2)(10**input_sigma[:, 0]) assert np.all( np.abs((res_sigma / 10**input_sigma[:, 1])[4::] - 1.) < 0.02) print("PASS: hu_sigma_comp")
def test_super_survey(de_model): """run some eigenvalue tests of SuperSurvey pipeline""" t1 = time() z_max = 1.35 l_max = 50 camb_params = defaults.camb_params.copy() camb_params['force_sigma8'] = False camb_params['kmax'] = 5. camb_params['npoints'] = 1000 cosmo_fid = defaults.cosmology_jdem.copy() cosmo_fid['w'] = -1. cosmo_fid['w0'] = cosmo_fid['w'] cosmo_fid['wa'] = 0. cosmo_fid['de_model'] = de_model if cosmo_fid['de_model'] == 'jdem': for i in range(0, 36): cosmo_fid['ws36_' + str(i).zfill(2)] = cosmo_fid['w'] C = cp.CosmoPie(cosmology=cosmo_fid, p_space='jdem') power_params = defaults.power_params.copy() power_params.camb = camb_params P = mps.MatterPower(C, power_params) C.set_power(P) r_max = C.D_comov(z_max) print('this is r max and l_max', r_max, l_max) phi1s = np.array([ -19., -19., -11., -11., 7., 25., 25., 43., 43., 50., 50., 50., 24., 5., 5., 7., 7., -19. ]) * np.pi / 180. theta1s = np.array([ -50., -35., -35., -19., -19., -19., -15.8, -15.8, -40., -40., -55., -78., -78., -78., -55., -55., -50., -50. ]) * np.pi / 180. + np.pi / 2. phi_in1 = 7. / 180. * np.pi theta_in1 = -35. * np.pi / 180. + np.pi / 2. theta0 = np.pi / 4. theta1 = 3. * np.pi / 4. phi0 = 0. phi1 = 3.074096023740458 phi2 = np.pi / 3. phi3 = phi2 + (phi1 - phi0) theta2s = np.array([theta0, theta1, theta1, theta0, theta0]) phi2s = np.array([phi0, phi0, phi1, phi1, phi0]) - phi1 / 2. theta_in2 = np.pi / 2. phi_in2 = 0. res_choose = 7 Theta1 = [theta0, theta1] Phi1 = [phi0, phi1] Theta2 = [theta0, theta1] Phi2 = [phi2, phi3] zs = np.array([0.2, 0.43, .63, 0.9, 1.3]) z_fine = np.linspace(0.001, np.max(zs), 2000) #use linspace use_poly = True use_poly2 = True if use_poly: if use_poly2: geo1 = PolygonGeo(zs, theta1s, phi1s, theta_in1, phi_in1, C, z_fine, l_max, defaults.polygon_params.copy()) geo2 = PolygonGeo(zs, theta2s, phi2s, theta_in2, phi_in2, C, z_fine, l_max, defaults.polygon_params.copy()) else: geo1 = PolygonPixelGeo(zs, theta1s, phi1s, theta_in1, phi_in1, C, z_fine, l_max, res_choose) geo2 = PolygonPixelGeo(zs, theta2s, phi2s, theta_in2, phi_in2, C, z_fine, l_max, res_choose) else: geo1 = RectGeo(zs, Theta1, Phi1, C, z_fine) geo2 = RectGeo(zs, Theta2, Phi2, C, z_fine) len_params = defaults.lensing_params.copy() len_params['pmodel'] = 'halofit' lenless_defaults = defaults.sw_survey_params.copy() lenless_defaults['needs_lensing'] = False if cosmo_fid['de_model'] == 'w0wa': cosmo_par_list = np.array( ['ns', 'Omegamh2', 'Omegabh2', 'OmegaLh2', 'LogAs', 'w0', 'wa']) cosmo_par_eps = np.array( [0.002, 0.0005, 0.0001, 0.0005, 0.1, 0.01, 0.07]) elif cosmo_fid['de_model'] == 'constant_w': cosmo_par_list = np.array( ['ns', 'Omegamh2', 'Omegabh2', 'OmegaLh2', 'LogAs', 'w']) cosmo_par_eps = np.array([0.002, 0.0005, 0.0001, 0.0005, 0.1, 0.01]) elif cosmo_fid['de_model'] == 'jdem': cosmo_par_list = ['ns', 'Omegamh2', 'Omegabh2', 'OmegaLh2', 'LogAs'] cosmo_par_list.extend(cp.JDEM_LIST) cosmo_par_list = np.array(cosmo_par_list, dtype=object) cosmo_par_eps = np.full(41, 0.5) cosmo_par_eps[0:5] = np.array([0.002, 0.0005, 0.0001, 0.0005, 0.1]) else: raise ValueError('not prepared to handle ' + str(cosmo_fid['de_model'])) #note that currently (poorly implemented derivative) in jdem, #OmegaLh2 and LogAs are both almost completely unconstrained but nondegenerate, #while in basic, h and sigma8 are not constrained but are almost completely degenerate nz_params = defaults.nz_params_wfirst_lens.copy() from nz_wfirst import NZWFirst nz_matcher = NZWFirst(nz_params) len_params['smodel'] = 'nzmatcher' sw_params = 
defaults.sw_survey_params.copy() obs_list = defaults.sw_observable_list.copy() survey_1 = SWSurvey(geo1, 's1', C, sw_params, cosmo_par_list, cosmo_par_eps, obs_list, len_params, nz_matcher) surveys_sw = np.array([survey_1]) geos = np.array([geo1, geo2]) k_cut = 0.005 basis = SphBasisK(r_max, C, k_cut, defaults.basis_params.copy(), l_ceil=100) lw_param_list = defaults.lw_param_list.copy() lw_observable_list = defaults.lw_observable_list.copy() survey_3 = LWSurvey(geos, 'lw_survey1', basis, C, defaults.lw_survey_params.copy(), observable_list=lw_observable_list, param_list=lw_param_list) surveys_lw = np.array([survey_3]) print('main: this is r_max: ' + str(r_max)) SS = SuperSurvey(surveys_sw, surveys_lw, basis, C, defaults.prior_fisher_params.copy(), get_a=False, do_unmitigated=True, do_mitigated=True, include_sw=True) t2 = time() print("main: total run time " + str(t2 - t1) + " s") mit_eigs_sw = SS.eig_set[0, 1] no_mit_eigs_sw = SS.eig_set[0, 0] mit_eigs_par = SS.eig_set[1, 1] no_mit_eigs_par = SS.eig_set[1, 0] #TODO check eigenvalue interlace for this projection SS.print_standard_analysis() # print("main: unmitigated sw lambda1,2: "+str(no_mit_eigs_sw[0][-1])+","+str(no_mit_eigs_sw[0][-2])) # print("main: mitigated sw lambda1,2: "+str(mit_eigs_sw[0][-1])+","+str(mit_eigs_sw[0][-2])) # print("main: n sw mit lambda>1.00000001: "+str(np.sum(np.abs(mit_eigs_sw[0])>1.00000001))) # print("main: n sw no mit lambda>1.00000001: "+str(np.sum(np.abs(no_mit_eigs_sw[0])>1.00000001))) # print("main: unmitigated par lambda1,2: "+str(no_mit_eigs_par[0][-1])+","+str(no_mit_eigs_par[0][-2])) # print("main: mitigated par lambda1,2: "+str(mit_eigs_par[0][-1])+","+str(mit_eigs_par[0][-2])) # print("main: n par mit lambda>1.00000001: "+str(np.sum(np.abs(mit_eigs_par[0])>1.00000001))) # print("main: n par no mit lambda>1.00000001: "+str(np.sum(np.abs(no_mit_eigs_par[0])>1.00000001))) v_no_mit_par = np.dot(SS.f_set_nopriors[0][2].get_cov_cholesky(), no_mit_eigs_par[1]) v_mit_par = np.dot(SS.f_set_nopriors[0][2].get_cov_cholesky(), mit_eigs_par[1]) v_no_mit_sw = np.dot(SS.f_set_nopriors[0][1].get_cov_cholesky(), no_mit_eigs_sw[1]) v_mit_sw = np.dot(SS.f_set_nopriors[0][1].get_cov_cholesky(), mit_eigs_sw[1]) test_v = True v_test_fails = 0 if test_v: m_mat_no_mit_par = np.identity(mit_eigs_par[0].size) + np.dot( SS.f_set_nopriors[1][2].get_covar(), SS.f_set_nopriors[0][2].get_fisher()) m_mat_mit_par = np.identity(mit_eigs_par[0].size) + np.dot( SS.f_set_nopriors[2][2].get_covar(), SS.f_set_nopriors[0][2].get_fisher()) if not np.allclose( np.dot(m_mat_no_mit_par, v_no_mit_par) / (1. + no_mit_eigs_par[0]), v_no_mit_par): v_test_fails += 1 warn('some no mit eig vectors may be bad') if not np.allclose( np.dot(m_mat_mit_par, v_mit_par) / (1. + mit_eigs_par[0]), v_mit_par): v_test_fails += 1 warn('some mit eig vectors may be bad') if v_test_fails == 0: print("PASS: eigenvector decomposition checks passed") else: raise RuntimeError('FAIL: ' + str(v_test_fails) + ' eigenvector decomposition checks failed') get_hold_mats = False if get_hold_mats: no_prior_hold = SS.f_set[0][2].get_fisher() if C.de_model == 'jdem': no_prior_project = prior_fisher.project_w0wa( no_prior_hold, defaults.prior_fisher_params.copy(), prior_fisher.JDEM_LABELS) print('main: r diffs', np.diff(geo1.rs)) print('main: theta width', (geo1.rs[1] + geo1.rs[0]) / 2. * (Theta1[1] - Theta1[0])) print('main: phi width', (geo1.rs[1] + geo1.rs[0]) / 2. 
* (Phi1[1] - Phi1[0]) * np.sin( (Theta1[1] + Theta1[0]) / 2)) test_perturbation = True pert_test_fails = 0 if test_perturbation: #TOLERANCE below which an eigenvalue less than TOLERANCE*max eigenvalue is considered 0 REL_TOLERANCE = 10**-8 f0 = SS.multi_f.get_fisher(mf.f_spec_no_mit, mf.f_return_lw)[0].get_fisher() f1 = SS.multi_f.get_fisher(mf.f_spec_mit, mf.f_return_lw)[0].get_fisher() if not np.all(f0.T == f0): pert_test_fails += 1 warn("unperturbed fisher matrix not symmetric, unacceptable") if not np.all(f1.T == f1): pert_test_fails += 1 warn("perturbed fisher matrix not symmetric, unacceptable") #get eigenvalues and set numerically zero values to 0 eigf0 = np.linalg.eigh(f0)[0] eigf0[np.abs(eigf0) < REL_TOLERANCE * np.max(np.abs(eigf0))] = 0. eigf1 = np.linalg.eigh(f1)[0] eigf1[np.abs(eigf1) < REL_TOLERANCE * np.max(np.abs(eigf1))] = 0. #check positive semidefinite if np.any(eigf0 < 0.): pert_test_fails += 1 warn( "unperturbed fisher matrix not positive definite within tolerance, unacceptable" ) if np.any(eigf1 < 0.): pert_test_fails += 1 warn( "perturbed fisher matrix not positive definite within tolerance, unacceptable" ) #check nondecresasing diff_eig = eigf1 - eigf0 diff_eig[np.abs(diff_eig) < REL_TOLERANCE * np.max(np.abs(diff_eig))] = 0 if np.any(diff_eig < 0): pert_test_fails += 1 warn("some eigenvalues decreased within tolerance, unacceptable") #check interlace theorem satisfied (eigenvalues cannot be reordered by more than rank of perturbation) n_offset = SS.surveys_lw[0].get_total_rank() rolled_eig = (eigf1[::-1][n_offset:eigf0.size] - eigf0[::-1][0:eigf0.size - n_offset]) rolled_eig[np.abs(rolled_eig) < REL_TOLERANCE * np.max(np.abs(rolled_eig))] = 0. if np.any(rolled_eig > 0): pert_test_fails += 1 warn("some eigenvalues fail interlace theorem, unacceptable") c0 = SS.multi_f.get_fisher(mf.f_spec_no_mit, mf.f_return_lw)[0].get_covar() c1 = SS.multi_f.get_fisher(mf.f_spec_mit, mf.f_return_lw)[0].get_covar() if not np.all(c0 == c0.T): pert_test_fails += 1 warn("unperturbed covariance not symmetric, unacceptable") if not np.all(c1 == c1.T): warn("perturbed covariance not symmetric, unacceptable") eigc0 = np.linalg.eigh(c0)[0] eigc1 = np.linalg.eigh(c1)[0] if np.any(eigc0 < 0): pert_test_fails += 1 warn( "unperturbed covariance not positive semidefinite, unacceptable" ) if np.any(eigc1 < 0): pert_test_fails += 1 warn( "perturbed covariance not positive semidefinite, unacceptable") fdiff_eigc = (eigc1 - eigc0) / eigc0 fdiff_eigc[np.abs(fdiff_eigc) < REL_TOLERANCE] = 0. 
if np.any(fdiff_eigc > 0): pert_test_fails += 1 warn("some covariance eigenvalues increase, unacceptable") if pert_test_fails == 0: print("PASS: All fisher matrix sanity checks passed") else: raise RuntimeError("FAIL: " + str(pert_test_fails) + " fisher matrix sanity checks failed") test_eigs = True eig_test_fails = 0 if test_eigs: REL_TOLERANCE = 10**-8 c_ssc0 = SS.multi_f.get_fisher( mf.f_spec_SSC_no_mit, mf.f_return_sw)[1].get_covar() #SS.covs_sw[0].get_ssc_covar() if not np.allclose(c_ssc0, c_ssc0.T): eig_test_fails += 1 warn("unperturbed result covariance not symmetric, unacceptable") c_ssc1 = SS.multi_f.get_fisher( mf.f_spec_SSC_mit, mf.f_return_sw)[1].get_covar() #SS.covs_sw[0].get_ssc_covar() if not np.allclose(c_ssc1, c_ssc1.T): eig_test_fails += 1 warn("perturbed result covariance not symmetric, unacceptable") eigsys_ssc0 = np.linalg.eigh(c_ssc0) eigsys_ssc1 = np.linalg.eigh(c_ssc1) eig_ssc0 = eigsys_ssc0[0].copy() eig_ssc1 = eigsys_ssc1[0].copy() eig_ssc0[np.abs(eig_ssc0) < np.max(np.abs(eig_ssc0)) * REL_TOLERANCE] = 0 eig_ssc1[np.abs(eig_ssc0) < np.max(np.abs(eig_ssc0)) * REL_TOLERANCE] = 0 if np.any(eig_ssc0 < 0): eig_test_fails += 1 warn( "unperturbed result cov not positive semidefinite, unacceptable" ) if np.any(eig_ssc1 < 0): eig_test_fails += 1 warn( "perturbed result cov not positive semidefinite, unacceptable") cg = SS.f_set_nopriors[0][1].get_covar() eigsys_cg = np.linalg.eigh(cg) eig_mitprod = np.real( np.linalg.eig(np.dot(np.linalg.inv(c_ssc0 + cg), c_ssc1 + cg))[0]) eig_mitprod[np.abs(eig_mitprod - 1.) < REL_TOLERANCE] = 1. if np.any(eig_mitprod > 1): eig_test_fails += 1 warn("mitigation making covariance worse, unacceptable") n_offset = SS.surveys_lw[0].get_total_rank() if np.sum(eig_mitprod < 1.) > n_offset: eig_test_fails += 1 warn("mitigation changing too many eigenvalues, unacceptable") eig_diff = eig_ssc1 - eig_ssc0 eig_diff[np.abs(eig_diff) < np.max(np.abs(eig_diff)) * REL_TOLERANCE] = 0. if np.any(eig_diff > 0): eig_test_fails += 1 warn("mitigation making covariance worse, unacceptable") if eig_test_fails == 0: print("PASS: All sw eigenvalue sanity checks passed") else: raise RuntimeError("FAIL: " + str(pert_test_fails) + " eigenvalue sanity checks failed") do_eig_interlace_check = True if do_eig_interlace_check: eig_interlace_fails_mit = 0 eig_interlace_fails_no_mit = 0 n_sw = mit_eigs_sw[0].size n_par = mit_eigs_par[0].size d_n = n_sw - n_par eig_l_mit_par = mit_eigs_par[0][::-1] eig_l_no_mit_par = no_mit_eigs_par[0][::-1] eig_l_mit_sw = mit_eigs_sw[0][::-1] eig_l_no_mit_sw = no_mit_eigs_sw[0][::-1] for i in range(0, n_par): if eig_l_mit_par[i] > eig_l_mit_sw[i]: eig_interlace_fails_mit += 1 if eig_l_no_mit_par[i] > eig_l_no_mit_sw[i]: eig_interlace_fails_no_mit += 1 if eig_l_mit_par[i] < eig_l_mit_sw[i + d_n]: eig_interlace_fails_mit += 1 if eig_l_no_mit_par[i] < eig_l_no_mit_sw[i + d_n]: eig_interlace_fails_no_mit += 1 if eig_interlace_fails_mit == 0 and eig_interlace_fails_no_mit == 0: print("PASS: All parameter eigenvalue interlace tests passed") else: raise RuntimeError( "FAIL: " + str(eig_interlace_fails_mit) + " mitigation and " + str(eig_interlace_fails_no_mit) + " no mitigation failures in parameter eigenvalue interlace tests" )
def test_agreement_with_sigma8(): """test sigma8 works basic to jdem""" cosmo_base = defaults.cosmology_wmap.copy() cosmo_base = cp.add_derived_pars(cosmo_base, 'jdem') cosmo_base['de_model'] = 'constant_w' cosmo_base['w'] = -1. cosmo_base['sigma8'] = 0.7925070693605805 power_params = defaults.power_params.copy() power_params.camb['maxkh'] = 3. power_params.camb['kmax'] = 10. power_params.camb['npoints'] = 1000 power_params.camb['accuracy'] = 2 power_params.camb['leave_h'] = False power_params_jdem = deepcopy(power_params) power_params_jdem.camb['force_sigma8'] = False power_params_basi = deepcopy(power_params) power_params_basi.camb['force_sigma8'] = True cosmo_jdem = cosmo_base.copy() cosmo_jdem['p_space'] = 'jdem' C_fid_jdem = cp.CosmoPie(cosmo_jdem, 'jdem') P_jdem = mps.MatterPower(C_fid_jdem, power_params_jdem) C_fid_jdem.set_power(P_jdem) cosmo_basi = cosmo_base.copy() cosmo_basi['p_space'] = 'basic' C_fid_basi = cp.CosmoPie(cosmo_basi, 'basic') P_basi = mps.MatterPower(C_fid_basi, power_params_basi) C_fid_basi.set_power(P_basi) zs = np.arange(0.2, 1.41, 0.40) z_fine = np.linspace(0.001, 1.4, 1000) geo_jdem = FullSkyGeo(zs, C_fid_jdem, z_fine) geo_basi = FullSkyGeo(zs, C_fid_basi, z_fine) jdem_pars = np.array( ['ns', 'Omegamh2', 'Omegabh2', 'OmegaLh2', 'LogAs', 'w']) jdem_eps = np.array([0.002, 0.00025, 0.0001, 0.00025, 0.01, 0.01]) basi_pars = np.array(['ns', 'Omegamh2', 'Omegabh2', 'h', 'sigma8', 'w']) basi_eps = np.array([0.002, 0.00025, 0.0001, 0.00025, 0.001, 0.01]) sw_params = defaults.sw_survey_params.copy() len_params = defaults.lensing_params.copy() sw_observable_list = defaults.sw_observable_list.copy() nz_wfirst_lens = NZWFirstEff(defaults.nz_params_wfirst_lens.copy()) prior_params = defaults.prior_fisher_params.copy() basis_params = defaults.basis_params.copy() sw_survey_jdem = sws.SWSurvey(geo_jdem, 'wfirst', C_fid_jdem, sw_params, jdem_pars, jdem_eps, sw_observable_list, len_params, nz_wfirst_lens) sw_survey_basi = sws.SWSurvey(geo_basi, 'wfirst', C_fid_basi, sw_params, basi_pars, basi_eps, sw_observable_list, len_params, nz_wfirst_lens) #need to fix As because the code cannot presently do this for itr in range(0, basi_pars.size): for i in range(0, 2): cosmo_alt_basi = sw_survey_basi.len_pow.Cs_pert[ itr, i].cosmology.copy() n_As = 10 logAs = np.linspace(cosmo_alt_basi['LogAs'] * 0.9, cosmo_alt_basi['LogAs'] * 1.1, n_As) As = np.exp(logAs) sigma8s = np.zeros(n_As) for itr2 in xrange(0, n_As): cosmo_alt_basi['As'] = As[itr2] cosmo_alt_basi['LogAs'] = logAs[itr2] sigma8s[itr2] = camb_sigma8(cosmo_alt_basi, power_params_basi.camb) logAs_interp = InterpolatedUnivariateSpline(sigma8s[::-1], logAs[::-1], ext=2, k=3) sw_survey_basi.len_pow.Cs_pert[ itr, i].cosmology['LogAs'] = logAs_interp( sw_survey_basi.len_pow.Cs_pert[itr, i].cosmology['sigma8']) sw_survey_basi.len_pow.Cs_pert[itr, i].cosmology['As'] = np.exp( sw_survey_basi.len_pow.Cs_pert[itr, i].cosmology['LogAs']) dO_dpar_jdem = sw_survey_jdem.get_dO_I_dpar_array() dO_dpar_basi = sw_survey_basi.get_dO_I_dpar_array() response_pars = np.array([ 'ns', 'Omegach2', 'Omegabh2', 'Omegamh2', 'OmegaLh2', 'h', 'LogAs', 'w', 'sigma8' ]) l_max = 24 r_max_jdem = geo_jdem.r_fine[-1] k_cut_jdem = 30. / r_max_jdem basis_jdem = SphBasisK(r_max_jdem, C_fid_jdem, k_cut_jdem, basis_params, l_ceil=l_max, needs_m=True) SS_jdem = SuperSurvey(np.array([sw_survey_jdem]), np.array([]), basis_jdem, C_fid_jdem, prior_params, get_a=False, do_unmitigated=True, do_mitigated=False) r_max_basi = geo_basi.r_fine[-1] k_cut_basi = 30. 
/ r_max_basi basis_basi = SphBasisK(r_max_basi, C_fid_basi, k_cut_basi, basis_params, l_ceil=l_max, needs_m=True) SS_basi = SuperSurvey(np.array([sw_survey_basi]), np.array([]), basis_basi, C_fid_basi, prior_params, get_a=False, do_unmitigated=True, do_mitigated=False) #dO_dpar_jdem_to_basi = np.zeros_like(dO_dpar_jdem) #dO_dpar_basi_to_jdem = np.zeros_like(dO_dpar_basi) project_basi_to_jdem = np.zeros((jdem_pars.size, basi_pars.size)) response_derivs_jdem = np.zeros((response_pars.size, jdem_pars.size)) response_derivs_basi = np.zeros((response_pars.size, basi_pars.size)) for i in range(0, response_pars.size): for j in range(0, jdem_pars.size): response_derivs_jdem[i, j] = ( sw_survey_jdem.len_pow.Cs_pert[j, 0].cosmology[response_pars[i]] - sw_survey_jdem.len_pow.Cs_pert[j, 1].cosmology[ response_pars[i]]) / (jdem_eps[j] * 2.) response_derivs_basi[i, j] = ( sw_survey_basi.len_pow.Cs_pert[j, 0].cosmology[response_pars[i]] - sw_survey_basi.len_pow.Cs_pert[j, 1].cosmology[ response_pars[i]]) / (basi_eps[j] * 2.) project_jdem_to_basi = np.zeros((basi_pars.size, jdem_pars.size)) project_basi_to_jdem = np.zeros((jdem_pars.size, basi_pars.size)) for itr1 in range(0, basi_pars.size): for itr2 in range(0, response_pars.size): if response_pars[itr2] in jdem_pars: name = response_pars[itr2] i = np.argwhere(jdem_pars == name)[0, 0] project_jdem_to_basi[itr1, i] = response_derivs_basi[itr2, itr1] for itr1 in range(0, jdem_pars.size): for itr2 in range(0, response_pars.size): if response_pars[itr2] in basi_pars: name = response_pars[itr2] i = np.argwhere(basi_pars == name)[0, 0] project_basi_to_jdem[itr1, i] = response_derivs_jdem[itr2, itr1] assert np.allclose(np.dot(dO_dpar_jdem, project_jdem_to_basi.T), dO_dpar_basi, rtol=1.e-3, atol=np.max(dO_dpar_basi) * 1.e-4) assert np.allclose(np.dot(dO_dpar_basi, project_basi_to_jdem.T), dO_dpar_jdem, rtol=1.e-3, atol=np.max(dO_dpar_jdem) * 1.e-4) #basi p_space cannot currently do priors by itself f_p_priors_basi = np.dot( project_jdem_to_basi, np.dot(SS_jdem.multi_f.fisher_priors.get_fisher(), project_jdem_to_basi.T)) for i in range(0, 1): f_np_jdem = SS_jdem.f_set_nopriors[i][2].get_fisher().copy() f_np_basi = SS_basi.f_set_nopriors[i][2].get_fisher().copy() f_np_jdem_to_basi = np.dot(project_jdem_to_basi, np.dot(f_np_jdem, project_jdem_to_basi.T)) f_np_basi_to_jdem = np.dot(project_basi_to_jdem, np.dot(f_np_basi, project_basi_to_jdem.T)) assert np.allclose(f_np_jdem_to_basi, f_np_basi, rtol=1.e-2) assert np.allclose(f_np_basi_to_jdem, f_np_jdem, rtol=1.e-2) f_p_jdem = SS_jdem.f_set[i][2].get_fisher().copy() f_p_basi = SS_basi.f_set_nopriors[i][2].get_fisher().copy( ) + f_p_priors_basi.copy() f_p_jdem_to_basi = np.dot(project_jdem_to_basi, np.dot(f_p_jdem, project_jdem_to_basi.T)) f_p_basi_to_jdem = np.dot(project_basi_to_jdem, np.dot(f_p_basi, project_basi_to_jdem.T)) assert np.allclose(f_p_jdem_to_basi, f_p_basi, rtol=1.e-2) assert np.allclose(f_p_basi_to_jdem, f_p_jdem, rtol=1.e-2) print(f_np_jdem / f_np_basi_to_jdem) print(f_np_basi / f_np_jdem_to_basi)
def __init__(self): """ do power derivative comparison""" power_params = defaults.power_params.copy() power_params.camb['force_sigma8'] = True power_params.camb['leave_h'] = False power_params.camb['npoints'] = 1000 C = cp.CosmoPie(cosmology=COSMOLOGY_CHIANG, p_space='basic') epsilon = 0.01 P_a = mps.MatterPower(C, power_params) k_a = P_a.k C.k = k_a k_a_h = P_a.k / C.cosmology['h'] d_chiang_halo = np.loadtxt('test_inputs/dp_1/dp_chiang_halofit.dat') k_chiang_halo = d_chiang_halo[:, 0] * C.cosmology['h'] dc_chiang_halo = d_chiang_halo[:, 1] dc_ch1 = interp1d(k_chiang_halo, dc_chiang_halo, bounds_error=False)(k_a) d_chiang_lin = np.loadtxt('test_inputs/dp_1/dp_chiang_linear.dat') k_chiang_lin = d_chiang_lin[:, 0] * C.cosmology['h'] dc_chiang_lin = d_chiang_lin[:, 1] dc_ch2 = interp1d(k_chiang_lin, dc_chiang_lin, bounds_error=False)(k_a) d_chiang_fpt = np.loadtxt('test_inputs/dp_1/dp_chiang_oneloop.dat') k_chiang_fpt = d_chiang_fpt[:, 0] * C.cosmology['h'] dc_chiang_fpt = d_chiang_fpt[:, 1] dc_ch3 = interp1d(k_chiang_fpt, dc_chiang_fpt, bounds_error=False)(k_a) do_plots = True if do_plots: import matplotlib.pyplot as plt zbar = np.array([3.]) dcalt1, p1a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='linear', epsilon=epsilon) dcalt2, p2a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='halofit', epsilon=epsilon) dcalt3, p3a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='fastpt', epsilon=epsilon) if do_plots: ax = plt.subplot(221) plt.xlim([0., 0.4]) plt.ylim([1.2, 3.2]) plt.grid() plt.title('z=3.0') ax.plot(k_a_h, abs(dcalt1 / p1a)) ax.plot(k_a_h, abs(dcalt2 / p2a)) ax.plot(k_a_h, abs(dcalt3 / p3a)) zbar = np.array([2.]) dcalt1, p1a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='linear', epsilon=epsilon) dcalt2, p2a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='halofit', epsilon=epsilon) dcalt3, p3a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='fastpt', epsilon=epsilon) if do_plots: ax = plt.subplot(222) plt.xlim([0., 0.4]) plt.ylim([1.2, 3.2]) plt.grid() plt.title('z=2.0') ax.plot(k_a_h, abs(dcalt1 / p1a)) ax.plot(k_a_h, abs(dcalt2 / p2a)) ax.plot(k_a_h, abs(dcalt3 / p3a)) zbar = np.array([1.]) dcalt1, p1a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='linear', epsilon=epsilon) dcalt2, p2a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='halofit', epsilon=epsilon) dcalt3, p3a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='fastpt', epsilon=epsilon) if do_plots: ax = plt.subplot(223) plt.xlim([0., 0.4]) plt.ylim([1.2, 3.2]) plt.grid() plt.title('z=1.0') ax.set_xlabel('k h Mpc^-1') ax.set_ylabel('dln(P)/ddeltabar') ax.plot(k_a_h, abs(dcalt1 / p1a)) ax.plot(k_a_h, abs(dcalt2 / p2a)) ax.plot(k_a_h, abs(dcalt3 / p3a)) ax.plot(k_a_h, dc_ch1) ax.plot(k_a_h, dc_ch2) ax.plot(k_a_h, dc_ch3) plt.legend([ 'linear', 'halofit', 'fastpt', 'halo_chiang', "lin_chiang", "fpt_chiang" ], loc=4) mask_mult = (k_a_h > 0.) 
* (k_a_h < 0.4) rat_halofit = (dc_ch1 / abs(dcalt2 / p2a)[:, 0])[mask_mult] rat_linear = (dc_ch2 / abs(dcalt1 / p1a)[:, 0])[mask_mult] rat_fpt = (dc_ch3 / abs(dcalt3 / p3a)[:, 0])[mask_mult] zbar = np.array([0.]) dcalt1, p1a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='linear', epsilon=epsilon) dcalt2, p2a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='halofit', epsilon=epsilon) dcalt3, p3a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='fastpt', epsilon=epsilon) if do_plots: ax = plt.subplot(224) plt.xlim([0., 0.4]) plt.ylim([1.2, 3.2]) plt.grid() plt.title('z=0.0') ax.plot(k_a_h, abs(dcalt1 / p1a)) ax.plot(k_a_h, abs(dcalt2 / p2a)) ax.plot(k_a_h, abs(dcalt3 / p3a)) #plt.legend(['linear','halofit','fastpt'],loc=4) if do_plots: plt.show() k_a_halofit = k_a_h[mask_mult][~np.isnan(rat_halofit)] k_a_linear = k_a_h[mask_mult][~np.isnan(rat_linear)] k_a_fpt = k_a_h[mask_mult][~np.isnan(rat_fpt)] dkh = 0.05 halofit_bins = np.zeros(7) linear_bins = np.zeros(7) fpt_bins = np.zeros(7) for itr in range(1, 8): mask_loc_hf = (k_a_halofit < dkh * (itr + 1.)) * (k_a_halofit >= dkh * itr) mask_loc_lin = (k_a_linear < dkh * (itr + 1.)) * (k_a_linear >= dkh * itr) mask_loc_fpt = (k_a_fpt < dkh * (itr + 1.)) * (k_a_fpt >= dkh * itr) halofit_bins[itr - 1] = np.average( rat_halofit[~np.isnan(rat_halofit)][mask_loc_hf]) linear_bins[itr - 1] = np.average( rat_linear[~np.isnan(rat_linear)][mask_loc_lin]) fpt_bins[itr - 1] = np.average( rat_fpt[~np.isnan(rat_fpt)][mask_loc_fpt]) #print(np.abs(halofit_bins-1.)) #print(np.abs(linear_bins-1.)) #print(np.abs(fpt_bins-1.)) fails = 0 if np.all(np.abs(halofit_bins - 1.) < 0.02): print("PASS: smoothed z=1 halofit matches chiang") else: fails += 1 print("FAIL: smoothed z=1 halofit does not match chiang") if np.all(np.abs(linear_bins - 1.) < 0.02): print("PASS: smoothed z=1 linear matches chiang") else: fails += 1 print("FAIL: smoothed z=1 linear does not match chiang") if np.all(np.abs(fpt_bins - 1.) < 0.02): print("PASS: smoothed z=1 fastpt matches chiang") else: fails += 1 print("FAIL: smoothed z=1 fastpt does not match chiang") if fails == 0: print("PASS: all tests satisfactory") else: print("FAIL: " + str(fails) + " tests unsatisfactory")
#for some reason the discrepancy is a function of pivot_scalar and is minimized around pivot_scalar=0.01-0.0001
rtol = 3.e-3
eps = 0.01
cosmo_fid = defaults.cosmology.copy()
camb_params = defaults.camb_params.copy()
camb_params['force_sigma8'] = param[0]
camb_params['leave_h'] = param[1]
camb_params['npoints'] = 3000
camb_params['kmax'] = 2.
camb_params['maxkh'] = 2.
power_params = defaults.power_params.copy()
power_params.camb = camb_params
#camb_params['minkh'] = 1e-3
C_fid = cp.CosmoPie(cosmo_fid, p_space='jdem')
P_fid = mps.MatterPower(C_fid, power_params)
k_fid = P_fid.k
C_fid.set_power(P_fid)
P_lin1 = P_fid.get_matter_power(np.array([0.]), pmodel='linear')[:, 0]
P_res1 = P_fid.get_matter_power(np.array([0.]), pmodel=param[2])[:, 0]
if param[2] == 'halofit':
    nonlinear_model = camb.model.NonLinear_both
else:
    nonlinear_model = camb.model.NonLinear_none
k_res2, P_res2 = camb_pow(C_fid.cosmology, zbar=np.array([0.]),
def test_cosmosis_match():
    """test agreement with modified cosmosis demo 15 results,
    assuming a gaussian matter distribution with sigma=0.4 and average z=1;
    uses the halofit power spectrum grid"""
    TOLERANCE_MAX = 0.2
    TOLERANCE_MEAN = 0.2

    power_params = defaults.power_params.copy()
    power_params.camb['force_sigma8'] = True
    power_params.camb['maxkh'] = 25000
    power_params.camb['kmax'] = 100.
    power_params.camb['npoints'] = 3200
    C = cp.CosmoPie(cosmology=COSMOLOGY_COSMOSIS2.copy(), p_space='jdem')
    P_in = mps.MatterPower(C, power_params)
    #k_in = P_in.k
    C.set_power(P_in)

    zs = np.loadtxt('test_inputs/proj_2/z.txt')
    zs[0] = 10**-3
    ls = np.loadtxt('test_inputs/proj_2/ell.txt')

    f_sky = np.pi / (3. * np.sqrt(2.))
    params = defaults.lensing_params.copy()
    params['zbar'] = 1.0
    params['sigma'] = 0.40
    params['smodel'] = 'gaussian'
    params['l_min'] = np.min(ls)
    params['l_max'] = np.max(ls)
    params['n_l'] = ls.size
    params['n_gal'] = 118000000 * 6.
    params['pmodel'] = 'halofit'

    sh_pow1 = np.loadtxt('test_inputs/proj_2/ss_pow.txt')
    sh_pow1_gg = np.loadtxt('test_inputs/proj_2/gg_pow.txt')
    sh_pow1_sg = np.loadtxt('test_inputs/proj_2/sg_pow.txt')
    sh_pow1_mm = np.loadtxt('test_inputs/proj_2/mm_pow.txt') / C.h

    sp2 = sp.ShearPower(C, zs, f_sky, params, mode='power')
    q_sh = lw.QShear(sp2)
    q_num = lw.QNum(sp2)
    q_mag = lw.QMag(sp2)
    sh_pow2 = sp.Cll_q_q(sp2, q_sh, q_sh).Cll()
    sh_pow2_gg = sp.Cll_q_q(sp2, q_num, q_num).Cll()
    sh_pow2_sg = sp.Cll_q_q(sp2, q_sh, q_num).Cll()
    sh_pow2_mm = sp.Cll_q_q(sp2, q_mag, q_mag).Cll()

    #get ratio of calculated value to expected value from cosmosis
    #use -np.inf as filler for interpolation when the l value is not in ls*C.h, filter it later
    ss_rat = (sh_pow2 - sh_pow1) / sh_pow2
    gg_rat = (sh_pow2_gg - sh_pow1_gg) / sh_pow2_gg
    sg_rat = (sh_pow2_sg - sh_pow1_sg) / sh_pow2_sg
    mm_rat = (sh_pow2_mm - sh_pow1_mm) / sh_pow2_mm
    print(sh_pow2)

    mean_ss_err = np.mean(abs(ss_rat)[abs(ss_rat) < np.inf])
    mean_gg_err = np.mean(abs(gg_rat)[abs(gg_rat) < np.inf])
    mean_sg_err = np.mean(abs(sg_rat)[abs(sg_rat) < np.inf])
    mean_mm_err = np.mean(abs(mm_rat)[abs(mm_rat) < np.inf])
    max_ss_err = max((abs(ss_rat))[abs(ss_rat) < np.inf])
    max_gg_err = max((abs(gg_rat))[abs(gg_rat) < np.inf])
    max_sg_err = max((abs(sg_rat))[abs(sg_rat) < np.inf])
    max_mm_err = max((abs(mm_rat))[abs(mm_rat) < np.inf])

    print("ss agreement within: " + str(max_ss_err * 100.) + "%" + " mean agreement: " + str(mean_ss_err * 100.) + "%")
    print("gg agreement within: " + str(max_gg_err * 100.) + "%" + " mean agreement: " + str(mean_gg_err * 100.) + "%")
    print("sg agreement within: " + str(max_sg_err * 100.) + "%" + " mean agreement: " + str(mean_sg_err * 100.) + "%")
    print("mm agreement within: " + str(max_mm_err * 100.) + "%" + " mean agreement: " + str(mean_mm_err * 100.) + "%")

    assert max_ss_err < TOLERANCE_MAX
    assert max_gg_err < TOLERANCE_MAX
    assert max_sg_err < TOLERANCE_MAX
    assert max_mm_err < TOLERANCE_MAX
    assert mean_ss_err < TOLERANCE_MEAN
    assert mean_gg_err < TOLERANCE_MEAN
    assert mean_sg_err < TOLERANCE_MEAN
    assert mean_mm_err < TOLERANCE_MEAN
def get_perturbed_cosmopies(C_fid, pars, epsilons, log_par_derivs=None, override_safe=False):
    """get a set of 2 perturbed cosmopies, above and below the fiducial value
    (including the camb linear power spectrum), for taking partial derivatives
    with the central finite difference method
    inputs:
        C_fid: the fiducial CosmoPie
        pars: an array of the names of parameters to change
        epsilons: an array of step sizes corresponding to pars
        log_par_derivs: if True for a given element of pars, will do a log derivative in that parameter
        override_safe: if True, do not borrow the growth factor or power spectrum from C_fid even if we could
    """
    cosmo_fid = C_fid.cosmology.copy()
    P_fid = C_fid.P_lin
    k_fid = C_fid.k
    power_params = P_fid.power_params.copy()

    #default assumption is an ordinary derivative; can also do a log derivative in the parameter
    #if log_par_derivs[i]==True, will do the log derivative
    if log_par_derivs is not None and log_par_derivs.size != pars.size:
        raise ValueError('invalid input log_par_derivs ' + str(log_par_derivs))
    elif log_par_derivs is None:
        log_par_derivs = np.zeros(pars.size, dtype=bool)

    Cs_pert = np.zeros((pars.size, 2), dtype=object)

    for i in range(0, pars.size):
        cosmo_a = get_perturbed_cosmology(cosmo_fid, pars[i], epsilons[i], log_par_derivs[i])
        cosmo_b = get_perturbed_cosmology(cosmo_fid, pars[i], -epsilons[i], log_par_derivs[i])

        #set the cosmopie power spectrum appropriately
        #avoid unnecessarily recomputing growth factors if they won't change; if the growth factors don't change, neither will the w matching
        if pars[i] in cp.GROW_SAFE and not override_safe:
            C_a = cp.CosmoPie(cosmo_a, p_space=cosmo_fid['p_space'], G_safe=True, G_in=C_fid.G_p)
            C_b = cp.CosmoPie(cosmo_b, p_space=cosmo_fid['p_space'], G_safe=True, G_in=C_fid.G_p)
            P_a = mps.MatterPower(C_a, power_params, k_in=k_fid, wm_in=P_fid.wm, de_perturbative=True)
            P_b = mps.MatterPower(C_b, power_params, k_in=k_fid, wm_in=P_fid.wm, de_perturbative=True)
        else:
            C_a = cp.CosmoPie(cosmo_a, p_space=cosmo_fid['p_space'])
            C_b = cp.CosmoPie(cosmo_b, p_space=cosmo_fid['p_space'])
            #avoid unnecessarily recomputing WMatchers for dark energy related parameters, and unnecessarily calling camb
            if pars[i] in cp.DE_SAFE and not override_safe:
                P_a = mps.MatterPower(C_a, power_params, k_in=k_fid, wm_in=P_fid.wm, P_fid=P_fid, camb_safe=True)
                P_b = mps.MatterPower(C_b, power_params, k_in=k_fid, wm_in=P_fid.wm, P_fid=P_fid, camb_safe=True)
            else:
                P_a = mps.MatterPower(C_a, power_params, k_in=k_fid, de_perturbative=True)
                P_b = mps.MatterPower(C_b, power_params, k_in=k_fid, de_perturbative=True)
        #k_a = P_a.k
        #k_b = P_b.k

        C_a.set_power(P_a)
        C_b.set_power(P_b)

        Cs_pert[i, 0] = C_a
        Cs_pert[i, 1] = C_b
    return Cs_pert
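#A minimal sketch (illustration only, not part of the original module) of how the
#perturbed pairs returned by get_perturbed_cosmopies feed a central finite-difference
#derivative. f_of_C is a hypothetical stand-in for any callable mapping a CosmoPie to
#a scalar or array observable; the parameter names and step sizes mirror the jdem
#lists used in the tests elsewhere in this section.
def _example_central_difference(C_fid, f_of_C):
    """illustrative only: central finite differences from perturbed CosmoPie pairs"""
    pars = np.array(['ns', 'Omegamh2', 'Omegabh2', 'OmegaLh2', 'LogAs', 'w'])
    epsilons = np.array([0.002, 0.0005, 0.0001, 0.0005, 0.1, 0.01])
    Cs_pert = get_perturbed_cosmopies(C_fid, pars, epsilons)
    derivs = np.zeros(pars.size, dtype=object)
    for i in range(0, pars.size):
        #Cs_pert[i,0] has pars[i] shifted by +epsilons[i], Cs_pert[i,1] by -epsilons[i]
        derivs[i] = (f_of_C(Cs_pert[i, 0]) - f_of_C(Cs_pert[i, 1])) / (2. * epsilons[i])
    return derivs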
def test_pipeline_consistency():
    """test full pipeline consistency under rotation between the jdem and lihu parametrizations"""
    # common base cosmology and camb settings
    cosmo_base = defaults.cosmology_wmap.copy()
    cosmo_base = cp.add_derived_pars(cosmo_base, 'jdem')
    cosmo_base['de_model'] = 'constant_w'
    cosmo_base['w'] = -1.

    power_params = defaults.power_params.copy()
    power_params.camb['maxkh'] = 1.
    power_params.camb['kmax'] = 1.
    power_params.camb['npoints'] = 1000
    power_params.camb['accuracy'] = 2
    power_params.camb['leave_h'] = False

    # fiducial CosmoPie and matter power in each parameter space
    cosmo_jdem = cosmo_base.copy()
    cosmo_jdem['p_space'] = 'jdem'
    C_fid_jdem = cp.CosmoPie(cosmo_jdem, 'jdem')
    P_jdem = mps.MatterPower(C_fid_jdem, power_params.copy())
    C_fid_jdem.set_power(P_jdem)

    cosmo_lihu = cosmo_base.copy()
    cosmo_lihu['p_space'] = 'lihu'
    C_fid_lihu = cp.CosmoPie(cosmo_lihu, 'lihu')
    P_lihu = mps.MatterPower(C_fid_lihu, power_params.copy())
    C_fid_lihu.set_power(P_lihu)

    zs = np.arange(0.2, 1.41, 0.40)
    z_fine = np.linspace(0.001, 1.4, 1000)
    geo_jdem = FullSkyGeo(zs, C_fid_jdem, z_fine)
    geo_lihu = FullSkyGeo(zs, C_fid_lihu, z_fine)

    jdem_pars = np.array(['ns', 'Omegamh2', 'Omegabh2', 'OmegaLh2', 'LogAs', 'w'])
    jdem_eps = np.array([0.002, 0.00025, 0.0001, 0.00025, 0.1, 0.01])
    lihu_pars = np.array(['ns', 'Omegach2', 'Omegabh2', 'h', 'LogAs', 'w'])
    lihu_eps = np.array([0.002, 0.00025, 0.0001, 0.00025, 0.1, 0.01])

    sw_params = defaults.sw_survey_params.copy()
    len_params = defaults.lensing_params.copy()
    sw_observable_list = defaults.sw_observable_list.copy()
    nz_wfirst_lens = NZWFirstEff(defaults.nz_params_wfirst_lens.copy())
    prior_params = defaults.prior_fisher_params.copy()
    basis_params = defaults.basis_params.copy()

    sw_survey_jdem = sws.SWSurvey(geo_jdem, 'wfirst', C_fid_jdem, sw_params,
                                  jdem_pars, jdem_eps, sw_observable_list,
                                  len_params, nz_wfirst_lens)
    sw_survey_lihu = sws.SWSurvey(geo_lihu, 'wfirst', C_fid_lihu, sw_params,
                                  lihu_pars, lihu_eps, sw_observable_list,
                                  len_params, nz_wfirst_lens)
    #dO_dpar_jdem = sw_survey_jdem.get_dO_I_dpar_array()
    #dO_dpar_lihu = sw_survey_lihu.get_dO_I_dpar_array()

    # analytic predictions for the Jacobians d(response par)/d(varied par)
    response_pars = np.array(['ns', 'Omegach2', 'Omegabh2', 'Omegamh2',
                              'OmegaLh2', 'h', 'LogAs', 'w'])
    response_derivs_jdem_pred = np.array(
        [[1., 0., 0., 0., 0., 0., 0., 0.],
         [0., 1., 0., 1., 0., 1. / (2. * C_fid_jdem.cosmology['h']), 0., 0.],
         [0., -1., 1., 0., 0., 0., 0., 0.],
         [0., 0., 0., 0., 1., 1. / (2. * C_fid_jdem.cosmology['h']), 0., 0.],
         [0., 0., 0., 0., 0., 0., 1., 0.],
         [0., 0., 0., 0., 0., 0., 0., 1.]]).T
    response_derivs_lihu_pred = np.array(
        [[1., 0., 0., 0., 0., 0., 0., 0.],
         [0., 1., 0., 1., -1., 0., 0., 0.],
         [0., 0., 1., 1., -1., 0., 0., 0.],
         [0., 0., 0., 0., 2. * C_fid_lihu.cosmology['h'], 1., 0., 0.],
         [0., 0., 0., 0., 0., 0., 1., 0.],
         [0., 0., 0., 0., 0., 0., 0., 1.]]).T

    # build the spherical bases and SuperSurveys in each parametrization
    l_max = 24
    r_max_jdem = geo_jdem.r_fine[-1]
    k_cut_jdem = 30. / r_max_jdem
    basis_jdem = SphBasisK(r_max_jdem, C_fid_jdem, k_cut_jdem, basis_params,
                           l_ceil=l_max, needs_m=True)
    SS_jdem = SuperSurvey(np.array([sw_survey_jdem]), np.array([]), basis_jdem,
                          C_fid_jdem, prior_params, get_a=False,
                          do_unmitigated=True, do_mitigated=False)

    r_max_lihu = geo_lihu.r_fine[-1]
    k_cut_lihu = 30. / r_max_lihu
    basis_lihu = SphBasisK(r_max_lihu, C_fid_lihu, k_cut_lihu, basis_params,
                           l_ceil=l_max, needs_m=True)
    SS_lihu = SuperSurvey(np.array([sw_survey_lihu]), np.array([]), basis_lihu,
                          C_fid_lihu, prior_params, get_a=False,
                          do_unmitigated=True, do_mitigated=False)

    #dO_dpar_jdem_to_lihu = np.zeros_like(dO_dpar_jdem)
    #dO_dpar_lihu_to_jdem = np.zeros_like(dO_dpar_lihu)
    project_lihu_to_jdem = np.zeros((jdem_pars.size, lihu_pars.size))
    #f_g_jdem_to_lihu = np.zeros((lihu_pars.size,lihu_pars.size))
    #f_g_lihu_to_jdem = np.zeros((jdem_pars.size,jdem_pars.size))

    # finite-difference Jacobians from the perturbed cosmologies
    response_derivs_jdem = np.zeros((response_pars.size, jdem_pars.size))
    response_derivs_lihu = np.zeros((response_pars.size, lihu_pars.size))
    for i in range(0, response_pars.size):
        for j in range(0, jdem_pars.size):
            response_derivs_jdem[i, j] = (
                sw_survey_jdem.len_pow.Cs_pert[j, 0].cosmology[response_pars[i]]
                - sw_survey_jdem.len_pow.Cs_pert[j, 1].cosmology[response_pars[i]]
            ) / (jdem_eps[j] * 2.)
            response_derivs_lihu[i, j] = (
                sw_survey_lihu.len_pow.Cs_pert[j, 0].cosmology[response_pars[i]]
                - sw_survey_lihu.len_pow.Cs_pert[j, 1].cosmology[response_pars[i]]
            ) / (lihu_eps[j] * 2.)
    assert np.allclose(response_derivs_jdem, response_derivs_jdem_pred)
    assert np.allclose(response_derivs_lihu, response_derivs_lihu_pred)

    # projection (Jacobian) matrices between the jdem and lihu parameter sets
    project_jdem_to_lihu = np.zeros((lihu_pars.size, jdem_pars.size))
    project_lihu_to_jdem = np.zeros((jdem_pars.size, lihu_pars.size))
    for itr1 in range(0, lihu_pars.size):
        for itr2 in range(0, response_pars.size):
            if response_pars[itr2] in jdem_pars:
                name = response_pars[itr2]
                i = np.argwhere(jdem_pars == name)[0, 0]
                project_jdem_to_lihu[itr1, i] = response_derivs_lihu[itr2, itr1]
    for itr1 in range(0, jdem_pars.size):
        for itr2 in range(0, response_pars.size):
            if response_pars[itr2] in lihu_pars:
                name = response_pars[itr2]
                i = np.argwhere(lihu_pars == name)[0, 0]
                project_lihu_to_jdem[itr1, i] = response_derivs_jdem[itr2, itr1]
    #assert np.allclose(np.dot(dO_dpar_jdem,project_jdem_to_lihu.T),dO_dpar_lihu,rtol=1.e-3,atol=np.max(dO_dpar_lihu)*1.e-4)
    #assert np.allclose(np.dot(dO_dpar_lihu,project_lihu_to_jdem.T),dO_dpar_jdem,rtol=1.e-3,atol=np.max(dO_dpar_jdem)*1.e-4)

    #lihu p_space cannot currently do priors by itself
    f_p_priors_lihu = np.dot(
        project_jdem_to_lihu,
        np.dot(SS_jdem.multi_f.fisher_priors.get_fisher(), project_jdem_to_lihu.T))

    f_set_jdem_in = np.zeros(3, dtype=object)
    f_set_lihu_in = np.zeros(3, dtype=object)
    for i in range(0, 3):
        f_set_jdem_in[i] = SS_jdem.f_set_nopriors[i][2].get_fisher().copy()
        f_set_lihu_in[i] = SS_lihu.f_set_nopriors[i][2].get_fisher().copy()
    f_np_lihu2 = rotate_jdem_to_lihu(f_set_jdem_in, C_fid_jdem)
    f_np_jdem2 = rotate_lihu_to_jdem(f_set_lihu_in, C_fid_lihu)
    f_np_lihu3 = rotate_jdem_to_lihu(f_np_jdem2, C_fid_jdem)
    f_np_jdem3 = rotate_lihu_to_jdem(f_np_lihu2, C_fid_lihu)

    # Fisher matrices should agree once rotated between parametrizations
    for i in range(0, 3):
        f_np_jdem = SS_jdem.f_set_nopriors[i][2].get_fisher().copy()
        f_np_lihu = SS_lihu.f_set_nopriors[i][2].get_fisher().copy()
        f_np_jdem_to_lihu = np.dot(project_jdem_to_lihu,
                                   np.dot(f_np_jdem, project_jdem_to_lihu.T))
        f_np_lihu_to_jdem = np.dot(project_lihu_to_jdem,
                                   np.dot(f_np_lihu, project_lihu_to_jdem.T))
        assert np.allclose(f_set_lihu_in[i], f_np_lihu3[i])
        assert np.allclose(f_set_jdem_in[i], f_np_jdem3[i])
        assert np.allclose(f_np_jdem_to_lihu, f_np_lihu2[i])
        assert np.allclose(f_np_lihu_to_jdem, f_np_jdem2[i])
        assert np.allclose(f_np_jdem_to_lihu, f_np_lihu, rtol=1.e-3)
        assert np.allclose(f_np_lihu_to_jdem, f_np_jdem, rtol=1.e-3)
        assert np.allclose(f_set_lihu_in[i], f_np_lihu2[i], rtol=1.e-3)
        assert np.allclose(f_set_jdem_in[i], f_np_jdem2[i], rtol=1.e-3)
        assert np.allclose(f_np_jdem3[i], f_np_jdem2[i], rtol=1.e-3)
        assert np.allclose(f_np_lihu3[i], f_np_lihu2[i], rtol=1.e-3)

        f_p_jdem = SS_jdem.f_set[i][2].get_fisher().copy()
        f_p_lihu = SS_lihu.f_set_nopriors[i][2].get_fisher().copy() + f_p_priors_lihu.copy()
        f_p_jdem_to_lihu = np.dot(project_jdem_to_lihu,
                                  np.dot(f_p_jdem, project_jdem_to_lihu.T))
        f_p_lihu_to_jdem = np.dot(project_lihu_to_jdem,
                                  np.dot(f_p_lihu, project_lihu_to_jdem.T))
        assert np.allclose(f_p_jdem_to_lihu, f_p_lihu, rtol=1.e-3)
        assert np.allclose(f_p_lihu_to_jdem, f_p_jdem, rtol=1.e-3)

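# Illustrative sketch (not part of the original test suite): the rotation checks
# above rely on the standard Fisher-matrix transformation rule
# F'_ab = sum_ij (dtheta_i/dtheta'_a) F_ij (dtheta_j/dtheta'_b), i.e. F' = J F J^T
# with J the Jacobian between parametrizations (the project_* matrices above).
# The helper below assumes only numpy and demonstrates the round-trip property
# the test exploits: rotating with J and then with J^-1 recovers the original
# Fisher matrix up to numerical error.
def _sketch_fisher_rotation_roundtrip(seed=42):
    """minimal demonstration of F' = J F J^T and its inverse round trip"""
    rng = np.random.default_rng(seed)
    n = 6
    # a random symmetric positive-definite stand-in for a Fisher matrix
    A = rng.standard_normal((n, n))
    F = np.dot(A, A.T) + n * np.identity(n)
    # a random invertible Jacobian standing in for project_jdem_to_lihu
    J = rng.standard_normal((n, n)) + np.identity(n)
    F_rot = np.dot(J, np.dot(F, J.T))
    J_inv = np.linalg.inv(J)
    F_back = np.dot(J_inv, np.dot(F_rot, J_inv.T))
    assert np.allclose(F_back, F)
    return F, F_rot
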
def test_power_derivative():
    """test that the power spectrum derivatives agree with Chiang & Wagner arXiv:1403.3411v2 figures 4-5"""
    power_params = defaults.power_params.copy()
    power_params.camb['force_sigma8'] = True
    power_params.camb['leave_h'] = False
    power_params.camb['npoints'] = 1000
    C = cp.CosmoPie(cosmology=COSMOLOGY_CHIANG, p_space='basic')
    #d = np.loadtxt('camb_m_pow_l.dat')
    #k_in = d[:,0]
    epsilon = 0.01
    #k_a,P_a = cpow.camb_pow(cosmo_a)
    P_a = mps.MatterPower(C, power_params)
    k_a = P_a.k
    C.k = k_a
    k_a_h = P_a.k / C.cosmology['h']

    # the response should not depend on whether z is passed as a scalar or an array
    pmodels = ['linear', 'halofit', 'fastpt']
    for pmodel in pmodels:
        z0 = 0.
        hold0 = shp.dp_ddelta(P_a, z0, C, pmodel, epsilon)
        z1 = np.array([0.])
        hold1 = shp.dp_ddelta(P_a, z1, C, pmodel, epsilon)
        z2 = np.array([0., 0.001])
        hold2 = shp.dp_ddelta(P_a, z2, C, pmodel, epsilon)
        z3 = np.arange(0., 1., 0.001)
        hold3 = shp.dp_ddelta(P_a, z3, C, pmodel, epsilon)
        assert np.allclose(hold0[0], hold1[0][:, 0])
        assert np.allclose(hold1[0][:, 0], hold2[0][:, 0])
        assert np.allclose(hold1[0][:, 0], hold3[0][:, 0])
        assert np.allclose(hold2[0][:, 1], hold3[0][:, 1])
        assert np.allclose(hold0[1], hold1[1][:, 0])
        assert np.allclose(hold1[1][:, 0], hold2[1][:, 0])
        assert np.allclose(hold1[1][:, 0], hold3[1][:, 0])
        assert np.allclose(hold2[1][:, 1], hold3[1][:, 1])

    # digitized Chiang & Wagner responses for each power spectrum model
    d_chiang_halo = np.loadtxt('test_inputs/dp_1/dp_chiang_halofit.dat')
    k_chiang_halo = d_chiang_halo[:, 0] * C.cosmology['h']
    dc_chiang_halo = d_chiang_halo[:, 1]
    dc_ch1 = interp1d(k_chiang_halo, dc_chiang_halo, bounds_error=False)(k_a)
    d_chiang_lin = np.loadtxt('test_inputs/dp_1/dp_chiang_linear.dat')
    k_chiang_lin = d_chiang_lin[:, 0] * C.cosmology['h']
    dc_chiang_lin = d_chiang_lin[:, 1]
    dc_ch2 = interp1d(k_chiang_lin, dc_chiang_lin, bounds_error=False)(k_a)
    d_chiang_fpt = np.loadtxt('test_inputs/dp_1/dp_chiang_oneloop.dat')
    k_chiang_fpt = d_chiang_fpt[:, 0] * C.cosmology['h']
    dc_chiang_fpt = d_chiang_fpt[:, 1]
    dc_ch3 = interp1d(k_chiang_fpt, dc_chiang_fpt, bounds_error=False)(k_a)

    zbar = np.array([1.])
    dcalt1, p1a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='linear', epsilon=epsilon)
    dcalt2, p2a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='halofit', epsilon=epsilon)
    dcalt3, p3a = shp.dp_ddelta(P_a, zbar, C=C, pmodel='fastpt', epsilon=epsilon)

    mask_mult = (k_a_h > 0.) * (k_a_h < 0.4)
    rat_halofit = (dc_ch1 / abs(dcalt2 / p2a)[:, 0])[mask_mult]
    rat_linear = (dc_ch2 / abs(dcalt1 / p1a)[:, 0])[mask_mult]
    rat_fpt = (dc_ch3 / abs(dcalt3 / p3a)[:, 0])[mask_mult]
    k_a_halofit = k_a_h[mask_mult][~np.isnan(rat_halofit)]
    k_a_linear = k_a_h[mask_mult][~np.isnan(rat_linear)]
    k_a_fpt = k_a_h[mask_mult][~np.isnan(rat_fpt)]

    # bin the ratio of the two responses in k and require agreement to 2%
    dkh = 0.05
    halofit_bins = np.zeros(7)
    linear_bins = np.zeros(7)
    fpt_bins = np.zeros(7)
    for itr in range(1, 8):
        mask_loc_hf = (k_a_halofit < dkh * (itr + 1.)) * (k_a_halofit >= dkh * itr)
        mask_loc_lin = (k_a_linear < dkh * (itr + 1.)) * (k_a_linear >= dkh * itr)
        mask_loc_fpt = (k_a_fpt < dkh * (itr + 1.)) * (k_a_fpt >= dkh * itr)
        halofit_bins[itr - 1] = np.average(rat_halofit[~np.isnan(rat_halofit)][mask_loc_hf])
        linear_bins[itr - 1] = np.average(rat_linear[~np.isnan(rat_linear)][mask_loc_lin])
        fpt_bins[itr - 1] = np.average(rat_fpt[~np.isnan(rat_fpt)][mask_loc_fpt])
    assert np.all(np.abs(halofit_bins - 1.) < 0.02)
    assert np.all(np.abs(linear_bins - 1.) < 0.02)
    assert np.all(np.abs(fpt_bins - 1.) < 0.02)

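# Illustrative sketch (not part of the original tests): dp_ddelta above returns a
# finite-difference response of the power spectrum to a long-wavelength overdensity,
# controlled by the step `epsilon`. The toy functions below are hypothetical; they
# only show the central-difference pattern dP/ddelta ~ [P(+eps) - P(-eps)]/(2 eps),
# not the actual separate-universe response implemented in shp.
def _toy_power(k, delta_bar):
    """hypothetical stand-in for a power spectrum whose amplitude responds to delta_bar"""
    return (1. + 2. * delta_bar) * k**-1.5

def _sketch_central_difference_response(k, epsilon=0.01):
    """central finite difference of _toy_power with respect to delta_bar"""
    p_plus = _toy_power(k, epsilon)
    p_minus = _toy_power(k, -epsilon)
    return (p_plus - p_minus) / (2. * epsilon)
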
def test_power_agreement():
    """test that matter power spectrum derivatives agree between two different cosmological parametrizations"""
    # same base cosmology expressed in the jdem and lihu parameter spaces
    cosmo_base = defaults.cosmology_wmap.copy()
    cosmo_base = cp.add_derived_pars(cosmo_base, 'jdem')
    cosmo_base['de_model'] = 'constant_w'
    cosmo_base['w'] = -1.

    power_params = defaults.power_params.copy()
    power_params.camb['maxkh'] = 3.
    power_params.camb['kmax'] = 10.
    power_params.camb['npoints'] = 1000
    power_params.camb['accuracy'] = 2
    power_params.camb['leave_h'] = False

    cosmo_jdem = cosmo_base.copy()
    cosmo_jdem['p_space'] = 'jdem'
    C_fid_jdem = cp.CosmoPie(cosmo_jdem, 'jdem')
    P_jdem = mps.MatterPower(C_fid_jdem, power_params.copy())
    C_fid_jdem.set_power(P_jdem)

    cosmo_lihu = cosmo_base.copy()
    cosmo_lihu['p_space'] = 'lihu'
    C_fid_lihu = cp.CosmoPie(cosmo_lihu, 'lihu')
    P_lihu = mps.MatterPower(C_fid_lihu, power_params.copy())
    C_fid_lihu.set_power(P_lihu)

    jdem_pars = np.array(['ns', 'Omegamh2', 'Omegabh2', 'OmegaLh2', 'LogAs'])
    jdem_eps = np.array([0.002, 0.00025, 0.0001, 0.00025, 0.1])
    C_pert_jdem = ppr.get_perturbed_cosmopies(C_fid_jdem, jdem_pars, jdem_eps)
    lihu_pars = np.array(['ns', 'Omegach2', 'Omegabh2', 'h', 'LogAs'])
    lihu_eps = np.array([0.002, 0.00025, 0.0001, 0.00025, 0.1])
    C_pert_lihu = ppr.get_perturbed_cosmopies(C_fid_lihu, lihu_pars, lihu_eps)

    # finite-difference derivatives of the derived parameters, with analytic predictions
    response_pars = np.array(['Omegach2', 'Omegabh2', 'Omegamh2', 'OmegaLh2', 'h'])
    response_derivs_jdem = np.zeros((response_pars.size, 3))
    response_derivs_jdem_pred = np.array(
        [[1., 0., 1., 0., 1. / (2. * C_fid_jdem.cosmology['h'])],
         [-1., 1., 0., 0., 0.],
         [0., 0., 0., 1., 1. / (2. * C_fid_jdem.cosmology['h'])]]).T
    response_derivs_lihu = np.zeros((response_pars.size, 3))
    response_derivs_lihu_pred = np.array(
        [[1., 0., 1., -1., 0.],
         [0., 1., 1., -1., 0.],
         [0., 0., 0., 2. * C_fid_lihu.cosmology['h'], 1.]]).T

    for i in range(0, response_pars.size):
        for j in range(1, 4):
            response_derivs_jdem[i, j - 1] = (
                C_pert_jdem[j, 0].cosmology[response_pars[i]]
                - C_pert_jdem[j, 1].cosmology[response_pars[i]]) / (jdem_eps[j] * 2.)
            response_derivs_lihu[i, j - 1] = (
                C_pert_lihu[j, 0].cosmology[response_pars[i]]
                - C_pert_lihu[j, 1].cosmology[response_pars[i]]) / (lihu_eps[j] * 2.)
    assert np.allclose(response_derivs_jdem_pred, response_derivs_jdem)
    assert np.allclose(response_derivs_lihu_pred, response_derivs_lihu)

    # chain rule: derivatives in the lihu basis are linear combinations of jdem derivatives
    power_derivs_jdem = np.zeros((3, C_fid_jdem.k.size))
    power_derivs_lihu = np.zeros((3, C_fid_lihu.k.size))
    for pmodel in ['linear', 'fastpt', 'halofit']:
        for j in range(1, 4):
            power_derivs_jdem[j - 1] = (
                C_pert_jdem[j, 0].P_lin.get_matter_power([0.], pmodel=pmodel)[:, 0]
                - C_pert_jdem[j, 1].P_lin.get_matter_power([0.], pmodel=pmodel)[:, 0]
            ) / (jdem_eps[j] * 2.)
            power_derivs_lihu[j - 1] = (
                C_pert_lihu[j, 0].P_lin.get_matter_power([0.], pmodel=pmodel)[:, 0]
                - C_pert_lihu[j, 1].P_lin.get_matter_power([0.], pmodel=pmodel)[:, 0]
            ) / (lihu_eps[j] * 2.)
        assert np.allclose(power_derivs_jdem[1] + power_derivs_jdem[0] - power_derivs_jdem[2],
                           power_derivs_lihu[1], rtol=1.e-2,
                           atol=1.e-4 * np.max(np.abs(power_derivs_lihu[1])))
        assert np.allclose(power_derivs_jdem[0] - power_derivs_jdem[2],
                           power_derivs_lihu[0], rtol=1.e-2,
                           atol=1.e-4 * np.max(np.abs(power_derivs_lihu[0])))
        assert np.allclose(power_derivs_jdem[2] * 2 * C_fid_lihu.cosmology['h'],
                           power_derivs_lihu[2], rtol=1.e-2,
                           atol=1.e-4 * np.max(np.abs(power_derivs_lihu[2])))

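# Illustrative sketch (not part of the original tests): the hard-coded
# response_derivs_*_pred Jacobians above follow from the analytic relations
# between the parametrizations, assuming a flat universe with matter split into
# CDM and baryons only, so that Omegamh2 = Omegach2 + Omegabh2 and
# OmegaLh2 = h**2 - Omegamh2. The helper below is an assumption-labeled check
# that differentiates those relations with the same central-difference
# convention used in the test; it relies only on numpy.
def _sketch_lihu_response_jacobian(Omegach2, Omegabh2, h, eps=1.e-6):
    """d(Omegach2,Omegabh2,Omegamh2,OmegaLh2,h)/d(Omegach2,Omegabh2,h) by central differences"""
    def derived(och2, obh2, h_loc):
        omh2 = och2 + obh2
        olh2 = h_loc**2 - omh2
        return np.array([och2, obh2, omh2, olh2, h_loc])
    base = np.array([Omegach2, Omegabh2, h])
    jac = np.zeros((5, 3))
    for j in range(3):
        up = base.copy()
        down = base.copy()
        up[j] += eps
        down[j] -= eps
        jac[:, j] = (derived(*up) - derived(*down)) / (2. * eps)
    # e.g. the h column is approximately [0, 0, 0, 2*h, 1], matching
    # response_derivs_lihu_pred in test_power_agreement
    return jac
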
def get_matter_power(self, zs_in, pmodel='linear', const_pow_mult=1., get_one_loop=False):
    """get the matter power spectrum P(k,z) on the internal k grid
        Inputs:
            zs_in: a redshift or array of redshifts
            pmodel: nonlinear power spectrum model to use, options are 'linear','halofit', and 'fastpt'
            const_pow_mult: multiplier to adjust sigma8 without creating a whole new power spectrum
            get_one_loop: if True and pmodel=='fastpt', also return the one loop contribution
        Returns an array of shape (k.size, n_z), plus the one loop term if requested
    """
    # accept scalar or array redshift input
    if isinstance(zs_in, np.ndarray):
        zs = zs_in
    else:
        zs = np.array([zs_in])
    n_z = zs.size

    G_norms = self.C.G_norm(zs)
    if self.use_match_grid:
        # when matching a varying-w cosmology, rescale the base power with the matched growth
        w_match_grid = self.w_match_interp(zs)
        pow_mult_grid = self.growth_match_interp(zs) * const_pow_mult
        Pbases = np.zeros((self.k.size, n_z))
        if self.use_camb_grid:
            for i in range(0, n_z):
                Pbases[:, i] = pow_mult_grid[i] * self.camb_w_interp(self.k, w_match_grid[i]).flatten()
        else:
            Pbases = np.outer(self.P_lin, pow_mult_grid)
    else:
        Pbases = np.outer(self.P_lin, np.full(n_z, 1.)) * const_pow_mult

    P_nonlin = np.zeros((self.k.size, n_z))
    if pmodel == 'linear':
        P_nonlin = Pbases * G_norms**2
    elif pmodel == 'halofit':
        if self.use_match_grid:
            # halofit needs a constant-w CosmoPie carrying the matched growth
            for i in range(0, n_z):
                cosmo_hf_i = self.cosmology.copy()
                cosmo_hf_i['de_model'] = 'constant_w'
                cosmo_hf_i['w'] = w_match_grid[i]
                G_hf = InterpolatedUnivariateSpline(self.C.z_grid,
                                                    self.wm.growth_interp(w_match_grid[i], self.C.a_grid),
                                                    ext=2, k=3)
                hf_C_calc = cp.CosmoPie(cosmo_hf_i, self.C.p_space, silent=True,
                                        G_safe=True, G_in=G_hf)
                hf_C_calc.k = self.k
                hf_calc = halofit.HalofitPk(hf_C_calc, Pbases[:, i],
                                            self.power_params.halofit, self.camb_params['leave_h'])
                P_nonlin[:, i] = 2. * np.pi**2 * (hf_calc.D2_NL(self.k, zs[i]).T / self.k**3)
        else:
            hf_calc = halofit.HalofitPk(self.C, self.P_lin * const_pow_mult,
                                        self.power_params.halofit, self.camb_params['leave_h'])
            P_nonlin = 2. * np.pi**2 * (hf_calc.D2_NL(self.k, zs).T / self.k**3).T
    elif pmodel == 'fastpt':
        if self.use_match_grid:
            one_loops = np.zeros((self.k.size, n_z))
            for i in range(0, n_z):
                G_i = G_norms[i]
                one_loops[:, i] = self.fpt.one_loop(Pbases[:, i],
                                                    C_window=self.power_params.fpt['C_window']) * G_i**4
                P_nonlin[:, i] = Pbases[:, i] * G_i**2 + one_loops[:, i]
        else:
            one_loops = np.outer(self.fpt.one_loop(self.P_lin,
                                                   C_window=self.power_params.fpt['C_window']),
                                 G_norms**4)
            P_nonlin = np.outer(self.P_lin, G_norms**2) + one_loops

    if pmodel == 'fastpt' and get_one_loop:
        return P_nonlin, one_loops
    elif get_one_loop:
        raise ValueError('could not get one loop power spectrum for pmodel ' + str(pmodel))
    else:
        return P_nonlin

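# Illustrative usage sketch (not part of the module itself): it assumes a
# MatterPower object has been built as in the tests above, e.g.
# P = mps.MatterPower(C_fid, power_params), and shows the three pmodel options
# plus the fastpt-only one-loop return path of get_matter_power.
def _sketch_get_matter_power_usage(P):
    """example calls covering the supported pmodel options"""
    zs = np.array([0., 0.5, 1.])
    P_lin = P.get_matter_power(zs, pmodel='linear')    # linear power, shape (n_k, n_z)
    P_hf = P.get_matter_power(zs, pmodel='halofit')    # halofit nonlinear power
    P_fpt, one_loop = P.get_matter_power(zs, pmodel='fastpt', get_one_loop=True)
    # const_pow_mult rescales the linear amplitude (e.g. to shift sigma8)
    P_boost = P.get_matter_power(zs, pmodel='linear', const_pow_mult=1.1)
    return P_lin, P_hf, P_fpt, one_loop, P_boost
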