def cov_g_diag(self, qs, ns_in, rcs=None):
    """Get diagonal elements of the gaussian covariance between observables
    inputs:
        qs: a list of 4 QWeight objects [q1,q2,q3,q4]
        ns_in: an input array of shape noises [n13,n14,n24,n23],
               [0.,0.,0.,0.] if no noise
        rcs: correlation parameters, [r13,r24,r14,r23]. Optional.
    """
    if rcs is None:
        rcs = np.full(4, 1.)
    ns = np.zeros(ns_in.size)
    ns[0] = ns_in[0] / trapz2(
        (((self.zs >= qs[0].z_min) & (self.zs <= qs[0].z_max)) * self.ps),
        self.rs)
    ns[1] = ns_in[1] / trapz2(
        (((self.zs >= qs[0].z_min) & (self.zs <= qs[0].z_max)) * self.ps),
        self.rs)
    ns[2] = ns_in[2] / trapz2(
        (((self.zs >= qs[1].z_min) & (self.zs <= qs[1].z_max)) * self.ps),
        self.rs)
    ns[3] = ns_in[3] / trapz2(
        (((self.zs >= qs[1].z_min) & (self.zs <= qs[1].z_max)) * self.ps),
        self.rs)
    #could exploit/cache symmetries to reduce time
    c_ac = Cll_q_q(self, qs[0], qs[2], rcs[0]).Cll()
    c_bd = Cll_q_q(self, qs[1], qs[3], rcs[1]).Cll()
    c_ad = Cll_q_q(self, qs[0], qs[3], rcs[2]).Cll()
    c_bc = Cll_q_q(self, qs[1], qs[2], rcs[3]).Cll()
    cov_diag = 1. / (self.f_sky * (2. * self.l_mids + 1.) * self.delta_ls) * (
        (c_ac + ns[0]) * (c_bd + ns[2]) + (c_ad + ns[1]) * (c_bc + ns[3]))
    #(C13+N13)*(C24+N24) + (C14+N14)*(C23+N23), expanded:
    #C13*C24+C13*N24+N13*C24+N13*N24 + C14*C23+C14*N23+N14*C23+N14*N23
    return cov_diag
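#A minimal standalone sketch of the Knox formula the method above implements;
#f_sky, l_mids, delta_ls, and the power spectra here are mock stand-ins for
#the class attributes, not values from the real pipeline.
import numpy as np
l_mids = np.arange(25., 1000., 50.)        #band-power centers
delta_ls = np.full(l_mids.size, 50.)       #band widths
f_sky = 0.5                                #observed sky fraction
c_13 = c_24 = c_14 = c_23 = 1.e-9/l_mids   #mock convergence power spectra
n_13 = n_24 = n_14 = n_23 = 1.e-10         #mock normalized shape noise
cov_diag = ((c_13 + n_13)*(c_24 + n_24) + (c_14 + n_14)*(c_23 + n_23)) \
           /(f_sky*(2.*l_mids + 1.)*delta_ls)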
def Cll(self, z_min=0., z_max=np.inf):
    """get the lensing power spectrum integrated over a specified redshift range"""
    if z_min == 0. and z_max == np.inf:
        return trapz2(self.integrand, self.rs)
    else:
        mask = ((self.zs <= z_max) & (self.zs >= z_min))
        return trapz2((mask * self.integrand.T).T, self.rs)
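#Sketch of the masking trick Cll uses: zeroing the integrand outside
#[z_min, z_max] restricts the trapezoid integral to that range (up to the
#resolution of the edge bins). The grid and integrand here are mock inputs.
import numpy as np
zs = np.linspace(0., 3., 301)
integrand = np.exp(-zs)
mask = (zs >= 0.5) & (zs <= 1.5)
partial = np.trapz(mask*integrand, zs)  #close to exp(-0.5) - exp(-1.5)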
def bias_n_avg(self, min_mass, z):
    """get the average b(z)n(z) for a Sheth-Tormen mass function
    min_mass can be a function of z or a constant;
    if min_mass is a function of z it should be an array of the same size as z.
    z can be a scalar or an array."""
    if isinstance(z, np.ndarray):
        if isinstance(min_mass, np.ndarray) and not min_mass.size == z.size:
            raise ValueError('min_mass and z must be the same size')
    if np.any(min_mass < self.mass_grid[0]):
        raise ValueError('specified minimum mass too low, increase log10_min_mass')

    G = self.Growth(z)
    if isinstance(min_mass, np.ndarray) and isinstance(z, np.ndarray):
        result = np.zeros(min_mass.size)
        for i in range(0, min_mass.size):
            mass = _mass_cut(self.mass_grid, min_mass[i])
            mf_i = self.dndM_G(mass, G[i])
            b_i = self.bias_G(mass, G[i])
            result[i] = trapz2(mf_i * b_i, mass)
    else:
        if isinstance(min_mass, np.ndarray):
            mf = self.dndM_G(self.mass_grid, G)
            b_array = self.bias_G(self.mass_grid, G)
            mf_b = mf * b_array
            #integral from each grid point to the top of the mass grid
            mf_b_int = -cumtrapz(mf_b[::-1], self.mass_grid[::-1], initial=0.)[::-1]
            if np.array_equal(min_mass, self.mass_grid):
                #no need to extrapolate if min_mass already is the grid
                result = mf_b_int
            else:
                cut_itrs = np.zeros(min_mass.size, dtype=int)
                for i in range(0, min_mass.size):
                    cut_itrs[i] = np.argmax(self.mass_grid >= min_mass[i])
                dm = self.mass_grid[cut_itrs] - min_mass
                #trapezoid correction for the partial bin below the first grid point
                mf_b_ext = self.dndM_G(min_mass, G) * self.bias_G(min_mass, G) + mf_b[cut_itrs]
                result = mf_b_int[cut_itrs] + mf_b_ext * dm / 2.
        else:
            mass = _mass_cut(self.mass_grid, min_mass)
            mf = self.dndM_G(mass, G)
            b_array = self.bias_G(mass, G)
            result = trapz2(b_array * mf, mass)

    if DEBUG:
        assert np.all(result >= 0.)
    return result
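#Sketch of the reversed-cumtrapz trick used above to get the integral from
#each grid point to the top of the grid in one vectorized call; the grid and
#integrand are mock inputs.
import numpy as np
from scipy.integrate import cumtrapz  #cumulative_trapezoid in newer scipy
x = np.linspace(1., 10., 100)
f = x**-2
tail = -cumtrapz(f[::-1], x[::-1], initial=0.)[::-1]  #tail[i] ~ int from x[i] to x[-1]
assert np.allclose(tail[0], np.trapz(f, x))
assert tail[-1] == 0.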
def get_gaussian_smoothed_dN_dz(z_grid, zs_chosen, params, normalize):
    """apply gaussian smoothing of width params['smooth_sigma'] to get the
    number density over z_grid, with galaxies at the locations specified by
    zs_chosen. Mirror the boundary at z=0 if params['mirror_boundary']=True.
    If normalize=True, normalize so the density integrates to the total number
    of galaxies (in the limit where the maximum of z_grid is much larger than
    the maximum of zs_chosen, normalizing should have no effect)."""
    sigma = params['smooth_sigma']
    delta_dist = np.zeros(z_grid.size)
    for itr in range(0, zs_chosen.size):
        delta_dist[np.argmax(z_grid >= zs_chosen[itr])] += 1.
    #assume z_grid is uniform; use the second interval in case the first bin
    #is set to something else to avoid going to 0.
    dz = (z_grid[2] - z_grid[1])
    delta_dist = delta_dist / dz
    if params['mirror_boundary']:
        dN_dz = gaussian_filter1d(delta_dist, sigma / dz,
                                  truncate=params['n_right_extend'], mode='mirror')
    else:
        dN_dz = gaussian_filter1d(delta_dist, sigma / dz,
                                  truncate=params['n_right_extend'], mode='constant')
    if normalize:
        dN_dz = zs_chosen.size * dN_dz / au.trapz2(dN_dz, z_grid)
    dN_dz = dN_dz / params['area_sterad']
    return dN_dz
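#Sketch of the smoothing step: a comb of delta functions on the grid,
#smoothed with a Gaussian whose width is expressed in grid units. All inputs
#here are mock values, not the pipeline's params dict.
import numpy as np
from scipy.ndimage import gaussian_filter1d
z_grid = np.linspace(0., 4., 401)
zs_chosen = np.array([0.5, 0.52, 1.2, 2.0])
dz = z_grid[1] - z_grid[0]
delta_dist = np.zeros(z_grid.size)
for z in zs_chosen:
    delta_dist[np.argmax(z_grid >= z)] += 1.
delta_dist /= dz  #unit-area spike per galaxy
dN_dz = gaussian_filter1d(delta_dist, 0.1/dz, mode='mirror')
#total count is approximately preserved: np.trapz(dN_dz, z_grid) ~ 4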
def test_trapz2_no_dx(trapz_test):
    """test the algebra_utils reimplementation of trapz with no dx specified"""
    integrand = trapz_test.integrand
    atol_use = atol_rel_use*np.max(integrand)
    integrated1 = np.trapz(integrand, axis=0)
    integrated2 = trapz2(integrand)
    assert np.allclose(integrated1, integrated2, atol=atol_use, rtol=rtol_use)
def test_trapz2_constant_dx(trapz_test):
    """test the algebra_utils reimplementation of trapz with a constant dx"""
    print(trapz_test.key)
    xs = trapz_test.xs
    integrand = trapz_test.integrand
    atol_use = atol_rel_use*np.max(integrand)
    dx = np.average(np.diff(xs, axis=0))
    integrated1 = np.trapz(integrand, dx=dx, axis=0)
    integrated2 = trapz2(integrand, dx=dx)
    assert np.allclose(integrated1, integrated2, atol=atol_use, rtol=rtol_use)
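#A minimal reference implementation consistent with how these tests call
#trapz2 (integrand alone, or with a scalar dx, integrating along axis 0);
#the real algebra_utils.trapz2 may differ in features and performance.
import numpy as np
def trapz2_sketch(integrand, dx=1.):
    """trapezoid rule along axis 0 with uniform spacing dx,
    equivalent to np.trapz(integrand, dx=dx, axis=0)"""
    return dx*(np.sum(integrand, axis=0) - 0.5*(integrand[0] + integrand[-1]))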
def _gs(sp, z_min=0., z_max=np.inf):
    """helper function for QShear"""
    g_vals = np.zeros(sp.n_z)
    low_mask = (sp.zs >= z_min)*1.   #so we only integrate from max(z, z_min)
    high_mask = (sp.zs <= z_max)*1.  #so we only integrate to min(z, z_max)
    ps_mask = sp.ps*high_mask*low_mask
    ps_norm = ps_mask/trapz2(ps_mask, sp.rs)
    if sp.C.Omegak == 0.0:
        #flat case: g(r) = int_r^rmax p(r')(r'-r)/r' dr', vectorized with cumtrapz
        g_vals = -cumtrapz(ps_norm[::-1], sp.rs[::-1], initial=0.)[::-1] \
                 + sp.rs*cumtrapz((ps_norm/sp.rs)[::-1], sp.rs[::-1], initial=0.)[::-1]
    else:
        for i in range(0, sp.n_z):
            if z_max < sp.zs[i]:
                break
            if sp.C.Omegak > 0.0:
                sqrtK = np.sqrt(sp.C.K)
                g_vals[i] = trapz2(
                    ps_norm[i:sp.n_z]*sp.rs[i]*1./sqrtK
                    *(1./np.tan(sqrtK*sp.rs[i]) - 1./np.tan(sqrtK*sp.rs[i:sp.n_z])),
                    sp.rs[i:sp.n_z])
            else:
                sqrtK = np.sqrt(abs(sp.C.K))
                g_vals[i] = trapz2(
                    ps_norm[i:sp.n_z]*sp.rs[i]*1./sqrtK
                    *(1./np.tanh(sqrtK*sp.rs[i]) - 1./np.tanh(sqrtK*sp.rs[i:sp.n_z])),
                    sp.rs[i:sp.n_z])
    return g_vals
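#Numerical check of the flat-geometry shortcut above: the vectorized
#cumtrapz expression reproduces g(r) = int_r^rmax p(r')(r'-r)/r' dr'
#computed point by point. p here is a mock normalized source distribution.
import numpy as np
from scipy.integrate import cumtrapz
rs = np.linspace(100., 4000., 500)
ps = np.exp(-((rs - 2000.)/500.)**2)
ps = ps/np.trapz(ps, rs)
g_fast = -cumtrapz(ps[::-1], rs[::-1], initial=0.)[::-1] \
         + rs*cumtrapz((ps/rs)[::-1], rs[::-1], initial=0.)[::-1]
g_slow = np.array([np.trapz(ps[i:]*(rs[i:] - rs[i])/rs[i:], rs[i:])
                   for i in range(rs.size)])
assert np.allclose(g_fast, g_slow)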
def f_norm(self, G):
    """get the normalization factor to ensure all mass is in a halo
    accepts either a numpy array or a scalar input for G"""
    if isinstance(G, np.ndarray):
        nu = np.outer(self.nu_array, 1./G**2)
    else:
        nu = self.nu_array/G**2
    f = self.f_nu(nu)
    norm = trapz2(f, np.log(1./self.sigma))
    return norm
def get_N_projected(self, z_fine, omega_tot):
    """get the total number of galaxies in an area of omega_tot steradians"""
    return au.trapz2(self.dN_dz_interp(z_fine), z_fine)*omega_tot
def __init__(self, geos, params, survey_id, C, basis, nz_params, mf_params):
    """inputs:
        geos: a numpy array of Geo objects [geo1,geo2], where geo2 will be
              used for mitigation of covariance in the geo1 geometry
        params: a dict of parameters;
                nz_select: 'CANDELS','WFIRST','LSST', or 'constant',
                the NZMatcher to use
        survey_id: an id for the associated LWSurvey
        C: a CosmoPie object
        basis: the basis for decomposing the observable
        nz_params: parameters needed by the NZMatcher object
        mf_params: parameters needed by ST_hmf
    """
    print("Dn: initializing")
    LWObservable.__init__(self, geos, params, survey_id, C)
    self.fisher_type = False
    self.basis = basis
    self.nz_select = params['nz_select']
    self.nz_params = nz_params

    #self.geo2 should be the area in the mitigation survey but not in the
    #original survey; assume the overlap is total and use AlmDifferenceGeo to
    #avoid complications with calculating intersections
    self.geo2 = AlmDifferenceGeo(geos[1], geos[0], geos[1].C, geos[1].zs, geos[1].z_fine)
    #self.geo1 should be the intersection of the mitigation survey and the original survey
    self.geo1 = geos[0]
    #TODO add the ability to check for partial overlap between the geometries

    #should use the r bin structure of the mitigation survey
    self.r_fine = self.geo2.r_fine
    self.z_fine = self.geo2.z_fine
    assert np.all(self.r_fine == geos[1].r_fine)
    assert np.all(self.z_fine == geos[1].z_fine)
    assert np.all(geos[0].z_fine == geos[1].z_fine)
    assert np.all(geos[0].r_fine == geos[1].r_fine)
    dz = self.z_fine[2] - self.z_fine[1]

    self.mf = ST_hmf(self.C, params=mf_params)

    if self.nz_select == 'CANDELS':
        self.nzc = NZCandel(self.nz_params)
    elif self.nz_select == 'WFIRST':
        self.nzc = NZWFirst(self.nz_params)
    elif self.nz_select == 'LSST':
        self.nzc = NZLSST(self.z_fine, self.nz_params)
    elif self.nz_select == 'constant':
        self.nzc = NZConstant(self.geo2, self.nz_params)
    else:
        raise ValueError('unrecognized nz_select ' + str(self.nz_select))

    self.n_bins = self.geo2.fine_indices.shape[0]
    self.n_avgs = self.nzc.get_nz(self.geo2)
    self.dNdzs = self.n_avgs / self.geo2.dzdr * self.r_fine**2
    self.M_cuts = self.nzc.get_M_cut(self.mf, self.geo2)
    self.dn_ddelta_bar = self.mf.bias_n_avg(self.M_cuts, self.z_fine) / C.h**3
    self.integrand = np.expand_dims(self.dn_ddelta_bar * self.r_fine**2, axis=1)

    #note the effect of mitigation converged to ~0.3% if the integral is cut
    #off for z>1.5, 10% for z>0.6, 20% for z>0.5
    self.r_vols = 3. / np.diff(self.geo2.rbins**3)
    self.n_avg_integrand = self.r_fine**2 * self.n_avgs

    self.Nab_i = np.zeros(self.n_bins)
    self.vs = np.zeros((self.n_bins, basis.get_size()))
    self.b_ns = np.zeros(self.n_bins)
    self.n_avg_bin = np.zeros(self.n_bins)
    self.bias = self.dn_ddelta_bar / self.n_avgs
    self.sigma0 = params['sigma0']
    #need a bit of extra z so the smoothing does not reflect back from the right boundary
    self.z_extra = np.hstack([
        self.z_fine,
        np.arange(self.z_fine[-1] + dz,
                  self.z_fine[-1] + params['n_extend'] * self.sigma0 * (1. + self.z_fine[-1]),
                  dz)])
    self.integrands_smooth = np.zeros((self.z_fine.size, self.n_bins))

    self.V1s = np.diff(self.geo2.rs**3) / 3. * self.geo1.angular_area()
    self.V2s = self.geo2.volumes
    assert np.all(self.V1s >= 0.)
    assert np.all(self.V2s >= 0.)

    #NOTE this whole loop could be pulled apart with a small change in sph_klim
    for itr in range(0, self.n_bins):
        bounds1 = self.geo2.fine_indices[itr]
        range1 = np.arange(bounds1[0], bounds1[1])
        print("Dn: getting d1,d2")
        #multiplier for the integrand
        n_avg = self.r_vols[itr] * trapz2(self.n_avg_integrand[range1], self.r_fine[range1])
        self.n_avg_bin[itr] = n_avg
        assert n_avg >= 0.
        if self.V1s[itr] == 0. or self.V2s[itr] == 0.:
            continue
        elif n_avg == 0.:
            warn('Dn: variance had a value which was exactly 0; fixing inverse to np.inf ' + str(itr))
            self.Nab_i[itr] = np.inf
        else:
            self.b_ns[itr] = self.r_vols[itr] * trapz2(self.integrand[range1], self.r_fine[range1])
            dN_wind = np.zeros(self.z_extra.size)
            dN_wind[range1] = self.dNdzs[range1]
            sigma = self.sigma0 * (1. + self.geo2.zs[itr]) / (self.z_fine[2] - self.z_fine[1])
            dN_smooth = gaussian_filter1d(dN_wind, sigma, mode='mirror', truncate=10.)
            #must cut off at z_max by construction of the basis decomposition
            dN_smooth = dN_smooth[0:self.z_fine.size]
            print("tot acc", np.trapz(dN_smooth, self.z_fine) / np.trapz(dN_wind, self.z_extra))
            print("outside", np.trapz(dN_smooth[range1], self.z_fine[range1]) / np.trapz(dN_wind, self.z_extra))
            n_smooth = dN_smooth / self.r_fine**2 * self.geo2.dzdr
            bn_smooth = n_smooth * self.bias
            integrand_smooth = np.expand_dims(bn_smooth * self.r_fine**2, axis=1)
            self.integrands_smooth[:, itr] = integrand_smooth[:, 0]
            d1 = self.basis.get_dO_I_ddelta_alpha(self.geo1, integrand_smooth)
            d2 = self.basis.get_dO_I_ddelta_alpha(self.geo2, integrand_smooth)
            DO_a = (d2 - d1) * self.r_vols[itr]
            Nab_itr = n_avg * (1. / self.V1s[itr] + 1. / self.V2s[itr])
            self.Nab_i[itr] = 1. / Nab_itr
            self.vs[itr] = DO_a.flatten()
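#Sketch of the shot-noise term built in the loop above: the variance of the
#difference of mean densities estimated from two disjoint volumes V1 and V2,
#each Poisson sampled at mean number density n_avg, is n_avg*(1/V1 + 1/V2).
#The numbers here are mock values.
n_avg = 1.e-3                #mock mean number density
V1, V2 = 1.e8, 5.e7          #mock bin volumes
Nab = n_avg*(1./V1 + 1./V2)  #variance of the density difference
Nab_inv = 1./Nab             #what the loop stores in self.Nab_i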
def _dz_to_dchi(p_in, zs, rs, C):
    """convert a distribution in z into a distribution in comoving distance"""
    ps = p_in*C.Ez(zs)/C.DH
    ps = ps/trapz2(ps, rs)  #normalize the galaxy probability distribution
    return ps
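#Sketch of the change of variables above: p(chi) = p(z)*dz/dchi with
#dz/dchi = E(z)/D_H for an FRW background. E(z) and D_H here are mock
#stand-ins for the CosmoPie attributes C.Ez and C.DH.
import numpy as np
D_H = 2997.92458  #Hubble distance in Mpc/h (assumed h=1 units)
def E(z):
    return np.sqrt(0.3*(1. + z)**3 + 0.7)  #mock flat LCDM E(z)
zs = np.linspace(0.01, 3., 300)
rs = np.array([np.trapz(D_H/E(zs[:i + 1]), zs[:i + 1]) for i in range(zs.size)])
p_z = np.exp(-((zs - 1.)/0.3)**2)  #mock unnormalized n(z)
p_chi = p_z*E(zs)/D_H              #change variables z -> chi
p_chi = p_chi/np.trapz(p_chi, rs)  #normalize in comoving distance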