import mcfit
from scipy.interpolate import InterpolatedUnivariateSpline


def xi_to_pk(r, xi, ell=0, extrap=False):
    r"""
    Return a callable function returning the power spectrum multipole of degree
    :math:`\ell`, as computed from the Fourier transform of the input
    :math:`r` and :math:`\xi_\ell(r)` arrays.

    This uses the :mod:`mcfit` package to perform the FFT.

    Parameters
    ----------
    r : array_like
        separation values where ``xi`` is evaluated
    xi : array_like
        the array holding the correlation function multipole values
    ell : int
        multipole degree of the input correlation function and the output
        power spectrum; monopole by default
    extrap : bool, optional
        whether to extrapolate the power spectrum with a power law; can
        improve the smoothness of the FFT

    Returns
    -------
    InterpolatedUnivariateSpline :
        a spline holding the interpolated power spectrum values
    """
    P = mcfit.xi2P(r, l=ell, lowring=True)
    kk, Pk = P(xi, extrap=extrap)
    return InterpolatedUnivariateSpline(kk, Pk)
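# Example usage (a minimal sketch, not from the original source): build a toy
# Gaussian monopole xi_0(r) and recover its power spectrum via xi_to_pk. The
# grids and amplitudes below are illustrative assumptions; mcfit and scipy
# must be installed.
import numpy as np

r_demo = np.logspace(-2, 3, 2048)                # log-spaced separations [Mpc/h]
xi_demo = np.exp(-0.5 * (r_demo / 10.0) ** 2)    # toy Gaussian correlation function
Pk_spline = xi_to_pk(r_demo, xi_demo, ell=0, extrap=True)
k_demo = np.logspace(-3, 1, 200)
print(Pk_spline(k_demo)[:5])                     # P(k) at the first few k values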
def _load_p_theta_interp(self, cs2, R):
    r"""Compute and return an interpolator for :math:`\left[P\ast\Theta\right](k,R_\mathrm{ex})`,
    where :math:`\Theta` is an exclusion window function.

    Args:
        cs2 (float): Squared speed-of-sound counterterm :math:`c_s^2` in
            :math:`(h^{-1}\mathrm{Mpc})^2` units. (Unused if non_linear = False)
        R (float): Smoothing scale in :math:`h^{-1}\mathrm{Mpc}` units.
            (Unused if non_linear = False)

    Returns:
        interp1d: Interpolator for :math:`\left[P\ast\Theta\right]` as a function
            of exclusion radius. This is evaluated for all k values.
    """
    if self.verb:
        print("Computing interpolation grid for P * Theta convolution")

    # Define a k grid
    kk = np.logspace(-4, 1, 10000)

    # Define a power spectrum
    hm2 = HaloModel(self.cosmology, self.mass_function, self.halo_physics, kk,
                    kh_min=self.kh_min)
    pp = hm2.non_linear_power(cs2, R, self.pt_type, self.pade_resum,
                              self.smooth_density, self.IR_resum)

    # Transform to real space for the convolution
    r, xi = P2xi(kk, lowring=False)(pp)

    # Define the grid of exclusion radii
    RR = np.linspace(0, 200, 1000)

    # Multiply by the exclusion window in real space: zero xi beyond each R_ex
    xi = np.vstack([xi for _ in range(len(RR))])
    xi[r.reshape(1, -1) > RR.reshape(-1, 1)] = 0.

    # Transform back to Fourier space, interpolate to one dimension and return
    kk, pp = xi2P(r, lowring=False)(xi)
    int2d = interp2d(kk, RR, pp)
    int1d = interp1d(RR, int2d(self.kh_vector, RR).T)
    return lambda rr: int1d(rr.ravel())
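# A standalone sketch (not from the original source) of the real-space trick used
# in the method above: convolving P(k) with an exclusion window Theta amounts to
# transforming P -> xi, zeroing xi(r) beyond the exclusion radius R_ex, and
# transforming back. The toy power spectrum and grid choices are illustrative
# assumptions, not the halo-model spectrum used in the class.
import numpy as np
from mcfit import P2xi, xi2P

kk_demo = np.logspace(-4, 1, 4096)
pp_demo = kk_demo / (1. + (kk_demo / 0.1) ** 2) ** 2   # toy smooth power spectrum
R_ex = 20.                                              # assumed exclusion radius [Mpc/h]

r_demo, xi_demo = P2xi(kk_demo, lowring=False)(pp_demo)
xi_cut = np.where(r_demo > R_ex, 0., xi_demo)           # exclusion window in real space
k_out, p_theta = xi2P(r_demo, lowring=False)(xi_cut)    # [P * Theta](k) for this R_ex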
import numpy as np
import mcfit
from mcfit import xi2P
from scipy.interpolate import interp1d


def pip_convert(fname, cumulative=False):
    ## Given a pip file, get the P(k) multipoles.
    data = np.loadtxt(fname)

    s = data[:, 0]
    xi0 = data[:, 1]
    xi2 = data[:, 2]
    xi4 = data[:, 3]

    if cumulative:
        ## ss = np.logspace(np.log(5.), np.log(150.), num=100, base=np.exp(1))
        ss = np.logspace(-3, 3, num=60, endpoint=False)

        steps = np.diff(np.log(ss))
        step = steps[0]

        ## Interpolate on a grid.
        Ci0 = interp1d(s, s ** 3. * xi0, kind='linear', copy=True, bounds_error=False, fill_value=0.0, assume_sorted=False)
        Ci2 = interp1d(s, s ** 3. * xi2, kind='linear', copy=True, bounds_error=False, fill_value=0.0, assume_sorted=False)
        Ci4 = interp1d(s, s ** 3. * xi4, kind='linear', copy=True, bounds_error=False, fill_value=0.0, assume_sorted=False)

        Ci0 = np.sum(Ci0(ss)) * step
        Ci2 = np.sum(Ci2(ss)) * step
        Ci4 = np.sum(Ci4(ss)) * step

        result = []

        for nu, phase, CC in zip([0, 2, 4], [1., -1., 1.], [Ci0, Ci2, Ci4]):
            print('Solving for nu: %d' % nu)

            H1 = mcfit.mcfit(ss, mcfit.kernels.Mellin_SphericalBesselJ(nu - 1, deriv=0), q=0)
            H2 = mcfit.mcfit(ss, mcfit.kernels.Mellin_SphericalBesselJ(nu, deriv=0), q=0)

            y1, B1 = H1(ss * CC)
            y2, B2 = H2(ss * CC / ss)

            Pl = 4. * np.pi * phase * (-B1 + (nu + 1.0) * B2 / y2)

            result.append(Pl)

        return y1, result[0], result[1], result[2]

    else:
        ## New logarithmic r binning.
        rs = np.logspace(np.log10(0.2), np.log10(165.), 45, endpoint=True, base=10.)

        ## Interpolate on a grid.
        xi0 = interp1d(s, xi0, kind='linear', copy=True, bounds_error=False, fill_value=0.0, assume_sorted=False)
        xi2 = interp1d(s, xi2, kind='linear', copy=True, bounds_error=False, fill_value=0.0, assume_sorted=False)
        xi4 = interp1d(s, xi4, kind='linear', copy=True, bounds_error=False, fill_value=0.0, assume_sorted=False)

        xi0 = xi0(rs)
        xi2 = xi2(rs)
        xi4 = xi4(rs)

        ## And conversion to Fourier.
        ks, P0 = xi2P(rs, l=0, lowring=False, deriv=0)(xi0, extrap=False)
        ks, P2 = xi2P(rs, l=2, lowring=False, deriv=0)(xi2, extrap=False)
        ks, P4 = xi2P(rs, l=4, lowring=False, deriv=0)(xi4, extrap=False)

        return ks, P0, P2, P4
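# Example usage (a minimal sketch, not from the original source): write a toy
# 4-column file (s, xi0, xi2, xi4) and convert it to power spectrum multipoles
# with the default non-cumulative branch. The file name and the toy multipoles
# are illustrative assumptions.
s_demo = np.linspace(0.5, 160., 200)
toy = np.column_stack([s_demo,
                       np.exp(-s_demo / 30.),          # toy xi_0
                       -0.5 * np.exp(-s_demo / 30.),   # toy xi_2
                       0.1 * np.exp(-s_demo / 30.)])   # toy xi_4
np.savetxt('toy_pip.txt', toy)                         # hypothetical file name

ks, P0, P2, P4 = pip_convert('toy_pip.txt', cumulative=False)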
Q0 = interp1d(ss, Q0, kind='linear', copy=True, bounds_error=False, fill_value=(Q0[0], Q0[-1]), assume_sorted=False)
Q0 = Q0(s)

xi *= Q0

plt.loglog(k, linb * linb * Plin(k), c='k', linestyle='--')
plt.loglog(k, linb * linb * Pnl(k), c='k')

k, P = xi2P(s)(xi)

plt.loglog(k, linb * linb * P, alpha=0.5, c='tab:blue')

# for marker, weighted in zip(['s', '^'], [0, 1]):
output_dir = "/global/homes/m/mjwilson/desi/survey-validation/svdc-spring2020f-onepercent/clustering/pk/"
output = os.path.join(output_dir, "oneper_weighted_{:d}.json".format(weighted))

r = ConvolvedFFTPower.load(output)
poles = r.poles

pl.axhline(r.attrs['shotnoise'], xmin=0.0, xmax=1.0, c='k', alpha=0.3)
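# A sketch (an assumption, not part of the original script) of how the loaded
# nbodykit ConvolvedFFTPower result is typically unpacked: the measured
# multipoles live in r.poles as 'power_0', 'power_2', ..., and the shot noise
# is subtracted from the monopole only.
k_meas = poles['k']
P0_meas = poles['power_0'].real - r.attrs['shotnoise']

pl.loglog(k_meas, P0_meas, marker='.', linestyle='none', label='measured monopole')
pl.legend(frameon=False)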
## Load linear P(k), lin_pmm.dat
data = np.loadtxt('../dat/lin_pmm.dat')

ks = data[:, 0]
P0 = data[:, 1]

P0 = interp1d(ks, P0, kind='linear', copy=True, bounds_error=False, fill_value=0.0, assume_sorted=False)

## New logarithmic k binning.
ks = np.logspace(-3.0, np.log10(3.), 45, endpoint=True, base=10.)
P0 = P0(ks)

pl.loglog(ks, P0, 'k-')

rs, x2 = P2xi(ks, l=2)(P0)
ks, P2 = xi2P(rs, l=2)(x2)

pl.loglog(ks, P2, 'k^', markersize=3)

## Noise realisations.
result = []
upper = 25

print(rs[upper])

for i in np.arange(3000):
    noise = np.copy(x2)
    noise[3:upper] += np.random.uniform(0.0, .2 * np.abs(x2[3:upper]), len(x2[3:upper]))

    ks, PN = xi2P(rs, l=2)(noise)
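    # A sketch (an assumption, not in the original fragment) of how the noise
    # realisations might be accumulated so the per-bin scatter of the recovered
    # quadrupole can be quoted after the loop.
    result.append(PN)

## Illustrative Monte-Carlo scatter per k bin.
result = np.array(result)
print(np.std(result, axis=0))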