def takeCurl(vecStampX, vecStampY, lxMap, lyMap):
    fX = fft(vecStampX, axes=[-2, -1])
    fY = fft(vecStampY, axes=[-2, -1])
    return ifft((lxMap * fY - lyMap * fX) * 1j, axes=[-2, -1], normalize=True).real
def TQUtoFourierTEB(T_map, Q_map, U_map, modLMap, angLMap):
    fT = fft(T_map, axes=[-2, -1])
    fQ = fft(Q_map, axes=[-2, -1])
    fU = fft(U_map, axes=[-2, -1])
    fE = fT.copy()
    fB = fT.copy()
    fE[:] = fQ[:] * np.cos(2. * angLMap) + fU * np.sin(2. * angLMap)
    fB[:] = -fQ[:] * np.sin(2. * angLMap) + fU * np.cos(2. * angLMap)
    return (fT, fE, fB)
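
# --- Illustration (not from the original source): the same Q/U -> E/B
# rotation as TQUtoFourierTEB above, in pure numpy. The flat-sky angle map
# ang = arctan2(ly, lx) stands in for angLMap; the shapes and the random
# input maps are toy assumptions, a minimal sketch rather than the
# pipeline's actual geometry.
import numpy as np

ny, nx = 64, 64
ly = np.fft.fftfreq(ny)[:, None] * 2 * np.pi
lx = np.fft.fftfreq(nx)[None, :] * 2 * np.pi
ang = np.arctan2(ly, lx)

Q = np.random.standard_normal((ny, nx))
U = np.random.standard_normal((ny, nx))
fQ, fU = np.fft.fft2(Q), np.fft.fft2(U)
fE = fQ * np.cos(2 * ang) + fU * np.sin(2 * ang)
fB = -fQ * np.sin(2 * ang) + fU * np.cos(2 * ang)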
def smooth(data, modLMap, gauss_sigma_arcmin):
    kMap = fft(data, axes=[-2, -1])
    sigma = np.deg2rad(gauss_sigma_arcmin / 60.)
    beamTemplate = np.nan_to_num(1. / np.exp((sigma**2.) * (modLMap**2.) / (2.)))
    kMap[:, :] = np.nan_to_num(kMap[:, :] * beamTemplate[:, :])
    return ifft(kMap, axes=[-2, -1], normalize=True).real
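
# --- Usage sketch for smooth(), with a hand-built modLMap (hypothetical
# 1-arcmin pixels, not the pipeline's geometry); the inline np.fft version
# reproduces the same Gaussian-beam smoothing without the enlib-style
# fft/ifft wrappers.
import numpy as np

ny, nx = 128, 128
pix = np.deg2rad(1. / 60.)  # assumed pixel size: 1 arcmin
ly = np.fft.fftfreq(ny, d=pix)[:, None] * 2 * np.pi
lx = np.fft.fftfreq(nx, d=pix)[None, :] * 2 * np.pi
modLMap = np.hypot(ly, lx)

data = np.random.standard_normal((ny, nx))
sigma = np.deg2rad(5. / 60.)  # gauss_sigma_arcmin = 5
smoothed = np.fft.ifft2(np.fft.fft2(data) * np.exp(-(sigma**2) * modLMap**2 / 2.)).real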
def __call__(self, x):
    xmap = self.dof.unzip(x)
    res = xmap * 0
    for info in self.infos:
        t = [time.time()]
        work = xmap * info.H
        t.append(time.time())
        umap = info.U.apply(work)
        t.append(time.time())
        fmap = fft.fft(umap + 0j, axes=[-2, -1])
        t.append(time.time())
        fmap = info.N.apply(fmap, exp=0.5)
        t.append(time.time())
        if info.W is not None:
            fmap = info.W.apply(fmap)
        t.append(time.time())
        fmap = info.N.apply(fmap, exp=0.5)
        t.append(time.time())
        umap = fft.ifft(fmap, umap + 0j, axes=[-2, -1], normalize=True).real
        t.append(time.time())
        work = enmap.samewcs(info.U.trans(umap, work), work)
        t.append(time.time())
        work *= info.H
        t.append(time.time())
        t = np.array(t)
        print(" %4.2f" * (len(t) - 1) % tuple(t[1:] - t[:-1]))
        res += work
    res = utils.allreduce(res, comm)
    return self.dof.zip(res)
def l(cseed, kseed, returnk=False, index=None):
    cname = fout_dir + "lensed_covseed_" + str(args.covseed).zfill(3) + \
        "_cmbseed_" + str(cseed).zfill(5) + "_kseed_" + str(kseed).zfill(5) + ".hdf"
    if unlensed:
        seedroot = (covseed) * Nsets * Nsims
        lensedt = parray_dat.get_unlensed_cmb(seed=seedroot + cseed, scalar=False)
    else:
        lensedt = enmap.read_map(cname)[0] if polsims else enmap.read_map(cname)
    # -- add beam and noise if you want --
    if "noiseless" not in expf_name:
        assert index is not None
        if rank == 0: print("Adding beam...")
        flensed = fftfast.fft(lensedt, axes=[-2, -1])
        flensed *= parray_dat.lbeam
        lensedt = fftfast.ifft(flensed, axes=[-2, -1], normalize=True).real
        if rank == 0: print("Adding noise...")
        seedroot = (covseed + 1) * Nsets * Nsims  # WARNING: noise sims will be correlated with CMB from the next covseed
        nseed = seedroot + index
        noise = parray_dat.get_noise_sim(seed=nseed)
        if paper:
            cents, noise1d = lbinner.bin(power(noise)[0])
            mpibox.add_to_stats('noisett', noise1d)
        lensedt += noise
    lensedt = enmap.ndmap(lensedt, wcs_dat)
    if returnk:
        kname = fout_dir + "kappa_covseed_" + str(args.covseed).zfill(3) + \
            "_kseed_" + str(kseed).zfill(5) + ".hdf"
        return lensedt, enmap.read_map(kname)
    else:
        return lensedt
def takeGrad(stamp, lyMap, lxMap):
    f = fft(stamp, axes=[-2, -1])
    return (ifft(lyMap * f * 1j, axes=[-2, -1], normalize=True).real,
            ifft(lxMap * f * 1j, axes=[-2, -1], normalize=True).real)
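
# --- Sanity check (illustrative, pure numpy): the ifft(i*l*fft(...)) recipe
# used by takeGrad returns the exact derivative of a periodic plane wave.
import numpy as np

n = 256
x = np.arange(n)
k = 2 * np.pi * 5 / n  # five periods across the (periodic) box
stamp = np.sin(k * x)
lx = np.fft.fftfreq(n) * 2 * np.pi
grad = np.fft.ifft(1j * lx * np.fft.fft(stamp)).real
assert np.allclose(grad, k * np.cos(k * x))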
def stepFunctionFilterLiteMap(map2d, modLMap, ellMax, ellMin=None):
    kmap = fft(map2d.copy(), axes=[-2, -1])
    kmap[modLMap > ellMax] = 0.
    if ellMin is not None:
        kmap[modLMap < ellMin] = 0.
    retMap = ifft(kmap, axes=[-2, -1], normalize=True).real
    return retMap
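
# --- Equivalent top-hat band-pass in pure numpy, for illustration; modl is
# a synthetic |l| grid in toy units (an assumption, not the LiteMap geometry
# the function above expects).
import numpy as np

ny, nx = 128, 128
ly = np.fft.fftfreq(ny)[:, None] * 2 * np.pi
lx = np.fft.fftfreq(nx)[None, :] * 2 * np.pi
modl = np.hypot(ly, lx)

imap = np.random.standard_normal((ny, nx))
kmap = np.fft.fft2(imap)
kmap[modl > 2.0] = 0.  # ellMax analogue
kmap[modl < 0.5] = 0.  # ellMin analogue
filtered = np.fft.ifft2(kmap).real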
def __init__(self, shape, inspec, scale, corrfun):
    freqs = np.abs(fft.fftfreq(shape[-2]) * scale)
    spec_full = utils.interpol(inspec, np.abs(freqs[None]))
    # Build our 2d noise spectrum. First get the stripy part.
    ps_stripe = np.tile(1 / spec_full[:, None], [1, shape[-1]])
    # Then get our common mode
    ps_cmode = fft.fft(corrfun * 1j, axes=[-2, -1])
    # Scale common mode to have the same DC level as the striping
    ps_cmode *= ps_stripe[0, 0] / ps_cmode[0, 0]
    ps_tot = ps_stripe + ps_cmode
    self.inv_ps = 1 / ps_tot
def apply(self, arr, inplace=False):
    # Because of our padding and multiplication by the hitcount
    # before this, we should be safely apodized, and can assume
    # periodic boundaries
    if not inplace:
        arr = np.array(arr)
    carr = arr.astype(complex)
    ft = fft.fft(carr, axes=[-2, -1])
    ft *= self.inv_ps
    carr = fft.ifft(ft, carr, axes=[-2, -1], normalize=True)
    arr = carr.real
    return arr
def updateTEB_X(self, T2DData, E2DData=None, B2DData=None, alreadyFTed=False):
    '''
    Masking and windowing and apodizing and beam deconvolution has to be done beforehand!

    Maps must have units corresponding to those of theory Cls and noise power
    '''
    self._hasX = True
    self.kGradx = {}
    self.kGrady = {}

    lx = self.N.lxMap
    ly = self.N.lyMap

    if alreadyFTed:
        self.kT = T2DData
    else:
        self.kT = fft(T2DData, axes=[-2, -1])
    self.kGradx['T'] = lx * self.kT.copy() * 1j
    self.kGrady['T'] = ly * self.kT.copy() * 1j

    if E2DData is not None:
        if alreadyFTed:
            self.kE = E2DData
        else:
            self.kE = fft(E2DData, axes=[-2, -1])
        self.kGradx['E'] = 1.j * lx * self.kE.copy()
        self.kGrady['E'] = 1.j * ly * self.kE.copy()

    if B2DData is not None:
        if alreadyFTed:
            self.kB = B2DData
        else:
            self.kB = fft(B2DData, axes=[-2, -1])
        self.kGradx['B'] = 1.j * lx * self.kB.copy()
        self.kGrady['B'] = 1.j * ly * self.kB.copy()
def resample_fft(d, n, axes=None):
    """Resample numpy array d via fourier-reshaping. Requires periodic data.
    n indicates the desired output lengths of the axes that are to be
    resampled. By default the last len(n) axes are resampled, but this can
    be controlled via the axes argument."""
    d = np.asanyarray(d)
    # Compute output lengths from factors if necessary
    n = np.atleast_1d(n)
    if axes is None:
        axes = np.arange(-len(n), 0)
    else:
        axes = np.atleast_1d(axes)
    if len(n) == 1:
        n = np.repeat(n, len(axes))
    else:
        assert len(n) == len(axes)
    assert len(n) <= d.ndim
    # Nothing to do?
    if np.all(d.shape[-len(n):] == n):
        return d
    # Use the simple version if we can. It has lower memory overhead
    if d.ndim == 2 and len(n) == 1 and (axes[0] == 1 or axes[0] == -1):
        return resample_fft_simple(d, n[0])
    # Perform the fourier transform
    fd = fft.fft(d, axes=axes)
    # Frequencies are 0 1 2 ... N/2 (-N)/2 (-N)/2+1 .. -1
    # Ex 0* 1 2* -1 for n=4 and 0* 1 2 -2 -1 for n=5
    # To upgrade, insert (n_new-n_old) zeros after n_old/2
    # To downgrade, remove (n_old-n_new) values after n_new/2
    # The idea is simple, but arbitrary dimensionality makes it
    # complicated.
    norm = 1.0
    for ax, nnew in zip(axes, n):
        ax %= d.ndim
        nold = d.shape[ax]
        dn = nnew - nold
        if dn > 0:
            padvals = np.zeros(fd.shape[:ax] + (dn,) + fd.shape[ax + 1:], fd.dtype)
            spre = tuple([slice(None)] * ax + [slice(0, nold // 2)] +
                         [slice(None)] * (fd.ndim - ax - 1))
            spost = tuple([slice(None)] * ax + [slice(nold // 2, None)] +
                          [slice(None)] * (fd.ndim - ax - 1))
            fd = np.concatenate([fd[spre], padvals, fd[spost]], axis=ax)
        elif dn < 0:
            spre = tuple([slice(None)] * ax + [slice(0, nnew // 2)] +
                         [slice(None)] * (fd.ndim - ax - 1))
            spost = tuple([slice(None)] * ax + [slice(nnew // 2 - dn, None)] +
                          [slice(None)] * (fd.ndim - ax - 1))
            fd = np.concatenate([fd[spre], fd[spost]], axis=ax)
        norm *= float(nnew) / nold
    # And transform back
    res = fft.ifft(fd, axes=axes, normalize=True)
    del fd
    res *= norm
    return res if np.issubdtype(d.dtype, np.complexfloating) else res.real
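
# --- Single-axis toy version of the Fourier-resampling idea above (pad or
# crop the spectrum at n_old/2, inverse-transform, rescale), in pure numpy.
# resample_fft_1d is a hypothetical helper written for this sketch only.
import numpy as np

def resample_fft_1d(d, nnew):
    nold = len(d)
    fd = np.fft.fft(d)
    if nnew > nold:  # upsample: insert zeros after nold//2
        fd = np.concatenate([fd[:nold // 2], np.zeros(nnew - nold, fd.dtype), fd[nold // 2:]])
    else:            # downsample: drop (nold-nnew) values after nnew//2
        fd = np.concatenate([fd[:nnew // 2], fd[nnew // 2 + (nold - nnew):]])
    return np.fft.ifft(fd).real * (float(nnew) / nold)

x = np.sin(2 * np.pi * 3 * np.arange(32) / 32.)  # band-limited test signal
y = resample_fft_1d(x, 64)
assert np.allclose(y[::2], x)  # upsampled signal passes through the originals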
def updateTEB_Y(self, T2DData=None, E2DData=None, B2DData=None, alreadyFTed=False):
    assert self._hasX, "Need to initialize gradient first."
    self._hasY = True
    self.kHigh = {}

    if T2DData is not None:
        if alreadyFTed:
            self.kHigh['T'] = T2DData
        else:
            self.kHigh['T'] = fft(T2DData, axes=[-2, -1])
    else:
        self.kHigh['T'] = self.kT.copy()

    if E2DData is not None:
        if alreadyFTed:
            self.kHigh['E'] = E2DData
        else:
            self.kHigh['E'] = fft(E2DData, axes=[-2, -1])
    else:
        try:
            self.kHigh['E'] = self.kE.copy()
        except AttributeError:
            pass

    if B2DData is not None:
        if alreadyFTed:
            self.kHigh['B'] = B2DData
        else:
            self.kHigh['B'] = fft(B2DData, axes=[-2, -1])
    else:
        try:
            self.kHigh['B'] = self.kB.copy()
        except AttributeError:
            pass
def deconvolveBeam(data, modLMap, beamTemplate, lowPass=None, returnFTOnly=False):
    kMap = fft(data, axes=[-2, -1])
    kMap[:, :] = (kMap[:, :] / beamTemplate[:, :])
    if lowPass is not None:
        kMap[modLMap > lowPass] = 0.
    if returnFTOnly:
        return kMap
    else:
        return ifft(kMap, axes=[-2, -1], normalize=True).real
def downsample_fft_simple(d, factor=0.5, ngroup=100):
    """Resample 2d numpy array d via fourier-reshaping along last axis."""
    if factor == 1:
        return d
    nold = d.shape[1]
    nnew = int(nold * factor)
    res = np.zeros([d.shape[0], nnew], dtype=d.dtype)
    dn = nnew - nold
    for di in range(0, d.shape[0], ngroup):
        fd = fft.fft(d[di:di + ngroup])
        fd = np.concatenate([fd[:, :nnew // 2], fd[:, nnew // 2 - dn:]], 1)
        res[di:di + ngroup] = fft.ifft(fd, normalize=True).real
    del fd
    res *= factor
    return res
def filter_map(data2d, filter2d, modLMap, lowPass=None, highPass=None, keep_mean=True):
    kMap = fft(data2d, axes=[-2, -1])
    if keep_mean:
        mean_val = kMap[modLMap < 1]
    kMap[:, :] = np.nan_to_num(kMap[:, :] * filter2d[:, :])
    if lowPass is not None:
        kMap[modLMap > lowPass] = 0.
    if highPass is not None:
        kMap[modLMap < highPass] = 0.
    if keep_mean:
        kMap[modLMap < 1] = mean_val
    return ifft(kMap, axes=[-2, -1], normalize=True).real
def resample_fft(d, factors=[0.5], axes=None):
    """Resample numpy array d via fourier-reshaping. Requires periodic data.
    "factors" indicates the factors by which the axis lengths should be
    increased. If fewer factors are specified than the number of axes, the
    numbers apply to the last N axes, unless the "axes" argument is used to
    specify which ones."""
    if np.allclose(factors, 1):
        return d
    factors = np.atleast_1d(factors)
    assert len(factors) <= d.ndim
    if axes is None:
        axes = np.arange(-len(factors), 0)
    assert len(axes) == len(factors)
    if d.ndim == 2 and len(factors) == 1 and factors[0] < 1:
        return downsample_fft_simple(d, factors[0])
    fd = fft.fft(d, axes=axes)
    # Frequencies are 0 1 2 ... N/2 (-N)/2 (-N)/2+1 .. -1
    # Ex 0* 1 2* -1 for n=4 and 0* 1 2 -2 -1 for n=5
    # To upgrade, insert (n_new-n_old) zeros after n_old/2
    # To downgrade, remove (n_old-n_new) values after n_new/2
    # The idea is simple, but arbitrary dimensionality makes it
    # complicated.
    for ax, factor in zip(axes, factors):
        ax %= d.ndim
        nold = d.shape[ax]
        nnew = int(nold * factor + 0.5)
        dn = nnew - nold
        if dn > 0:
            padvals = np.zeros(fd.shape[:ax] + (dn,) + fd.shape[ax + 1:])
            spre = tuple([slice(None)] * ax + [slice(0, nold // 2)] +
                         [slice(None)] * (fd.ndim - ax - 1))
            spost = tuple([slice(None)] * ax + [slice(nold // 2, None)] +
                          [slice(None)] * (fd.ndim - ax - 1))
            fd = np.concatenate([fd[spre], padvals, fd[spost]], axis=ax)
        elif dn < 0:
            spre = tuple([slice(None)] * ax + [slice(0, nnew // 2)] +
                         [slice(None)] * (fd.ndim - ax - 1))
            spost = tuple([slice(None)] * ax + [slice(nnew // 2 - dn, None)] +
                          [slice(None)] * (fd.ndim - ax - 1))
            fd = np.concatenate([fd[spre], fd[spost]], axis=ax)
    # And transform back
    res = fft.ifft(fd, axes=axes, normalize=True)
    del fd
    res *= np.prod(factors)
    return res if np.issubdtype(d.dtype, np.complexfloating) else res.real
def resample_fft_simple(d, n, ngroup=100):
    """Resample 2d numpy array d via fourier-reshaping along last axis."""
    nold = d.shape[1]
    if n == nold:
        return d
    res = np.zeros([d.shape[0], n], dtype=d.dtype)
    dn = n - nold
    for di in range(0, d.shape[0], ngroup):
        fd = fft.fft(d[di:di + ngroup])
        if n < nold:
            fd = np.concatenate([fd[:, :n // 2], fd[:, n // 2 - dn:]], 1)
        else:
            fd = np.concatenate([
                fd[:, :nold // 2],
                np.zeros([len(fd), n - nold], fd.dtype),
                fd[:, nold // 2:]
            ], -1)
        res[di:di + ngroup] = fft.ifft(fd, normalize=True).real
    del fd
    res *= float(n) / nold
    return res
def __init__(self, data, beam, dr, prior="uniform", verbose=False):
    self.data = data
    # Beam setup
    self.beam = beam
    self.beam_pre = utils.interpol_prefilter(beam)
    self.dr = dr
    self.pos = self.data.rhs.posmap()
    self.box = np.sort(self.data.rhs.box(), 1)
    # Noise setup
    self.hdiv = data.div**0.5
    self.ihdiv = self.hdiv.copy()
    self.ihdiv[self.ihdiv > 0] **= -1
    # Build fourier-version of correlation
    self.fcorr = fft.fft(self.data.corr, axes=(-2, -1))
    if np.any(self.fcorr == 0):
        raise ValueError("Invalid noise correlations")
    self.verbose = verbose
    # Prior
    self.prior = prior
    self.i = 0
    self.post0 = None
    self.min_profile = 0.2
def convolve(map, fmap):
    return fft.ifft(fft.fft(map, axes=(-2, -1)) * fmap, axes=(-2, -1), normalize=True).real
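
# --- Convolution-theorem check (illustrative, pure numpy): convolve() above
# is circular convolution, i.e. multiplying spectra equals summing shifted,
# weighted copies in pixel space.
import numpy as np

a = np.random.standard_normal((16, 16))
b = np.random.standard_normal((16, 16))
via_fft = np.fft.ifft2(np.fft.fft2(a) * np.fft.fft2(b)).real
direct = np.zeros_like(a)
for dy in range(16):
    for dx in range(16):
        direct += a[dy, dx] * np.roll(np.roll(b, dy, 0), dx, 1)
assert np.allclose(via_fft, direct)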
def __init__(self, shape, corrfun, ndet):
    ps = fft.fft(corrfun + 0j, axes=[-2, -1]).real
    ps *= (ndet - 1) / np.max(ps)
    self.weight = 1 / (1 + ps)
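
# --- What the weight above does, on synthetic inputs: the power spectrum of
# a correlation function sets a per-mode weight 1/(1+ps) that downweights
# the correlated (common) modes. The Gaussian corrfun and ndet = 4 are toy
# assumptions for this sketch.
import numpy as np

ny, nx = 32, 32
yy, xx = np.mgrid[:ny, :nx]
corrfun = np.exp(-((yy - ny // 2)**2 + (xx - nx // 2)**2) / (2. * 3.**2))

ps = np.fft.fft2(corrfun + 0j).real
ps *= (4 - 1) / np.max(ps)
weight = 1 / (1 + ps)  # ~1 where ps ~ 0, down to ~1/ndet at the peak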
for i in range(Nstack):
    unlensed = enmap.rand_map(shape_sim, wcs_sim, ps)
    lensed = lensing.lens_map_flat_pix(unlensed, alpha_pix, order=lens_order)
    if noiseless_cmb:
        measured = lensed
    else:
        klteb = enmap.map2harm(lensed)
        klteb_beam = klteb * kbeam_sim
        lteb_beam = enmap.ifft(klteb_beam).real
        noise = enmap.rand_map(shape_sim, wcs_sim, ps_noise, scalar=True)
        observed = lteb_beam + noise
        fkmaps = fftfast.fft(observed, axes=[-2, -1])
        fkmaps = np.nan_to_num(fkmaps / kbeam_sim) * fMaskCMB_T
        measured = enmap.samewcs(fftfast.ifft(fkmaps, axes=[-2, -1], normalize=True).real, observed)
        if i == 0:
            io.quickPlot2d((measured - lensed), out_dir + "test2.png")
    grad_phi = grad_phi_true
    delensed = lensing.delens_map(measured.copy(), grad_phi, nstep=nstep,
                                  order=lens_order, mode="spline", border="cyclic")
    residual = delensed - unlensed
    res_stack += residual
#                       uEqualsL=not(cluster))
qest = EstimatorSmooth(shape_dat, wcs_dat,
                       theory, theory,
                       noiseX2dTEB=[nT, nP, nP],
                       noiseY2dTEB=[nT, nP, nP],
                       kBeamX=kbeam_dat,
                       kBeamY=kbeam_dat,
                       doCurl=False,
                       TOnly=not (pol),
                       gradCut=grad_cut,
                       uEqualsL=not (cluster))

fkmaps = fftfast.fft(measured, axes=[-2, -1])
if pol:
    qest.updateTEB_X(fkmaps[0], fkmaps[1], fkmaps[2], alreadyFTed=True)
else:
    qest.updateTEB_X(fkmaps, alreadyFTed=True)
qest.updateTEB_Y()

for polcomb in pol_list:
    print(("Reconstructing", polcomb, " for ", i, " ..."))
    kappa_recon = enmap.samewcs(qest.getKappa(polcomb).real, measured)
    if i == 0:
        io.quickPlot2d(kappa_recon, out_dir + "kappa_recon_single.png")
    kappa_recon -= kappa_recon.mean()
    if cluster:
pl.add(cents, lee * cents**2., color="C1", marker="o", ls="none")
pl.add(cents, lbb * cents**2., color="C2", marker="o", ls="none")
pl.add(cents, ntt * cents**2., color="C0", ls="-.", alpha=0.4)
pl.add(cents, nee * cents**2., color="C1", ls="-.", alpha=0.4)
pl.add(cents, nbb * cents**2., color="C2", ls="-.", alpha=0.4)
pl.add(fine_ells, lcltt * fine_ells**2., color="C0", ls="--")
pl.add(fine_ells, lclee * fine_ells**2., color="C1", ls="--")
pl.add(fine_ells, lclbb * fine_ells**2., color="C2", ls="--")
pl.done(out_dir + "lccomp.png")

pl = io.Plotter(scaleX='log')
pl.add(cents, lte * cents**2., color="C0", ls="-")
pl.add(fine_ells, lclte * fine_ells**2., color="C0", ls="--")
pl.done(out_dir + "lccompte.png")

fkmaps = fftfast.fft(measured, axes=[-2, -1])
if deconvolve_beam:
    fkmaps = np.nan_to_num(fkmaps / kbeam_dat)

if maxlike and cluster:
    polcomb = "TT"
    fkmapsdc = np.nan_to_num(fkmaps / kbeam_dat)
    maps = enmap.samewcs(fftfast.ifft(fkmapsdc * fMaskCMB_T, normalize=True, axes=[-2, -1]).real, measured)
    # kappa_model = init_kappa_model
    k = 0
    io.quickPlot2d(maps, out_dir + "map_iter_" + str(k).zfill(3) + ".png")

    from scipy.integrate import simps
    Ny, Nx = shape_dat[-2:]
    pixScaleY, pixScaleX = enmap.pixshape(shape_dat, wcs_dat)
def f(rmap):
    fk = fftfast.fft(rmap, axes=[-2, -1])
    fk = np.nan_to_num(fk) * fMaskCMB_T
    return enmap.samewcs(fftfast.ifft(fk, axes=[-2, -1], normalize=True).real, rmap)
# olensed = enmap.ndmap(lensed.copy() if abs(pixratio-1.)<1.e-3 else resample.resample_fft(lensed.copy(),shape_dat),wcs_dat)
# flensed = fftfast.fft(lensed,axes=[-2,-1])
# flensed *= parray_sim.lbeam
# lensed = fftfast.ifft(flensed,axes=[-2,-1],normalize=True).real
# if rank==0: print "Adding noise..."
# noise = parray_sim.get_noise_sim(seed=index+20000)
# lensed += noise
# if rank==0: print "Downsampling..."
# cmb = lensed if abs(pixratio-1.)<1.e-3 else resample.resample_fft(lensed,shape_dat)

# === ADD NOISE AFTER DOWNSAMPLE
if rank == 0:
    print("Beam convolving...")
olensed = enmap.ndmap(
    lensed.copy() if abs(pixratio - 1.) < 1.e-3 else resample.resample_fft(lensed.copy(), shape_dat),
    wcs_dat)
flensed = fftfast.fft(olensed, axes=[-2, -1])
flensed *= parray_dat.lbeam
lensed = fftfast.ifft(flensed, axes=[-2, -1], normalize=True).real
if rank == 0:
    print("Adding noise...")
noise = parray_dat.get_noise_sim(seed=index + 20000)
lcents, noise1d = lbinner_dat.bin(fmaps.get_simple_power_enmap(noise))
mpibox.add_to_stats('noisett', noise1d)
lensed += noise
if rank == 0:
    print("Downsampling...")
cmb = lensed
cmb = enmap.ndmap(cmb, wcs_dat)
if rank == 0:
    print("Calculating powers for diagnostics...")
utt2d = fmaps.get_simple_power_enmap(
from __future__ import print_function
import numpy as np
from sympy import Symbol, Function
import sympy
from enlib import fft as efft, enmap, bench
from orphics import maps, io, stats, cosmology, lensing
import os, sys

"""
Routines to reduce and evaluate symbolic mode coupling integrals
"""

ifft = lambda x: efft.ifft(x, axes=[-2, -1], normalize=True)
fft = lambda x: efft.fft(x, axes=[-2, -1])


def factorize_2d_convolution_integral(expr, l1funcs=None, l2funcs=None,
                                      groups=None, validate=True):
    """Reduce a sympy expression of variables l1x,l1y,l2x,l2y,l1,l2 into a sum of
    products of factors that depend only on vec(l1) and vec(l2) and neither, each.
    If the expression appeared as the integrand in an integral over vec(l1), where
    vec(l2) = vec(L) - vec(l1) then this reduction allows one to evaluate the
    integral as a function of vec(L) using FFTs instead of as a convolution.
    """
    # Generic message if validation fails
    val_fail_message = "Validation failed. This expression is likely not reducible to FFT form."
    # Get the 2D convolution cartesian variables
    l1x, l1y, l2x, l2y, l1, l2 = get_ells()
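
# --- Toy of the factorization this routine performs (illustrative sympy,
# with local symbols standing in for get_ells()): expanding a mixed
# integrand yields a sum of products of l1-only and l2-only factors, each
# of which can then be evaluated with FFTs.
from sympy import symbols, expand

l1x, l1y, l2x, l2y = symbols('l1x l1y l2x l2y')
integrand = (l1x + l2x) * (l1y + l2y)
print(expand(integrand))  # l1x*l1y + l1x*l2y + l1y*l2x + l2x*l2y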
    s, logdet = np.linalg.slogdet(cov)
    print((k, M, s, logdet, np.linalg.cond(cov)))
    assert s > 0
    Ms.append(M)
    logdets.append(logdet)
    cinvs.append(pinv2(cov))

mrange = np.array(Ms)

for i in range(N):
    lnlikes = []
    cmb_map = pa.get_unlensed_cmb(seed=2 * i + 100000000)
    lensed = lensing.lens_map_flat_pix(cmb_map, alpha_pix, order=lens_order) if np.abs(M) > 1.e-3 else cmb_map
    flensed = fftfast.fft(lensed, axes=[-2, -1])
    flensed *= pa.lbeam
    lensed = fftfast.ifft(flensed, axes=[-2, -1], normalize=True).real
    noise = pa.get_noise_sim(seed=2 * i + 1 + 100000000)
    measured = lensed + noise
    for k, M in enumerate(mrange):
        logdet = logdets[k]
        cinv = cinvs[k]
        lnlikeval = lnlike(logdet, cinv, measured)
        lnlikes.append(lnlikeval)
def TQUtoPureTEB(T_map, Q_map, U_map, modLMap, angLMap, windowDict, method='pure'):
    window = windowDict

    win = window['Win']
    dWin_dx = window['dWin_dx']
    dWin_dy = window['dWin_dy']
    d2Win_dx2 = window['d2Win_dx2']
    d2Win_dy2 = window['d2Win_dy2']
    d2Win_dxdy = window['d2Win_dxdy']

    T_temp = T_map.copy() * win
    fT = fft(T_temp, axes=[-2, -1])
    Q_temp = Q_map.copy() * win
    fQ = fft(Q_temp, axes=[-2, -1])
    U_temp = U_map.copy() * win
    fU = fft(U_temp, axes=[-2, -1])

    fE = fT.copy()
    fB = fT.copy()
    fE = fQ[:] * np.cos(2. * angLMap) + fU[:] * np.sin(2. * angLMap)
    fB = -fQ[:] * np.sin(2. * angLMap) + fU[:] * np.cos(2. * angLMap)

    if method == 'standard':
        return fT, fE, fB

    Q_temp = Q_map.copy() * dWin_dx
    QWx = fft(Q_temp, axes=[-2, -1])
    Q_temp = Q_map.copy() * dWin_dy
    QWy = fft(Q_temp, axes=[-2, -1])
    U_temp = U_map.copy() * dWin_dx
    UWx = fft(U_temp, axes=[-2, -1])
    U_temp = U_map.copy() * dWin_dy
    UWy = fft(U_temp, axes=[-2, -1])

    U_temp = 2. * Q_map * d2Win_dxdy - U_map * (d2Win_dx2 - d2Win_dy2)
    QU_B = fft(U_temp, axes=[-2, -1])
    U_temp = -Q_map * (d2Win_dx2 - d2Win_dy2) - 2. * U_map * d2Win_dxdy
    QU_E = fft(U_temp, axes=[-2, -1])

    modLMap = modLMap + 2  # shifted by 2, presumably to avoid division by zero at l = 0

    fB[:] += QU_B[:] * (1. / modLMap)**2
    fB[:] -= (2. * 1j) / modLMap * (np.sin(angLMap) * (QWx[:] + UWy[:]) +
                                    np.cos(angLMap) * (QWy[:] - UWx[:]))

    if method == 'hybrid':
        return fT, fE, fB

    fE[:] += QU_E[:] * (1. / modLMap)**2
    fE[:] -= (2. * 1j) / modLMap * (np.sin(angLMap) * (QWy[:] - UWx[:]) -
                                    np.cos(angLMap) * (QWx[:] + UWy[:]))

    if method == 'pure':
        return fT, fE, fB
# Set up the weight matrix W
if args.cmode > 0:
    offset_upos = calc_offset_upos(pattern, offset_array, offset_det, site, rhs, U)
    corrfun = calc_cmode_corrfun(U.ushape, U.uwcs, offset_upos, corrfun_smoothing)
    W = WeightMat(U.ushape, corrfun, 4)  # ndet
else:
    W = None
# The H in our equation is related to the hitcount, but isn't exactly it.
# normalize_hits approximates it using the hitcounts.
H = normalize_hits(hits)
# Apply weight to rhs
if W is not None:
    iH = 1 / np.maximum(H, np.max(H) * 1e-2)
    urhs = U.apply(rhs * iH)
    ft = fft.fft(urhs + 0j, axes=[-2, -1])
    ft = W.apply(ft)
    urhs = fft.ifft(ft, urhs + 0j, axes=[-2, -1], normalize=True).real
    rhs = U.trans(urhs, rhs) * H
if rhs_tot is None:
    rhs_tot = rhs
else:
    rhs_tot += rhs
infos.append(bunch.Bunch(U=U, N=N, H=H, W=W, pattern=pattern, site=site,
                         srate=srate, scale=scale, speed=speed))

rhs = utils.allreduce(rhs_tot, comm)
# info = infos[0]
# foo = rhs*info.H
# enmap.write_map("test1.fits", foo)
# bar = enmap.samewcs(info.U.apply(foo),foo)
                            fmaskY2dTEB=[fMaskCMB] * 3,
                            fmaskKappa=fMask,
                            doCurl=False,
                            TOnly=True,
                            halo=True,
                            gradCut=gradCut,
                            verbose=False,
                            loadPickledNormAndFilters=None,
                            savePickledNormAndFilters=None)

lensedMapX = stamp.copy() * win
lensedMapY = stamp.copy() * win

try:
    fotX = np.nan_to_num(fft(lensedMapX, axes=[-2, -1]) / beamTemplate[:, :])
except:
    print(("skipping ", i, ra, dec))
    i -= 1
    continue

fotY = np.nan_to_num(fft(lensedMapY, axes=[-2, -1]) / beamTemplate[:, :])

if i % 10 == 0:
    print(("Reconstructing", i, " ..."))

qest.updateTEB_X(fotX, alreadyFTed=True)
qest.updateTEB_Y(fotY, alreadyFTed=True)
kappa = qest.getKappa(polCombList[0]).real / w2

kappaStack += kappa
N = i

kappaStack /= N
def kfilter_map(imap, kfilter):
    return np.real(ifft(fft(imap, axes=[-2, -1]) * kfilter, axes=[-2, -1], normalize=True))
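
# --- Usage sketch for kfilter_map, with np.fft standing in for the module's
# fft/ifft wrappers; the example filter just zeroes the DC mode, so the
# output mean vanishes. Shapes and the filter are toy assumptions.
import numpy as np

imap = np.random.standard_normal((64, 64))
kfilter = np.ones((64, 64))
kfilter[0, 0] = 0.  # remove the mean, as an example
out = np.real(np.fft.ifft2(np.fft.fft2(imap) * kfilter))
assert abs(out.mean()) < 1e-10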