Example #1
def smooth(tod, srate, fknee=10, alpha=10):
    ft = fft.rfft(tod)
    freq = fft.rfftfreq(tod.shape[-1]) * srate
    flt = 1 / (1 + (freq / fknee)**alpha)
    ft *= flt
    fft.ifft(ft, tod, normalize=True)
    return tod
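A minimal usage sketch (assuming the enlib-style fft module used above; the TOD shape, sample rate and filter parameters are illustrative, and note that the filter is applied to tod in place):

import numpy as np
from enlib import fft   # fft backend assumed by smooth() above

tod = np.random.standard_normal((8, 4000))        # hypothetical (ndet, nsamp) TOD
tod = smooth(tod, srate=400, fknee=1.0, alpha=8)  # low-pass: attenuate f >> fknee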
Example #2
def calc_cmode_corrfun(ushape, uwcs, offset_upos, sigma, nsigma=10):
	"""Compute the real-space correlation function for the atmospheric
	common mode in unskewed coordinates. The result has an arbitrary
	overall scaling."""
	res    = enmap.zeros(ushape, uwcs)
	# Generate corrfun around center of map
	upos   = offset_upos + np.mean(res.box(),0)[:,None]
	# We will work on a smaller cutout to speed things up
	pad    = sigma*nsigma
	box    = np.array([np.min(upos,1)-pad,np.max(upos,1)+pad])
	pixbox = res.sky2pix(box.T).T
	work   = res[pixbox[0,0]:pixbox[1,0],pixbox[0,1]:pixbox[1,1]]
	posmap = work.posmap()
	# Generate each part of the corrfun as a gaussian in real space.
	# Could do this in fourier space, but easier to get subpixel precision this way
	# (not that that is very important, though)
	for p in upos.T:
		r2    = np.sum((posmap-p[:,None,None])**2,0)
		work += np.exp(-0.5*r2/sigma**2)
	# Convolve with itself mirrored to get the actual correlation function
	fres  = fft.rfft(res, axes=[-2,-1])
	fres *= np.conj(fres)
	fft.ifft(fres, res, axes=[-2,-1])
	res /= np.max(res)
	return res
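The mirrored self-convolution step above is the Wiener-Khinchin autocorrelation trick; a standalone numpy sketch of just that step, independent of enmap and using an arbitrary test pattern:

import numpy as np

# corr = F^-1[ F(f) * conj(F(f)) ] gives the (periodic) autocorrelation of f.
f = np.zeros((64, 64))
f[30:34, 30:34] = 1.0
ff = np.fft.rfft2(f)
corr = np.fft.irfft2(ff * np.conj(ff), s=f.shape)
corr /= corr.max()  # arbitrary overall scaling, as in the function above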
Example #3
def smooth(tod, srate):
	ft   = fft.rfft(tod)
	freq = fft.rfftfreq(tod.shape[-1])*srate
	flt  = 1/(1+(freq/model_fknee)**model_alpha)
	ft  *= flt
	fft.ifft(ft, tod, normalize=True)
	return tod
Example #4
def calc_cmode_corrfun(ushape, uwcs, offset_upos, sigma, nsigma=10):
    """Compute the real-space correlation function for the atmospheric
    common mode in unskewed coordinates. The result has an arbitrary
    overall scaling."""
    res = enmap.zeros(ushape, uwcs)
    # Generate corrfun around center of map
    upos = offset_upos + np.mean(res.box(), 0)[:, None]
    # We will work on a smaller cutout to speed things up
    pad = sigma * nsigma
    box = np.array([np.min(upos, 1) - pad, np.max(upos, 1) + pad])
    pixbox = res.sky2pix(box.T).T
    work = res[pixbox[0, 0]:pixbox[1, 0], pixbox[0, 1]:pixbox[1, 1]]
    posmap = work.posmap()
    # Generate each part of the corrfun as a gaussian in real space.
    # Could do this in fourier space, but easier to get subpixel precision this way
    # (not that that is very important, though)
    for p in upos.T:
        r2 = np.sum((posmap - p[:, None, None])**2, 0)
        work += np.exp(-0.5 * r2 / sigma**2)
    # Convolve with itself mirrored to get the actual correlation function
    fres = fft.rfft(res, axes=[-2, -1])
    fres *= np.conj(fres)
    fft.ifft(fres, res, axes=[-2, -1])
    res /= np.max(res)
    return res
Example #5
def smooth(tod, srate, fknee=10, alpha=10):
	ft   = fft.rfft(tod)
	freq = fft.rfftfreq(tod.shape[-1])*srate
	flt  = 1/(1+(freq/fknee)**alpha)
	ft  *= flt
	fft.ifft(ft, tod, normalize=True)
	return tod
Example #6
def smooth(tod, srate):
    ft = fft.rfft(tod)
    freq = fft.rfftfreq(tod.shape[-1]) * srate
    flt = 1 / (1 + (freq / model_fknee)**model_alpha)
    ft *= flt
    fft.ifft(ft, tod, normalize=True)
    return tod
Example #7
 def __call__(self, poss, amps, taus, dets=None):
     if dets is None: dets = np.arange(len(self.rdata.dets))
     rmask = np.in1d(self.rdata.detmap, dets)
     if np.sum(rmask) == 0:
         return np.zeros([0, self.rdata.pos.shape[1]], float)
     rpos = self.rdata.pos[rmask]
     mask = self.mask[rmask]
     detinds = build_detinds(self.rdata.detmap[rmask], dets)
     # Evaluate the plain beam
     r = np.sum((rpos - poss[detinds][:, None])**2, -1)**0.5
     bpix = r / self.dr
     model = utils.interpol(self.rdata.beam[1],
                            bpix[None],
                            mask_nan=False,
                            order=1)
     # Must mask invalid regions *before* fourier stuff
     model *= mask
     # Apply the butterworth filter and time constants
     fmodel = fft.rfft(model)
     tfilters = filters.tconst_filter(self.rdata.freqs[None],
                                      taus[:, None]) * self.rdata.butter
     fmodel *= tfilters[detinds]
     fft.ifft(fmodel, model, normalize=True)
     # Apply the amplitudes
     model *= amps[detinds, None]
     return model
Example #8
def broaden_beam_hor(tod, d, ibeam, obeam):
	ft    = fft.rfft(tod)
	k     = 2*np.pi*fft.rfftfreq(d.nsamp, 1/d.srate)
	el    = np.mean(d.boresight[2,::100])
	skyspeed = d.speed*np.cos(el)
	sigma = (obeam**2-ibeam**2)**0.5
	ft *= np.exp(-0.5*(sigma/skyspeed)**2*k**2)
	fft.ifft(ft, tod, normalize=True)
Example #9
def estimate_atmosphere(tod, region_cut, srate, fknee, alpha):
    model = gapfill.gapfill_joneig(tod, region_cut, inplace=False)
    ft = fft.rfft(model)
    freq = fft.rfftfreq(model.shape[-1]) * srate
    flt = 1 / (1 + (freq / fknee)**alpha)
    ft *= flt
    fft.ifft(ft, model, normalize=True)
    return model
Example #10
def broaden_beam_hor(tod, d, ibeam, obeam):
    ft = fft.rfft(tod)
    k = 2 * np.pi * fft.rfftfreq(d.nsamp, 1 / d.srate)
    el = np.mean(d.boresight[2, ::100])
    skyspeed = d.speed * np.cos(el)
    sigma = (obeam**2 - ibeam**2)**0.5
    ft *= np.exp(-0.5 * (sigma / skyspeed)**2 * k**2)
    fft.ifft(ft, tod, normalize=True)
Example #11
def takeGrad(stamp, lyMap, lxMap):

    f = fft(stamp, axes=[-2, -1])

    return ifft(lyMap * f * 1j, axes=[-2, -1],
                normalize=True).real, ifft(lxMap * f * 1j,
                                           axes=[-2, -1],
                                           normalize=True).real
Example #12
def estimate_atmosphere(tod, region_cut, srate, fknee, alpha):
	model = gapfill.gapfill_joneig(tod, region_cut, inplace=False)
	ft   = fft.rfft(model)
	freq = fft.rfftfreq(model.shape[-1])*srate
	flt  = 1/(1+(freq/fknee)**alpha)
	ft  *= flt
	fft.ifft(ft, model, normalize=True)
	return model
Example #13
def deproject_vecs_smooth(tods, dark, nmode=50, cuts=None, deslope=True, inplace=True):
	if not inplace: tods=tods.copy()
	dark = dark.copy()
	ftod  = fft.rfft(tods)
	fdark = fft.rfft(dark)
	fdark = todops.smooth_basis_fourier(ftod, fdark)
	smooth= np.zeros((fdark.shape[0],dark.shape[1]),dtype=dark.dtype)
	fft.ifft(fdark, smooth, normalize=True)
	todops.fit_basis(tods, smooth, highpass=nmode, cuts=cuts, clean_tod=True)
	if deslope: utils.deslope(tods, w=8, inplace=True)
Example #14
	def __call__(self, x):
		xmap = self.dof.unzip(x)
		res  = xmap*0
		for info in self.infos:
			t  = [time.time()]
			work  = xmap*info.H
			t.append(time.time())
			umap  = info.U.apply(work)
			t.append(time.time())
			fmap  = fft.fft(umap+0j, axes=[-2,-1])
			t.append(time.time())
			fmap  = info.N.apply(fmap, exp=0.5)
			t.append(time.time())
			if info.W is not None:
				fmap = info.W.apply(fmap)
			t.append(time.time())
			fmap  = info.N.apply(fmap, exp=0.5)
			t.append(time.time())
			umap  = fft.ifft(fmap, umap+0j, axes=[-2,-1], normalize=True).real
			t.append(time.time())
			work = enmap.samewcs(info.U.trans(umap, work),work)
			t.append(time.time())
			work *= info.H
			t.append(time.time())
			t = np.array(t)
			print(" %4.2f"*(len(t)-1) % tuple(t[1:]-t[:-1]))
			res  += work
		res = utils.allreduce(res,comm)
		return self.dof.zip(res)
Example #15
def smooth(data, modLMap, gauss_sigma_arcmin):
    kMap = fft(data, axes=[-2, -1])
    sigma = np.deg2rad(gauss_sigma_arcmin / 60.)
    beamTemplate = np.nan_to_num(1. / np.exp(
        (sigma**2.) * (modLMap**2.) / (2.)))
    kMap[:, :] = np.nan_to_num(kMap[:, :] * beamTemplate[:, :])
    return ifft(kMap, axes=[-2, -1], normalize=True).real
Example #16
    def getMap(self, stepFilterEll=None):
        """
        Modified from sudeepdas/flipper
        Generates a GRF from an input power spectrum specified as ell, Cell.
        BufferFactor = 1 means the map will have periodic boundary conditions.
        BufferFactor > 1 means the map will be generated on a patch bufferFactor times
        larger in each dimension and then cut out so as to have non-periodic bcs.

        Fills the data field of the map with the GRF realization
        """

        realPart = self.sqp * np.random.randn(self.bNy, self.bNx)
        imgPart = self.sqp * np.random.randn(self.bNy, self.bNx)

        kMap = realPart + 1.j * imgPart

        if stepFilterEll is not None:
            kMap[self.modLMap > stepFilterEll] = 0.

        data = np.real(ifft(kMap, axes=[-2, -1], normalize=True))

        data = data[int((self.b - 1) / 2) * self.Ny:int((self.b + 1) / 2) *
                    self.Ny,
                    int((self.b - 1) / 2) * self.Nx:int((self.b + 1) / 2) *
                    self.Nx]

        return data - data.mean()
Example #17
 def __call__(self, x):
     xmap = self.dof.unzip(x)
     res = xmap * 0
     for info in self.infos:
         t = [time.time()]
         work = xmap * info.H
         t.append(time.time())
         umap = info.U.apply(work)
         t.append(time.time())
         fmap = fft.fft(umap + 0j, axes=[-2, -1])
         t.append(time.time())
         fmap = info.N.apply(fmap, exp=0.5)
         t.append(time.time())
         if info.W is not None:
             fmap = info.W.apply(fmap)
         t.append(time.time())
         fmap = info.N.apply(fmap, exp=0.5)
         t.append(time.time())
         umap = fft.ifft(fmap, umap + 0j, axes=[-2, -1],
                         normalize=True).real
         t.append(time.time())
         work = enmap.samewcs(info.U.trans(umap, work), work)
         t.append(time.time())
         work *= info.H
         t.append(time.time())
         t = np.array(t)
         print(" %4.2f" * (len(t) - 1) % tuple(t[1:] - t[:-1]))
         res += work
     res = utils.allreduce(res, comm)
     return self.dof.zip(res)
Example #18
def takeCurl(vecStampX, vecStampY, lxMap, lyMap):

    fX = fft(vecStampX, axes=[-2, -1])
    fY = fft(vecStampY, axes=[-2, -1])

    return ifft((lxMap * fY - lyMap * fX) * 1j, axes=[-2, -1],
                normalize=True).real
Example #19
def l(cseed,kseed,returnk=False,index=None):
    cname = fout_dir+"lensed_covseed_"+str(args.covseed).zfill(3)+"_cmbseed_"+str(cseed).zfill(5)+"_kseed_"+str(kseed).zfill(5)+".hdf"
    if unlensed:
        seedroot = (covseed)*Nsets*Nsims
        lensedt = parray_dat.get_unlensed_cmb(seed=seedroot+cseed,scalar=False)
    else:
        lensedt = enmap.read_map(cname)[0] if polsims else enmap.read_map(cname)
    # -- add beam and noise if you want --
    if "noiseless" not in expf_name:
        assert index is not None
        if rank==0: print("Adding beam...")
        flensed = fftfast.fft(lensedt,axes=[-2,-1])
        flensed *= parray_dat.lbeam
        lensedt = fftfast.ifft(flensed,axes=[-2,-1],normalize=True).real
        if rank==0: print("Adding noise...")
        seedroot = (covseed+1)*Nsets*Nsims # WARNING: noise sims will be correlated with CMB from the next covseed
        nseed = seedroot+index
        noise = parray_dat.get_noise_sim(seed=nseed)
        if paper:
            cents, noise1d = lbinner.bin(power(noise)[0])
            mpibox.add_to_stats('noisett',noise1d)        
        lensedt += noise
        
    lensedt = enmap.ndmap(lensedt,wcs_dat)
    
    if returnk:
        kname = fout_dir+"kappa_covseed_"+str(args.covseed).zfill(3)+"_kseed_"+str(kseed).zfill(5)+".hdf"
        return lensedt,enmap.read_map(kname)
    else:
        return lensedt
Example #20
 def apply(self, arr, inplace=False):
     # Because of our padding and multiplication by the hitcount
     # before this, we should be safely apodized, and can assume
     # periodic boundaries
     if not inplace: arr = np.array(arr)
     ft = fft.rfft(arr, axes=[-2])
     ft *= self.spec_full[:, None]
     return fft.ifft(ft, arr, axes=[-2], normalize=True)
Example #21
	def apply(self, arr, inplace=False):
		# Because of our padding and multiplication by the hitcount
		# before this, we should be safely apodized, and can assume
		# periodic boundaries
		if not inplace: arr = np.array(arr)
		ft = fft.rfft(arr, axes=[-2])
		ft *= self.spec_full[:,None]
		return fft.ifft(ft, arr, axes=[-2], normalize=True)
Example #22
	def A(self, x):
		map = self.dof.unzip(x)
		res = map*0
		for work in self.workspaces:
			# This is normally P'N"P. In our case
			wmap = enmap.zeros(work.geometry.shape, work.geometry.lwcs, work.geometry.dtype)
			work.pmat.forward(wmap, map)
			#wmap[:] = array_ops.matmul(work.hdiv_norm_sqrt, wmap, [0,1])
			wmap *= work.hdiv_norm_sqrt
			ft  = fft.rfft(wmap)
			ft *= work.wfilter
			fft.ifft(ft, wmap, normalize=True)
			wmap *= work.hdiv_norm_sqrt
			# Noise weighting would go here. No weighting for now
			#wmap[:] = array_ops.matmul(np.rollaxis(work.hdiv_norm_sqrt,1), wmap, [0,1])
			work.pmat.backward(wmap, res)
		res = utils.allreduce(res, self.comm)
		return self.dof.zip(res)
Example #23
 def A(self, x):
     map = self.dof.unzip(x)
     res = map * 0
     for work in self.workspaces:
         # This is normally P'N"P. In our case
         wmap = enmap.zeros(work.geometry.shape, work.geometry.lwcs,
                            work.geometry.dtype)
         work.pmat.forward(wmap, map)
         #wmap[:] = array_ops.matmul(work.hdiv_norm_sqrt, wmap, [0,1])
         wmap *= work.hdiv_norm_sqrt
         ft = fft.rfft(wmap)
         ft *= work.wfilter
         fft.ifft(ft, wmap, normalize=True)
         wmap *= work.hdiv_norm_sqrt
         # Noise weighting would go here. No weighting for now
         #wmap[:] = array_ops.matmul(np.rollaxis(work.hdiv_norm_sqrt,1), wmap, [0,1])
         work.pmat.backward(wmap, res)
     res = utils.allreduce(res, self.comm)
     return self.dof.zip(res)
Example #24
def stepFunctionFilterLiteMap(map2d, modLMap, ellMax, ellMin=None):

    kmap = fft(map2d.copy(), axes=[-2, -1])
    kmap[modLMap > ellMax] = 0.
    if ellMin is not None:
        kmap[modLMap < ellMin] = 0.

    retMap = ifft(kmap, axes=[-2, -1], normalize=True).real

    return retMap
Example #25
	def apply(self, arr, inplace=False):
		# Because of our padding and multiplication by the hitcount
		# before this, we should be safely apodized, and can assume
		# periodic boundaries
		if not inplace: arr = np.array(arr)
		carr = arr.astype(complex)
		ft   = fft.fft(carr, axes=[-2,-1])
		ft  *= self.inv_ps
		carr = fft.ifft(ft, carr, axes=[-2,-1], normalize=True)
		arr  = carr.real
		return arr
Example #26
 def apply(self, arr, inplace=False):
     # Because of our padding and multiplication by the hitcount
     # before this, we should be safely apodized, and can assume
     # periodic boundaries
     if not inplace: arr = np.array(arr)
     carr = arr.astype(complex)
     ft = fft.fft(carr, axes=[-2, -1])
     ft *= self.inv_ps
     carr = fft.ifft(ft, carr, axes=[-2, -1], normalize=True)
     arr = carr.real
     return arr
Example #27
def resample_fft(d, n, axes=None):
    """Resample numpy array d via fourier-reshaping. Requires periodic data.
    n indicates the desired output lengths of the axes that are to be
    resampled. By default the last len(n) axes are resampled, but this
    can be controlled via the axes argument."""
    d = np.asanyarray(d)
    # Compute output lengths from factors if necessary
    n = np.atleast_1d(n)
    if axes is None: axes = np.arange(-len(n), 0)
    else: axes = np.atleast_1d(axes)
    if len(n) == 1: n = np.repeat(n, len(axes))
    else: assert len(n) == len(axes)
    assert len(n) <= d.ndim
    # Nothing to do?
    if np.all(d.shape[-len(n):] == n): return d
    # Use the simple version if we can. It has lower memory overhead
    if d.ndim == 2 and len(n) == 1 and (axes[0] == 1 or axes[0] == -1):
        return resample_fft_simple(d, n[0])
    # Perform the fourier transform
    fd = fft.fft(d, axes=axes)
    # Frequencies are 0 1 2 ... N/2 (-N)/2 (-N)/2+1 .. -1
    # Ex 0* 1 2* -1 for n=4 and 0* 1 2 -2 -1 for n=5
    # To upgrade,   insert (n_new-n_old) zeros after n_old/2
    # To downgrade, remove (n_old-n_new) values after n_new/2
    # The idea is simple, but arbitrary dimensionality makes it
    # complicated.
    norm = 1.0
    for ax, nnew in zip(axes, n):
        ax %= d.ndim
        nold = d.shape[ax]
        dn = nnew - nold
        if dn > 0:
            padvals = np.zeros(fd.shape[:ax] + (dn, ) + fd.shape[ax + 1:],
                               fd.dtype)
            spre = tuple([slice(None)] * ax + [slice(0, nold // 2)] +
                         [slice(None)] * (fd.ndim - ax - 1))
            spost = tuple([slice(None)] * ax + [slice(nold // 2, None)] +
                          [slice(None)] * (fd.ndim - ax - 1))
            fd = np.concatenate([fd[spre], padvals, fd[spost]], axis=ax)
        elif dn < 0:
            spre = tuple([slice(None)] * ax + [slice(0, nnew // 2)] +
                         [slice(None)] * (fd.ndim - ax - 1))
            spost = tuple([slice(None)] * ax + [slice(nnew // 2 - dn, None)] +
                          [slice(None)] * (fd.ndim - ax - 1))
            fd = np.concatenate([fd[spre], fd[spost]], axis=ax)
        norm *= float(nnew) / nold
    # And transform back
    res = fft.ifft(fd, axes=axes, normalize=True)
    del fd
    res *= norm
    return res if np.issubdtype(d.dtype, np.complexfloating) else res.real
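A minimal usage sketch (array sizes are illustrative; the data should be periodic along the resampled axes for the result to be meaningful):

import numpy as np

sig = np.sin(np.linspace(0, 2 * np.pi, 100, endpoint=False))  # periodic 1D signal
up  = resample_fft(sig, 200)                        # upsample last axis to 200 samples
img = np.random.standard_normal((60, 80))
big = resample_fft(img, [120, 160], axes=[-2, -1])  # resample both axes of a 2D array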
Example #28
def deconvolveBeam(data,
                   modLMap,
                   beamTemplate,
                   lowPass=None,
                   returnFTOnly=False):

    kMap = fft(data, axes=[-2, -1])

    kMap[:, :] = (kMap[:, :] / beamTemplate[:, :])
    if lowPass is not None: kMap[modLMap > lowPass] = 0.
    if returnFTOnly:
        return kMap
    else:
        return ifft(kMap, axes=[-2, -1], normalize=True).real
Example #29
def downsample_fft_simple(d, factor=0.5, ngroup=100):
    """Resample 2d numpy array d via fourier-reshaping along
    last axis."""
    if factor == 1: return d
    nold = d.shape[1]
    nnew = int(nold * factor)
    res = np.zeros([d.shape[0], nnew], dtype=d.dtype)
    dn = nnew - nold
    for di in range(0, d.shape[0], ngroup):
        fd = fft.fft(d[di:di + ngroup])
        fd = np.concatenate([fd[:, :nnew // 2], fd[:, nnew // 2 - dn:]], 1)
        res[di:di + ngroup] = fft.ifft(fd, normalize=True).real
    del fd
    res *= factor
    return res
Example #30
def filter_map(data2d,
               filter2d,
               modLMap,
               lowPass=None,
               highPass=None,
               keep_mean=True):
    kMap = fft(data2d, axes=[-2, -1])
    if keep_mean:
        mean_val = kMap[modLMap < 1]

    kMap[:, :] = np.nan_to_num(kMap[:, :] * filter2d[:, :])
    if lowPass is not None: kMap[modLMap > lowPass] = 0.
    if highPass is not None: kMap[modLMap < highPass] = 0.

    if keep_mean: kMap[modLMap < 1] = mean_val
    return ifft(kMap, axes=[-2, -1], normalize=True).real
Example #31
 def __init__(self,
              fwhm,
              vel,
              dec_ref,
              fknee,
              alpha,
              nsigma=1000,
              nsub=50,
              order=3):
     # Vel is [dra,ddec]/s.
     sigma = fwhm / (8 * np.log(2))**0.5
     res = sigma / nsub
     vel = np.array(vel)
     vel[0] *= np.cos(dec_ref)
     speed = np.sum(vel**2)**0.5
     # Build coordinate system along velocity
     npoint = 2 * nsigma * nsub
     x = (np.arange(npoint) - npoint / 2) * res
     # Build the beam along the velocity
     vbeam = np.exp(-0.5 * (x**2 / sigma**2))
     # Apply fourier filter. Our angular step size is res radians. This
     # corresponds to a time step of res/speed in seconds.
     fbeam = fft.rfft(vbeam)
     freq = fft.rfftfreq(npoint, res / speed)
     fbeam[1:] /= 1 + (freq[1:] / fknee)**-alpha
     vbeam = fft.ifft(fbeam, vbeam, normalize=True)
     # Beam should be zero at large distances
     vbeam -= vbeam[0]
     # Prefilter for fast lookups
     vbeam = utils.interpol_prefilter(vbeam, npre=0, order=order)
     # The total beam will be this beam times a normal one in the
     # perpendicular direction.
     self.dec_ref = dec_ref
     self.e_para = vel / np.sum(vel**2)**0.5
     self.e_orto = np.array([-self.e_para[1], self.e_para[0]])
     self.sigma = sigma
     self.res = res
     self.vbeam = vbeam
     self.order = order
     # Not really necessary to store these
     self.fwhm = fwhm
     self.vel = vel  # physical
     self.fknee = fknee
     self.alpha = alpha
Example #32
def resample_fft(d, factors=[0.5], axes=None):
    """Resample numpy array d via fourier-reshaping. Requires periodic data.
	"factors" indicates the factors by which the axis lengths should be
    increased. If fewer factors are specified than the number of axes,
    the numbers apply to the last N axes, unless the "axes" argument
    is used to specify which ones."""
    if np.allclose(factors, 1): return d
    factors = np.atleast_1d(factors)
    assert len(factors) <= d.ndim
    if axes is None: axes = np.arange(-len(factors), 0)
    assert len(axes) == len(factors)
    if d.ndim == 2 and len(factors) == 1 and factors[0] < 1:
        return downsample_fft_simple(d, factors[0])
    fd = fft.fft(d, axes=axes)
    # Frequencies are 0 1 2 ... N/2 (-N)/2 (-N)/2+1 .. -1
    # Ex 0* 1 2* -1 for n=4 and 0* 1 2 -2 -1 for n=5
    # To upgrade,   insert (n_new-n_old) zeros after n_old/2
    # To downgrade, remove (n_old-n_new) values after n_new/2
    # The idea is simple, but arbitrary dimensionality makes it
    # complicated.
    for ax, factor in zip(axes, factors):
        ax %= d.ndim
        nold = d.shape[ax]
        nnew = int(nold * factor + 0.5)
        dn = nnew - nold
        if dn > 0:
            padvals = np.zeros(fd.shape[:ax] + (dn, ) + fd.shape[ax + 1:])
            spre = tuple([slice(None)] * ax + [slice(0, nold // 2)] +
                         [slice(None)] * (fd.ndim - ax - 1))
            spost = tuple([slice(None)] * ax + [slice(nold // 2, None)] +
                          [slice(None)] * (fd.ndim - ax - 1))
            fd = np.concatenate([fd[spre], padvals, fd[spost]], axis=ax)
        elif dn < 0:
            spre = tuple([slice(None)] * ax + [slice(0, nnew // 2)] +
                         [slice(None)] * (fd.ndim - ax - 1))
            spost = tuple([slice(None)] * ax + [slice(nnew // 2 - dn, None)] +
                          [slice(None)] * (fd.ndim - ax - 1))
            fd = np.concatenate([fd[spre], fd[spost]], axis=ax)
    # And transform back
    res = fft.ifft(fd, axes=axes, normalize=True)
    del fd
    res *= np.prod(factors)
    return res if np.issubdtype(d.dtype, np.complexfloating) else res.real
Example #33
def resample_fft_simple(d, n, ngroup=100):
    """Resample 2d numpy array d via fourier-reshaping along
    last axis."""
    nold = d.shape[1]
    if n == nold: return d
    res = np.zeros([d.shape[0], n], dtype=d.dtype)
    dn = n - nold
    for di in range(0, d.shape[0], ngroup):
        fd = fft.fft(d[di:di + ngroup])
        if n < nold:
            fd = np.concatenate([fd[:, :n // 2], fd[:, n // 2 - dn:]], 1)
        else:
            fd = np.concatenate([
                fd[:, :nold // 2],
                np.zeros([len(fd), n - nold], fd.dtype), fd[:, nold // 2:]
            ], -1)
        res[di:di + ngroup] = fft.ifft(fd, normalize=True).real
    del fd
    res *= float(n) / nold
    return res
Example #34
	def __init__(self, fwhm, vel, dec_ref, fknee, alpha, nsigma=1000, nsub=50, order=3):
		# Vel is [dra,ddec]/s.
		sigma  = fwhm/(8*np.log(2))**0.5
		res    = sigma/nsub
		vel    = np.array(vel)
		vel[0]*= np.cos(dec_ref)
		speed  = np.sum(vel**2)**0.5
		# Build coordinate system along velocity
		npoint = 2*nsigma*nsub
		x      = (np.arange(npoint)-npoint/2)*res
		# Build the beam along the velocity
		vbeam  = np.exp(-0.5*(x**2/sigma**2))
		# Apply fourier filter. Our angular step size is res radians. This
		# corresponds to a time step of res/speed in seconds.
		fbeam  = fft.rfft(vbeam)
		freq   = fft.rfftfreq(npoint, res/speed)
		fbeam[1:] /= 1 + (freq[1:]/fknee)**-alpha
		vbeam  = fft.ifft(fbeam, vbeam, normalize=True)
		# Beam should be zero at large distances
		vbeam -= vbeam[0]
		# Prefilter for fast lookups
		vbeam  = utils.interpol_prefilter(vbeam, npre=0, order=order)
		# The total beam will be this beam times a normal one in the
		# perpendicular direction.
		self.dec_ref = dec_ref
		self.e_para  = vel/np.sum(vel**2)**0.5
		self.e_orto  = np.array([-self.e_para[1],self.e_para[0]])
		self.sigma   = sigma
		self.res     = res
		self.vbeam   = vbeam
		self.order   = order
		# Not really necessary to store these
		self.fwhm  = fwhm
		self.vel   = vel # physical
		self.fknee = fknee
		self.alpha = alpha
Example #35
	# Generate planet cut
	with bench.show("planet cut"):
		planet_cut = cuts.avoidance_cut(d.boresight, d.point_offset, d.site,
				args.planet, R)
	# Subtract atmospheric model
	with bench.show("atm model"):
		model= gapfill.gapfill_joneig(tod, planet_cut, inplace=False)
	# Estimate noise level
	asens = np.sum(ivar)**-0.5 / d.srate**0.5
	print(asens)
	with bench.show("smooth"):
		ft   = fft.rfft(model)
		freq = fft.rfftfreq(model.shape[-1])*d.srate
		flt  = 1/(1+(freq/model_fknee)**model_alpha)
		ft  *= flt
		fft.ifft(ft, model, normalize=True)
		del ft, flt, freq
	with bench.show("atm subtract"):
		tod -= model
		del model
		tod  = tod.astype(dtype, copy=False)
	# Should now be reasonably clean of correlated noise.
	# Proceed to make simple binned map
	with bench.show("actscan"):
		scan = actscan.ACTScan(entry, d=d)
	with bench.show("pmat"):
		pmap = pmat.PmatMap(scan, area, sys=sys)
		pcut = pmat.PmatCut(scan)
		rhs  = enmap.zeros((ncomp,)+shape, area.wcs, dtype)
		div  = enmap.zeros((ncomp,ncomp)+shape, area.wcs, dtype)
		junk = np.zeros(pcut.njunk, dtype)
Example #36
	def hpass(a, n):
		f = fft.rfft(a)
		f[...,:n] = 0
		return fft.ifft(f,a.copy(),normalize=True)
Example #37
def convolve(map, fmap):
	return fft.ifft(fft.fft(map, axes=(-2,-1))*fmap,axes=(-2,-1), normalize=True).real
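A hedged usage sketch: fmap must be a Fourier-space kernel sampled on the map's full 2D frequency grid. Here it is a flat-sky Gaussian beam built by hand; imap is an assumed input map, and pixsize and sigma (both in radians) are assumed scalars:

import numpy as np

# Hypothetical smoothing: multiply by a Gaussian beam in Fourier space.
ly = np.fft.fftfreq(imap.shape[-2], pixsize) * 2 * np.pi
lx = np.fft.fftfreq(imap.shape[-1], pixsize) * 2 * np.pi
l  = np.sqrt(ly[:, None]**2 + lx[None, :]**2)     # |ell| for each Fourier pixel
fmap = np.exp(-0.5 * (l * sigma)**2)              # Gaussian beam transfer function
smoothed = convolve(imap, fmap)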
Example #38
				d = d[:,:]
			except errors.DataMissing as e:
				L.debug("Skipped %s (%s)" % (id, e.message))
				continue
			L.debug("Processing %s" % id)
			# Get the actual tod
			tod = d.get_samples()
			tod -= np.mean(tod,1)[:,None]
			tod = tod.astype(dtype)
			# Compute the per-detector spectrum
			ft  = fft.rfft(tod) * d.nsamp ** -0.5
			tfilter, binds = measure_inv_noise_spectrum(ft, nbin)
			# Apply inverse noise weighting to the tod
			ft *= tfilter[:,binds]
			ft *= d.nsamp ** -0.5
			fft.ifft(ft, tod)
			del ft
			my_rhs, my_hdiv, my_yhits = project_tod_on_workspace(d, tod, wgeo)
			my_wfilter = project_binned_spec_on_workspace(tfilter, d.srate, my_yhits, wgeo)
			# Add to the totals
			tot_work.rhs  += my_rhs
			tot_work.hdiv += my_hdiv
			tot_work.wfilter += my_wfilter
			tot_work.ids.append(id)
			del my_rhs, my_hdiv, my_yhits, my_wfilter
		# Reduce
		tot_work = tot_work.reduce(comm)
		if comm.rank == 0:
			write_workspace(oname, tot_work)

elif command == "solve":
Example #39
		offset_upos = calc_offset_upos(pattern, offset_array, offset_det, site, rhs, U)
		corrfun     = calc_cmode_corrfun(U.ushape, U.uwcs, offset_upos, corrfun_smoothing)
		W  = WeightMat(U.ushape, corrfun, 4)#ndet)
	else: W = None

	# The H in our equation is related to the hitcount, but isn't exactly it.
	# normalize_hits approximates it using the hitcounts.
	H = normalize_hits(hits)

	# Apply weight to rhs
	if W is not None:
		iH  = 1/np.maximum(H,np.max(H)*1e-2)
		urhs= U.apply(rhs*iH)
		ft  = fft.fft(urhs+0j, axes=[-2,-1])
		ft  = W.apply(ft)
		urhs= fft.ifft(ft, urhs+0j, axes=[-2,-1], normalize=True).real
		rhs = U.trans(urhs, rhs)*H
	
	if rhs_tot is None: rhs_tot = rhs
	else: rhs_tot += rhs

	infos.append(bunch.Bunch(U=U,N=N,H=H,W=W,pattern=pattern,site=site,srate=srate,scale=scale,speed=speed))

rhs = utils.allreduce(rhs_tot, comm)

#info = infos[0]
#foo  = rhs*info.H
#enmap.write_map("test1.fits", foo)
#bar  = enmap.samewcs(info.U.apply(foo),foo)
#enmap.write_map("test2.fits", bar)
#foo  = enmap.samewcs(info.U.trans(bar, foo),foo)
Example #40
		ft    = fft.rfft(tod)
		ps    = np.abs(ft)**2
		rpows = [measure_power(ps,rfreq,drfreq,d.srate) for rfreq,drfreq in zip(rfreqs, drfreqs)]
		rpows = np.array(rpows)
		# Determine the fknee to use. First get a typical spectrum.
		# This does not work well with s16, which currently doesn't
		# have time constants.
		ps     = np.median(ps,0)
		bps    = bin_spectrum(ps, bsize_fknee)
		fknee  = measure_fknee(bps, d.srate/2/ps.size*bsize_fknee)
		#np.savetxt("ps.txt", ps)
		#1/0
		fknee *= args.fknee_mul
		ft[:,0]   = 0
		ft[:,1:] /= 1 + (freqs[1:]/fknee)**-args.alpha
		fft.ifft(ft, tod, normalize=True)
		del ft

	# Estimate white noise level in bins, and weight tod by it
	ivar  = 1/np.mean(tod**2,-1)
	#bivar = 1/np.mean(tod[:,:d.nsamp/bsize_ivar*bsize_ivar].reshape(d.ndet,-1,bsize_ivar)**2,-1)
	#tod   = apply_bivar(tod, bivar, bsize_ivar, inplace=True)
	tod  *= ivar[:,None]

	# Kill ivar outliers
	ivar_tol = 10
	medivar  = np.median(ivar)
	good = (ivar > medivar / ivar_tol)*(ivar < medivar * ivar_tol)
	d.restrict(d.dets[good])
	tod  = tod[good]
	ivar = ivar[good]
Example #41
def highpass(tod, f, srate=400):
	tod = tod.copy()
	ft = fft.rfft(tod)
	ft[:,:int(f/float(srate)*tod.shape[1])] = 0
	fft.ifft(ft, tod, normalize=True)
	return tod
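A minimal usage sketch (detector count, sample rate and cutoff are illustrative; highpass works on a copy, so tod_raw is left untouched):

import numpy as np

tod_raw  = np.random.standard_normal((8, 4000))  # hypothetical (ndet, nsamp) TOD
tod_filt = highpass(tod_raw, f=0.5, srate=400)   # zero all Fourier modes below 0.5 Hz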
Example #42
                d = d[:, :]
            except errors.DataMissing as e:
                L.debug("Skipped %s (%s)" % (id, e.message))
                continue
            L.debug("Processing %s" % id)
            # Get the actual tod
            tod = d.get_samples()
            tod -= np.mean(tod, 1)[:, None]
            tod = tod.astype(dtype)
            # Compute the per-detector spectrum
            ft = fft.rfft(tod) * d.nsamp**-0.5
            tfilter, binds = measure_inv_noise_spectrum(ft, nbin)
            # Apply inverse noise weighting to the tod
            ft *= tfilter[:, binds]
            ft *= d.nsamp**-0.5
            fft.ifft(ft, tod)
            del ft
            my_rhs, my_hdiv, my_yhits = project_tod_on_workspace(d, tod, wgeo)
            my_wfilter = project_binned_spec_on_workspace(
                tfilter, d.srate, my_yhits, wgeo)
            # Add to the totals
            tot_work.rhs += my_rhs
            tot_work.hdiv += my_hdiv
            tot_work.wfilter += my_wfilter
            tot_work.ids.append(id)
            del my_rhs, my_hdiv, my_yhits, my_wfilter
        # Reduce
        tot_work = tot_work.reduce(comm)
        if comm.rank == 0:
            write_workspace(oname, tot_work)
Example #43
    # flensed *= parray_sim.lbeam
    # lensed = fftfast.ifft(flensed,axes=[-2,-1],normalize=True).real
    # if rank==0: print "Adding noise..."
    # noise = parray_sim.get_noise_sim(seed=index+20000)
    # lensed += noise
    # if rank==0: print "Downsampling..."
    # cmb = lensed if abs(pixratio-1.)<1.e-3 else resample.resample_fft(lensed,shape_dat)

    # === ADD NOISE AFTER DOWNSAMPLE
    if rank == 0: print("Beam convolving...")
    olensed = enmap.ndmap(
        lensed.copy() if abs(pixratio - 1.) < 1.e-3 else resample.resample_fft(
            lensed.copy(), shape_dat), wcs_dat)
    flensed = fftfast.fft(olensed, axes=[-2, -1])
    flensed *= parray_dat.lbeam
    lensed = fftfast.ifft(flensed, axes=[-2, -1], normalize=True).real
    if rank == 0: print("Adding noise...")
    noise = parray_dat.get_noise_sim(seed=index + 20000)

    lcents, noise1d = lbinner_dat.bin(fmaps.get_simple_power_enmap(noise))
    mpibox.add_to_stats('noisett', noise1d)

    lensed += noise
    if rank == 0: print("Downsampling...")
    cmb = lensed

    cmb = enmap.ndmap(cmb, wcs_dat)
    if rank == 0: print("Calculating powers for diagnostics...")
    utt2d = fmaps.get_simple_power_enmap(
        enmap.ndmap(
            unlensed if abs(pixratio - 1.) < 1.e-3 else resample.resample_fft(
Example #44
        pl.add(fine_ells, lclbb * fine_ells**2., color="C2", ls="--")
        pl.done(out_dir + "lccomp.png")

        pl = io.Plotter(scaleX='log')
        pl.add(cents, lte * cents**2., color="C0", ls="-")
        pl.add(fine_ells, lclte * fine_ells**2., color="C0", ls="--")
        pl.done(out_dir + "lccompte.png")

    fkmaps = fftfast.fft(measured, axes=[-2, -1])
    if deconvolve_beam: fkmaps = np.nan_to_num(fkmaps / kbeam_dat)

    if maxlike and cluster:
        polcomb = "TT"
        fkmapsdc = np.nan_to_num(fkmaps / kbeam_dat)
        maps = enmap.samewcs(
            fftfast.ifft(fkmapsdc * fMaskCMB_T, normalize=True,
                         axes=[-2, -1]).real, measured)
        #kappa_model = init_kappa_model
        k = 0
        io.quickPlot2d(maps, out_dir + "map_iter_" + str(k).zfill(3) + ".png")

        from scipy.integrate import simps
        Ny, Nx = shape_dat[-2:]
        pixScaleY, pixScaleX = enmap.pixshape(shape_dat, wcs_dat)
        Ukappa = init_kappa_model
        Uft = fftfast.fft(Ukappa, axes=[-2, -1])
        Upower = np.real(Uft * Uft.conjugate())
        Nl2d = qest_maxlike.N.Nlkk[polcomb]
        area = Nx * Ny * pixScaleX * pixScaleY
        Upower = Upower * area / (Nx * Ny)**2
        wfilter = np.nan_to_num(Upower / Nl2d)
Example #45
	# Generate planet cut
	with bench.show("planet cut"):
		planet_cut = cuts.avoidance_cut(d.boresight, d.point_offset, d.site,
				args.planet, R)
	# Subtract atmospheric model
	with bench.show("atm model"):
		model= gapfill.gapfill_joneig(tod, planet_cut, inplace=False)
	# Estimate noise level
	asens = np.sum(ivar)**-0.5 / d.srate**0.5
	print(asens)
	with bench.show("smooth"):
		ft   = fft.rfft(model)
		freq = fft.rfftfreq(model.shape[-1])*d.srate
		flt  = 1/(1+(freq/model_fknee)**model_alpha)
		ft  *= flt
		fft.ifft(ft, model, normalize=True)
		del ft, flt, freq
	with bench.show("atm subtract"):
		tod -= model
		del model
		tod  = tod.astype(dtype, copy=False)
	# Should now be reasonably clean of correlated noise, so we can from now on use
	# a white noise model.
	with bench.show("pmat"):
		P = PmatTot(scan, srcpos, sys=sys)
		N = NmatWhite(ivar)




	with bench.show("pmat"):
Example #46
from __future__ import print_function
import numpy as np
from sympy import Symbol, Function
import sympy
from enlib import fft as efft, enmap, bench
from orphics import maps, io, stats, cosmology, lensing
import os, sys
"""
Routines to reduce and evaluate symbolic mode coupling integrals
"""

ifft = lambda x: efft.ifft(x, axes=[-2, -1], normalize=True)
fft = lambda x: efft.fft(x, axes=[-2, -1])


def factorize_2d_convolution_integral(expr,
                                      l1funcs=None,
                                      l2funcs=None,
                                      groups=None,
                                      validate=True):
    """Reduce a sympy expression of variables l1x,l1y,l2x,l2y,l1,l2 into a sum of 
    products of factors that each depend only on vec(l1), only on vec(l2), or on neither.
    If the expression appeared as the integrand in an integral over vec(l1), where
    vec(l2) = vec(L) - vec(l1), then this reduction allows one to evaluate the
    integral as a function of vec(L) using FFTs instead of as a convolution.
    """

    # Generic message if validation fails
    val_fail_message = "Validation failed. This expression is likely not reducible to FFT form."
    # Get the 2D convolution cartesian variables
    l1x, l1y, l2x, l2y, l1, l2 = get_ells()