Example #1
def get_coadded_tile(mapinfo, box, obeam=None, ncomp=1, dump_dir=None, verbose=False):
	if not overlaps_any(box, boxes): return None
	mapset = mapinfo.read(box, pad=pad, dtype=dtype, verbose=verbose, ncomp=ncomp)
	if mapset is None: return None
	if all([d.insufficient for d in mapset.datasets]): return None
	jointmap.sanitize_maps(mapset)
	jointmap.build_noise_model(mapset)
	if len(mapset.datasets) == 0: return None
	if all([d.insufficient for d in mapset.datasets]): return None
	jointmap.setup_beams(mapset)
	jointmap.setup_target_beam(mapset, obeam)
	jointmap.setup_filter(mapset, mode=args.filter_mode)
	jointmap.setup_background_spectrum(mapset)
	mask    = jointmap.get_mask_insufficient(mapset)
	coadder = jointmap.Coadder(mapset)
	rhs     = coadder.calc_rhs()
	if dump_dir:
		enmap.write_map(dump_dir + "/rhs.fits", rhs)
		enmap.write_map(dump_dir + "/ps_rhs.fits", np.abs(enmap.fft(rhs.preflat[0]))**2)
	map     = coadder.calc_coadd(rhs, dump_dir=dump_dir, verbose=verbose)#, maxiter=1)
	if dump_dir:
		enmap.write_map(dump_dir + "/ps_map.fits", np.abs(enmap.fft(mapdiag(map)))**2)
	div     = coadder.tot_div
	#C       = 1/mapset.datasets[0].iN
	res = bunch.Bunch(rhs=rhs*mask, map=map*mask, div=div*mask)#, C=C)
	#res = bunch.Bunch(rhs=rhs, map=map, div=div)#, C=C)
	return res
Example #2
def get_coadded_tile(mapinfo, box, obeam=None, ncomp=1, dump_dir=None, verbose=False):
	if not overlaps_any(box, boxes): return None
	mapset = mapinfo.read(box, pad=pad, dtype=dtype, verbose=verbose, ncomp=ncomp)
	if mapset is None: return None
	if all([d.insufficient for d in mapset.datasets]): return None
	jointmap.sanitize_maps(mapset)
	jointmap.build_noise_model(mapset)
	if len(mapset.datasets) == 0: return None
	if all([d.insufficient for d in mapset.datasets]): return None
	jointmap.setup_beams(mapset)
	jointmap.setup_target_beam(mapset, obeam)
	jointmap.setup_filter(mapset, mode=args.filter_mode)
	jointmap.setup_background_spectrum(mapset)
	mask    = jointmap.get_mask_insufficient(mapset)
	if args.wiener: coadder = jointmap.Wiener(mapset)
	else:           coadder = jointmap.Coadder(mapset)
	rhs     = coadder.calc_rhs()
	if dump_dir:
		enmap.write_map(dump_dir + "/rhs.fits", rhs)
		enmap.write_map(dump_dir + "/ps_rhs.fits", np.abs(enmap.fft(rhs.preflat[0]))**2)
	map     = coadder.calc_map(rhs, dump_dir=dump_dir, verbose=verbose, cg_tol=args.cg_tol)#, maxiter=1)
	if dump_dir:
		enmap.write_map(dump_dir + "/ps_map.fits", np.abs(enmap.fft(mapdiag(map)))**2)
	div     = coadder.tot_div
	#C       = 1/mapset.datasets[0].iN
	res = bunch.Bunch(rhs=rhs*mask, map=map*mask, div=div*mask)#, C=C)
	#res = bunch.Bunch(rhs=rhs, map=map, div=div)#, C=C)
	return res
Example #3
def get_coadded_tile(mapinfo,
                     box,
                     obeam=None,
                     ncomp=1,
                     dump_dir=None,
                     verbose=False):
    if not overlaps_any(np.sort(box, 0), boxes): return None
    mapset = mapinfo.read(box,
                          pad=pad,
                          dtype=dtype,
                          verbose=verbose,
                          ncomp=ncomp)
    if mapset is None: return None
    if all([d.insufficient for d in mapset.datasets]): return None
    jointmap.sanitize_maps(mapset, detrend=args.detrend)
    jointmap.build_noise_model(mapset)
    if len(mapset.datasets) == 0: return None
    if all([d.insufficient for d in mapset.datasets]): return None
    jointmap.setup_beams(mapset)
    jointmap.setup_target_beam(mapset, obeam)
    jointmap.setup_filter(mapset, mode=args.filter_mode)
    jointmap.setup_background_spectrum(mapset)
    mask = jointmap.get_mask_insufficient(mapset)
    if args.wiener: coadder = jointmap.Wiener(mapset)
    else: coadder = jointmap.Coadder(mapset)
    rhs = coadder.calc_rhs()
    if dump_dir:
        enmap.write_map(dump_dir + "/rhs.fits", rhs)
        enmap.write_map(dump_dir + "/ps_rhs.fits",
                        np.abs(enmap.fft(rhs.preflat[0]))**2)
        with open(dump_dir + "/names.txt", "w") as nfile:
            for name in coadder.names:
                nfile.write(name + "\n")
        ls, weights = coadder.calc_debug_weights()
        np.savetxt(
            dump_dir + "/weights_1d.txt",
            np.concatenate(
                [ls[None], weights.reshape(-1, weights.shape[-1])], 0).T,
            fmt="%15.7e")
        ls, noisespecs = coadder.calc_debug_noise()
        np.savetxt(
            dump_dir + "/noisespecs_1d.txt",
            np.concatenate(
                [ls[None],
                 noisespecs.reshape(-1, noisespecs.shape[-1])], 0).T,
            fmt="%15.7e")
    map = coadder.calc_map(rhs,
                           dump_dir=dump_dir,
                           verbose=verbose,
                           cg_tol=args.cg_tol)  #, maxiter=1)
    if dump_dir:
        enmap.write_map(dump_dir + "/ps_map.fits",
                        np.abs(enmap.fft(mapdiag(map)))**2)
    div = coadder.tot_div
    #C       = 1/mapset.datasets[0].iN
    res = bunch.Bunch(rhs=rhs * mask, map=map * mask, div=div * mask)  #, C=C)
    #res = bunch.Bunch(rhs=rhs, map=map, div=div)#, C=C)
    return res
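
A hedged usage sketch for the tile coadder above; the tile list, target beam and output naming are assumptions, not taken from the original script.

# Hypothetical driver loop. "tile_boxes" (the tiling of the output area),
# "target_beam" and the output file pattern are illustrative; the function
# also relies on the surrounding script's globals (boxes, pad, dtype, args).
for ti, box in enumerate(tile_boxes):
    tile = get_coadded_tile(mapinfo, box, obeam=target_beam, ncomp=3, verbose=True)
    if tile is None: continue  # no overlap with any dataset, or all datasets insufficient
    enmap.write_map("tile_%03d_map.fits" % ti, tile.map)
    enmap.write_map("tile_%03d_div.fits" % ti, tile.div)
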
Example #4
def smooth_tophat(map, rad):
	# Will use flat sky approximation here. It's not a good approximation for
	# our big maps, but this doesn't need to be accurate anyway
	ny,nx = map.shape[-2:]
	refy, refx = ny//2, nx//2
	pos   = map.posmap()
	pos[0] -= pos[0,refy,refx]
	pos[1] -= pos[1,refy,refx]
	r2     = np.sum(pos**2,0)
	kernel = (r2 < rad**2).astype(map.dtype) / (np.pi*rad**2) / map.size**0.5 * map.area()
	kernel = np.roll(kernel,-refy,0)
	kernel = np.roll(kernel,-refx,1)
	res = enmap.ifft(enmap.fft(map)*np.conj(enmap.fft(kernel))).real
	return res
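
A minimal usage sketch for smooth_tophat, assuming a hypothetical input enmap; the radius is given in the same angular units as map.posmap(), i.e. radians.

# Illustrative call -- "imap" stands for some enmap.ndmap read elsewhere; the
# function assumes "import numpy as np" and "from enlib import enmap".
import numpy as np
rad      = 10 * np.pi / (180 * 60)   # 10 arcmin in radians
smoothed = smooth_tophat(imap, rad)  # approximate average over a disc of radius rad
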
Example #5
def smooth_pix(map, pixrad):
    fmap = enmap.fft(map)
    ky = np.fft.fftfreq(map.shape[-2])
    kx = np.fft.fftfreq(map.shape[-1])
    kr2 = ky[:, None]**2 + kx[None, :]**2
    fmap *= np.exp(-0.5 * kr2 * pixrad**2)
    map = enmap.ifft(fmap).real
    return map
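
A usage sketch for smooth_pix; the input map is hypothetical, and pixrad sets the width of the Gaussian low-pass in cycles-per-pixel Fourier units (a real-space sigma of roughly pixrad/(2*pi) pixels).

# Illustrative call -- "imap" is a hypothetical enmap; the function itself
# assumes "import numpy as np" and "from enlib import enmap" at module level.
blurred = smooth_pix(imap, 3.0)  # larger pixrad smooths more
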
Example #6
def sim_points(shape, wcs, info):
	# Simulate the point amplitudes
	N = enmap.area(shape, wcs) * info.density*(180/np.pi)**2
	n = int(N*(info.minamp/info.amp)**(info.alpha+1))
	amps = info.minamp*np.random.uniform(0,1,n)**(1/(info.alpha+1))
	amps = np.maximum(-100*np.abs(info.amp),np.minimum(100*np.abs(info.amp), amps))
	# Simulate the polarization
	psi  = np.random.uniform(0,np.pi,n)
	amps = amps[None,:] * np.array([psi*0+1,np.cos(2*psi)*info.pol,np.sin(2*psi)*info.pol])
	# Simulate positions uniformly in pixels
	ipos = np.array([np.random.uniform(0,shape[-2],n),np.random.uniform(0,shape[-1],n)])
	pos  = enmap.pix2sky(wcs, ipos)
	# Draw the points on a canvas using convolution. This requires all points
	# to have integer pixel positions and the same radius.
	rawmap = enmap.zeros(shape, wcs)
	for i in range(shape[0]):
		rawmap[i][tuple(ipos.astype(int))] = amps[i]
	l = np.sum(enmap.lmap(shape,wcs)**2,0)**0.5
	kernel = np.exp(-0.5*l**2*info.rad**2)
	# actually perform the convolution
	pmap = enmap.ifft(enmap.fft(rawmap)*kernel[None]).real
	print(np.max(pmap), np.min(pmap))
	return pmap
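
A hedged usage sketch for sim_points; the geometry call and all parameter values below are illustrative, and the info container is assumed to be a simple attribute bunch.

# Hypothetical setup: a 2x2 deg patch at 0.5 arcmin resolution with 3 components
# (T,Q,U). "info" only needs the attributes sim_points reads: density (roughly,
# sources per square degree brighter than info.amp), amp and minamp (map units),
# alpha (power-law slope of the amplitude counts), pol (polarization fraction)
# and rad (Gaussian profile width in radians).
import numpy as np
from enlib import enmap, bunch
shape, wcs = enmap.geometry(pos=np.array([[-1,-1],[1,1]])*np.pi/180,
                            res=0.5*np.pi/180/60, proj="car")
info = bunch.Bunch(density=50, amp=100, minamp=10, alpha=-2, pol=0.05,
                   rad=1.5*np.pi/180/60)
pmap = sim_points((3,)+shape, wcs, info)
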
Example #7
def map_fft(x):
    return enmap.fft(x)
Example #8
def smooth_gauss(m, sigma):
    l = np.sum(m.lmap()**2, 0)**0.5
    return np.real(en.ifft(en.fft(m) * np.exp(-0.5 * (l * sigma)**2)))
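
A usage sketch for smooth_gauss, assuming `en` refers to the enlib enmap module and that sigma is given in radians, so the harmonic suppression sets in around ell ~ 1/sigma; the input map is hypothetical.

# Illustrative call -- "imap" is a hypothetical enmap; the function assumes
# "import numpy as np" and "from enlib import enmap as en".
import numpy as np
sigma = 2 * np.pi / (180 * 60)          # 2 arcmin in radians
imap_smooth = smooth_gauss(imap, sigma)
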
Example #9
import numpy as np, argparse
from scipy import ndimage
from enlib import enmap

parser = argparse.ArgumentParser()
parser.add_argument("ifile")
parser.add_argument("ofile")
parser.add_argument("-r", "--apod-radius", type=int, default=64)
args = parser.parse_args()


def make_apod(shape, rad):
    mask = np.zeros(shape[-2:])
    mask[rad:-rad, rad:-rad] = 1
    w = np.maximum(1 - ndimage.distance_transform_edt(1 - mask) / rad, 0) ** 3
    return w


teb = enmap.read_map(args.ifile)
tqu = enmap.harm2map(enmap.fft(teb))
mask = make_apod(teb.shape, args.apod_radius)

tqu_mask = tqu * mask[None]
teb_mask = enmap.ifft(enmap.map2harm(tqu_mask)).real

res = enmap.samewcs([teb, tqu, tqu_mask, teb_mask], teb)
enmap.write_map(args.ofile, res)
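
The script above would be invoked roughly as follows; the script name and file names are placeholders.

#   python apod_test.py teb_in.fits apod_out.fits -r 64
# The output stacks four map sets: the input TEB, its TQU counterpart, the
# apodized TQU, and the TEB recovered back from the apodized TQU.
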
Example #10
    pix = thetaMap.sky2pix(pos, safe=False)

    lensedTQU = lensing.displace_map(map, pix, order=5)
    lensedMapX = enmap.ifft(enmap.map2harm(lensedTQU)).real 
    lensedMapY = lensedMapX.copy()

    if szX:
        lensedMapX += (szMap/TCMB)
    if szY:
        lensedMapY += (szMap/TCMB)
    
    fotX = enmap.fft(lensedMapX,normalize=False)
    fotY = enmap.fft(lensedMapY,normalize=False)

    print("Reconstructing", i, "...")
    qest.updateTEB_X(fotX,alreadyFTed=True)
    qest.updateTEB_Y(fotY,alreadyFTed=True)
    kappa = enmap.samewcs(qest.getKappa(polCombList[0]).real,thetaMap)

    enmap.write_map(saveName+"_kappa_"+str(i)+"_"+str(snap)+".hdf",kappa)
    enmap.write_map(saveName+"_inpkappa_"+str(i)+"_"+str(snap)+".hdf",inputKappaMap)
    enmap.write_map(saveName+"_sz_"+str(i)+"_"+str(snap)+".hdf",szMap)
        



        rgrad_phitt = enmap.grad(rphitt)

        # delens original lensed with current model
        delensed = lensing.delens_map(lensed.copy(),
                                      rgrad_phitt,
                                      nstep=delens_steps,
                                      order=lens_order)
        delensed = enmap.ndmap(
            fmaps.filter_map(delensed,
                             delensed * 0. + 1.,
                             parray_sim.modlmap,
                             lowPass=tellmax,
                             highPass=tellmin), wcs_sim)

        # get fft of delensed map and reconstruct
        llteb = enmap.fft(delensed, normalize=False)
        qest.updateTEB_X(llteb, alreadyFTed=True)
        qest.updateTEB_Y(llteb, alreadyFTed=True)
        with io.nostdout():
            rawkappa = qest.getKappa("TT").real
        kappa_recon = enmap.ndmap(rawkappa, wcs_dat)

        if j == 0:
            # ps_noise = np.zeros((1,1,modlmap_dat.shape[0],modlmap_dat.shape[1]))
            # ps_noise[0,0] = qest.N.Nlkk['TT']
            # ngen = enmap.MapGen(shape_dat,wcs_dat,ps_noise)

            # cents,kp1d = lbinner_dat.bin(qest.N.Nlkk['TT'])
            # # pl.add(cents,kp1d,ls="-")

            # #cluster_power = theory.gCl('kk',modlmap_dat)