Example #1
def apply_apod(div):
    if apod_params is None: return div
    weight = div.preflat[0]
    # Estimate a robust maximum from a coarsely downgraded weight map,
    # so isolated hot pixels don't set the apodization scale.
    maxval = np.max(enmap.downgrade(weight, 50))
    apod   = np.minimum(1, weight / maxval / apod_params[0])**apod_params[1]
    return div * apod
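A note on the example above: it relies on module-level state from its script (an apod_params setting plus numpy/enmap imports). Below is a rough sketch of just the taper math in plain numpy, assuming apod_params = (frac, power), where frac is the weight fraction at which apodization starts and power sets the taper steepness; these names are illustrative, not from the original source.

import numpy as np

def apod_taper(weight, frac=0.3, power=4):
    # Pixels with weight above frac*max keep full weight; below that the
    # taper falls smoothly to zero as (weight/(max*frac))**power.
    maxval = np.max(weight)
    return np.minimum(1.0, weight / maxval / frac)**power

weight = np.random.rand(64, 64)
apod = apod_taper(weight)
print(apod.min(), apod.max())  # both within [0, 1]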
Example #2
def highResPlot2d(array,
                  outPath,
                  down=None,
                  verbose=True,
                  overwrite=True,
                  crange=None):
    if not overwrite:
        if os.path.isfile(outPath): return
    try:
        from enlib import enmap, enplot
    except Exception:
        traceback.print_exc()
        printC(
            "Could not produce plot " + outPath +
            ". High resolution plotting requires enlib, which couldn't be imported. Continuing without plotting.",
            color='fail')
        return

    if (down is not None) and (down != 1):
        downmap = enmap.downgrade(enmap.enmap(array)[None], down)
    else:
        downmap = enmap.enmap(array)[None]
    img = enplot.draw_map_field(downmap,
                                enplot.parse_args("-vvvg moo"),
                                crange=crange)
    img.save(outPath)
    if verbose:
        print(bcolors.OKGREEN + "Saved high-res plot to",
              outPath + bcolors.ENDC)
Example #3
def high_res_plot_img(array,
                      filename=None,
                      down=None,
                      verbose=True,
                      overwrite=True,
                      crange=None,
                      cmap="planck"):
    if not overwrite:
        if os.path.isfile(filename): return
    try:
        from enlib import enmap, enplot
    except Exception:
        traceback.print_exc()
        cprint(
            "Could not produce plot " + filename +
            ". High resolution plotting requires enlib, which couldn't be imported. Continuing without plotting.",
            color='fail')
        return

    if (down is not None) and (down != 1):
        downmap = enmap.downgrade(enmap.enmap(array)[None], down)
    else:
        downmap = enmap.enmap(array)[None]
    img = enplot.draw_map_field(downmap,
                                enplot.parse_args("-c " + cmap + " -vvvg moo"),
                                crange=crange)
    #img = enplot.draw_map_field(downmap,enplot.parse_args("--grid 1"),crange=crange)
    if filename is None:
        img.show()
    else:
        img.save(filename)
        if verbose:
            print(bcolors.OKGREEN + "Saved high-res plot to",
                  filename + bcolors.ENDC)
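A minimal usage sketch for the function above, assuming enlib is importable and that cprint and bcolors are defined elsewhere in the module (the function already depends on them); the filename is a placeholder:

import numpy as np

array = np.random.randn(512, 512)
high_res_plot_img(array, filename="noise_map.png", down=4, crange=(-3, 3))
# With filename=None the image is shown interactively instead of saved.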
Example #4
def flatFitsToHealpix(fitsFile, nside, downgrade=1):

    from enlib import enmap

    imap = enmap.read_map(fitsFile)
    if downgrade > 1:
        imap = enmap.downgrade(imap, downgrade)
    omap = imap.to_healpix(nside=nside)
    return omap
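Hypothetical usage of the function above, writing the projected map with healpy (both filenames are placeholders):

import healpy

hp_map = flatFitsToHealpix("flat_patch.fits", nside=512, downgrade=2)
healpy.write_map("patch_healpix.fits", hp_map)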
Example #5
def prepare(map, hitmap=False):
    """Prepare a map for input by cutting off one pixel along each edge,
	as out-of-bounds data accumulates there, and downgrading to the
	target resolution."""
    # Get rid of polarization for now. Remove this later.
    if map.ndim == 3: map = map[:1]
    # Cut off edge pixels
    map[..., :1, :] = 0
    map[..., -1:, :] = 0
    map[..., :, :1] = 0
    map[..., :, -1:] = 0
    # Downsample
    map = enmap.downgrade(map, args.downgrade)
    if hitmap: map *= args.downgrade**2
    # Pad to fourier-friendly size. Because no cropping
    # is used, this will result in the same padding for
    # all maps.
    map = map.autocrop(method="fft", value="none")
    return map
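Why the hitmap branch multiplies by args.downgrade**2: enmap.downgrade averages blocks of d x d pixels, which is right for a mean quantity like temperature but divides a count like hits per pixel by d**2. A plain-numpy sketch of that bookkeeping:

import numpy as np

def block_average(m, d):
    # Same idea as enmap.downgrade: average over d x d pixel blocks.
    ny, nx = m.shape
    return m[:ny//d*d, :nx//d*d].reshape(ny//d, d, nx//d, d).mean((1, 3))

hits = np.ones((8, 8))            # one hit per input pixel
down = block_average(hits, 2)     # mean hits per output pixel: still 1
print(down[0, 0] * 2**2)          # 4 hits per downgraded pixel, as expected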
Example #6
parser.add_argument("-d", "--downgrade", type=int, default=1)
parser.add_argument("-t", "--thin", type=int, default=1000)
parser.add_argument(
    "-A",
    "--area-model",
    type=str,
    default="exact",
    help=
    "How to model pixel area. exact: Compute shape of each pixel. average: Use a single average number for all"
)
parser.add_argument("--already-arcmin", action="store_true")
args = parser.parse_args()

div = enmap.read_fits(args.div)
if args.downgrade > 1:
    div = enmap.downgrade(div, args.downgrade)
    div *= args.downgrade**2

div = div.reshape((-1, ) + div.shape[-2:])[0]
# Individual pixel area
if args.area_model == "average":
    pix_area = div * 0 + div.area() / div.size * (180 * 60 / np.pi)**2
else:
    pos = div.posmap()
    diffs = utils.rewind(pos[:, 1:, 1:] - pos[:, :-1, :-1], 0)
    pix_area = np.abs(diffs[0] * diffs[1]) * np.cos(pos[0, :-1, :-1])
    del diffs
    # Go to square arcmins
    pix_area /= utils.arcmin**2
    # Pad to recover edge pixels
    pix_area = np.concatenate([pix_area, pix_area[-1:]], 0)
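For the "exact" area model above, the pixel solid angle is estimated from finite differences of the position map, roughly |ddec * dra| * cos(dec), then converted to square arcminutes. A toy numeric check of that formula (the numbers are made up, not from the script):

import numpy as np

arcmin = np.pi / (180 * 60)
ddec = dra = 0.5 * arcmin          # 0.5 arcmin CAR pixels
dec = 60 * np.pi / 180             # evaluated at 60 degrees declination
pix_area = abs(ddec * dra) * np.cos(dec) / arcmin**2
print(pix_area)                    # 0.125 arcmin^2, half the equatorial 0.25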
Example #7
    def analyze(self,
                ref_beam=None,
                mode="weight",
                map_max=1e8,
                div_tol=20,
                apod_val=0.2,
                apod_alpha=5,
                apod_edge=120,
                beam_tol=1e-4,
                ps_spec_tol=0.5,
                ps_smoothing=10,
                filter_kxrad=20,
                filter_highpass=200,
                filter_kx_ymax_scale=1):
        # Find the typical noise levels. We will use this to decide where
        # divs and beams etc. can be truncated to improve convergence.
        datasets = self.datasets
        ncomp = max([
            split.data.map.preflat.shape[0] for dataset in datasets
            for split in dataset.splits
        ])
        for dataset in datasets:
            for split in dataset.splits:
                split.ref_div = robust_ref(split.data.div)
                # Avoid single, crazy pixels
                split.data.div = np.minimum(split.data.div,
                                            split.ref_div * div_tol)
                split.data.div = filter_div(split.data.div)
                split.data.map = np.maximum(
                    -map_max, np.minimum(map_max, split.data.map))
                # Expand map to ncomp components
                split.data.map = add_missing_comps(split.data.map, ncomp)
                # Build apodization
                apod = np.minimum(split.data.div / (split.ref_div * apod_val),
                                  1.0)**apod_alpha
                apod = apod.apod(apod_edge)
                split.data.div *= apod
                split.data.H = split.data.div**0.5
            dataset.ref_div = np.sum(
                [split.ref_div for split in dataset.splits])
        tot_ref_div = np.sum([dataset.ref_div for dataset in datasets])

        ly, lx = enmap.laxes(self.shape, self.wcs)
        lr = (ly[:, None]**2 + lx[None, :]**2)**0.5
        bmin = np.min([beam_size(dataset.beam) for dataset in datasets])
        # Deconvolve all the relative beams. These should ideally include pixel windows.
        # This could matter for Planck.
        if ref_beam is not None:
            for dataset in datasets:
                rel_beam = beam_ratio(dataset.beam, ref_beam)
                # Avoid division by zero
                bspec = np.maximum(eval_beam(rel_beam, lr), 1e-10)
                # We don't want to divide by tiny numbers, so we will cap the relative
                # beam. The goal is just to make sure that the deconvolved noise ends up
                # sufficiently high that anything beyond that is negligible. This will depend
                # on the div ratios between the different datasets. We can stop deconvolving
                # when beam*my_div << (tot_div-my_div). But deconvolving even by a factor
                # 1000 leads to strange numerical errors
                bspec = np.maximum(
                    bspec, beam_tol * (tot_ref_div / dataset.ref_div - 1))
                bspec_dec = np.maximum(bspec, 0.1)
                for split in dataset.splits:
                    split.data.map = map_ifft(
                        map_fft(split.data.map) / bspec_dec)
                # In theory we don't need to worry about the beam any more by this point.
                # But the pixel window might be unknown or missing. So we save the beam so
                # we can make sure the noise model makes sense
                dataset.bspec = bspec
                # We classify this as a low-resolution dataset if we did an appreciable amount of
                # deconvolution
                dataset.lowres = np.min(bspec) < 0.5

        # Can now build the noise model and rhs for each dataset.
        # The noise model is N = HCH, where H = div**0.5 and C is the mean 2d noise spectrum
        # of the whitened map, after some smoothing.
        for dataset in datasets:
            nsplit = 0
            dset_map, dset_div = None, None
            for split in dataset.splits:
                if dset_map is None:
                    dset_map = split.data.map * 0
                    dset_div = split.data.div * 0
                dset_map += split.data.map * split.data.div
                dset_div += split.data.div
            # Form the mean map for this dataset
            dset_map[:, dset_div > 0] /= dset_div[dset_div > 0]
            # Then use it to build the diff maps and noise spectra
            dset_ps = None
            #i=0
            for split in dataset.splits:
                if split.data.empty: continue
                diff = split.data.map - dset_map
                wdiff = diff * split.data.H
                #i+=1
                # What is the healthy area of wdiff? Wdiff should have variance
                # 1 or above. This tells us how to upweight the power spectrum
                # to take into account missing regions of the diff map.
                ndown = 10
                wvar = enmap.downgrade(wdiff**2, ndown)
                goodfrac = np.sum(wvar > 1e-3) / float(wvar.size)
                if goodfrac < 0.1: goodfrac = 0
                ps = np.abs(map_fft(wdiff))**2
                # correct for unhit areas, which can't be whitened
                with utils.nowarn():
                    ps /= goodfrac
                if dset_ps is None:
                    dset_ps = enmap.zeros(ps.shape, ps.wcs, ps.dtype)
                dset_ps += ps
                nsplit += 1
            if nsplit < 2: continue
            # With n splits, mean map has var 1/n, so diff has var (1-1/n) + (n-1)/n = 2*(n-1)/n
            # Hence tot-ps has var 2*(n-1)
            dset_ps /= 2 * (nsplit - 1)
            dset_ps = smooth_pix(dset_ps, ps_smoothing)
            # Use the beam we saved from earlier to make sure we don't have a remaining
            # pixel window giving our high-l parts too high weight. If everything has
            # been correctly deconvolved, we expect high-l dset_ps to go as
            # 1/beam**2. The lower ls will realistically be no lower than this either.
            # So we can simply take the max
            dset_ps_ref = np.min(
                np.maximum(dset_ps, dataset.bspec**-2 * ps_spec_tol * 0.1))
            dset_ps = np.maximum(dset_ps,
                                 dset_ps_ref * dataset.bspec**-2 * ps_spec_tol)
            # Our fourier-space inverse noise matrix is the inverse of this
            if np.all(np.isfinite(dset_ps)):
                iN = 1 / dset_ps
            else:
                iN = enmap.zeros(dset_ps.shape, dset_ps.wcs, dset_ps.dtype)

            # Add any fourier-space masks to this
            if dataset.highpass:
                kxmask = butter(lx, filter_kxrad, -5)
                kxmask = 1 - (1 - kxmask[None, :]) * (
                    np.abs(ly) < bmin * filter_kx_ymax_scale)[:, None]
                highpass = butter(lr, filter_highpass, -10)
                filter = highpass * kxmask
                del kxmask, highpass
            else:
                filter = 1
            if mode != "filter": iN *= filter
            dataset.iN = iN
            dataset.filter = filter
            self.mode = mode
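The filter construction above needs 2-D multipole coordinates for every fourier mode, which enmap.laxes derives from the map geometry. A plain-numpy sketch of the equivalent construction, assuming square pixels of a known size:

import numpy as np

ny, nx = 256, 256
pixsize = 0.5 * np.pi / (180 * 60)              # 0.5 arcmin in radians
ly = np.fft.fftfreq(ny, pixsize) * 2 * np.pi    # multipole along y
lx = np.fft.fftfreq(nx, pixsize) * 2 * np.pi    # multipole along x
lr = (ly[:, None]**2 + lx[None, :]**2)**0.5     # |l| for each 2-D mode
print(lr.shape, lr[0, 0], lr.max())             # (256, 256), 0.0, ~Nyquist l of 21600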
Example #8
        profiles[polcomb] = []
    else:
        apowers[polcomb] = []
        cpowers[polcomb] = []

for i in range(Nsims):
    print(i)

    unlensed = enmap.rand_map(shape_sim, wcs_sim, ps)
    lensed = lensing.lens_map_flat_pix(unlensed, alpha_pix, order=lens_order)
    klteb = enmap.map2harm(lensed)
    klteb_beam = klteb * kbeam_sim
    lteb_beam = enmap.ifft(klteb_beam).real
    noise = enmap.rand_map(shape_sim, wcs_sim, ps_noise, scalar=True)
    observed = lteb_beam + noise
    measured = enmap.downgrade(observed,
                               analysis_pixel_scale / sim_pixel_scale)
    if i == 0:

        #debug()

        shape_dat, wcs_dat = measured.shape, measured.wcs
        lxmap_dat, lymap_dat, modlmap_dat, angmap_dat, lx_dat, ly_dat = fmaps.get_ft_attributes_enmap(
            shape_dat, wcs_dat)
        nT = ntfunc(modlmap_dat)
        nP = npfunc(modlmap_dat)
        kbeam_dat = cmb.gauss_beam(modlmap_dat, beam_arcmin)

        if cluster:
            modr_dat = enmap.modrmap(shape_dat, wcs_dat) * 180. * 60. / np.pi
            bin_edges_dat = np.arange(0., modr_dat.max(), 1.0)
            binner_dat = stats.bin2D(modr_dat, bin_edges_dat)
Example #9
    ftot += m
    ps_auto += m[:, None] * np.conj(m[None, :])

print("Computing cross spectrum")
# Compute auto spectrum
ps_auto /= nfile**2
ftot /= nfile
# Compute total spectrum
ps_tot = ftot[:, None] * np.conj(ftot[None, :])
if len(args.ifiles) > 1:
    # Subtract to get cross spectrum
    ps_cross = ps_tot - ps_auto
else:
    ps_cross = ps_tot
del ps_tot, ps_auto, ftot
ps_cross = enmap.downgrade(ps_cross, args.pregrade)
print(ps_cross.shape)

print("Normalizing")
l = np.sum(ps_cross.lmap()**2, 0)**0.5
l = np.minimum(l, args.lmax_scale)
ps_cross *= ps_cross.area() / ps_cross.npix
ps_cross *= l**2 / (2 * np.pi)

print("Recentering")


# Center l=0
def recenter(m, shape):
    return np.roll(np.roll(m, -shape[-2] // 2, -2), -shape[-1] // 2, -1)
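The subtraction above uses a standard trick: with n maps m_i, the spectrum of the mean contains (1/n^2) sum_ij m_i conj(m_j), so removing the (1/n^2) sum_i |m_i|^2 auto part leaves only the i != j cross terms, whose uncorrelated-noise contribution averages to zero. A toy check with 1-D arrays standing in for the fourier maps:

import numpy as np

n = 8
signal = np.random.randn(100000)
maps = signal + 0.5 * np.random.randn(n, 100000)   # common signal + independent noise
ftot = maps.mean(0)
ps_tot = ftot * np.conj(ftot)
ps_auto = (np.abs(maps)**2).sum(0) / n**2
print((ps_tot - ps_auto).mean())   # ~ var(signal)*(n-1)/n, with the noise cancelled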
Example #10
for i in range(Nsims):
    print(i)
    unlensed = parray_sim.get_unlensed_cmb(seed=(200 + i))
    lensed = lensing.lens_map_flat_pix(unlensed.copy(),
                                       alpha_pix.copy(),
                                       order=lens_order)

    #m, = lensing.rand_map(shape, wcs, ps, lmax=lmax, maplmax=maplmax, seed=(seed,i))

    klteb = enmap.map2harm(lensed.copy())
    klteb_beam = klteb * kbeam_sim
    lteb_beam = enmap.ifft(klteb_beam).real
    noise = 0.  #parray_sim.get_noise_sim(seed=(300+i))
    observed = lteb_beam + noise
    measured = enmap.downgrade(observed, pixratio)
    if i == 0:

        shape_dat, wcs_dat = measured.shape, measured.wcs
        modr_dat = parray_dat.modrmap * 180. * 60. / np.pi

        # === ESTIMATOR ===

        template_dat = fmaps.simple_flipper_template_from_enmap(
            shape_dat, wcs_dat)
        lxmap_dat, lymap_dat, modlmap_dat, angmap_dat, lx_dat, ly_dat = fmaps.get_ft_attributes_enmap(
            shape_dat, wcs_dat)

        taper_percent = 15.0
        Ny, Nx = shape_dat
        taper = fmaps.cosineWindow(Ny,
Example #11
		rhs  = enmap.zeros((ncomp,)+shape, area.wcs, dtype)
		div  = enmap.zeros((ncomp,ncomp)+shape, area.wcs, dtype)
		junk = np.zeros(pcut.njunk, dtype)
	with bench.show("rhs"):
		tod *= ivar[:,None]
		pcut.backward(tod, junk)
		pmap.backward(tod, rhs)
	with bench.show("hits"):
		for i in range(ncomp):
			div[i,i] = 1
			pmap.forward(tod, div[i])
			tod *= ivar[:,None]
			pcut.backward(tod, junk)
			div[i] = 0
			pmap.backward(tod, div[i])
	with bench.show("map"):
		idiv = array_ops.eigpow(div, -1, axes=[0,1], lim=1e-5)
		map  = enmap.map_mul(idiv, rhs)
	# Estimate central amplitude
	c = np.array(map.shape[-2:])/2
	crad  = 50
	mcent = map[:,c[0]-crad:c[0]+crad,c[1]-crad:c[1]+crad]
	mcent = enmap.downgrade(mcent, 4)
	amp   = np.max(mcent)
	print("%s amp %7.3f asens %7.3f" % (id, amp/1e6, asens))
	with bench.show("write"):
		enmap.write_map("%s%s_map.fits" % (prefix, bid), map)
		enmap.write_map("%s%s_rhs.fits" % (prefix, bid), rhs)
		enmap.write_map("%s%s_div.fits" % (prefix, bid), div)
	del d, scan, pmap, pcut, tod, map, rhs, div, idiv, junk
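array_ops.eigpow above raises the per-pixel ncomp x ncomp div matrix to a power through its eigensystem, with lim acting as a pseudo-inverse cutoff. A small dense-matrix sketch of that idea (my reading of the operation, not enlib's implementation):

import numpy as np

def eigpow_sketch(mat, power, lim=1e-5):
    # Pseudo-power via the eigensystem: eigenvalues below lim*max are
    # zeroed instead of inverted, so unhit pixels don't blow up.
    vals, vecs = np.linalg.eigh(mat)
    good = vals > lim * np.max(vals)
    safe = np.where(good, vals, 1.0)**power * good
    return (vecs * safe) @ vecs.T

div = np.array([[4.0, 1.0], [1.0, 3.0]])
print(np.allclose(eigpow_sketch(div, -1) @ div, np.eye(2)))  # True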
Example #12
def get_map(ifile, args, return_info=False):
	"""Read the specified map, and massage it according to the options
	in args. Relevant ones are sub, autocrop, slice, op, downgrade, scale,
	mask. Retuns with shape [:,ny,nx], where any extra dimensions have been
	flattened into a single one."""
	with warnings.catch_warnings():
		warnings.filterwarnings("ignore")
		toks = ifile.split(":")
		ifile, slice = toks[0], ":".join(toks[1:])
		m0 = enmap.read_map(ifile)
		if args.fix_wcs:
			m0.wcs = enwcs.fix_wcs(m0.wcs)
		# Save the original map, so we can compare its wcs later
		m  = m0
		# Submap slicing currently has wrapping issues
		if args.sub is not None:
			default = [[-90,-180],[90,180]]
			sub  = np.array([[(default[j][i] if q == '' else float(q))*np.pi/180 for j,q in enumerate(w.split(":"))]for i,w in enumerate(args.sub.split(","))]).T
			m = m.submap(sub)
		# Perform a common autocrop across all fields
		if args.autocrop:
			m = enmap.autocrop(m)
		# If necessary, split into stamps. If no stamp splitting occurs,
		# a list containing only the original map is returned
		mlist = extract_stamps(m, args)
		# The stamp stuff is a bit of an ugly hack. This loop and wcslist
		# are parts of that hack.
		for i, m in enumerate(mlist):
			# Downgrade
			downgrade = [int(w) for w in args.downgrade.split(",")]
			m = enmap.downgrade(m, downgrade)
			# Slicing, either at the file name level or through the slice option
			m = eval("m"+slice)
			if args.slice is not None:
				m = eval("m"+args.slice)
			flip = (m.wcs.wcs.cdelt*m0.wcs.wcs.cdelt)[::-1]<0
			assert m.ndim >= 2, "Image must have at least 2 dimensions"
			# Apply arbitrary map operations
			m1 = m
			if args.op is not None:
				m = eval(args.op, {"m":m},np.__dict__)
			# Scale if requested
			scale = [int(w) for w in args.scale.split(",")]
			if np.any(np.array(scale)>1):
				m = enmap.upgrade(m, scale)
			# Flip such that pixels are in PIL or matplotlib convention,
			# such that RA increases towards the left and dec upwards in
			# the final image. Unless a slicing operation on the image
			# overrode this.
			if m.wcs.wcs.cdelt[1] > 0: m = m[...,::-1,:]
			if m.wcs.wcs.cdelt[0] > 0: m = m[...,:,::-1]
			if flip[0]: m = m[...,::-1,:]
			if flip[1]: m = m[...,:,::-1]
			# Update stamp list
			mlist[i] = m
		wcslist = [m.wcs for m in mlist]
		m = enmap.samewcs(np.asarray(mlist),mlist[0])
		if args.stamps is None:
			m, wcslist = m[0], None
		# Flatten pre-dimensions
		mf = m.reshape((-1,)+m.shape[-2:])
		# Stack
		if args.tile is not None:
			toks = [int(i) for i in args.tile.split(",")]
			nrow = toks[0] if len(toks) > 0 else -1
			ncol = toks[1] if len(toks) > 1 else -1
			mf = hwstack(hwexpand(mf, nrow, ncol, args.tile_transpose))[None]
		# Mask bad data
		if args.mask is not None:
			if not np.isfinite(args.mask): mf[np.abs(mf)==args.mask] = np.nan
			else: mf[np.abs(mf-args.mask)<=args.mask_tol] = np.nan
		# Done
		if not return_info: return mf
		else:
			info = bunch.Bunch(fname=ifile, ishape=m.shape, wcslist=wcslist)
			return mf, info
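Hypothetical calls illustrating the file:slice convention parsed above; the filename and the args object are placeholders for the script's parsed options. Everything after the first colon in ifile is applied as a numpy slice to the loaded map before the remaining options run.

m = get_map("map.fits:[0]", args)                    # keep only the first component
mf, info = get_map("map.fits", args, return_info=True)
print(info.fname, info.ishape)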
Example #13
import numpy as np, argparse
from enlib import enmap, utils, bench
parser = argparse.ArgumentParser()
parser.add_argument("div")
parser.add_argument("ofile", nargs="?", default="/dev/stdout")
parser.add_argument("-d", "--downgrade", type=int, default=1)
parser.add_argument("-t", "--thin", type=int, default=1000)
parser.add_argument("-A", "--area-model", type=str, default="exact", help="How to model pixel area. exact: Compute shape of each pixel. average: Use a single average number for all")
args = parser.parse_args()

div = enmap.read_fits(args.div)
if args.downgrade > 1:
	div  = enmap.downgrade(div, args.downgrade)
	div *= args.downgrade**2

div = div.reshape((-1,)+div.shape[-2:])[0]
# Individual pixel area
if args.area_model == "average":
	pix_area = div*0 + div.area()/div.size*(180*60/np.pi)**2
else:
	pos   = div.posmap()
	diffs = utils.rewind(pos[:,1:,1:]-pos[:,:-1,:-1],0)
	pix_area = np.abs(diffs[0]*diffs[1])*np.cos(pos[0,:-1,:-1])
	del diffs
	# Go to square arcmins
	pix_area /= utils.arcmin**2
	# Pad to recover edge pixels
	pix_area = np.concatenate([pix_area,pix_area[-1:]],0)
	pix_area = np.concatenate([pix_area,pix_area[:,-1:]],1)

# Flatten everything
Example #14
	imap = enmap.read_map(imapfiles[i])
	if args.slice: imap = eval("imap"+args.slice)
	# We want y,x-ordering
	off = off[:,::-1]
	box = utils.minmax(off,0)
	dets.append(det)
	offs.append(off)
	boxes.append(box)
	imaps.append(imap)
box = utils.bounding_box(boxes)
box = utils.widen_box(box, rad*5, relative=False)

# We assume that the two maps have the same pixelization
imaps = enmap.samewcs(np.array(imaps), imaps[0])
# Downsample by averaging
imaps = enmap.downgrade(imaps, (1,args.step))
naz   = imaps.shape[-1]

# Ok, build our output geometry
shape, wcs = enmap.geometry(pos=box, res=args.res*utils.arcmin, proj="car", pre=(naz,))
omap = enmap.zeros(shape, wcs, dtype=dtype)

# Normalization
norm = enmap.zeros(shape[-2:],wcs)
norm[0,0] = 1
norm = enmap.smooth_gauss(norm, rad)[0,0]

# Loop through slices and populate
bazs = []
for iaz in range(naz):
	# Get our boresight az
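The norm map built above is the Gaussian kernel's response to a single unit pixel; dividing the smoothed, accumulated data by it later turns kernel-weighted sums into proper averages. A scipy stand-in illustrating the same normalization idea (enmap.smooth_gauss itself is enlib's smoother):

import numpy as np
from scipy.ndimage import gaussian_filter

delta = np.zeros((64, 64))
delta[0, 0] = 1
norm = gaussian_filter(delta, sigma=3, mode="wrap")
print(norm[0, 0], norm.sum())   # peak response, and total kernel weight (= 1)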
Example #15
import numpy as np, argparse, healpy
from enlib import enmap
#import enmap
parser = argparse.ArgumentParser()
parser.add_argument("ifile")
parser.add_argument("ofile")
parser.add_argument("-d", "--downgrade", type=int, default=1)
parser.add_argument("-N", "--nside", type=int, default=0)
args = parser.parse_args()

imap = enmap.read_map(args.ifile)
if args.downgrade > 1:
    imap = enmap.downgrade(imap, args.downgrade)
omap = imap.to_healpix(nside=args.nside)
healpy.write_map(args.ofile, omap)
Example #16
## Generate scalar-only lensed and unlensed map
#m_scal_u, m_scal_l = lensing.rand_map(shape, wcs, cl_scal, cl_phi, seed=args.seed, output="ul", verbose=args.verbose)
## Generate tensor-only lensed and unlensed map
#m_tens_u, m_tens_l = lensing.rand_map(shape, wcs, cl_tens, cl_phi, seed=args.seed, output="ul", verbose=args.verbose)

np.random.seed(args.seed)
phi      = enmap.rand_map(shape[-2:], wcs, cl_phi)
m_scal_u = enmap.rand_map(shape, wcs, cl_scal)
m_tens_u = enmap.rand_map(shape, wcs, cl_tens)
m_scal_l = lensing.lens_map_flat(m_scal_u, phi)
m_tens_l = lensing.lens_map_flat(m_tens_u, phi)

# And the sums
m_tot_u = m_scal_u + m_tens_u
m_tot_l = m_scal_l + m_tens_l

# Convert from TQU to TEB and downgrade
def to_eb(m): return enmap.ifft(enmap.map2harm(m)).real
m_scal_u, m_scal_l, m_tens_u, m_tens_l, m_tot_u, m_tot_l = [
    enmap.downgrade(to_eb(i), os)
    for i in [m_scal_u, m_scal_l, m_tens_u, m_tens_l, m_tot_u, m_tot_l]]

# And output
utils.mkdir(args.odir)
enmap.write_map(args.odir + "/map_scalar.fits", m_scal_u)
enmap.write_map(args.odir + "/map_tensor.fits", m_tens_u)
enmap.write_map(args.odir + "/map_tot.fits", m_tot_u)
enmap.write_map(args.odir + "/map_scalar_lensed.fits", m_scal_l)
enmap.write_map(args.odir + "/map_tensor_lensed.fits", m_tens_l)
enmap.write_map(args.odir + "/map_tot_lensed.fits", m_tot_l)

Example #17
def coadd_tile_data(datasets,
                    box,
                    odir,
                    ps_smoothing=10,
                    pad=0,
                    ref_beam=None,
                    cg_tol=1e-6,
                    dump=False,
                    verbose=False,
                    read_cache=False,
                    write_cache=False,
                    div_max_tol=100,
                    div_div_tol=1e-10):
    # Load data for this box for each dataset
    datasets, ffpad = read_data(datasets,
                                box,
                                odir,
                                pad=pad,
                                verbose=verbose,
                                read_cache=read_cache,
                                write_cache=write_cache)
    # We might not find any data
    if len(datasets) == 0: return None
    # Find the smallest beam size of the datasets
    bmin = np.min([beam_size(dataset.beam) for dataset in datasets])

    # Subtract mean map from each split to get noise maps. Our noise
    # model is HNH, where H is div**0.5 and N is the mean 2d noise spectrum
    # after some smoothing
    rhs, tot_div = None, None
    tot_iN, tot_udiv = None, 0
    for dataset in datasets:
        nsplit = 0
        dset_map, dset_div = None, None
        for split in dataset.splits:
            if dset_map is None:
                dset_map = split.data.map * 0
                dset_div = split.data.div * 0
            dset_map += split.data.map * split.data.div
            dset_div += split.data.div
        # Form the mean map for this dataset
        dset_map[:, dset_div > 0] /= dset_div[dset_div > 0]
        if tot_div is None: tot_div = dset_div * 0
        tot_div += dset_div
        tshape, twcs, tdtype = dset_map.shape, dset_div.wcs, dset_div.dtype
        # Then use it to build the diff maps and noise spectra
        dset_ps = None
        for split in dataset.splits:
            if split.data.empty: continue
            diff = split.data.map - dset_map
            wdiff = diff * split.data.H
            # What is the healthy area of wdiff? Wdiff should have variance
            # 1 or above. This tells us how to upweight the power spectrum
            # to take into account missing regions of the diff map.
            ndown = 10
            wvar = enmap.downgrade(wdiff**2, ndown)
            goodfrac = np.sum(wvar > 1e-3) / float(wvar.size)
            if goodfrac < 0.1: goodfrac = 0
            #opre  = odir + "/" + os.path.basename(split.map)[:-5]
            #enmap.write_map(opre + "_diff.fits", diff)
            #enmap.write_map(opre + "_wdiff.fits", wdiff)
            #enmap.write_map(opre + "_wvar.fits", wvar)
            ps = np.abs(map_fft(wdiff))**2
            #enmap.write_map(opre + "_ps1.fits", ps)
            # correct for unhit areas, which can't be whitened
            #print "A", dataset.name, np.median(ps[ps>0]), medloop(ps), goodfrac
            with utils.nowarn():
                ps /= goodfrac
            #print "B", dataset.name, np.median(ps[ps>0]), medloop(ps), goodfrac
            #enmap.write_map(opre + "_ps2.fits", ps)
            #enmap.write_map(opre + "_ps2d.fits", ps)
            if dset_ps is None:
                dset_ps = enmap.zeros(ps.shape, ps.wcs, ps.dtype)
            dset_ps += ps
            nsplit += 1
        if nsplit < 2: continue
        # With n splits, mean map has var 1/n, so diff has var (1-1/n) + (n-1)/n = 2*(n-1)/n
        # Hence tot-ps has var 2*(n-1)
        dset_ps /= 2 * (nsplit - 1)
        #enmap.write_map(opre + "_ps2d_tot.fits", dset_ps)
        dset_ps = smooth_pix(dset_ps, ps_smoothing)
        #enmap.write_map(opre + "_ps2d_smooth.fits", dset_ps)
        if np.all(np.isfinite(dset_ps)):
            # Super-low values of the spectrum are not realistic. These appear
            # due to beam/pixel smoothing in the planck maps. This will be
            # mostly taken care of when processing the beams, as long as we don't
            # let them get too small
            dset_ps = np.maximum(dset_ps, 1e-7)
            # Optionally cap the max dset_ps, this is mostly to speed up convergence
            if args.max_ps:
                dset_ps = np.minimum(dset_ps, args.max_ps)

            # Our fourier-space inverse noise matrix is based on the inverse noise spectrum
            iN = 1 / dset_ps
            #enmap.write_map(opre + "_iN_raw.fits", iN)
        else:
            print "Setting weight of dataset %s to zero" % dataset.name
            #print np.all(np.isfinite(dset_ps)), np.all(dset_ps>0)
            iN = enmap.zeros(dset_ps.shape, dset_ps.wcs, dset_ps.dtype)

        # Add any fourier-space masks to this
        ly, lx = enmap.laxes(tshape, twcs)
        lr = (ly[:, None]**2 + lx[None, :]**2)**0.5
        if dataset.highpass:
            kxmask = butter(lx, args.kxrad, -3)
            kxmask = 1 - (1 - kxmask[None, :]) * (
                np.abs(ly) < bmin * args.kx_ymax_scale)[:, None]
            highpass = butter(lr, args.highpass, -10)
            filter = highpass * kxmask
            #print "filter weighting", dataset.name
            del kxmask, highpass
        else:
            filter = 1

        if not args.filter: iN *= filter

        # We should deconvolve the relative beam from the maps,
        # but that's numerically nasty. But it can be handled
        # inversely. We want (BiNB + ...)x = (BiNB iB m + ...)
        # where iB is the beam deconvolution operation in map space.
        # Instead of actually doing that operation, we can compute two
        # inverse noise matrixes: iN_A = BiNB for the left hand
        # side and iN_b = BiN for the right hand side. That way we
        # avoid dividing by any huge numbers.

        # Add the relative beam
        iN_A = iN.copy()
        iN_b = iN.copy()
        if ref_beam is not None:
            rel_beam = beam_ratio(dataset.beam, ref_beam)
            bspec = eval_beam(rel_beam, lr)
            iN_A *= bspec**2
            iN_b *= bspec
        #moo = iN*0+filter
        #enmap.write_map(opre + "_filter.fits", moo)
        # Add filter to noise model if we're downweighting
        # rather than filtering.
        dataset.iN_A = iN_A
        dataset.iN_b = iN_b
        dataset.filter = filter
        #print "A", opre
        #enmap.write_map(opre + "_iN_A.fits", iN_A)
        #enmap.write_map(opre + "_iN.fits", iN)

    # Cap to avoid single crazy pixels
    tot_div = np.maximum(tot_div, np.median(tot_div[tot_div > 0]) * 0.01)
    tot_idiv = tot_div * 0
    tot_idiv[tot_div > div_div_tol] = 1 / tot_div[tot_div > div_div_tol]

    # Build the right-hand side. The right-hand side is
    # sum(HNHm)
    if rhs is None: rhs = enmap.zeros(tshape, twcs, tdtype)
    for dataset in datasets:
        i = 0
        for split in dataset.splits:
            if split.data.empty: continue
            #print "MOO", dataset.name, np.max(split.data.map), np.min(split.data.map), np.max(split.data.div), np.min(split.data.div)
            w = split.data.H * split.data.map
            fw = map_fft(w)
            fw *= dataset.iN_b
            if args.filter: fw *= dataset.filter
            w = map_ifft(fw) * split.data.H
            #enmap.write_map(odir + "/%s_%02d_rhs.fits" % (dataset.name, i), w)
            rhs += w
            i += 1
    del w, iN, iN_A, iN_b, filter

    # Now solve the equation
    def A(x):
        global times
        m = enmap.samewcs(x.reshape(rhs.shape), rhs)
        res = m * 0
        times[:] = 0
        ntime = 0
        for dataset in datasets:
            for split in dataset.splits:
                if split.data.empty: continue
                t = [time.time()]
                w = split.data.H * m
                t.append(time.time())
                fw = map_fft(w)
                t.append(time.time())
                fw *= dataset.iN_A
                t.append(time.time())
                w = map_ifft(fw)
                t.append(time.time())
                w *= split.data.H
                t.append(time.time())
                res += w
                for i in range(1, len(t)):
                    times[i - 1] += t[i] - t[i - 1]
                ntime += 1
                #w  = enmap.harm2map(dataset.iN_A*enmap.map2harm(w))
                #w *= split.data.H
                #res += w
                del w
        times /= ntime
        return res.reshape(-1)

    def M(x):
        m = enmap.samewcs(x.reshape(rhs.shape), rhs)
        res = m * tot_idiv
        return res.reshape(-1)

    solver = cg.CG(A, rhs.reshape(-1), M=M)
    for i in range(1000):
        t1 = time.time()
        solver.step()
        t2 = time.time()
        if verbose:
            print "%5d %15.7e %5.2f: %4.2f %4.2f %4.2f %4.2f %4.2f" % (
                solver.i, solver.err, t2 - t1, times[0], times[1], times[2],
                times[3], times[4]), np.std(solver.x)
        if dump and solver.i in [1, 2, 5, 10, 20, 50] + range(100, 10000, 100):
            m = enmap.samewcs(solver.x.reshape(rhs.shape), rhs)
            enmap.write_map(odir + "/step%04d.fits" % solver.i, m)
        if solver.err < cg_tol:
            if dump:
                m = enmap.samewcs(solver.x.reshape(rhs.shape), rhs)
                enmap.write_map(odir + "/step_final.fits", m)
            break
    tot_map = enmap.samewcs(solver.x.reshape(rhs.shape), rhs)
    # Get rid of the fourier padding
    ny, nx = tot_map.shape[-2:]
    tot_map = tot_map[..., :ny - ffpad[0], :nx - ffpad[1]]
    tot_div = tot_div[..., :ny - ffpad[0], :nx - ffpad[1]]
    return bunch.Bunch(map=tot_map, div=tot_div)
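The solve at the end follows the usual preconditioned conjugate-gradient pattern: A is available only as a function (applying H iN H for every split) and M is a cheap approximate inverse built from tot_idiv. A self-contained toy with the same structure; this is a sketch, not enlib's cg.CG:

import numpy as np

def pcg(A, b, M, niter=50, tol=1e-10):
    # Preconditioned CG for A(x) = b, with A and M given as functions.
    x = np.zeros_like(b)
    r = b - A(x)
    z = M(r)
    p = z.copy()
    rz = r @ z
    for _ in range(niter):
        Ap = A(p)
        alpha = rz / (p @ Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        if r @ r < tol * (b @ b): break
        z = M(r)
        rz_new = r @ z
        p = z + (rz_new / rz) * p
        rz = rz_new
    return x

mat = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = pcg(lambda v: mat @ v, b, lambda r: r / np.diag(mat))  # diagonal preconditioner
print(np.allclose(mat @ x, b))  # True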
Example #18
# pl.add(ells,theory.uCl('TT',ells)*ells**2.*TCMB**2.)
# pl.done(outDir+"cls.png")
# sys.exit()

for i in range(N):
    map = enmap.rand_map(shape, wcs, ps) / TCMB

    # massIndex = massIndices[i]
    # inputKappaMap, szMap,M500,z = getKappaSZ(b,snap,massIndex,px,thetaMap.shape)
    # avgM500 += M500
    # avgz += z
    inputKappaMap = kappaMap
    szMap = kappaMap.copy() * 0.

    if int(pxDown / px) > 1:
        inpDown = enmap.downgrade(inputKappaMap, pxDown / px)
        szDown = enmap.downgrade(szMap, pxDown / px)
    else:
        inpDown = inputKappaMap
        szDown = szMap
    trueKappaStack += inpDown
    szStack += szDown

    # === DEFLECTION MAP ===
    a = alphaMaker(thetaMap)
    alpha = a.kappaToAlpha(inputKappaMap, test=False)
    alphamod = 180. * 60. * np.sum(alpha**2, 0)**0.5 / np.pi
    # print "alphaint ", alphamod[thetaMap*60.*180./np.pi<10.].mean()
    pos = thetaMap.posmap() + alpha
    pix = thetaMap.sky2pix(pos, safe=False)
Example #19
def read_map(fname):
    m = enmap.read_map(fname)
    m = m[..., 1:-1, 1:-1]
    #m = eval("m" + args.slice)
    m = enmap.downgrade(m, args.downgrade)
    return m
Example #20
     ],
     nowrap=True)
 tshape, twcs = enmap.downgrade_geometry(tshape, twcs, args.downgrade)
 #print ty, tx, np.mean(enmap.box(tshape, twcs),0)/utils.degree
 # Read in our tile data
 frhss, kmaps, mjds = [], [], []
 for ind in range(comm_intra.rank, len(idirs), comm_intra.size):
     if not overlaps(pboxes[ind], pixbox, nphi):
         #print idirs[ind], "does not overlap", pixbox.reshape(-1), pboxes[ind].reshape(-1)
         continue
     idir = idirs[ind]
     lshape, lwcs = enmap.read_map_geometry(idir + "/frhs.fits")
     pixbox_loc = pixbox - enmap.pixbox_of(wcs, lshape, lwcs)[0]
     kmap = enmap.read_map(idir + "/kmap.fits",
                           pixbox=pixbox_loc).astype(dtype)
     if args.downgrade > 1: kmap = enmap.downgrade(kmap, args.downgrade)
     # Skip tile if it's empty
     if np.any(~np.isfinite(kmap)) or np.all(kmap < 1e-10):
         if args.verbose:
             print "%3d skipping %s (nan or unexposed)" % (comm.rank,
                                                           idir)
         continue
     else:
         if args.verbose: print "%3d read     %s" % (comm.rank, idir)
     frhs = enmap.read_map(idir + "/frhs.fits",
                           pixbox=pixbox_loc).astype(dtype)
     if args.downgrade > 1: frhs = enmap.downgrade(frhs, args.downgrade)
     with h5py.File(idir + "/info.hdf", "r") as hfile:
         mjd = hfile["mjd"][()]
     frhss.append(frhs)
     kmaps.append(kmap)
Example #21
def combine_tiles(ipathfmt,
                  opathfmt,
                  combine=2,
                  downsample=2,
                  itile1=(None, None),
                  itile2=(None, None),
                  tyflip=False,
                  txflip=False,
                  pad_to=None,
                  comm=None,
                  verbose=False):
    """Given a set of tiles on disk at locaiton ipathfmt % {"y":...,"x"...},
	combine them into larger tiles, downsample and write the result to
	opathfmt % {"y":...,"x":...}. x and y must be contiguous and start at 0.
	
	reftile[2] indicates the tile coordinates of the first valid input tile.
	This needs to be specified if not all tiles of the logical tiling are
	physically present.

	tyflip and txflip indicate if the tiles coordinate system is reversed
	relative to the pixel coordinates or not."
	"""
    # Expand combine and downsample to 2d
    combine = np.zeros(2, int) + combine
    downsample = np.zeros(2, int) + downsample
    if pad_to is not None:
        pad_to = np.zeros(2, int) + pad_to
    # Handle optional mpi
    rank, size = (comm.rank, comm.size) if comm is not None else (0, 1)
    # Find the range of input tiles
    itile1, itile2 = find_tile_range(ipathfmt, itile1, itile2)
    # Read the first tile to get its size information
    ibase = enmap.read_map(ipathfmt % {"y": itile1[0], "x": itile1[1]}) * 0
    # Find the set of output tiles we need to consider
    otile1 = itile1 // combine
    otile2 = (itile2 - 1) // combine + 1
    # And loop over them
    oyx = [(oy, ox) for oy in range(otile1[0], otile2[0])
           for ox in range(otile1[1], otile2[1])]
    for i in range(rank, len(oyx), size):
        oy, ox = oyx[i]
        # Read in all associated tiles into a list of lists
        rows = []
        for dy in range(combine[0]):
            iy = oy * combine[0] + dy
            if iy >= itile2[0]: continue
            cols = []
            for dx in range(combine[1]):
                ix = ox * combine[1] + dx
                if ix >= itile2[1]: continue
                if iy < itile1[0] or ix < itile1[1]:
                    # The first tiles are missing on disk, but are
                    # logically a part of the tiling. Use ibase,
                    # which has been zeroed out.
                    cols.append(ibase)
                else:
                    itname = ipathfmt % {"y": iy, "x": ix}
                    cols.append(enmap.read_map(itname))
            if txflip: cols = cols[::-1]
            rows.append(cols)
        # Stack them next to each other into a big tile
        if tyflip: rows = rows[::-1]
        omap = enmap.tile_maps(rows)
        # Downgrade if necessary
        if np.any(downsample > 1):
            omap = enmap.downgrade(omap, downsample)
        if pad_to is not None:
            # Padding happens towards the end of the tiling,
            # which depends on the flip status
            padding = np.array(
                [[0, 0],
                 [pad_to[0] - omap.shape[-2], pad_to[1] - omap.shape[-1]]])
            if tyflip: padding[:, 0] = padding[::-1, 0]
            if txflip: padding[:, 1] = padding[::-1, 1]
            omap = enmap.pad(omap, padding)
        # And output
        otname = opathfmt % {"y": oy, "x": ox}
        utils.mkdir(os.path.dirname(otname))
        enmap.write_map(otname, omap)
        if verbose: print(otname)
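Hypothetical usage of combine_tiles, with placeholder tile paths in the %(y)/%(x) mapping format the function expects: merge 2 x 2 groups of input tiles into half-resolution output tiles.

combine_tiles("itiles/tile_y%(y)03d_x%(x)03d.fits",
              "otiles/tile_y%(y)03d_x%(x)03d.fits",
              combine=2, downsample=2, verbose=True)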