Example #1
	def calc_amp(self, profile):
		ivamp= np.sum(profile**2*self.div)
		if ivamp == 0: return 0, np.inf
		with utils.nowarn():
			vamp = 1/ivamp
			amp  = vamp*np.sum(profile*self.div*self.map)
		if ~np.isfinite(amp): amp = 0
		return amp, vamp
Example #2
 def calc_amp(self, profile):
     ivamp = np.sum(profile**2 * self.div)
     if ivamp == 0: return 0, np.inf
     with utils.nowarn():
         vamp = 1 / ivamp
         amp = vamp * np.sum(profile * self.div * self.map)
     if ~np.isfinite(amp): amp = 0
     return amp, vamp
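Both variants above, like most of the examples that follow, wrap the fragile division in utils.nowarn(), an enlib context manager that silences the divide-by-zero and invalid-value warnings NumPy would otherwise emit when ivamp is zero or the data are degenerate; the non-finite result is then zeroed explicitly afterwards. A minimal sketch of an equivalent context manager, given purely for illustration (the real enlib implementation may differ in detail):

import warnings
from contextlib import contextmanager
import numpy as np

@contextmanager
def nowarn():
	# Illustrative stand-in for enlib's utils.nowarn: silence python-level
	# warnings and numpy floating-point warnings for the duration of the block.
	with warnings.catch_warnings():
		warnings.simplefilter("ignore")
		with np.errstate(all="ignore"):
			yield

# Usage mirroring calc_amp above: 1/0 becomes inf silently, then gets cleaned up.
with nowarn():
	vamp = 1.0 / np.float64(0.0)
amp = 0 if not np.isfinite(vamp) else vamp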
Example #3
def calc_delta_score(split_hits, bhits, mask):
	# fractional improvement is (split_hits + bhits)/split_hits -1 = bhits/split_hits
	# This will often lead to division by zero. That is not catastrophic, but loses
	# the ability to distinguish between multiple cases that would all fill in empty pixels.
	# So we cap the ratio to a large number.
	with utils.nowarn():
		ratio = bhits/split_hits
		ratio[np.isnan(ratio)] = 0
		ratio = np.minimum(ratio, 1000)
	return np.sum(ratio[:,mask],-1)
Example #5
	def __init__(self, data, srcpos, pcut, nmat, perdet=False):
		pthumb = PmatThumbs(data, srcpos, perdet=perdet)
		twork  = np.full(data.tod.shape, 1.0, data.tod.dtype)
		nmat.white(twork)
		div   = enmap.zeros(pthumb.shape, pthumb.wcs, data.tod.dtype)
		junk  = np.zeros(pcut.njunk,data.tod.dtype)
		pcut.backward(twork, junk)
		pthumb.backward(twork, div)
		div = div[:,0]
		self.pthumb, self.pcut, self.nmat = pthumb, pcut, nmat
		self.div = div
		with utils.nowarn():
			self.idiv = 1/self.div
			self.idiv[~np.isfinite(self.idiv)] = 0
Example #7
def find_ref_pixs(divs, rcost=1.0, dcost=1.0):
	"""rcost is cost per pixel away from center
	dcost is cost per dB change in div value away from median"""
	# Find median nonzero div per map
	ref_val = np.asarray(np.ma.median(np.ma.array(divs, mask=divs==0),(-2,-1)))
	with utils.nowarn():
		val_off = 10*(np.log10(divs)-np.log10(ref_val[:,None,None]))
	pix_map = divs.pixmap()
	center  = np.array(divs.shape[-2:])/2
	dist_map= np.sum((pix_map-center[:,None,None])**2,-3)**0.5
	cost    = dist_map * rcost + np.abs(val_off)*dcost
	# Sort each by cost. Will be [{y,x},map,order]
	inds    = np.array(np.unravel_index(np.argsort(cost.reshape(cost.shape[0],-1),1), cost.shape[-2:]))
	inds    = inds.T
	# Want to return [order,map,{y,x}]
	return inds
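As the trailing comment notes, the returned index array is ordered [order,map,{y,x}], so the lowest-cost reference pixel for each map is just the first entry along the order axis. A hypothetical usage sketch, assuming divs is an enmap stack of shape (nmap,ny,nx) as the function requires:

import numpy as np

inds = find_ref_pixs(divs)   # [order,map,{y,x}]
best = inds[0]               # [map,{y,x}]: cheapest pixel for each map
ref_vals = divs[np.arange(len(divs)), best[:,0], best[:,1]]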
Example #9
 def __init__(self, scan, model=None, window=None, filter=None):
     model  = config.get("noise_model", model)
     window = config.get("tod_window", window)*scan.srate
     nmat.apply_window(scan.tod, window)
     self.nmat = nmat_measure.NmatBuildDelayed(model, cut=scan.cut_noiseest, spikes=scan.spikes)
     self.nmat = self.nmat.update(scan.tod, scan.srate)
     nmat.apply_window(scan.tod, window, inverse=True)
     self.model, self.window = model, window
     self.ivar = self.nmat.ivar
     self.cut  = scan.cut
     # Optional extra filter
     if filter:
         freq = fft.rfftfreq(scan.nsamp, 1/scan.srate)
         fknee, alpha = filter
         with utils.nowarn():
             self.filter = (1 + (freq/fknee)**-alpha)**-1
     else: self.filter = None
Example #10
    def __init__(self, workspaces, template, comm=None):
        """Initialize a FastmapSolver for the equation system given by the workspace list
		workspaces. The template argument specifies the output coordinate system. This
		enmap must have a wcs which is pixel-compatible with that used to build the workspaces."""
        if comm is None: comm = mpi.COMM_WORLD
        # Find the global coordinate offset needed to match our
        # global wcs with the template wcs
        corner = template.pix2sky([0, 0])
        gwcs, offset = offset_wcs(workspaces[0].geometry.gwcs, corner)
        # Prepare workspaces for solving in these coordinates
        self.workspaces = []
        for work in workspaces:
            work = work.copy()
            with utils.nowarn():
                hdiv_norm = work.hdiv / np.sum(work.hdiv[0, 0],
                                               -1)[None, None, :, None]
            hdiv_norm[~np.isfinite(hdiv_norm)] = 0
            work.hdiv_norm_sqrt = hdiv_norm[
                0, 0]**0.5  # array_ops.eigpow(hdiv_norm, 0.5, [0,1])
            # Update the global wcs and pixel coordinates
            work.geometry.gwcs = gwcs
            work.geometry.y0 -= offset[0]
            work.geometry.xshifts -= offset[1]
            # Set up our pointing matrix
            work.pmat = PmatWorkspaceMap(work.geometry)
            self.workspaces.append(work)
        # Update our template to match the geometry we're actually using.
        # If the original template was compatible, this will be a NOP geometry-wise
        template = enmap.zeros((work.geometry.ncomp, ) + template.shape[-2:],
                               work.geometry.gwcs, work.geometry.dtype)
        # Build a simple binned preconditioner
        # FIXME: This just makes things worse
        #idiv = enmap.zeros((work.geometry.ncomp,work.geometry.ncomp)+template.shape[-2:], work.geometry.gwcs, work.geometry.dtype)
        #for work in self.workspaces:
        #	wmap = enmap.zeros(work.geometry.shape, work.geometry.lwcs, work.geometry.dtype)
        #	for i in range(work.geometry.ncomp):
        #		tmp = idiv[0]*0
        #		tmp[i] = 1
        #		work.pmat.forward(wmap, tmp)
        #		wmap[:] = array_ops.matmul(work.hdiv, wmap, [0,1])
        #		wmap[:] = array_ops.matmul(np.rollaxis(work.hdiv,1), wmap, [0,1])
        #		work.pmat.backward(wmap, idiv[i])
        #self.prec = array_ops.eigpow(idiv, -1, axes=[0,1])
        self.dof = zipper.ArrayZipper(template)
        self.comm = comm
Example #11
def write_package(fname, maps, divs, src_ids, d):
    header = map_to_header(maps)
    header["id"] = d.entry.id + ":" + entry.tag
    header["off_x"] = d.point_correction[0] / utils.arcmin
    header["off_y"] = d.point_correction[1] / utils.arcmin
    header["t1"] = d.boresight[0, 0]
    header["t2"] = d.boresight[0, -1]
    meanoff = np.mean(d.point_offset, 0) / utils.degree
    header["az1"] = np.min(d.boresight[1, :]) / utils.degree + meanoff[0]
    header["az2"] = np.max(d.boresight[1, :]) / utils.degree + meanoff[1]
    header["el"] = np.mean(d.boresight[2, ::100]) / utils.degree

    hdu_maps = astropy.io.fits.PrimaryHDU(maps, header)
    hdu_divs = astropy.io.fits.ImageHDU(divs, map_to_header(divs), name="div")
    hdu_ids = astropy.io.fits.TableHDU(src_ids, name="ids")

    hdus = astropy.io.fits.HDUList([hdu_maps, hdu_divs, hdu_ids])
    with utils.nowarn():
        hdus.writeto(fname, clobber=True)
Example #12
	def __init__(self, workspaces, template, comm=None):
		"""Initialize a FastmapSolver for the equation system given by the workspace list
		workspaces. The template argument specifies the output coordinate system. This
		enmap must have a wcs which is pixel-compatible with that used to build the workspaces."""
		if comm is None: comm = mpi.COMM_WORLD
		# Find the global coordinate offset needed to match our
		# global wcs with the template wcs
		corner = template.pix2sky([0,0])
		gwcs, offset = offset_wcs(workspaces[0].geometry.gwcs, corner)
		# Prepare workspaces for solving in these coordinates
		self.workspaces = []
		for work in workspaces:
			work = work.copy()
			with utils.nowarn():
				hdiv_norm = work.hdiv / np.sum(work.hdiv[0,0],-1)[None,None,:,None]
			hdiv_norm[~np.isfinite(hdiv_norm)] = 0
			work.hdiv_norm_sqrt = hdiv_norm[0,0]**0.5 # array_ops.eigpow(hdiv_norm, 0.5, [0,1])
			# Update the global wcs and pixel coordinates
			work.geometry.gwcs = gwcs
			work.geometry.y0  -= offset[0]
			work.geometry.xshifts -= offset[1]
			# Set up our pointing matrix
			work.pmat = PmatWorkspaceMap(work.geometry)
			self.workspaces.append(work)
		# Update our template to match the geometry we're actually using.
		# If the original template was compatible, this will be a NOP geometry-wise
		template = enmap.zeros((work.geometry.ncomp,)+template.shape[-2:], work.geometry.gwcs, work.geometry.dtype)
		# Build a simple binned preconditioner
		# FIXME: This just makes things worse
		#idiv = enmap.zeros((work.geometry.ncomp,work.geometry.ncomp)+template.shape[-2:], work.geometry.gwcs, work.geometry.dtype)
		#for work in self.workspaces:
		#	wmap = enmap.zeros(work.geometry.shape, work.geometry.lwcs, work.geometry.dtype)
		#	for i in range(work.geometry.ncomp):
		#		tmp = idiv[0]*0
		#		tmp[i] = 1
		#		work.pmat.forward(wmap, tmp)
		#		wmap[:] = array_ops.matmul(work.hdiv, wmap, [0,1])
		#		wmap[:] = array_ops.matmul(np.rollaxis(work.hdiv,1), wmap, [0,1])
		#		work.pmat.backward(wmap, idiv[i])
		#self.prec = array_ops.eigpow(idiv, -1, axes=[0,1])
		self.dof  = zipper.ArrayZipper(template)
		self.comm = comm
Example #13
# in 1378883069.1378883122.ar1:
#  1.4179  0.0209 -3.8517  0.0303  9.7507 26.7
#  1.435          -3.864          13.217  38.4
# So that's a 44% higher S/N, corresponding to 2x more data. Not completely comparable, though,
# since the TOD-one didn't marginalize over position. The position also differs by almost a sigma,
# which it shouldn't considering that they share data. And I trust the tod-level one more.
# However, this takes 1-5 s per likelihood evaluation. A robust fit requires ~500 evaluations,
# which would be 8-42 minutes. And that's using 16 cores! That's too slow. So this one is
# useful for comparing with faster methods for a few reference tods, but not in general.
# Currently N and P take similar time. Can optimize P more with some effort, but N is dominated
# by ffts, which can't be improved much.
from __future__ import division, print_function
import numpy as np, time, astropy.io.fits, os, sys
from scipy import optimize, integrate
from enlib import utils
with utils.nowarn(): import h5py
from enlib import mpi, errors, fft, mapmaking, config, jointmap, pointsrcs
from enlib import pmat, coordinates, enmap, bench, bunch, nmat, sampcut, gapfill, wcsutils, array_ops
from enact import filedb, actdata, actscan, nmat_measure

config.set("downsample", 1, "Amount to downsample tod by")
config.set("gapfill", "linear", "Gapfiller to use. Can be 'linear' or 'joneig'")
config.default("pmat_interpol_pad", 10.0, "Number of arcminutes to pad the interpolation coordinate system by")
config.default("pmat_interpol_max_size", 4000000, "Maximum mesh size in pointing interpolation. Worst-case time and memory scale at most proportionally with this.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("mode", help="Mode to use. Can be srcs or planet. This sets up useful defaults for other arguments")
parser.add_argument("srcdb_or_planet")
parser.add_argument("sel")
parser.add_argument("odir")
parser.add_argument("-R", "--radius",    type=float, default=12)
Example #14
div = div.reshape((-1, ) + div.shape[-2:])[0]
# Individual pixel area
if args.area_model == "average":
    pix_area = div * 0 + div.area() / div.size * (180 * 60 / np.pi)**2
else:
    pos = div.posmap()
    diffs = utils.rewind(pos[:, 1:, 1:] - pos[:, :-1, :-1], 0)
    pix_area = np.abs(diffs[0] * diffs[1]) * np.cos(pos[0, :-1, :-1])
    del diffs
    # Go to square arcmins
    pix_area /= utils.arcmin**2
    # Pad to recover edge pixels
    pix_area = np.concatenate([pix_area, pix_area[-1:]], 0)
    pix_area = np.concatenate([pix_area, pix_area[:, -1:]], 1)

# Flatten everything
div = div.reshape(-1)
pix_area = pix_area.reshape(-1)

with utils.nowarn():
    rms = (pix_area / div)**0.5 if not args.already_arcmin else div**-0.5
    inds = np.argsort(rms, axis=None)
    rms = rms[inds]
    area = np.cumsum(pix_area[inds]) / 3600
    mask = np.isfinite(rms)
    rms, area = rms[mask], area[mask]

np.savetxt(args.ofile,
           np.array([area[::args.thin], rms[::args.thin]]).T,
           fmt="%9.3f %15.4f")
Example #15
def butter(f, f0, alpha):
    if f0 <= 0: return f * 0 + 1
    with utils.nowarn():
        return 1 / (1 + (np.abs(f) / f0)**alpha)
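With a positive alpha this is a low-pass Butterworth-style profile (close to 1 for f well below f0, falling towards 0 above it); the coadd examples below call it with a negative alpha, e.g. butter(lr, filter_highpass, -10), which turns it into a high-pass. The nowarn guard exists because f = 0 with a negative exponent triggers a divide-by-zero that harmlessly evaluates to 0 after the final division. A small sketch of both uses, assuming the butter defined above:

import numpy as np

f = np.array([0.0, 50.0, 200.0, 800.0])
lowpass  = butter(f, 200.0,  4)   # roughly [1.00, 1.00, 0.50, 0.00]: keeps f below f0
highpass = butter(f, 200.0, -4)   # roughly [0.00, 0.00, 0.50, 1.00]: keeps f above f0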
Example #16
    def analyze(self,
                ref_beam=None,
                mode="weight",
                map_max=1e8,
                div_tol=20,
                apod_val=0.2,
                apod_alpha=5,
                apod_edge=120,
                beam_tol=1e-4,
                ps_spec_tol=0.5,
                ps_smoothing=10,
                filter_kxrad=20,
                filter_highpass=200,
                filter_kx_ymax_scale=1):
        # Find the typical noise levels. We will use this to decide where
        # divs and beams etc. can be truncated to improve convergence.
        datasets = self.datasets
        ncomp = max([
            split.data.map.preflat.shape[0] for dataset in datasets
            for split in dataset.splits
        ])
        for dataset in datasets:
            for split in dataset.splits:
                split.ref_div = robust_ref(split.data.div)
                # Avoid single, crazy pixels
                split.data.div = np.minimum(split.data.div,
                                            split.ref_div * div_tol)
                split.data.div = filter_div(split.data.div)
                split.data.map = np.maximum(
                    -map_max, np.minimum(map_max, split.data.map))
                # Expand map to ncomp components
                split.data.map = add_missing_comps(split.data.map, ncomp)
                # Build apodization
                apod = np.minimum(split.data.div / (split.ref_div * apod_val),
                                  1.0)**apod_alpha
                apod = apod.apod(apod_edge)
                split.data.div *= apod
                split.data.H = split.data.div**0.5
            dataset.ref_div = np.sum(
                [split.ref_div for split in dataset.splits])
        tot_ref_div = np.sum([dataset.ref_div for dataset in datasets])

        ly, lx = enmap.laxes(self.shape, self.wcs)
        lr = (ly[:, None]**2 + lx[None, :]**2)**0.5
        bmin = np.min([beam_size(dataset.beam) for dataset in datasets])
        # Deconvolve all the relative beams. These should ideally include pixel windows.
        # This could matter for planck
        if ref_beam is not None:
            for dataset in datasets:
                rel_beam = beam_ratio(dataset.beam, ref_beam)
                # Avoid division by zero
                bspec = np.maximum(eval_beam(rel_beam, lr), 1e-10)
                # We don't want to divide by tiny numbers, so we will cap the relative
                # beam. The goal is just to make sure that the deconvolved noise ends up
                # sufficiently high that anything beyond that is negligible. This will depend
                # on the div ratios between the different datasets. We can stop deconvolving
                # when beam*my_div << (tot_div-my_div). But deconvolving even by a factor
                # 1000 leads to strange numerical errors
                bspec = np.maximum(
                    bspec, beam_tol * (tot_ref_div / dataset.ref_div - 1))
                bspec_dec = np.maximum(bspec, 0.1)
                for split in dataset.splits:
                    split.data.map = map_ifft(
                        map_fft(split.data.map) / bspec_dec)
                # In theory we don't need to worry about the beam any more by this point.
                # But the pixel window might be unknown or missing. So we save the beam so
                # we can make sure the noise model makes sense
                dataset.bspec = bspec
                # We classify this as a low-resolution dataset if we did an appreciable amount of
                # deconvolution
                dataset.lowres = np.min(bspec) < 0.5

        # Can now build the noise model and rhs for each dataset.
        # The noise model is N = HCH, where H = div**0.5 and C is the mean 2d noise spectrum
        # of the whitened map, after some smoothing.
        for dataset in datasets:
            nsplit = 0
            dset_map, dset_div = None, None
            for split in dataset.splits:
                if dset_map is None:
                    dset_map = split.data.map * 0
                    dset_div = split.data.div * 0
                dset_map += split.data.map * split.data.div
                dset_div += split.data.div
            # Form the mean map for this dataset
            dset_map[:, dset_div > 0] /= dset_div[dset_div > 0]
            # Then use it to build the diff maps and noise spectra
            dset_ps = None
            #i=0
            for split in dataset.splits:
                if split.data.empty: continue
                diff = split.data.map - dset_map
                wdiff = diff * split.data.H
                #i+=1
                # What is the healthy area of wdiff? Wdiff should have variance
                # 1 or above. This tells us how to upweight the power spectrum
                # to take into account missing regions of the diff map.
                ndown = 10
                wvar = enmap.downgrade(wdiff**2, ndown)
                goodfrac = np.sum(wvar > 1e-3) / float(wvar.size)
                if goodfrac < 0.1: goodfrac = 0
                ps = np.abs(map_fft(wdiff))**2
                # correct for unhit areas, which can't be whitened
                with utils.nowarn():
                    ps /= goodfrac
                if dset_ps is None:
                    dset_ps = enmap.zeros(ps.shape, ps.wcs, ps.dtype)
                dset_ps += ps
                nsplit += 1
            if nsplit < 2: continue
            # With n splits, mean map has var 1/n, so diff has var (1-1/n) + (n-1)/n = 2*(n-1)/n
            # Hence tot-ps has var 2*(n-1)
            dset_ps /= 2 * (nsplit - 1)
            dset_ps = smooth_pix(dset_ps, ps_smoothing)
            # Use the beam we saved from earlier to make sure we don't have a remaining
            # pixel window giving our high-l parts too high weight. If everything has
            # been correctly deconvolved, we expect high-l dset_ps to go as
            # 1/beam**2. The lower ls will realistically be no lower than this either.
            # So we can simply take the max
            dset_ps_ref = np.min(
                np.maximum(dset_ps, dataset.bspec**-2 * ps_spec_tol * 0.1))
            dset_ps = np.maximum(dset_ps,
                                 dset_ps_ref * dataset.bspec**-2 * ps_spec_tol)
            # Our fourier-space inverse noise matrix is the inverse of this
            if np.all(np.isfinite(dset_ps)):
                iN = 1 / dset_ps
            else:
                iN = enmap.zeros(dset_ps.shape, dset_ps.wcs, dset_ps.dtype)

            # Add any fourier-space masks to this
            if dataset.highpass:
                kxmask = butter(lx, filter_kxrad, -5)
                kxmask = 1 - (1 - kxmask[None, :]) * (
                    np.abs(ly) < bmin * filter_kx_ymax_scale)[:, None]
                highpass = butter(lr, filter_highpass, -10)
                filter = highpass * kxmask
                del kxmask, highpass
            else:
                filter = 1
            if mode != "filter": iN *= filter
            dataset.iN = iN
            dataset.filter = filter
            self.mode = mode
Example #17
    def query(self, query=None, apply_default_query=True):
        """Query the database. The query takes the form
		tag,tag,tag,...:sort[slice], where all tags must be satisfied for an id to
		be returned. More general syntax is also available. For example,
		(a+b>c)|foo&bar,cow. This follows standard python and numpy syntax,
		except that , is treated as a lower-priority version of &."""
        # Make a copy of self.data so we can't modify it without changing ourself
        data = self.data.copy()
        # First split off any sorting field or slice
        if query is None: query = ""
        toks = utils.split_outside(query, ":")
        query, rest = toks[0], ":".join(toks[1:])
        # Hack: Support id fields as tags, even if they contain
        # illegal characters..
        t1 = time.time()
        for id in data["id"]:
            if id not in query: continue
            query = re.sub(r"""(?<!['"])\b%s\b""" % id, "(id=='%s')" % id,
                           query)
        # Split into ,-separated fields. Fields starting with a "+"
        # are taken to be tag markers, and are simply propagated to the
        # resulting ids.
        toks = utils.split_outside(query, ",")
        fields, subid = [], []
        override_ids = None
        for tok in toks:
            if len(tok) == 0: continue
            if tok.startswith("+"):
                # Tags starting with + will be interpreted as a subid specification
                subid.append(tok[1:])
            elif tok.startswith("/"):
                # Tags starting with / will be interpreted as special query flags
                if tok == "/all": apply_default_query = False
                else: raise ValueError("Unknown query flag '%s'" % tok)
            else:
                # Normal field. Perform a few convenience transformations first.
                if tok.startswith("@@"):
                    # Hack. *Force* the given ids to be returned, even if they aren't in the database.
                    override_ids = load_ids(tok[2:])
                    continue
                elif tok.startswith("@"):
                    # Restrict dataset to those in the given file
                    tok = "file_contains('%s',id)" % tok[1:]
                elif tok.startswith("~@"):
                    tok = "~file_contains('%s',id)" % tok[2:]
                fields.append(tok)
        if override_ids is not None:
            # Append subids to our ids, and return immediately. All other fields
            # and queries are ignored.
            subs = np.array(",".join(subid))
            subs = np.full(len(override_ids), subs, subs.dtype)
            return append_subs(override_ids, subs)
        # Apply our default queries here. These are things that we almost always
        # want in our queries, and that it's tedious to have to specify manually
        # each time. For example, this would be "selected" for act todinfo queries
        if apply_default_query:
            fields = fields + utils.split_outside(self.default_query, ",")
        subid = ",".join(subid)
        # Now evaluate our fields one by one. This is done so that
        # function fields can inspect the current state at that point
        for field in fields:
            scope = np.__dict__.copy()
            scope.update(data)
            for name, functor in self.functors.items():
                scope[name] = functor(data)
            with utils.nowarn():
                hits = eval(field, scope)
            # Restrict all fields to the result
            data = dslice(data, hits)
        # Split the rest into a sorting field and a slice
        toks = rest.split("[")
        if len(toks) == 1: sort, fsel, dsel = toks[0], "", ""
        elif len(toks) == 2: sort, fsel, dsel = toks[0], "", "[" + toks[1]
        else:
            sort, fsel, dsel = toks[0], "[" + toks[1], "[" + "[".join(toks[2:])
        if self.sort and not sort: sort = self.sort
        if sort:
            # Evaluate sorting field
            field = data[sort]
            field = eval("field" + fsel)
            data = dslice(data, np.argsort(field))
        # Finally apply the data slice
        inds = np.arange(len(data["id"]))
        inds = eval("inds" + dsel)
        data = dslice(data, inds)
        # Build our subid extensions and append them to ids
        subs = np.array([merge_subid(subid, sub) for sub in data["subids"]])
        ids = append_subs(data["id"], subs)
        return ids
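The docstring's grammar combines comma-separated tags (a low-priority AND), free-form python/numpy expressions over the database columns, +subid markers, @file restrictions, /all to disable the default query, and an optional :sort[slice] suffix. A few hypothetical query strings illustrating the syntax (db and the column names hour and t are placeholders, not guaranteed to exist in any given database):

ids = db.query("selected,night:t[::2]")        # require both tags, sort by t, keep every other id
ids = db.query("(hour>22)|(hour<4),+deep56")   # numpy-style expression plus a +deep56 subid tag
ids = db.query("@good_ids.txt,/all")           # only ids listed in a file; /all skips the default query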
Example #18
def coadd_tile_data(datasets,
                    box,
                    odir,
                    ps_smoothing=10,
                    pad=0,
                    ref_beam=None,
                    cg_tol=1e-6,
                    dump=False,
                    verbose=False,
                    read_cache=False,
                    write_cache=False,
                    div_max_tol=100,
                    div_div_tol=1e-10):
    # Load data for this box for each dataset
    datasets, ffpad = read_data(datasets,
                                box,
                                odir,
                                pad=pad,
                                verbose=verbose,
                                read_cache=read_cache,
                                write_cache=write_cache)
    # We might not find any data
    if len(datasets) == 0: return None
    # Find the smallest beam size of the datasets
    bmin = np.min([beam_size(dataset.beam) for dataset in datasets])

    # Subtract mean map from each split to get noise maps. Our noise
    # model is HNH, where H is div**0.5 and N is the mean 2d noise spectrum
    # after some smoothing
    rhs, tot_div = None, None
    tot_iN, tot_udiv = None, 0
    for dataset in datasets:
        nsplit = 0
        dset_map, dset_div = None, None
        for split in dataset.splits:
            if dset_map is None:
                dset_map = split.data.map * 0
                dset_div = split.data.div * 0
            dset_map += split.data.map * split.data.div
            dset_div += split.data.div
        # Form the mean map for this dataset
        dset_map[:, dset_div > 0] /= dset_div[dset_div > 0]
        if tot_div is None: tot_div = dset_div * 0
        tot_div += dset_div
        tshape, twcs, tdtype = dset_map.shape, dset_div.wcs, dset_div.dtype
        # Then use it to build the diff maps and noise spectra
        dset_ps = None
        for split in dataset.splits:
            if split.data.empty: continue
            diff = split.data.map - dset_map
            wdiff = diff * split.data.H
            # What is the healthy area of wdiff? Wdiff should have variance
            # 1 or above. This tells us how to upweight the power spectrum
            # to take into account missing regions of the diff map.
            ndown = 10
            wvar = enmap.downgrade(wdiff**2, ndown)
            goodfrac = np.sum(wvar > 1e-3) / float(wvar.size)
            if goodfrac < 0.1: goodfrac = 0
            #opre  = odir + "/" + os.path.basename(split.map)[:-5]
            #enmap.write_map(opre + "_diff.fits", diff)
            #enmap.write_map(opre + "_wdiff.fits", wdiff)
            #enmap.write_map(opre + "_wvar.fits", wvar)
            ps = np.abs(map_fft(wdiff))**2
            #enmap.write_map(opre + "_ps1.fits", ps)
            # correct for unhit areas, which can't be whitened
            #print "A", dataset.name, np.median(ps[ps>0]), medloop(ps), goodfrac
            with utils.nowarn():
                ps /= goodfrac
            #print "B", dataset.name, np.median(ps[ps>0]), medloop(ps), goodfrac
            #enmap.write_map(opre + "_ps2.fits", ps)
            #enmap.write_map(opre + "_ps2d.fits", ps)
            if dset_ps is None:
                dset_ps = enmap.zeros(ps.shape, ps.wcs, ps.dtype)
            dset_ps += ps
            nsplit += 1
        if nsplit < 2: continue
        # With n splits, mean map has var 1/n, so diff has var (1-1/n) + (n-1)/n = 2*(n-1)/n
        # Hence tot-ps has var 2*(n-1)
        dset_ps /= 2 * (nsplit - 1)
        #enmap.write_map(opre + "_ps2d_tot.fits", dset_ps)
        dset_ps = smooth_pix(dset_ps, ps_smoothing)
        #enmap.write_map(opre + "_ps2d_smooth.fits", dset_ps)
        if np.all(np.isfinite(dset_ps)):
            # Super-low values of the spectrum are not realistic. These appear
            # due to beam/pixel smoothing in the planck maps. This will be
            # mostly taken care of when processing the beams, as long as we don't
            # let them get too small
            dset_ps = np.maximum(dset_ps, 1e-7)
            # Optionally cap the max dset_ps, this is mostly to speed up convergence
            if args.max_ps:
                dset_ps = np.minimum(dset_ps, args.max_ps)

            # Our fourier-space inverse noise matrix is based on the inverse noise spectrum
            iN = 1 / dset_ps
            #enmap.write_map(opre + "_iN_raw.fits", iN)
        else:
            print "Setting weight of dataset %s to zero" % dataset.name
            #print np.all(np.isfinite(dset_ps)), np.all(dset_ps>0)
            iN = enmap.zeros(dset_ps.shape, dset_ps.wcs, dset_ps.dtype)

        # Add any fourier-space masks to this
        ly, lx = enmap.laxes(tshape, twcs)
        lr = (ly[:, None]**2 + lx[None, :]**2)**0.5
        if dataset.highpass:
            kxmask = butter(lx, args.kxrad, -3)
            kxmask = 1 - (1 - kxmask[None, :]) * (
                np.abs(ly) < bmin * args.kx_ymax_scale)[:, None]
            highpass = butter(lr, args.highpass, -10)
            filter = highpass * kxmask
            #print "filter weighting", dataset.name
            del kxmask, highpass
        else:
            filter = 1

        if not args.filter: iN *= filter

        # We should deconvolve the relative beam from the maps,
        # but that's numerically nasty. But it can be handled
        # inversely. We want (BiNB + ...)x = (BiNB iB m + ...)
        # where iB is the beam deconvolution operation in map space.
        # Instead of actually doing that operation, we can compute two
        # inverse noise matrixes: iN_A = BiNB for the left hand
        # side and iN_b = BiN for the right hand side. That way we
        # avoid dividing by any huge numbers.

        # Add the relative beam
        iN_A = iN.copy()
        iN_b = iN.copy()
        if ref_beam is not None:
            rel_beam = beam_ratio(dataset.beam, ref_beam)
            bspec = eval_beam(rel_beam, lr)
            iN_A *= bspec**2
            iN_b *= bspec
        #moo = iN*0+filter
        #enmap.write_map(opre + "_filter.fits", moo)
        # Add filter to noise model if we're downweighting
        # rather than filtering.
        dataset.iN_A = iN_A
        dataset.iN_b = iN_b
        dataset.filter = filter
        #print "A", opre
        #enmap.write_map(opre + "_iN_A.fits", iN_A)
        #enmap.write_map(opre + "_iN.fits", iN)

    # Cap to avoid single crazy pixels
    tot_div = np.maximum(tot_div, np.median(tot_div[tot_div > 0]) * 0.01)
    tot_idiv = tot_div * 0
    tot_idiv[tot_div > div_div_tol] = 1 / tot_div[tot_div > div_div_tol]

    # Build the right-hand side. The right-hand side is
    # sum(HNHm)
    if rhs is None: rhs = enmap.zeros(tshape, twcs, tdtype)
    for dataset in datasets:
        i = 0
        for split in dataset.splits:
            if split.data.empty: continue
            #print "MOO", dataset.name, np.max(split.data.map), np.min(split.data.map), np.max(split.data.div), np.min(split.data.div)
            w = split.data.H * split.data.map
            fw = map_fft(w)
            fw *= dataset.iN_b
            if args.filter: fw *= dataset.filter
            w = map_ifft(fw) * split.data.H
            #enmap.write_map(odir + "/%s_%02d_rhs.fits" % (dataset.name, i), w)
            rhs += w
            i += 1
    del w, iN, iN_A, iN_b, filter

    # Now solve the equation
    def A(x):
        global times
        m = enmap.samewcs(x.reshape(rhs.shape), rhs)
        res = m * 0
        times[:] = 0
        ntime = 0
        for dataset in datasets:
            for split in dataset.splits:
                if split.data.empty: continue
                t = [time.time()]
                w = split.data.H * m
                t.append(time.time())
                fw = map_fft(w)
                t.append(time.time())
                fw *= dataset.iN_A
                t.append(time.time())
                w = map_ifft(fw)
                t.append(time.time())
                w *= split.data.H
                t.append(time.time())
                res += w
                for i in range(1, len(t)):
                    times[i - 1] += t[i] - t[i - 1]
                ntime += 1
                #w  = enmap.harm2map(dataset.iN_A*enmap.map2harm(w))
                #w *= split.data.H
                #res += w
                del w
        times /= ntime
        return res.reshape(-1)

    def M(x):
        m = enmap.samewcs(x.reshape(rhs.shape), rhs)
        res = m * tot_idiv
        return res.reshape(-1)

    solver = cg.CG(A, rhs.reshape(-1), M=M)
    for i in range(1000):
        t1 = time.time()
        solver.step()
        t2 = time.time()
        if verbose:
            print "%5d %15.7e %5.2f: %4.2f %4.2f %4.2f %4.2f %4.2f" % (
                solver.i, solver.err, t2 - t1, times[0], times[1], times[2],
                times[3], times[4]), np.std(solver.x)
        if dump and solver.i in [1, 2, 5, 10, 20, 50] + list(range(100, 10000, 100)):
            m = enmap.samewcs(solver.x.reshape(rhs.shape), rhs)
            enmap.write_map(odir + "/step%04d.fits" % solver.i, m)
        if solver.err < cg_tol:
            if dump:
                m = enmap.samewcs(solver.x.reshape(rhs.shape), rhs)
                enmap.write_map(odir + "/step_final.fits", m)
            break
    tot_map = enmap.samewcs(solver.x.reshape(rhs.shape), rhs)
    # Get rid of the fourier padding
    ny, nx = tot_map.shape[-2:]
    tot_map = tot_map[..., :ny - ffpad[0], :nx - ffpad[1]]
    tot_div = tot_div[..., :ny - ffpad[0], :nx - ffpad[1]]
    return bunch.Bunch(map=tot_map, div=tot_div)
Example #19
import numpy as np, sys, os, ephem
from enlib import utils
with utils.nowarn(): import h5py
from enlib import config, pmat, mpi, errors, gapfill, enmap, bench, ephemeris
from enlib import fft, array_ops, sampcut, cg
from enact import filedb, actscan, actdata, cuts, nmat_measure
config.set("pmat_cut_type",  "full")
parser = config.ArgumentParser()
parser.add_argument("sel")
parser.add_argument("srcs")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("tag", nargs="?")
parser.add_argument("-R", "--dist", type=float, default=4)
parser.add_argument("-y", "--ypad", type=float, default=3)
parser.add_argument("-s", "--src",  type=int,   default=None, help="Only analyze given source")
parser.add_argument("-c", "--cont", action="store_true")
parser.add_argument("-m", "--model",type=str, default="constrained")
parser.add_argument("--hit-tol",    type=float, default=0.5)
args = parser.parse_args()

comm = mpi.COMM_WORLD
filedb.init()
R    = args.dist * utils.arcmin
ypad = args.ypad * utils.arcmin
csize= 100
config.set("pmat_ptsrc_cell_res", 2*(R+ypad)/utils.arcmin)
config.set("pmat_ptsrc_rsigma", 5)
config.set("pmat_interpol_pad", 5+ypad/utils.arcmin)

dtype = np.float32
Example #20
		print(fbin)
		f1,f2 = [min(nfreq-1,int(i*fmax/dfreq/nbin)) for i in [fbin,fbin+1]]
		fsub  = ft[:,f1:f2]
		cov   = array_ops.measure_cov(fsub)
		std   = np.diag(cov)**0.5
		corr  = cov / std[:,None] / std[None,:]
		myrhs = project_mat(pix, template, corr)
		mydiv = project_mat(pix, template)
		return fbin, myrhs, mydiv
	def collect(args):
		fbin, myrhs, mydiv = args
		rhs[fbin] += myrhs
		div[fbin] += mydiv
	p = multiprocessing.Pool(args.nmulti)
	for fbin in range(nbin):
		p.apply_async(handle_bin, [fbin], callback=collect)
	p.close()
	p.join()
	del ft

# Collect the results
if comm.rank == 0: print("Reducing")
rhs = enmap.samewcs(utils.allreduce(rhs, comm), rhs)
div = enmap.samewcs(utils.allreduce(div, comm), div)
with utils.nowarn():
	map = rhs/div

if comm.rank == 0:
	print "Writing"
	enmap.write_map(args.ofile, map)
Example #21
 def read_catalog(fname):
     with utils.nowarn():
         cat = np.loadtxt(fname, usecols=range(8), ndmin=2).reshape(-1, 8)
     cat[:, :2] = cat[:, 1::-1] * utils.degree
     cat[:, -2:] *= ym
     return cat