Example #1
    def simple_max(self):
        # Smooth the weighted map and the weights themselves with the beam
        srhs = enmap.smooth_gauss(self.map * self.div, self.beam.sigma)
        sdiv = enmap.smooth_gauss(self.div, self.beam.sigma)
        # Floor the weights to avoid dividing by ~zero in unexposed regions
        sdiv = np.maximum(sdiv, np.median(np.abs(sdiv)) * 0.1)
        # Form a signal-to-noise-like map and find its strongest peak
        smap = srhs / sdiv**0.5
        pos = enmap.argmax(smap)
        val = smap.at(pos)
        return pos[::-1], val
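
For context: enmap.smooth_gauss convolves a map with a Gaussian kernel whose width is given as a sigma in radians. A minimal, self-contained sketch of the call, assuming the modern pixell package (the successor to enlib); the geometry below is illustrative only:

import numpy as np
from pixell import enmap, utils

# A 1 deg x 1 deg flat-sky CAR patch with 0.5 arcmin pixels
box = np.array([[-0.5, -0.5], [0.5, 0.5]]) * utils.degree
shape, wcs = enmap.geometry(pos=box, res=0.5 * utils.arcmin, proj="car")

# A single hot pixel becomes a Gaussian blob after smoothing
imap = enmap.zeros(shape, wcs)
imap[shape[0] // 2, shape[1] // 2] = 1.0
smap = enmap.smooth_gauss(imap, 2.0 * utils.arcmin)  # sigma = 2 arcmin, in radians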
Example #2
    def update_mask(self, rand_sigma_arcmin=2., rand_threshold=1e-3):
        if rand_sigma_arcmin > 1.e-3:
            print("Smoothing...")
            smap = enmap.smooth_gauss(self.rand_map,
                                      rand_sigma_arcmin * np.pi / 180. / 60.)
            print("Done smoothing...")
        else:
            smap = self.rand_map

        self.mask = np.zeros(self.shape)
        self.mask[smap > rand_threshold] = 1
        self._counts()
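
Note on units: the sigma passed to enmap.smooth_gauss is rand_sigma_arcmin converted to radians (arcmin times pi/180/60). An equivalent spelling using pixell's unit constants, assuming pixell is available:

from pixell import utils
sigma_rad = rand_sigma_arcmin * utils.arcmin  # identical to rand_sigma_arcmin*np.pi/180./60.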
Example #3
    def update_mask(self,rand_sigma_arcmin=2.,rand_threshold=1e-3):
        if rand_sigma_arcmin>1.e-3:
            if self.verbose: print("Smoothing...")
            if self.curved:
                smap = hp.smoothing(self.rand_map,sigma=rand_sigma_arcmin*np.pi/180./60.)
            else:
                smap = enmap.smooth_gauss(self.rand_map,rand_sigma_arcmin*np.pi/180./60.)
            if self.verbose: print("Done smoothing...")
        else:
            # smap must be set even when not verbose; the original
            # "if self.verbose: smap = ..." left smap undefined in quiet runs
            smap = self.rand_map

        self.mask = np.zeros(self.shape)
        self.mask[smap>rand_threshold] = 1
        if not self.curved:
            self.mask = enmap.enmap(self.mask,self.wcs)
        self._counts()
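
This variant dispatches between curved-sky and flat-sky smoothing. Both healpy's hp.smoothing and enmap.smooth_gauss take a Gaussian sigma in radians, which is why the same arcmin conversion appears on both branches. A minimal sketch of the curved-sky branch alone, assuming healpy; the nside is illustrative:

import numpy as np
import healpy as hp

nside = 512  # illustrative resolution
rand_map = np.random.standard_normal(hp.nside2npix(nside))
smap = hp.smoothing(rand_map, sigma=2.0 * np.pi / 180. / 60.)  # sigma = 2 arcmin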
Example #4
ps_cmb[:, :, lmax_tmp:] = ps_cmb_tmp[:, :, -1]

sigma = args.beam * np.pi / 180 / 60 / (8 * np.log(2))
l = np.arange(ps_cmb.shape[-1])
ps_src = np.exp(-l * (l + 1) * sigma**2)[None, None, :] * (args.srcrms * np.pi / 180 / 60)**2

L.info("Setting up signal and noise matrices")
S = enmap.spec2flat(map.shape, map.wcs, ps_cmb, 1.0)
iP = enmap.spec2flat(map.shape, map.wcs, ps_src, -1.0)
N = (inoise + np.max(inoise) * 1e-3)**-1

# apodize map based on a smooth noise map
L.info("Apodizing")
print(inoise.shape, inoise.dtype)
inoise_smooth = enmap.smooth_gauss(inoise[0, 0], 10 * np.pi / 180 / 60)[None, None]
apod = (np.minimum(1, inoise_smooth / (np.max(inoise_smooth) * 0.05))**4)[0, 0]
map *= apod[None]

enmap.write_map(args.odir + "/inoise.fits", inoise)
enmap.write_map(args.odir + "/inoise_smooth.fits", inoise_smooth)
enmap.write_map(args.odir + "/apod.fits", apod)
enmap.write_map(args.odir + "/map_apod.fits", map)
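
The apodization recipe above (smooth the inverse-noise map, rescale by 5% of its maximum, clip at 1, raise to the 4th power) is reusable on its own. A minimal sketch, assuming pixell, with the floor and exponent taken from the example:

import numpy as np
from pixell import enmap, utils

def apod_from_ivar(ivar, smooth_sigma=10 * utils.arcmin):
    # Smooth the inverse-variance map so the taper follows the coverage
    ivar_smooth = enmap.smooth_gauss(ivar, smooth_sigma)
    # Ramp from 0 to 1, saturating where coverage exceeds 5% of the peak
    return np.minimum(1, ivar_smooth / (np.max(ivar_smooth) * 0.05))**4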


def mul(mat, vec, axes=[0, 1]):
    return enmap.samewcs(
        array_ops.matmul(mat.astype(vec.dtype), vec, axes=axes), mat, vec)


def pow(mat, exp, axes=[0, 1]):
Example #5
box = utils.widen_box(box, rad*5, relative=False)

# We assume that the two maps have the same pixelization
imaps = enmap.samewcs(np.array(imaps), imaps[0])
# Downsample by averaging
imaps = enmap.downgrade(imaps, (1,args.step))
naz   = imaps.shape[-1]

# Ok, build our output geometry
shape, wcs = enmap.geometry(pos=box, res=args.res*utils.arcmin, proj="car", pre=(naz,))
omap = enmap.zeros(shape, wcs, dtype=dtype)

# Normalization
norm = enmap.zeros(shape[-2:],wcs)
norm[0,0] = 1
norm = enmap.smooth_gauss(norm, rad)[0,0]

# Loop through slices and populate
bazs = []
for iaz in range(naz):
	# Get our boresight az
	bazs.append(imaps.pix2sky([0,iaz])[1])
	vals = []
	for i in range(nfile):
		# Go from detectors to y-pixel in input maps
		ypix = utils.transpose_inds(dets[i], nrow, ncol)
		vals.append(imaps[i,ypix,iaz])
	vals = np.concatenate(vals)
	pos  = np.concatenate(offs)
	# Write to appropriate position in array
	pix  = np.maximum(0, np.minimum((np.array(shape[-2:])-1)[:,None],
		enmap.sky2pix(shape, wcs, pos.T).astype(np.int32)))
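
The final line clamps the sky2pix output to valid pixel indices before indexing the output geometry. An equivalent formulation with np.clip (a sketch, assuming the same pixell/enlib API):

pix = np.clip(enmap.sky2pix(shape, wcs, pos.T).astype(np.int32),
              0, (np.array(shape[-2:]) - 1)[:, None])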
Example #6
	def fit_grid(self, verbose=False, grid_res=0.6*utils.arcmin, super=10):
		self.verbose = verbose
		t1 = time.time()
		if verbose: print("Building coarse likelihood grid")
		ngrid = int(np.round(2*self.lik.rmax/grid_res))
		dchisqs, amps = self.likgrid(self.lik.rmax, ngrid, super=super, verbose=verbose)
		if np.all(dchisqs == 0):
			raise ValueError("featureless likelihood")
		if False and verbose:
			for i,s in enumerate(self.sdata):
				enmap.write_map("map_%d.fits"%i,s.map)
				enmap.write_map("div_%d.fits"%i,s.div)
				enmap.write_map("white_%d.fits"%i,s.map*s.div**0.5)
				enmap.write_map("pchisq_%d.fits"%i,s.map**2*s.div)
				enmap.write_map("pchisq_smooth_%d.fits%i",enmap.smooth_gauss(s.map**2*s.div,0.6*utils.arcmin))
			enmap.write_map("dchisqs.fits",dchisqs)
		# Find local dchisq maxima
		maxmap  = ndimage.maximum_filter(dchisqs, super)
		peaks   = np.where((dchisqs==maxmap)*(maxmap>0))
		maxvals = dchisqs[peaks]
		maxpos  = dchisqs.pix2sky(peaks)
		# Why isn't this just amps[:,peaks] or similar?
		maxamps = amps.reshape(amps.shape[0],-1)[:,np.ravel_multi_index(peaks, amps.shape[-2:])]
		inds    = np.argsort(maxvals)[::-1]
		maxvals = maxvals[inds]
		maxpos  = maxpos[:,inds]
		maxamps = maxamps[:,inds]
		# Perform ML fit for the highest one
		dpos = optimize.fmin_powell(self.calc_chisq_wrapper, maxpos[:,0]/self.scale, disp=False)*self.scale
		res  = self.calc_full_result(dpos, marginalize=False)
		if False and verbose:
			for i, m in enumerate(res.models):
				enmap.write_map("model_%d.fits"%i,m)
				resid  = self.sdata[i].map-m
				enmap.write_map("resid_%d.fits"%i,resid)
				pchisq = resid**2*self.sdata[i].div
				pchisq_smooth = enmap.smooth_gauss(pchisq, 0.6*utils.arcmin)
				enmap.write_map("pchisq_smooth_resid.fits",pchisq_smooth)
				print(np.sum((self.sdata[i].map-m)**2*self.sdata[i].div) - self.lik.chisq0)

		# Ideally we would integrate over the full likelihood, not
		# just the peaks. But the peaks have higher weight
		# and should be distributed representatively. Using just the
		# peaks makes it easy to compare with our ML-fit, which is also
		# a single point. So we loop over just the peaks here.
		maxvals = maxvals[1:]
		maxpos  = maxpos[:,1:]
		maxamps = maxamps[:,1:]
		P    = np.exp(0.5*(maxvals-res.dchisq))
		P0   = 1/(1+np.sum(P))
		P   *= P0
		# Marginalize over peaks
		res.dpos  = P0*res.dpos + np.sum(P*maxpos,-1)
		off = maxpos-res.dpos[:,None]
		res.pcov  = P0*res.pcov + np.sum(P*off[:,None]*off[None,:],-1)
		res.ddpos = np.diag(res.pcov)**0.5
		res.pcorr = res.pcov[0,1]/res.ddpos[0]/res.ddpos[1]
		res.amps  = P0*res.amps + np.sum(P*maxamps,-1)
		res.damps = (res.damps**2 + np.sum(P*(maxamps-res.amps[:,None])**2,-1))**0.5
		# For the significance, we will use the difference from our peak to our
		# strongest competitor
		res.dchisq= res.dchisq - maxvals[0]
		# Base nsigma on the sources
		res.nsigma= max(0,res.dchisq)**0.5
		res.time = time.time()-t1
		return res
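
In the marginalization step above, each competing peak i gets the relative weight P_i = exp((dchisq_i - dchisq_best)/2), i.e. its likelihood ratio to the best-fit peak, and P0 = 1/(1 + sum_i P_i) normalizes the weights so that P0 + sum_i P_i = 1. The reported position, covariance and amplitudes are then weighted averages over the best fit and its competitors, and nsigma is based on the chi-square gap to the strongest competitor alone.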
Example #7
parser = argparse.ArgumentParser()
parser.add_argument("ifiles", nargs=2)
parser.add_argument("ofile")
parser.add_argument("-b", "--binsize", type=int, default=3)
parser.add_argument("-s", "--smooth",  type=float, default=30)
parser.add_argument("--div", type=float, default=1.0)
args = parser.parse_args()
b  = args.binsize
smooth = args.smooth * np.pi/180/60/(8*np.log(2))
m  = [enmap.read_map(f) for f in args.ifiles]
dm = (m[1]-m[0])/2

pixarea = dm.area()/np.prod(dm.shape[-2:])*(180*60/np.pi)**2

# Compute standard deviation in bins
dm = dm[...,:dm.shape[-2]//b*b,:dm.shape[-1]//b*b]
dm_blocks = dm.reshape(dm.shape[:-2]+(dm.shape[-2]//b,b,dm.shape[-1]//b,b))
var  = np.std(dm_blocks,axis=(-3,-1))**2*pixarea/args.div
# This reshaping stuff messes up the wcs, which doesn't notice
# that we now have bigger pixels. So correct that.
var  = enmap.samewcs(var, dm[...,::b,::b])

typ = np.median(var[var!=0])
var[~np.isfinite(var)] = 0
var = np.minimum(var,typ*1e6)

svar = enmap.smooth_gauss(var, smooth)
sigma = svar**0.5

enmap.write_map(args.ofile, sigma)
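
For intuition: if m[0] and m[1] are two splits of the same field with independent noise of variance sigma^2 per pixel, the half-difference dm = (m[1]-m[0])/2 cancels the common sky signal and has Var[dm] = (sigma^2 + sigma^2)/4 = sigma^2/2, which is exactly the noise variance of the coadd (m[0]+m[1])/2. The block-wise standard deviation of dm therefore estimates the coadd noise level, and --div can absorb other normalization conventions. A usage sketch (the script and file names are placeholders):

python noise_from_splits.py split1.fits split2.fits sigma.fits -b 3 -s 30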
Example #8
decs = np.random.uniform(bbox[0, 0], bbox[1, 0], size=N)
ras = np.random.uniform(bbox[0, 1], bbox[1, 1], size=N)

print((ras.min() * 180. / np.pi, ras.max() * 180. / np.pi))
print((decs.min() * 180. / np.pi, decs.max() * 180. / np.pi))
# coords = np.vstack((decs,ras))

# print "getting pixels..."
# pixs = gal_map.sky2pix(coords)

# print "binning..."
# dat,xedges,yedges = np.histogram2d(pixs[1,:],pixs[0,:],bins=shape)

mapper = CatMapper(shape, wcs, ras * 180. / np.pi, decs * 180. / np.pi)
gal_map = mapper.get_map()

print(gal_map.sum())
print(gal_map.sum() - N)

gal_map = enmap.smooth_gauss(gal_map, 2.0 * np.pi / 180. / 60.)

#gal_map = enmap.smooth_gauss(enmap.ndmap(dat.astype(np.float32),wcs),2.0*np.pi/180./60.)
print("plotting...")
io.highResPlot2d(gal_map, out_dir + "galmap.png")

fc = enmap.FourierCalc(shape, wcs)
p2d, d, d = fc.power2d(gal_map)

io.quickPlot2d(np.fft.fftshift(np.log10(p2d)), out_dir + "logp2d.png")
io.quickPlot2d(np.fft.fftshift((p2d)), out_dir + "p2d.png")
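
CatMapper above presumably bins the catalog positions into map pixels (it resembles the helper of the same name in the orphics package). A dependency-light sketch of the same binning, assuming pixell, with coordinates in radians:

import numpy as np
from pixell import enmap

def bin_catalog(shape, wcs, decs, ras):
    # Convert sky positions to nearest-pixel indices
    pixs = np.round(enmap.sky2pix(shape, wcs, np.vstack([decs, ras]))).astype(int)
    # Keep only points that land inside the map
    good = np.all((pixs >= 0) & (pixs < np.array(shape)[:, None]), axis=0)
    cmap = enmap.zeros(shape, wcs)
    # Accumulate counts per pixel (np.add.at handles repeated pixels)
    np.add.at(cmap, (pixs[0, good], pixs[1, good]), 1)
    return cmap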