Example #1
def get_pix_ranges(shape, wcs, horbox, daz, nt=4, azdown=1, ndet=1.0):
	(t1,t2),(az1,az2),el = horbox[:,0], horbox[:,1], np.mean(horbox[:,2])
	nphi = np.abs(utils.nint(360/wcs.wcs.cdelt[0]))
	# Find the pixel coordinates of first az sweep
	naz  = utils.nint(np.abs(az2-az1)/daz)//azdown
	ahor = np.zeros([3,naz])
	ahor[0] = utils.ctime2mjd(t1)
	ahor[1] = np.linspace(az1,az2,naz)
	ahor[2] = el
	acel    = coordinates.transform("hor","cel",ahor[1:],time=ahor[0],site=site)
	y, x1   = upscale(fixx(utils.nint(enmap.sky2pix(shape, wcs, acel[::-1])),nphi),azdown)
	# Reduce to unique y values
	_, uinds, hits = np.unique(y, return_index=True, return_counts=True)
	y, x1 = y[uinds], x1[uinds]
	# Find the pixel coordinates of time drift
	thor = np.zeros([3,nt])
	thor[0] = utils.ctime2mjd(np.linspace(t1,t2,nt))
	thor[1] = az1
	thor[2] = el
	tcel    = coordinates.transform("hor","cel",thor[1:],time=thor[0],site=site)
	_, tx   = utils.nint(fixx(enmap.sky2pix(shape, wcs, tcel[::-1]),nphi))
	x2 = x1 + tx[-1]-tx[0]
	x1, x2  = np.minimum(x1,x2), np.maximum(x1,x2)
	pix_ranges = np.concatenate([y[:,None],x1[:,None],x2[:,None]],1)
	# Weight per pixel in pix ranges. If ndet=1 this corresponds to
	# telescope time per output pixel
	weights = (t2-t1)/(naz*azdown)/(x2-x1)*ndet * hits
	return pix_ranges, weights
Example #2
def calc_pbox(shape, wcs, box, n=10):
    nphi = utils.nint(np.abs(360 / wcs.wcs.cdelt[0]))
    dec = np.linspace(box[0, 0], box[1, 0], n)
    ra = np.linspace(box[0, 1], box[1, 1], n)
    y = enmap.sky2pix(shape, wcs, [dec, dec * 0 + box[0, 1]])[0]
    x = enmap.sky2pix(shape, wcs, [ra * 0 + box[0, 0], ra])[1]
    x = utils.unwind(x, nphi)
    pbox = np.array([[np.min(y), np.min(x)], [np.max(y), np.max(x)]])
    xm1 = np.mean(pbox[:, 1])
    xm2 = utils.rewind(xm1, shape[-1] / 2, nphi)
    pbox[:, 1] += xm2 - xm1
    pbox = utils.nint(pbox)
    return pbox
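
A minimal standalone sketch (plain numpy, values illustrative) of the recentering step at the end of calc_pbox: the unwrapped x range is shifted by a whole number of sky periods so its midpoint lands near the middle column of the map. The rewind helper below is an assumed equivalent of utils.rewind.

import numpy as np

def rewind(x, ref, period):
    # Shift x by multiples of period so it lies within half a period of ref
    return ref + (x - ref + period/2) % period - period/2

nphi   = 8640                        # pixels around the sky
pbox_x = np.array([9000.0, 9500.0])  # unwrapped x range, one period too far over
xm1    = np.mean(pbox_x)
xm2    = rewind(xm1, 4320, nphi)     # recenter near column shape[-1]/2 = 4320
pbox_x += xm2 - xm1                  # becomes [360, 860]
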
Example #3
def make_projectable_map_by_pos(pos,
                                lmax,
                                dims=(),
                                oversample=2.0,
                                dtype=float,
                                verbose=False):
    """Make a map suitable as an intermediate step in projecting alms up to
	lmax on to the given positions. Helper function for alm2map."""
    # First find the theta range of the pixels, with a 10% margin
    ra_ref = np.mean(pos[1]) / utils.degree
    decrange = np.array([np.min(pos[0]), np.max(pos[0])])
    decrange = (decrange - np.mean(decrange)) * 1.1 + np.mean(decrange)
    decrange = np.array(
        [max(-np.pi / 2, decrange[0]),
         min(np.pi / 2, decrange[1])])
    decrange /= utils.degree
    wdec = np.abs(decrange[1] - decrange[0])
    # The shortest wavelength in the alm is about 2pi/lmax. We need at least
    # two samples per mode.
    res = 180. / lmax / oversample
    # Set up an intermediate coordinate system for the SHT. We will use
    # CAR coordinates conformal on the equator, with a pixel on each pole.
    # This will give it clenshaw curtis pixelization.
    nx = utils.nint(360 / res)
    nytot = utils.nint(180 / res)
    # First set up the pixelization for the whole sky. Negative cdelt to
    # make sharp extra happy. Not really necessary, but makes some things
    # simpler later.
    wcs = enwcs.WCS(naxis=2)
    wcs.wcs.ctype = ["RA---CAR", "DEC--CAR"]
    wcs.wcs.crval = [ra_ref, 0]
    wcs.wcs.cdelt = [360. / nx, -180. / nytot]
    wcs.wcs.crpix = [nx / 2.0 + 1, nytot / 2.0 + 1]
    # Then find the subset that includes the dec range we want
    y1 = utils.nint(wcs.wcs_world2pix(0, decrange[0], 0)[1])
    y2 = utils.nint(wcs.wcs_world2pix(0, decrange[1], 0)[1])
    y1, y2 = min(y1, y2), max(y1, y2)
    # Offset wcs to start at our target range
    ny = y2 - y1
    wcs.wcs.crpix[1] -= y1
    # Construct the map. +1 to put extra pixel at pole when we are fullsky
    if verbose:
        print "Allocating shape %s dtype %s intermediate map" % (
            dims + (ny + 1, nx), np.dtype(dtype).char)
    tmap = enmap.zeros(dims + (ny + 1, nx), wcs, dtype=dtype)
    return tmap
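
The geometry setup above can be sketched standalone with astropy.wcs (the enwcs module used in the example is assumed to wrap it); numbers below are illustrative, not taken from the original.

import numpy as np
from astropy.wcs import WCS

lmax, oversample = 3000, 2.0
res   = 180. / lmax / oversample        # degrees per pixel: at least 2 samples per mode
nx    = int(round(360 / res))
nytot = int(round(180 / res))

wcs = WCS(naxis=2)
wcs.wcs.ctype = ["RA---CAR", "DEC--CAR"]
wcs.wcs.crval = [0, 0]
wcs.wcs.cdelt = [360. / nx, -180. / nytot]       # negative dec step
wcs.wcs.crpix = [nx / 2.0 + 1, nytot / 2.0 + 1]  # reference pixel on the equator

# Rows covering an illustrative dec range of +-30 degrees
ys = [float(wcs.wcs_world2pix(0, dec, 0)[1]) for dec in (-30, 30)]
y1, y2 = int(round(min(ys))), int(round(max(ys)))
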
Example #4
def sim_srcs(shape, wcs, srcs, beam, omap=None, dtype=None, nsigma=5, rmax=None, method="loop", mmul=1,
		return_padded=False):
	"""Simulate a point source map in the geometry given by shape, wcs
	for the given srcs[nsrc,{dec,ra,T...}], using the beam[{r,val},npoint],
	which must be equispaced. If omap is specified, the sources will be
	added to it in place. All angles are in radians. The beam is only evaluated up to
	the point where it reaches exp(-0.5*nsigma**2) unless rmax is specified, in which
	case this gives the maximum radius. mmul gives a factor to multiply the resulting
	source model by. This is mostly useful in conjunction with omap. method can be
	"loop" or "vectorized", but "loop" is both faster and uses less memory, so there's
	no point in using the latter.
	
	The source simulation is sped up by using a source lookup grid.
	"""
	if omap is None: omap = enmap.zeros(shape, wcs, dtype)
	ishape = omap.shape
	omap   = omap.preflat
	ncomp  = omap.shape[0]
	# In keeping with the rest of the functions here, srcs is [nsrc,{dec,ra,T,Q,U}].
	# The beam parameters are ignored - the beam argument is used instead
	amps = srcs[:,2:2+ncomp]
	poss = srcs[:,:2].copy()
	# Rewind positions to let us use flat-sky approximation for distance calculations
	#wcs  = enmap.enlib.wcs.fix_wcs(wcs)
	ref  = np.mean(enmap.box(shape, wcs, corner=False)[:,1])
	poss[:,1] = utils.rewind(poss[:,1], ref)
	beam = expand_beam(beam, nsigma, rmax)
	rmax = nsigma2rmax(beam, nsigma)
	# Pad our map by rmax, so we get the contribution from sources
	# just outside our area. We will later split our map into cells of size cres. Let's
	# adjust the padding so we have a whole number of cells
	cres = utils.nint(rmax/omap.pixshape())
	epix = cres-(omap.shape[-2:]+2*cres)%cres
	padding = [cres,cres+epix]
	wmap, wslice  = enmap.pad(omap, padding, return_slice=True)
	# Overall we will have this many grid cells
	cshape = wmap.shape[-2:]/cres
	# Find out which sources matter for which cells
	srcpix = wmap.sky2pix(poss.T).T
	pixbox= np.array([[0,0],wmap.shape[-2:]],int)
	nhit, cell_srcs = build_src_cells(pixbox, srcpix, cres)
	posmap = wmap.posmap()
	model = eval_srcs_loop(posmap, poss, amps, beam, cres, nhit, cell_srcs)
	# Update our work map, through our view
	if mmul != 1: model *= mmul
	wmap  += model
	if not return_padded:
		# Copy out
		omap[:] = wmap[wslice]
		# Restore shape
		omap = omap.reshape(ishape)
		return omap
	else:
		return wmap.reshape(ishape[:-2]+wmap.shape[-2:]), wslice
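
The cell padding arithmetic in sim_srcs can be checked in isolation. A small sketch with made-up numbers, showing that the extra epix pixels make the padded shape an exact multiple of the cell size:

import numpy as np

shape = np.array([2160, 8640])          # map shape in pixels, illustrative
cres  = np.array([17, 23])              # cell size (rmax in pixels), illustrative
epix  = cres - (shape + 2*cres) % cres  # extra padding needed on the far edge
assert np.all((shape + 2*cres + epix) % cres == 0)
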
Example #5
def make_projectable_map_cyl(map, verbose=False):
    """Given an enmap in a cylindrical projection, return a map with
	the same pixelization, but extended to cover a whole band in phi
	around the sky. Also returns the slice required to recover the
	input map from the output map."""
    # First check if we need flipping. Sharp wants theta,phi increasing,
    # which means dec decreasing and ra increasing.
    flipx = map.wcs.wcs.cdelt[0] < 0
    flipy = map.wcs.wcs.cdelt[1] > 0
    if flipx: map = map[..., :, ::-1]
    if flipy: map = map[..., ::-1, :]
    # Then check if the map satisfies the lat-ring requirements
    ny, nx = map.shape[-2:]
    vy, vx = enmap.pix2sky(map.shape, map.wcs, [np.arange(ny), np.zeros(ny)])
    hy, hx = enmap.pix2sky(map.shape, map.wcs, [np.zeros(nx), np.arange(nx)])
    dx = hx[1:] - hx[:-1]
    dx = dx[np.isfinite(dx)]  # Handle overextended coordinates

    if not np.allclose(dx, dx[0]):
        raise ShapeError("Map must have constant phi spacing")
    nphi = utils.nint(2 * np.pi / dx[0])
    if not np.allclose(2 * np.pi / nphi, dx[0]):
        raise ShapeError("Pixels must evenly circumference")
    if not np.allclose(vx, vx[0]):
        raise ShapeError(
            "Different phi0 per row indicates non-cylindrical enmap")
    phi0 = vx[0]
    # Make a map with the same geometry covering a whole band around the sky
    # We can do this simply by extending it in the positive pixel dimension.
    oshape = map.shape[:-1] + (nphi, )
    owcs = map.wcs
    # Our input map could in theory cover multiple copies of the sky, which
    # would require us to copy out multiple slices.
    nslice = (nx + nphi - 1) // nphi
    islice, oslice = [], []

    def negnone(x):
        return x if x >= 0 else None

    for i in range(nslice):
        # i1:i2 is the range of pixels in the original map to use
        i1, i2 = i * nphi, min((i + 1) * nphi, nx)
        islice.append((Ellipsis, slice(i1, i2)))
        # yslice and xslice give the range of pixels in our temporary map to use.
        # This is 0:(i2-i1) if we're not flipping, but if we flip we count from
        # the opposite direction: nx-1:nx-1-(i2-i1):-1
        yslice = slice(-1, None, -1) if flipy else slice(None)
        xslice = slice(nx - 1, negnone(nx - 1 - (i2 - i1)),
                       -1) if flipx else slice(0, i2 - i1)
        oslice.append((Ellipsis, yslice, xslice))
    if verbose:
        print "Allocating shape %s dtype %s intermediate map" % (
            str(oshape), np.dtype(map.dtype).char)
    return enmap.empty(oshape, owcs, dtype=map.dtype), islice, oslice
Example #6
File: scan.py Project: Nium14/enlib
def build_hwp_sample_mapping(hwp, quantile=0.1):
	"""Given a HWP angle, return an array with shape [nout] containing
	the original sample index (float) corresponding to each sample in the
	remapped array, along with the resulting hwp sample rate.
	The remapping also truncates the end to ensure that
	there is an integer number of HWP rotations in the data."""
	# Make sure there are no angle wraps in the hwp
	hwp = utils.unwind(hwp)
	# interp requires hwp to be monotonically increasing. In practice
	# it could be monotonically decreasing too, but our final result
	# does not depend on the direction of rotation, so we simply flip it here
	# if necessary
	hwp = np.abs(hwp)
	# Find the target hwp speed
	speed = np.percentile(hwp[1:]-hwp[:-1], 100*quantile)
	# We want a whole number of samples per revolution, and
	# a whole number of revolutions in the whole tod
	a    = hwp - hwp[0]
	nrev = int(np.floor(a[-1]/(2*np.pi)))
	nper = utils.nint(2*np.pi/speed)
	# Make each of these numbers fourier-friendly
	nrev = fft.fft_len(nrev, "below")
	nper = fft.fft_len(nper, "above")
	# Set up our output samples
	speed = 2*np.pi/nper
	nout  = nrev*nper
	ohwp  = hwp[0] + np.arange(nout)*speed
	# Find the input sample for each output sample
	res = bunch.Bunch()
	res.oimap = np.interp(ohwp, hwp, np.arange(len(hwp)))
	# Find the output sample for each input sample too. Because of
	# cropping, the last of these will be invalid
	res.iomap = np.interp(np.arange(len(hwp)), res.oimap, np.arange(len(res.oimap)))
	# Find the average sampling rate change fsamp_rel = fsamp_out/fsamp_in
	res.fsamp_rel = 1/np.mean(res.oimap[1:]-res.oimap[:-1])
	res.insamp = len(hwp)
	res.onsamp = nout
	res.nrev   = nrev
	res.nper   = nper
	return res
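
A self-contained numpy sketch of the same remapping idea, using a synthetic monotonic HWP angle; the fft_len rounding from the original is omitted here.

import numpy as np

n     = 10000
dhwp  = 0.02 + 0.002*np.sin(np.arange(n)*0.01)  # jittery but always-positive steps
hwp   = np.cumsum(dhwp)                         # unwrapped, monotonic angle
speed = np.percentile(np.diff(hwp), 10)         # target angle step per sample
nper  = int(round(2*np.pi/speed))               # samples per revolution
nrev  = int(np.floor((hwp[-1]-hwp[0])/(2*np.pi)))
ohwp  = hwp[0] + np.arange(nrev*nper)*(2*np.pi/nper)
oimap = np.interp(ohwp, hwp, np.arange(n))      # input sample index per output sample
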
Example #7
def make_equispaced(d, t, quantile=0.1, order=3, mask_nan=False):
    """Given an array d[...,nt] of data that has been sampled at times t[nt],
	return an array that has been resampled to have a constant sampling rate."""
    # Find the typical sampling rate of the input. We will lose information if
    # we don't use a sampling rate that's higher than the highest rate in the
    # input. But we also don't want to exaggerate the number of samples. Use a
    # configurable quantile as a compromise.
    dt = np.percentile(np.abs(t[1:] - t[:-1]), quantile * 100)
    # Modify so we get a whole number of samples
    nout = utils.nint(np.abs(t[-1] - t[0]) / dt) + 1
    dt = (t[-1] - t[0]) / (nout - 1)
    # Construct our output time steps
    tout = np.arange(nout) * dt + t[0]
    # To interpolate, we need the input sample number as a function of time
    samples = np.interp(tout, t, np.arange(len(t)))
    # Now that we have the samples we can finally evaluate the function
    dout = utils.interpol(d,
                          samples[None],
                          mode="nearest",
                          order=order,
                          mask_nan=mask_nan)
    return dout, tout
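
The same resampling can be sketched with plain numpy, using linear interpolation in place of utils.interpol; the quantile-based choice of dt follows the function above.

import numpy as np

t    = np.sort(np.random.uniform(0, 10, 500))  # irregular sample times
d    = np.sin(t)                               # data sampled at those times
dt   = np.percentile(np.abs(np.diff(t)), 10)   # close to the fastest input rate
nout = int(round((t[-1]-t[0])/dt)) + 1
dt   = (t[-1]-t[0])/(nout-1)                   # snap to a whole number of steps
tout = np.arange(nout)*dt + t[0]
dout = np.interp(tout, t, d)                   # equispaced resampling
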
Example #8
def npix2nside(npix):
	return utils.nint((npix/12)**0.5)
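
For reference, the relation this helper inverts is npix = 12*nside**2, so the round trip is exact for valid HEALPix maps:

nside = 512
npix  = 12*nside**2                   # 3145728 pixels
assert round((npix/12)**0.5) == nside
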
Example #9
def get_pix_ranges(shape, wcs, horbox, daz, nt=4, ndet=1.0, site=None):
    """An appropriate daz for this function is about 1 degree"""
    # For each row in the map we want to know the hit density for that row,
    # as well as its start and end. In the original function we got one
    # sample per row by oversampling and then using unique. This is unreliable,
    # and also results in quantized steps in the depth. We can instead
    # do a coarse equispaced az -> ra,dec -> y,x. We can then interpolate
    # this to get exactly one sample per y. To get the density properly,
    # we just need dy/dt = dy/daz * daz/dt, where we assume daz/dt is constant.
    # We get dy/daz from the coarse stuff, and interpolate that too, which gives
    # the density per row.
    (t1, t2), (az1, az2), el = horbox[:, 0], horbox[:, 1], np.mean(horbox[:, 2])
    nphi = np.abs(utils.nint(360 / wcs.wcs.cdelt[0]))
    # First produce the coarse single scan
    naz = utils.nint(np.abs(az2 - az1) / daz)
    if naz <= 1: return None, None
    ahor = np.zeros([3, naz])
    ahor[0] = utils.ctime2mjd(t1)
    ahor[1] = np.linspace(az1, az2, naz)
    ahor[2] = el
    acel = coordinates.transform("hor",
                                 "cel",
                                 ahor[1:],
                                 time=ahor[0],
                                 site=site)
    ylow, x1low = fixx(enmap.sky2pix(shape, wcs, acel[::-1]), nphi)
    if ylow[1] < ylow[0]:
        ylow, x1low = ylow[::-1], x1low[::-1]
    # Find dy/daz for these points
    glow = np.gradient(ylow) * (naz - 1) / (az2 - az1)
    # Now interpolate to full resolution
    y = np.arange(utils.nint(ylow[0]), utils.nint(ylow[-1]) + 1)
    if len(y) == 0:
        print "Why is y empty?", naz, ylow[0], ylow[1]
        return None, None
    x1 = np.interp(y, ylow, x1low)
    grad = np.interp(y, ylow, glow)
    # Now we just need the width of the rows, x2, which comes
    # from the time drift
    thor = np.zeros([3, nt])
    thor[0] = utils.ctime2mjd(np.linspace(t1, t2, nt))
    thor[1] = az1
    thor[2] = el
    tcel = coordinates.transform("hor",
                                 "cel",
                                 thor[1:],
                                 time=thor[0],
                                 site=site)
    _, tx = utils.nint(fixx(enmap.sky2pix(shape, wcs, tcel[::-1]), nphi))
    x2 = x1 + tx[-1] - tx[0]
    x1, x2 = np.minimum(x1, x2), np.maximum(x1, x2)
    pix_ranges = utils.nint(
        np.concatenate([y[:, None], x1[:, None], x2[:, None]], 1))
    # Weight per pixel. We want this to be in units of seconds of
    # observing time per pixel if ndet=1. We know the total number of pixels
    # hit (ny*nx) and the total time (t2-t1), and we know the relative
    # weight per row (1/grad), so we can just normalize things
    ny, nx = len(y), x2[0] - x1[0]
    npix = ny * nx
    if npix == 0 or np.any(grad <= 0):
        return pix_ranges, grad * 0
    else:
        weights = 1 / grad
        weights *= (t2 - t1) / (np.sum(weights) *
                                nx) * ndet  # *nx because weight is per row
        return pix_ranges, weights
Example #10
def npix2nside(npix):
    return utils.nint((npix / 12)**0.5)
Example #11
def match_predefined_minfo(m, rtol=None, atol=None):
	"""Given an enmapwith constant-latitude rows and constant longitude
	intervals, return the libsharp predefined minfo with ringweights that's
	the closest match to our pixelization."""
	if rtol is None: rtol = 1e-3*utils.arcmin
	if atol is None: atol = 1.0*utils.arcmin
	# Make sure the colatitude ascends
	flipy  = m.wcs.wcs.cdelt[1] > 0
	if flipy: m = m[...,::-1,:]
	theta  = np.pi/2 - m[...,:,:1].posmap()[0,:,0]
	phi0   = m[...,1:2,0:1].posmap()[1,0,0]
	ntheta, nphi = m.shape[-2:]
	# First find out how many lat rings there are in the whole sky.
	# Find the first and last pixel center inside bounds
	y1   = int(np.round(m.sky2pix([np.pi/2,0])[0]))
	y2   = int(np.round(m.sky2pix([-np.pi/2,0])[0]))
	phi0 = m.pix2sky([0,0])[1]
	ny   = utils.nint(y2-y1+1)
	nx   = utils.nint(np.abs(360./m.wcs.wcs.cdelt[0]))
	# Define our candidate pixelizations
	minfos = []
	for i in range(-1,2):
		#minfos.append(sharp.map_info_gauss_legendre(ny+i, nx, phi0))
		minfos.append(sharp.map_info_clenshaw_curtis(ny+i, nx, phi0))
		minfos.append(sharp.map_info_fejer1(ny+i, nx, phi0))
		minfos.append(sharp.map_info_fejer2(ny+i, nx, phi0))
		minfos.append(sharp.map_info_mw(ny+i, nx, phi0))
	# For each pixelization find the first ring in the map
	aroffs, scores, minfos2 = [], [], []
	for minfo in minfos:
		# Find theta closest to our first theta
		i1 = np.argmin(np.abs(theta[0]-minfo.theta))
		# If we're already on the full sky, then the +1
		# pixel alternative will not work.
		if i1+len(theta) > minfo.theta.size: continue
		# Find the largest theta offset for all y in our input map
		offs = theta-minfo.theta[i1:i1+len(theta)]
		aoff = np.max(np.abs(offs))
		# Find the largest offset after applying a small pointing offset
		roff = np.max(np.abs(offs-np.mean(offs)))
		aroffs.append([aoff,roff,i1])
		scores.append(aoff/atol + roff/rtol)
		minfos2.append(minfo)
	# Choose the one with the lowest score (lowest mismatch)
	best  = np.argmin(scores)
	aoff, roff, i1 = aroffs[best]
	i2 = i1+ntheta
	if not aoff < atol: raise ShapeError("Could not find a map_info with predefined weights matching input map (abs offset %e >= %e)" % (aoff, atol))
	if not roff < rtol: raise ShapeError("Could not find a map_info with predefined weights matching input map (rel offset %e >= %e)" % (roff, rtol))
	minfo = minfos2[best]
	# Modify the minfo to restrict it to only the rows contained in m
	minfo_cut = sharp.map_info(
			minfo.theta[i1:i2],  minfo.nphi[i1:i2], minfo.phi0[i1:i2],
			minfo.offsets[i1:i2]-minfo.offsets[i1], minfo.stride[i1:i2],
			minfo.weight[i1:i2])
	if flipy:
		# Actual map is flipped in y relative to the one we computed the map info
		minfo_cut = sharp.map_info(
				minfo_cut.theta[::-1], minfo_cut.nphi[::-1], minfo_cut.phi0[::-1],
				minfo_cut.offsets[:], minfo_cut.stride[:], minfo_cut.weight[::-1])
	# Phew! Return the result
	return minfo_cut
Example #12
def rand_map(shape,
             wcs,
             ps_lensinput,
             lmax=None,
             maplmax=None,
             dtype=np.float64,
             seed=None,
             oversample=2.0,
             spin=2,
             output="l",
             geodesic=True,
             verbose=False,
             delta_theta=None):
    import curvedsky, sharp
    ctype = np.result_type(dtype, 0j)
    # Restrict to target number of components
    oshape = shape[-3:]
    if len(oshape) == 2: shape = (1, ) + tuple(shape)
    # First draw a random lensing field, and use it to compute the undeflected positions
    if verbose: print("Generating alms")
    alm, ainfo = curvedsky.rand_alm(ps_lensinput,
                                    lmax=lmax,
                                    seed=seed,
                                    dtype=ctype,
                                    return_ainfo=True)
    phi_alm, cmb_alm = alm[0], alm[1:1 + shape[-3]]
    # Truncate alm if we want a smoother map. In taylens, it was necessary to truncate
    # to a lower lmax for the map than for phi, to avoid aliasing. The appropriate lmax
    # for the cmb was the one that fits the resolution. FIXME: Can't slice alm this way.
    #if maplmax: cmb_alm = cmb_alm[:,:maplmax]
    del alm
    if delta_theta is None: bsize = shape[-2]
    else:
        bsize = utils.nint(abs(delta_theta / utils.degree / wcs.wcs.cdelt[1]))
        # Adjust bsize so we don't get any tiny blocks at the end
        nblock = shape[-2] // bsize
        bsize = int(shape[-2] / (nblock + 0.5))
    # Allocate output maps
    if "p" in output: phi_map = enmap.empty(shape[-2:], wcs, dtype=dtype)
    if "k" in output:
        kappa_map = enmap.empty(shape[-2:], wcs, dtype=dtype)
        l = np.arange(ainfo.lmax + 1.0)
        kappa_alm = ainfo.lmul(phi_alm, l * (l + 1) / 2)
        for i1 in range(0, shape[-2], bsize):
            curvedsky.alm2map(kappa_alm, kappa_map[..., i1:i1 + bsize, :])
        del kappa_alm
    if "a" in output:
        grad_map = enmap.empty((2, ) + shape[-2:], wcs, dtype=dtype)
    if "u" in output: cmb_raw = enmap.empty(shape, wcs, dtype=dtype)
    if "l" in output: cmb_obs = enmap.empty(shape, wcs, dtype=dtype)
    # Then loop over dec bands
    for i1 in range(0, shape[-2], bsize):
        i2 = min(i1 + bsize, shape[-2])
        lshape, lwcs = enmap.slice_geometry(shape, wcs,
                                            (slice(i1, i2), slice(None)))
        if "p" in output:
            if verbose: print("Computing phi map")
            curvedsky.alm2map(phi_alm, phi_map[..., i1:i2, :])
        if verbose: print("Computing grad map")
        if "a" in output: grad = grad_map[..., i1:i2, :]
        else: grad = enmap.zeros((2, ) + lshape[-2:], lwcs, dtype=dtype)
        curvedsky.alm2map(phi_alm, grad, deriv=True)
        if "l" not in output: continue
        if verbose: print("Computing observed coordinates")
        obs_pos = enmap.posmap(lshape, lwcs)
        if verbose: print("Computing alpha map")
        raw_pos = enmap.samewcs(
            offset_by_grad(obs_pos, grad, pol=shape[-3] > 1,
                           geodesic=geodesic), obs_pos)
        del obs_pos, grad
        if "u" in output:
            if verbose: print("Computing unlensed map")
            curvedsky.alm2map(cmb_alm, cmb_raw[..., i1:i2, :], spin=spin)
        if verbose: print("Computing lensed map")
        cmb_obs[..., i1:i2, :] = curvedsky.alm2map_pos(cmb_alm,
                                                       raw_pos[:2],
                                                       oversample=oversample,
                                                       spin=spin)
        if raw_pos.shape[0] > 2 and np.any(raw_pos[2]):
            if verbose: print("Rotating polarization")
            cmb_obs[..., i1:i2, :] = enmap.rotate_pol(cmb_obs[..., i1:i2, :],
                                                      raw_pos[2])
        del raw_pos
    del cmb_alm, phi_alm
    # Output in same order as specified in output argument
    res = []
    for c in output:
        if c == "l": res.append(cmb_obs.reshape(oshape))
        elif c == "u": res.append(cmb_raw.reshape(oshape))
        elif c == "p": res.append(phi_map)
        elif c == "k": res.append(kappa_map)
        elif c == "a": res.append(grad_map)
    return tuple(res)
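
The dec-band block size adjustment near the top of rand_map can be illustrated on its own: shrinking bsize slightly keeps the trailing band from becoming arbitrarily small (numbers illustrative).

ny, bsize = 2100, 500
nblock = ny // bsize               # 4 full bands would leave a 100-row remnant
bsize  = int(ny / (nblock + 0.5))  # 466: bands of 466 rows plus a ~236-row remnant
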
Example #13
	L.info("Initializing extra postprocessors")

	L.info("Writing preconditioners")
	mapmaking.write_precons(signals, root)

	for param, signal in zip(signal_params, signals):
		if config.get("crossmap") and param["type"] in ["map","bmap","fmap","dmap"]:
			L.info("Computing crosslink map")
			cmap = mapmaking.calc_crosslink_map(signal, myscans, weights)
			signal.write(root, "crosslink", cmap)
			del cmap
		if config.get("icovmap") and param["type"] in ["map","bmap","fmap","dmap","noiserect"]:
			L.info("Computing icov map")
			shape, wcs = signal.area.shape, signal.area.wcs
			# Use equidistant pixel spacing for robustness in non-cylindrical coordinates
			step = utils.nint(np.abs(config.get("icovstep")/wcs.wcs.cdelt[::-1]))
			posy = np.arange(0.5,shape[-2]/step[-2]) if step[-2] > 0 else np.array([0])
			posx = np.arange(0.5,shape[-1]/step[-1]) if step[-1] > 0 else np.array([0])
			pos  = utils.outer_stack([posy,posx]).reshape(2,-1).T
			if pos.size == 0:
				L.debug("Not enough pixels to compute icov for step size %f. Skipping icov" % config.get("icovstep"))
				continue
			# Apply the y skew
			if step[0] > 0:
				yskew     = utils.nint(config.get("icovyskew")/wcs.wcs.cdelt[1])
				pos[:,0] += pos[:,1] * yskew * 1.0 / step[0]
			# Go from grid indices to pixels
			pos       = (pos*step % shape[-2:]).astype(int)
			print(pos.shape, pos.dtype)
			icov = mapmaking.calc_icov_map(signal, myscans, pos, weights)
			signal.write(root, "icov", icov)
Example #14
def fastweight(shape,
               wcs,
               db,
               weight="det",
               array_rad=0.7 * utils.degree,
               comm=None,
               dtype=np.float64,
               daz=0.5 * utils.degree,
               nt=4,
               chunk_size=100,
               site=None,
               verbose=False,
               normalize=True):
    # Get the boresight bounds for each TOD
    ntod = len(db)
    mids = np.array([db.data["t"], db.data["az"], db.data["el"]])
    widths = np.array([db.data["dur"], db.data["waz"], db.data["wel"]])
    box = np.array([mids - widths / 2, mids + widths / 2])
    box[:, 1:] *= utils.degree
    ndets = db.data["ndet"]
    # Set up our output map
    omap = enmap.zeros(shape, wcs, dtype)
    # Sky horizontal period in pixels
    nphi = np.abs(utils.nint(360 / wcs.wcs.cdelt[0]))
    # Loop through chunks
    nchunk = (ntod + chunk_size - 1) // chunk_size
    if comm: rank, size = comm.rank, comm.size
    else: rank, size = 0, 1
    for chunk in range(rank, nchunk, size):
        i1 = chunk * chunk_size
        i2 = min((chunk + 1) * chunk_size, ntod)
        # Split the hits into horizontal pixel ranges
        pix_ranges, weights = [], []
        with bench.mark("get"):
            for i in range(i1, i2):
                ndet_eff = ndets[i] if weight == "det" else 1000.0
                pr, w = get_pix_ranges(shape,
                                       wcs,
                                       box[:, :, i],
                                       daz,
                                       nt,
                                       ndet=ndet_eff,
                                       site=site)
                if pr is None: continue
                pix_ranges.append(pr)
                weights.append(w)
            if len(pix_ranges) == 0: continue
            pix_ranges = np.concatenate(pix_ranges, 0)
            weights = np.concatenate(weights, 0)
        with bench.mark("add"):
            add_weight(omap, pix_ranges, weights, nphi)
        if verbose:
            print "%4d %4d %7.4f %7.4f" % (chunk, comm.rank,
                                           bench.stats.get("get"),
                                           bench.stats.get("add"))
    if comm:
        omap = utils.allreduce(omap, comm)
    # Change unit from seconds per pixel to seconds per square arcmin
    if normalize:
        pixarea = omap.pixsizemap() / utils.arcmin**2
        omap /= pixarea
    omap[~np.isfinite(omap)] = 0
    if array_rad:
        omap = smooth_tophat(omap, array_rad)
    omap[omap < 1e-6] = 0
    return omap
Example #15
                              proj="car",
                              ref=[0, 0])

    def get_area(area):
        try:
            return get_area_str(area)
        except ValueError:
            return get_area_file(area)

    utils.mkdir(args.odir)
    comm = mpi.COMM_WORLD
    dtype = np.float32
    #shape, wcs = enmap.read_map_geometry(args.area)
    shape, wcs = get_area(args.area)
    tsize, pad = args.tsize, args.pad
    nphi = abs(utils.nint(360. / wcs.wcs.cdelt[0]))
    only = list(map(int, args.only.split(","))) if args.only else None

    # Our parameter search space. Distances in AU, speeds in arcmin per year
    ym = utils.arcmin / utils.yr2days
    rmin, rmax, dr = [float(w) for w in args.rsearch.split(":")]
    vmin, vmax, dv = [float(w) * ym for w in args.vsearch.split(":")]
    nr = int(np.ceil((rmax - rmin) / dr)) + 1
    nv = 2 * int(np.round(vmax / dv)) + 1
    rlist = [rmin + i * dr for i in range(nr)]
    if args.rinf: rlist += [1e9]

    # How many tiles will we have?
    if tsize == 0: nty, ntx = 1, 1
    else: nty, ntx = (np.array(shape[-2:]) + tsize - 1) // tsize
    ntile = nty * ntx
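
The tile count above is a ceil division of the map shape by the tile size, applied to both axes at once; a small sketch with illustrative numbers:

import numpy as np

shape = (3, 2160, 8640)   # (ncomp, ny, nx), illustrative
tsize = 675
nty, ntx = (np.array(shape[-2:]) + tsize - 1) // tsize  # 4 x 13 tiles
ntile = nty * ntx
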
Example #16
def retile(ipathfmt,
           opathfmt,
           itile1=(None, None),
           itile2=(None, None),
           otileoff=(0, 0),
           otilenum=(None, None),
           ocorner=(-np.pi / 2, -np.pi),
           otilesize=(675, 675),
           comm=None,
           verbose=False,
           slice=None,
           wrap=True):
    """Given a set of tiles on disk with locations ipathfmt % {"y":...,"x":...},
	retile them into a new tiling and write the result to opathfmt % {"y":...,"x":...}.
	The new tiling will have tile size given by otilesize[2]. Negative size means the
	tiling will go down/left instead of up/right. The corner of the tiling will
	be at sky coordinates ocorner[2] in radians. The new tiling will be pixel-
	compatible with the input tiling - e.g. the wcs will only differ by crpix.

	The output tiling will logically cover the whole sky, but only output tiles
	that overlap with input tiles will actually be written. This can be modified
	by using otileoff[2] and otilenum[2]. otileoff gives the tile indices of the
	corner tile, while otilenum indicates the number of tiles to write."""
    # Set up mpi
    rank, size = (comm.rank, comm.size) if comm is not None else (0, 1)
    # Expand any scalars
    if otilesize is None: otilesize = (675, 675)
    otilesize = np.zeros(2, int) + otilesize
    otileoff = np.zeros(2, int) + otileoff
    # Find the range of input tiles
    itile1, itile2 = find_tile_range(ipathfmt, itile1, itile2)
    # To fill in the rest of the information we need to know more
    # about the input tiling, so read the first tile
    ibase = enmap.read_map(ipathfmt % {"y": itile1[0], "x": itile1[1]})
    if slice: ibase = eval("ibase" + slice)
    itilesize = ibase.shape[-2:]
    ixres = ibase.wcs.wcs.cdelt[0]
    nphi = utils.nint(360 / np.abs(ixres))
    ntile_wrap = nphi // otilesize[1]
    # Find the pixel position of our output corners according to the wcs.
    # This is the last place we need to do a coordinate transformation.
    # All the rest can be done in pure pixel logic.
    pixoff = np.round(ibase.sky2pix(ocorner)).astype(int)

    # Find the range of output tiles
    def pix2otile(pix, ioff, osize):
        return (pix - ioff) // osize

    otile1 = pix2otile(itile1 * itilesize, pixoff, otilesize)
    otile2 = pix2otile(itile2 * itilesize - 1, pixoff, otilesize)
    otile1, otile2 = np.minimum(otile1, otile2), np.maximum(otile1, otile2)
    otile2 += 1
    # We can now loop over output tiles
    cache = [None, None, None]
    oyx = [(oy, ox) for oy in range(otile1[0], otile2[0])
           for ox in range(otile1[1], otile2[1])]
    for i in range(rank, len(oyx), size):
        otile = np.array(oyx[i])
        # Find out which input tiles overlap with this output tile.
        # Our tile stretches from opix1:opix2 relative to the global input pixels
        opix1 = otile * otilesize + pixoff
        opix2 = (otile + 1) * otilesize + pixoff
        # output tiles and input tiles may increase in opposite directions
        opix1, opix2 = np.minimum(opix1, opix2), np.maximum(opix1, opix2)
        try:
            omap = read_area(ipathfmt, [opix1, opix2],
                             itile1=itile1,
                             itile2=itile2,
                             cache=cache,
                             slice=slice)
        except IOError:
            continue
        x = otile[1] + otileoff[1]
        if wrap: x %= ntile_wrap
        oname = opathfmt % {"y": otile[0] + otileoff[0], "x": x}
        utils.mkdir(os.path.dirname(oname))
        enmap.write_map(oname, omap)
        if verbose: print(oname)
Example #17
	L.info("Initializing extra postprocessors")

	L.info("Writing preconditioners")
	mapmaking.write_precons(signals, root)

	for param, signal in zip(signal_params, signals):
		if config.get("crossmap") and param["type"] in ["map","bmap","fmap","dmap"]:
			L.info("Computing crosslink map")
			cmap = mapmaking.calc_crosslink_map(signal, myscans, weights)
			signal.write(root, "crosslink", cmap)
			del cmap
		if config.get("icovmap") and param["type"] in ["map","bmap","fmap","dmap","noiserect"]:
			L.info("Computing icov map")
			shape, wcs = signal.area.shape, signal.area.wcs
			# Use equidistant pixel spacing for robustness in non-cylindrical coordinates
			step = utils.nint(np.abs(config.get("icovstep")/wcs.wcs.cdelt[::-1]))
			posy = np.arange(0.5,shape[-2]/step[-2]) if step[-2] > 0 else np.array([0])
			posx = np.arange(0.5,shape[-1]/step[-1]) if step[-1] > 0 else np.array([0])
			pos  = utils.outer_stack([posy,posx]).reshape(2,-1).T
			if pos.size == 0:
				L.debug("Not enough pixels to compute icov for step size %f. Skipping icov" % config.get("icovstep"))
				continue
			# Apply the y skew
			if step[0] > 0:
				yskew     = utils.nint(config.get("icovyskew")/wcs.wcs.cdelt[1])
				pos[:,0] += pos[:,1] * yskew * 1.0 / step[0]
			# Go from grid indices to pixels
			pos       = (pos*step % shape[-2:]).astype(int)
			print(pos.shape, pos.dtype)
			icov = mapmaking.calc_icov_map(signal, myscans, pos, weights)
			signal.write(root, "icov", icov)
Example #18
def smooth_tophat(map, rad):
	# Will use flat sky approximation here. It's not a good approximation for
	# our big maps, but this doesn't need to be accurate anyway
	ny,nx = map.shape[-2:]
	refy, refx = ny//2, nx//2
	pos   = map.posmap()
	pos[0] -= pos[0,refy,refx]
	pos[1] -= pos[1,refy,refx]
	r2     = np.sum(pos**2,0)
	kernel = (r2 < rad**2).astype(map.dtype) / (np.pi*rad**2) / map.size**0.5 * map.area()
	kernel = np.roll(kernel,-refy,0)
	kernel = np.roll(kernel,-refx,1)
	res = enmap.ifft(enmap.fft(map)*np.conj(enmap.fft(kernel))).real
	return res

nphi = np.abs(utils.nint(360/wcs.wcs.cdelt[0]))
for chunk in range(comm.rank, nchunk, comm.size):
	i1 = chunk*csize
	i2 = min((chunk+1)*csize, ntod)
	# Split the hits into horizontal pixel ranges
	pix_ranges, weights = [], []
	with bench.mark("get"):
		for i in range(i1,i2):
			pr, w = get_pix_ranges(shape, wcs, box[:,:,i], daz, nt, azdown=args.azdown, ndet=ndets[i])
			pix_ranges.append(pr)
			weights.append(w)
		pix_ranges = np.concatenate(pix_ranges, 0)
		weights    = np.concatenate(weights, 0)
	with bench.mark("add"):
		add_weight(omap, pix_ranges, weights, nphi)
	print "%4d %4d %7.4f %7.4f" % (chunk, comm.rank, bench.stats.get("get"), bench.stats.get("add"))
Example #19
def get_scans(area, signal, bore, dets, noise, seed=0, real=None, noise_override=None):
	scans = []
	# Get real scan information if necessary
	L.debug("real")
	if real:
		real_scans = []
		filedb.init()
		db   = filedb.data
		ids  = filedb.scans[real].ids
		for id in ids:
			try:
				real_scans.append(actscan.ACTScan(db[id]))
			except errors.DataMissing as e:
				L.debug("Skipped %s (%s)" % (id, str(e)))
	# Dets
	L.debug("dets")
	sim_dets = []
	toks = dets.split(":")
	if toks[0] == "scattered":
		ngroup, nper, rad = int(toks[1]), int(toks[2]), float(toks[3])
		sim_dets = [scansim.dets_scattered(ngroup, nper,rad=rad*np.pi/180/60)]
		margin   = rad*np.pi/180/60
	elif toks[0] == "real":
		ndet = int(toks[1])
		dslice = slice(0,ndet) if ndet > 0 else slice(None)
		sim_dets = [bunch.Bunch(comps=s.comps[dslice], offsets=s.offsets[dslice]) for s in real_scans]
		margin = np.max([np.sum(s.offsets**2,1)**0.5 for s in sim_dets])
	else: raise ValueError
	# Boresight. Determines our number of scans
	L.debug("bore")
	sim_bore = []
	toks = bore.split(":")
	if toks[0] == "grid":
		nscan, density, short = int(toks[1]), float(toks[2]), float(toks[3])
		for i in range(nscan):
			tbox = shorten(area.box(),i%2,short)
			sim_bore.append(scansim.scan_grid(tbox, density*np.pi/180/60, dir=i, margin=margin))
	elif toks[0] == "ces":
		nscan = int(toks[1])
		azs   = [float(w)*utils.degree for w in toks[2].split(",")]
		els   = [float(w)*utils.degree for w in toks[3].split(",")]
		mjd0  = float(toks[4])
		dur   = float(toks[5])
		azrate= float(toks[6]) if len(toks) > 6 else 1.5*utils.degree
		srate = float(toks[7]) if len(toks) > 7 else 400
		nsamp = utils.nint(dur*srate)
		for i in range(nscan):
			mjd  = mjd0 + dur*(i//(2*len(els)))/(24*3600)
			el   = els[(i//2)%len(els)]
			az1, az2 = azs
			if i%2 == 1: az1, az2 = -az2, -az1
			box = np.array([[az1,el],[az2,el]])
			sim_bore.append(scansim.scan_ceslike(nsamp, box, mjd0=mjd, srate=srate, azrate=azrate))
	elif toks[0] == "real":
		sim_bore = [bunch.Bunch(boresight=s.boresight, hwp_phase=s.hwp_phase, sys=s.sys, site=s.site, mjd0=s.mjd0) for s in real_scans]
	else: raise ValueError
	nsim = len(sim_bore)
	# Make one det info per scan
	sim_dets = sim_dets*(nsim//len(sim_dets))+sim_dets[:nsim%len(sim_dets)]
	# Noise
	L.debug("noise")
	sim_nmat = []
	toks = noise.split(":")
	nonoise = False
	if toks[0] == "1/f":
		sigma, alpha, fknee = [float(v) for v in toks[1:4]]
		nonoise = sigma < 0
		for i in range(nsim):
			sim_nmat.append(scansim.oneoverf_noise(sim_dets[i].comps.shape[0], sim_bore[i].boresight.shape[0], sigma=np.abs(sigma), alpha=alpha, fknee=fknee))
	elif toks[0] == "detcorr":
		sigma, alpha, fknee = [float(v) for v in toks[1:4]]
		nonoise = sigma < 0
		for i in range(nsim):
			sim_nmat.append(scansim.oneoverf_detcorr_noise(sim_dets[i].comps.shape[0], sim_bore[i].boresight.shape[0], sigma=np.abs(sigma), alpha=alpha, fknee=fknee))
	elif toks[0] == "real":
		scale = 1.0 if len(toks) < 2 else float(toks[1])
		for i,s in enumerate(real_scans):
			ndet = len(sim_dets[i].offsets)
			nmat = s.noise[:ndet]*scale**-2
			sim_nmat.append(nmat)
	else: raise ValueError
	noise_scale = not nonoise if noise_override is None else noise_override
	sim_nmat = sim_nmat*(nsim//len(sim_nmat))+sim_nmat[:nsim%len(sim_nmat)]
	# Signal
	L.debug("signal")
	toks = signal.split(":")
	if toks[0] == "none":
		for i in range(nsim):
			scans.append(scansim.SimPlain(sim_bore[i], sim_dets[i], sim_nmat[i], seed=seed+i, noise_scale=noise_scale))
	elif toks[0] == "ptsrc":
		# This one always operates in the same coordinates as 
		nsrc, amp, fwhm = int(toks[1]), float(toks[2]), float(toks[3])
		np.random.seed(seed)
		sim_srcs = scansim.rand_srcs(area.box(), nsrc, amp, abs(fwhm)*np.pi/180/60, rand_fwhm=fwhm<0)
		for i in range(nsim):
			scans.append(scansim.SimSrcs(sim_bore[i], sim_dets[i], sim_srcs, sim_nmat[i], seed=seed+i, noise_scale=noise_scale))
	elif toks[0] == "vsrc":
		# Create a single variable source
		ra, dec, fwhm = float(toks[1])*np.pi/180, float(toks[2])*np.pi/180, float(toks[3])*np.pi/180/60
		amps = [float(t) for t in toks[4].split(",")]
		for i in range(nsim):
			sim_srcs = bunch.Bunch(pos=np.array([[dec,ra]]),amps=np.array([[amps[i],0,0,0]]), beam=np.array([fwhm/(8*np.log(2)**0.5)]))
			scans.append(scansim.SimSrcs(sim_bore[i], sim_dets[i], sim_srcs, sim_nmat[i], seed=seed+i, noise_scale=noise_scale, nsigma=20))
	elif toks[0] == "cmb":
		np.random.seed(seed)
		ps = powspec.read_spectrum(toks[1])
		sim_map  = enmap.rand_map(area.shape, area.wcs, ps)
		for i in range(nsim):
			scans.append(scansim.SimMap(sim_bore[i], sim_dets[i], sim_map,    sim_nmat[i], seed=seed+i, noise_scale=noise_scale))
	else: raise ValueError
	return scans
Example #20
import numpy as np, argparse
from enlib import enmap, utils
parser = argparse.ArgumentParser()
parser.add_argument("imap")
parser.add_argument("template")
parser.add_argument("omap")
parser.add_argument("-O", "--order", type=int, default=3)
parser.add_argument("-m", "--mode", type=str, default="constant")
parser.add_argument("-M", "--mem", type=float, default=1e8)
args = parser.parse_args()

imap = enmap.read_map(args.imap)
shape, wcs = enmap.read_map_geometry(args.template)
omap = enmap.zeros(shape, wcs, imap.dtype)

blockpix = np.prod(shape[:-2]) * shape[-1]
bsize = max(1, utils.nint(args.mem / (blockpix * imap.dtype.itemsize)))

nblock = (shape[-2] + bsize - 1) // bsize
for b in range(nblock):
    r1, r2 = b * bsize, (b + 1) * bsize
    osub = omap[..., r1:r2, :]
    omap[..., r1:r2, :] = enmap.project(imap,
                                        osub.shape,
                                        osub.wcs,
                                        order=args.order,
                                        mode=args.mode)
#o = enmap.project(m, t.shape, t.wcs, order=args.order, mode=args.mode)
enmap.write_map(args.omap, omap)
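
The blocking in this script picks how many map rows fit in roughly args.mem bytes; the arithmetic in isolation (illustrative numbers):

import numpy as np

shape, itemsize, mem = (3, 2160, 8640), 4, 1e8
blockpix = np.prod(shape[:-2]) * shape[-1]              # values per output row
bsize    = max(1, int(round(mem/(blockpix*itemsize))))  # rows per block
nblock   = (shape[-2] + bsize - 1) // bsize
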
Example #21
def read_area(ipathfmt,
              opix,
              itile1=(None, None),
              itile2=(None, None),
              verbose=False,
              cache=None,
              slice=None,
              wrap=True):
    """Given a set of tiles on disk with locations ipathfmt % {"y":...,"x":...},
	read the data corresponding to the pixel range opix[{from,to},{y,x}] in
	the full map."""
    opix = np.asarray(opix)
    # Find the range of input tiles
    itile1, itile2 = find_tile_range(ipathfmt, itile1, itile2)
    # To fill in the rest of the information we need to know more
    # about the input tiling, so read the first tile
    if cache is None or cache[2] is None:
        geo = read_tileset_geometry(ipathfmt, itile1=itile1, itile2=itile2)
    else:
        geo = cache[2]
    if cache is not None: cache[2] = geo
    # Determine tile wrapping
    npix_phi = np.abs(360. / geo.wcs.wcs.cdelt[0])
    ntile_phi = utils.nint(npix_phi / geo.tshape[-1])

    isize = geo.tshape
    osize = opix[1] - opix[0]
    omap = enmap.zeros(geo.shape[:-2] + tuple(osize), geo.wcs, geo.dtype)
    # Find out which input tiles overlap with this output tile.
    # Our tile stretches from opix1:opix2 relative to the global input pixels
    it1 = opix[0] // isize
    it2 = (opix[1] - 1) // isize + 1
    noverlap = 0
    for ity in range(it1[0], it2[0]):
        if ity < itile1[0] or ity >= itile2[0]: continue
        # Start/end of this tile in global input pixels
        ipy1, ipy2 = ity * isize[0], (ity + 1) * isize[0]
        overlap = range_overlap(opix[:, 0], [ipy1, ipy2])
        oy1, oy2 = overlap - opix[0, 0]
        iy1, iy2 = overlap - ipy1
        for itx in range(it1[1], it2[1]):
            itx_wrap = itx % ntile_phi if wrap else itx
            if itx_wrap < itile1[1] or itx_wrap >= itile2[1]: continue
            ipx1, ipx2 = itx * isize[1], (itx + 1) * isize[1]
            overlap = range_overlap(opix[:, 1], [ipx1, ipx2])
            ox1, ox2 = overlap - opix[0, 1]
            ix1, ix2 = overlap - ipx1
            # Read the input tile and copy over
            iname = ipathfmt % {"y": ity, "x": itx_wrap}
            if cache is None or cache[0] != iname:
                imap = enmap.read_map(iname)
                if slice: imap = eval("imap" + slice)
            else: imap = cache[1]
            if cache is not None:
                cache[0], cache[1] = iname, imap
            if verbose: print(iname)
            # Edge input tiles may be smaller than the standard
            # size.
            ysub = isize[0] - imap.shape[-2]
            xsub = isize[1] - imap.shape[-1]
            # If the input map is too small, there may actually be
            # zero overlap.
            if oy2 - ysub <= oy1 or ox2 - xsub <= ox1: continue
            omap[..., oy1:oy2 - ysub,
                 ox1:ox2 - xsub] = imap[..., iy1:iy2 - ysub, ix1:ix2 - xsub]
            noverlap += 1
    if noverlap == 0:
        raise IOError("No tiles for tiling %s in range %s" %
                      (ipathfmt, ",".join(
                          [":".join([str(p) for p in r]) for r in opix.T])))
    # Set up the wcs for the output tile
    omap.wcs.wcs.crpix -= opix[0, ::-1]
    return omap
Example #22
def get_scans(area, signal, bore, dets, noise, seed=0, real=None, noise_override=None):
	scans = []
	# Get real scan information if necessary
	L.debug("real")
	if real:
		real_scans = []
		filedb.init()
		db   = filedb.data
		ids  = filedb.scans[real].ids
		for id in ids:
			try:
				real_scans.append(actscan.ACTScan(db[id]))
			except errors.DataMissing as e:
				L.debug("Skipped %s (%s)" % (id, e.message))
	# Dets
	L.debug("dets")
	sim_dets = []
	toks = dets.split(":")
	if toks[0] == "scattered":
		ngroup, nper, rad = int(toks[1]), int(toks[2]), float(toks[3])
		sim_dets = [scansim.dets_scattered(ngroup, nper,rad=rad*np.pi/180/60)]
		margin   = rad*np.pi/180/60
	elif toks[0] == "real":
		ndet = int(toks[1])
		dslice = slice(0,ndet) if ndet > 0 else slice(None)
		sim_dets = [bunch.Bunch(comps=s.comps[dslice], offsets=s.offsets[dslice]) for s in real_scans]
		margin = np.max([np.sum(s.offsets**2,1)**0.5 for s in sim_dets])
	else: raise ValueError
	# Boresight. Determines our number of scans
	L.debug("bore")
	sim_bore = []
	toks = bore.split(":")
	if toks[0] == "grid":
		nscan, density, short = int(toks[1]), float(toks[2]), float(toks[3])
		for i in range(nscan):
			tbox = shorten(area.box(),i%2,short)
			sim_bore.append(scansim.scan_grid(tbox, density*np.pi/180/60, dir=i, margin=margin))
	elif toks[0] == "ces":
		nscan = int(toks[1])
		azs   = [float(w)*utils.degree for w in toks[2].split(",")]
		els   = [float(w)*utils.degree for w in toks[3].split(",")]
		mjd0  = float(toks[4])
		dur   = float(toks[5])
		azrate= float(toks[6]) if len(toks) > 6 else 1.5*utils.degree
		srate = float(toks[7]) if len(toks) > 7 else 400
		nsamp = utils.nint(dur*srate)
		for i in range(nscan):
			mjd  = mjd0 + dur*(i//(2*len(els)))/(24*3600)
			el   = els[(i//2)%len(els)]
			az1, az2 = azs
			if i%2 == 1: az1, az2 = -az2, -az1
			box = np.array([[az1,el],[az2,el]])
			sim_bore.append(scansim.scan_ceslike(nsamp, box, mjd0=mjd, srate=srate, azrate=azrate))
	elif toks[0] == "real":
		sim_bore = [bunch.Bunch(boresight=s.boresight, hwp_phase=s.hwp_phase, sys=s.sys, site=s.site, mjd0=s.mjd0) for s in real_scans]
	else: raise ValueError
	nsim = len(sim_bore)
	# Make one det info per scan
	sim_dets = sim_dets*(nsim//len(sim_dets))+sim_dets[:nsim%len(sim_dets)]
	# Noise
	L.debug("noise")
	sim_nmat = []
	toks = noise.split(":")
	nonoise = False
	if toks[0] == "1/f":
		sigma, alpha, fknee = [float(v) for v in toks[1:4]]
		nonoise = sigma < 0
		for i in range(nsim):
			sim_nmat.append(scansim.oneoverf_noise(sim_dets[i].comps.shape[0], sim_bore[i].boresight.shape[0], sigma=np.abs(sigma), alpha=alpha, fknee=fknee))
	elif toks[0] == "detcorr":
		sigma, alpha, fknee = [float(v) for v in toks[1:4]]
		nonoise = sigma < 0
		for i in range(nsim):
			sim_nmat.append(scansim.oneoverf_detcorr_noise(sim_dets[i].comps.shape[0], sim_bore[i].boresight.shape[0], sigma=np.abs(sigma), alpha=alpha, fknee=fknee))
	elif toks[0] == "real":
		scale = 1.0 if len(toks) < 2 else float(toks[1])
		for i,s in enumerate(real_scans):
			ndet = len(sim_dets[i].offsets)
			nmat = s.noise[:ndet]*scale**-2
			sim_nmat.append(nmat)
	else: raise ValueError
	noise_scale = not nonoise if noise_override is None else noise_override
	sim_nmat = sim_nmat*(nsim//len(sim_nmat))+sim_nmat[:nsim%len(sim_nmat)]
	# Signal
	L.debug("signal")
	toks = signal.split(":")
	if toks[0] == "none":
		for i in range(nsim):
			scans.append(scansim.SimPlain(sim_bore[i], sim_dets[i], sim_nmat[i], seed=seed+i, noise_scale=noise_scale))
	elif toks[0] == "ptsrc":
		# This one always operates in the same coordinates as 
		nsrc, amp, fwhm = int(toks[1]), float(toks[2]), float(toks[3])
		np.random.seed(seed)
		sim_srcs = scansim.rand_srcs(area.box(), nsrc, amp, abs(fwhm)*np.pi/180/60, rand_fwhm=fwhm<0)
		for i in range(nsim):
			scans.append(scansim.SimSrcs(sim_bore[i], sim_dets[i], sim_srcs, sim_nmat[i], seed=seed+i, noise_scale=noise_scale))
	elif toks[0] == "vsrc":
		# Create a single variable source
		ra, dec, fwhm = float(toks[1])*np.pi/180, float(toks[2])*np.pi/180, float(toks[3])*np.pi/180/60
		amps = [float(t) for t in toks[4].split(",")]
		for i in range(nsim):
			sim_srcs = bunch.Bunch(pos=np.array([[dec,ra]]),amps=np.array([[amps[i],0,0,0]]), beam=np.array([fwhm/(8*np.log(2)**0.5)]))
			scans.append(scansim.SimSrcs(sim_bore[i], sim_dets[i], sim_srcs, sim_nmat[i], seed=seed+i, noise_scale=noise_scale, nsigma=20))
	elif toks[0] == "cmb":
		np.random.seed(seed)
		ps = powspec.read_spectrum(toks[1])
		sim_map  = enmap.rand_map(area.shape, area.wcs, ps)
		for i in range(nsim):
			scans.append(scansim.SimMap(sim_bore[i], sim_dets[i], sim_map,    sim_nmat[i], seed=seed+i, noise_scale=noise_scale))
	else: raise ValueError
	return scans