Example #1
def calc_az_sweep(pattern, offset, site, pad=2.0, subsample=1.0):
    """Helper function. Given a pattern and mean focalplane offset,
	computes the shape of an azimuth sweep on the sky."""
    el1 = pattern[0] + offset[0]
    az1 = pattern[1] + offset[1] - pad
    az2 = pattern[2] + offset[1] + pad
    daz = rhs.pixshape()[0] / np.cos(el1) / subsample
    naz = int(np.ceil((az2 - az1) / daz))
    naz = fft.fft_len(naz, "above", [2, 3, 5, 7])
    # Simulate a single sweep at arbitrary time
    sweep_az = np.arange(naz) * daz + az1
    sweep_el = np.full(naz, el1)
    sweep_cel = coordinates.transform("hor",
                                      "cel",
                                      np.array([sweep_az, sweep_el]),
                                      time=ref_time,
                                      site=site)
    # Make ra safe
    sweep_cel = utils.unwind(sweep_cel)
    return bunch.Bunch(sweep_cel=sweep_cel,
                       sweep_hor=np.array([sweep_az, sweep_el]),
                       el=el1,
                       az1=az1,
                       az2=az2,
                       naz=naz,
                       daz=daz)
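The only non-obvious call here is fft.fft_len(naz, "above", [2, 3, 5, 7]), which rounds the number of azimuth samples up to the next length whose prime factors are all small, so FFTs over the simulated sweep stay fast. A minimal standalone sketch of that rounding idea in both directions, with hypothetical helper names (this is not enlib's implementation):

def _is_smooth(n, primes=(2, 3, 5, 7)):
    # True if every prime factor of n lies in `primes`.
    for p in primes:
        while n % p == 0:
            n //= p
    return n == 1

def fft_friendly_above(n, primes=(2, 3, 5, 7)):
    # Smallest m >= n that is `primes`-smooth, analogous to fft.fft_len(n, "above", primes).
    while not _is_smooth(n, primes):
        n += 1
    return n

def fft_friendly_below(n, primes=(2, 3, 5, 7)):
    # Largest m <= n that is `primes`-smooth, analogous to fft.fft_len(n, "below", primes).
    while n > 1 and not _is_smooth(n, primes):
        n -= 1
    return n

print(fft_friendly_above(101), fft_friendly_below(101))  # 105 (3*5*7), 100 (2**2 * 5**2)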
Example #2
    def read(cls,
             datasets,
             box,
             pad=0,
             verbose=False,
             cache_dir=None,
             dtype=None,
             div_unhit=1e-7,
             read_cache=False,
             ncomp=None,
             *args,
             **kwargs):
        odatasets = []
        for dataset in datasets:
            dataset = dataset.copy()
            pbox = calc_pbox(dataset.shape, dataset.wcs, box)
            #pbox = np.round(enmap.sky2pix(dataset.shape, dataset.wcs, box.T).T).astype(int)
            pbox[0] -= pad
            pbox[1] += pad
            psize = pbox[1] - pbox[0]
            ffpad = np.array(
                [fft.fft_len(s, direction="above") - s for s in psize])
            pbox[1] += ffpad

            dataset.pbox = pbox
            osplits = []
            for split in dataset.splits:
                split = split.copy()
                if verbose: print "Reading %s" % split.map
                try:
                    map = read_map(split.map,
                                   pbox,
                                   name=os.path.basename(split.map),
                                   cache_dir=cache_dir,
                                   dtype=dtype,
                                   read_cache=read_cache)
                    div = read_map(split.div,
                                   pbox,
                                   name=os.path.basename(split.div),
                                   cache_dir=cache_dir,
                                   dtype=dtype,
                                   read_cache=read_cache).preflat[0]
                except IOError as e:
                    continue
                map *= dataset.gain
                div *= dataset.gain**-2
                div[~np.isfinite(div)] = 0
                map[~np.isfinite(map)] = 0
                div[div < div_unhit] = 0
                if np.all(div == 0): continue
                split.data = bunch.Bunch(map=map,
                                         div=div,
                                         empty=np.all(div == 0))
                osplits.append(split)
            if len(osplits) < 2: continue
            dataset.splits = osplits
            odatasets.append(dataset)
        if len(odatasets) == 0: return None
        return cls(odatasets, ffpad, ncomp=ncomp, *args, **kwargs)
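The read method above grows the pixel box until each dimension reaches the next FFT-friendly size and remembers the growth in ffpad so it can be stripped again after filtering. A small sketch of that pad-and-unpad round trip on a plain numpy array, assuming the hypothetical fft_friendly_above helper sketched under Example #1:

import numpy as np

def pad_to_fft_friendly(arr):
    # Zero-pad the last two axes up to 7-smooth lengths and return the
    # per-axis padding so the caller can crop it off again (the role of ffpad).
    ffpad = [fft_friendly_above(s) - s for s in arr.shape[-2:]]
    pads  = [(0, 0)] * (arr.ndim - 2) + [(0, p) for p in ffpad]
    return np.pad(arr, pads), ffpad

m = np.ones((3, 101, 131))
mp, ffpad = pad_to_fft_friendly(m)
print(mp.shape, ffpad)        # (3, 105, 135) [4, 4]
m_back = mp[..., :101, :131]  # undo the padding after the FFT work is done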
Example #3
    def __init__(self,
                 shape,
                 wcs,
                 pattern,
                 offset,
                 site,
                 pad=2.0 * utils.degree):
        """This unskew operation assumes that equal spacing in
		dec corresponds to equal spacing in time, and that shifts in
		RA can be done in units of whole pixels. This is an approximation
		relative to UnskewCurved, but it is several times faster, uses
		less memory, and causes less smoothing."""
        ndec, nra = shape[-2:]
        info = calc_az_sweep(pattern, offset, site, pad=pad)
        sweep_ra, sweep_dec = info.sweep_cel
        # For each pixel in dec (that we hit for this scanning pattern), we
        # want to know how far we have been displaced in ra.
        # First get the dec of each pixel center.
        ysweep, xsweep = enmap.sky2pix(shape, wcs, [sweep_dec, sweep_ra])
        y1 = max(int(np.min(ysweep)), 0)
        y2 = min(int(np.max(ysweep)) + 1, shape[-2])
        # Make fft-friendly
        ny = y2 - y1
        ny2 = fft.fft_len(ny, "above", [2, 3, 5, 7])
        y1 = max(y1 - (ny2 - ny) / 2, 0)
        y2 = min(y1 + ny2, shape[-2])
        y = np.arange(y1, y2)
        dec, _ = enmap.pix2sky(shape, wcs, [y, y * 0])
        # Then interpolate the ra values corresponding to those decs.
        # InterpolatedUnivariateSpline broken. Returns nan even when
        # interpolating. So we will use UnivariateSpline
        spline = scipy.interpolate.UnivariateSpline(sweep_dec, sweep_ra)
        ra = spline(dec)
        dra = ra - ra[len(ra) / 2]
        y, x = np.round(enmap.sky2pix(shape, wcs, [dec, ra]))
        dx = x - x[len(x) / 2]
        # It's also useful to be able to go from normal map index to
        # position in y and dx
        inv_y = np.zeros(shape[-2], dtype=int) - 1
        inv_y[y.astype(int)] = np.arange(len(y))
        # Compute the azimuth step size based on the total azimuth sweep.
        daz = (pattern[2] - pattern[1] + 2 * pad) / len(y)
        # Build the geometry of the unskewed system
        ushape, uwcs = enmap.geometry(pos=[0, 0],
                                      shape=[len(y), shape[-1]],
                                      res=[daz,
                                           enmap.pixshape(shape, wcs)[1]],
                                      proj="car")
        # And store the result
        self.y = y.astype(int)
        self.dx = np.round(dx).astype(int)
        self.dx_raw = dx
        self.inv_y = inv_y
        self.ushape = ushape
        self.uwcs = uwcs
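The "Make fft-friendly" block above grows the row range [y1, y2) roughly symmetrically to a 7-smooth length while never stepping outside the map. A sketch of just that step, again assuming the hypothetical fft_friendly_above helper from the sketch under Example #1:

def grow_rows_fft_friendly(y1, y2, nrow):
    # Grow [y1, y2) to a 7-smooth number of rows, splitting the growth between
    # both ends where possible and clamping to the map's nrow rows.
    ny  = y2 - y1
    ny2 = fft_friendly_above(ny)
    y1  = max(y1 - (ny2 - ny) // 2, 0)
    y2  = min(y1 + ny2, nrow)
    return y1, y2

print(grow_rows_fft_friendly(13, 114, 500))  # (11, 116): 105 rows, 7-smooth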
Example #4
def crop_fftlen(data, factors=None):
	"""Slightly crop samples in order to make ffts faster. This should
	be called at a point when the length won't be further cropped by other
	effects."""
	if data.nsamp in [0, None]: raise errors.DataMissing("nsamp")
	if data.nsamp < 0: raise errors.DataMissing("nsamp")
	factors = config.get("fft_factors", factors)
	if isinstance(factors, basestring): factors = [int(w) for w in factors.split(",")]
	ncrop = fft.fft_len(data.nsamp, factors=factors)
	data += dataset.DataField("fftlen", samples=[data.samples[0],data.samples[0]+ncrop])
	return data
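crop_fftlen goes the other way: instead of padding, it drops a few trailing samples so the remaining length is FFT-friendly. A standalone sketch of the same idea on a plain array, assuming the hypothetical fft_friendly_below helper from the sketch under Example #1:

import numpy as np

tod = np.random.standard_normal(100003)  # awkward length for an FFT
ncrop = fft_friendly_below(len(tod))     # 100000 = 2**5 * 5**5
tod = tod[:ncrop]                        # lose 3 samples, gain fast transforms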
Example #5
File: scan.py Project: Nium14/enlib
def build_hwp_sample_mapping(hwp, quantile=0.1):
	"""Given a HWP angle, return an array with shape [nout] containing
	the original sample index (float) corresponding to each sample in the
	remapped array, along with the resulting hwp sample rate.
	The remapping also truncates the end to ensure that
	there is an integer number of HWP rotations in the data."""
	# Make sure there are no angle wraps in the hwp
	hwp = utils.unwind(hwp)
	# interp requires hwp to be monotonically increasing. In practice
	# it could be monotonically decreasing too, but our final result
	# does not depend on the direction of rotation, so we simply flip it here
	# if necessary
	hwp = np.abs(hwp)
	# Find the target hwp speed
	speed = np.percentile(hwp[1:]-hwp[:-1], 100*quantile)
	# We want a whole number of samples per revolution, and
	# a whole number of revolutions in the whole tod
	a    = hwp - hwp[0]
	nrev = int(np.floor(a[-1]/(2*np.pi)))
	nper = utils.nint(2*np.pi/speed)
	# Make each of these numbers fourier-friendly
	nrev = fft.fft_len(nrev, "below")
	nper = fft.fft_len(nper, "above")
	# Set up our output samples
	speed = 2*np.pi/nper
	nout  = nrev*nper
	ohwp  = hwp[0] + np.arange(nout)*speed
	# Find the input sample for each output sample
	res = bunch.Bunch()
	res.oimap = np.interp(ohwp, hwp, np.arange(len(hwp)))
	# Find the output sample for each input sample too. Because of
	# cropping, the last of these will be invalid
	res.iomap = np.interp(np.arange(len(hwp)), res.oimap, np.arange(len(res.oimap)))
	# Find the average sampling rate change fsamp_rel = fsamp_out/fsamp_in
	res.fsamp_rel = 1/np.mean(res.oimap[1:]-res.oimap[:-1])
	res.insamp = len(hwp)
	res.onsamp = nout
	res.nrev   = nrev
	res.nper   = nper
	return res
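A hypothetical usage sketch for the mapping returned above: oimap holds, for each output sample, the fractional input sample it corresponds to, so a detector timestream can be resampled onto the regular HWP-angle grid with a plain interpolation. The names hwp_angles and tod_in below are stand-ins for arrays from the surrounding code, not part of the original function:

import numpy as np

mapping = build_hwp_sample_mapping(hwp_angles)  # hwp_angles: HWP angle per sample
tod_out = np.interp(mapping.oimap,
                    np.arange(mapping.insamp),
                    tod_in)                     # tod_in: detector samples
assert tod_out.size == mapping.onsamp           # whole number of rotations kept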
Example #6
def calc_az_sweep(pattern, offset, site, pad=2.0, subsample=1.0):
	"""Helper function. Given a pattern and mean focalplane offset,
	computes the shape of an azimuth sweep on the sky."""
	el1 = pattern[0] + offset[0]
	az1 = pattern[1] + offset[1] - pad
	az2 = pattern[2] + offset[1] + pad
	daz = rhs.pixshape()[0]/np.cos(el1)/subsample
	naz  = int(np.ceil((az2-az1)/daz))
	naz  = fft.fft_len(naz, "above", [2,3,5,7])
	# Simulate a single sweep at arbitrary time
	sweep_az = np.arange(naz)*daz + az1
	sweep_el = np.full(naz,el1)
	sweep_cel = coordinates.transform("hor","cel", np.array([sweep_az,sweep_el]),time=ref_time,site=site)
	# Make ra safe
	sweep_cel = utils.unwind(sweep_cel)
	return bunch.Bunch(sweep_cel=sweep_cel, sweep_hor=np.array([sweep_az,sweep_el]),
			el=el1, az1=az1, az2=az2, naz=naz, daz=daz)
Example #7
	def __init__(self, shape, wcs, pattern, offset, site, pad=2.0*utils.degree):
		"""This unskew operation assumes that equal spacing in
		dec corresponds to equal spacing in time, and that shifts in
		RA can be done in units of whole pixels. This is an approximation
		relative to UnskewCurved, but it is several times faster, uses
		less memory, and causes less smoothing."""
		ndec, nra = shape[-2:]
		info = calc_az_sweep(pattern, offset, site, pad=pad)
		sweep_ra, sweep_dec = info.sweep_cel
		# For each pixel in dec (that we hit for this scanning pattern), we
		# want to know how far we have been displaced in ra.
		# First get the dec of each pixel center.
		ysweep, xsweep = enmap.sky2pix(shape, wcs, [sweep_dec,sweep_ra])
		y1  = max(int(np.min(ysweep)),0)
		y2  = min(int(np.max(ysweep))+1,shape[-2])
		# Make fft-friendly
		ny  = y2-y1
		ny2 = fft.fft_len(ny, "above", [2,3,5,7])
		y1  = max(y1-(ny2-ny)/2,0)
		y2  = min(y1+ny2,shape[-2])
		y   = np.arange(y1,y2)
		dec, _ = enmap.pix2sky(shape, wcs, [y,y*0])
		# Then interpolate the ra values corresponding to those decs.
		# InterpolatedUnivariateSpline broken. Returns nan even when
		# interpolating. So we will use UnivariateSpline
		spline  = scipy.interpolate.UnivariateSpline(sweep_dec, sweep_ra)
		ra      = spline(dec)
		dra     = ra - ra[len(ra)/2]
		y, x    = np.round(enmap.sky2pix(shape, wcs, [dec,ra]))
		dx      = x-x[len(x)/2]
		# It's also useful to be able to go from normal map index to
		# position in y and dx
		inv_y   = np.zeros(shape[-2],dtype=int)-1
		inv_y[y.astype(int)]= np.arange(len(y))
		# Compute the azimuth step size based on the total azimuth sweep.
		daz = (pattern[2]-pattern[1]+2*pad)/len(y)
		# Build the geometry of the unskewed system
		ushape, uwcs = enmap.geometry(pos=[0,0], shape=[len(y),shape[-1]], res=[daz,enmap.pixshape(shape,wcs)[1]], proj="car")
		# And store the result
		self.y  = y.astype(int)
		self.dx = np.round(dx).astype(int)
		self.dx_raw = dx
		self.inv_y  = inv_y
		self.ushape = ushape
		self.uwcs   = uwcs
Example #8
def read_data(datasets,
              box,
              odir,
              pad=0,
              verbose=False,
              read_cache=False,
              write_cache=False,
              div_max=100,
              div_unhit=1e-7,
              map_max=1e8):
    odatasets = []
    for dataset in datasets:
        dataset = dataset.copy()
        pbox = calc_pbox(dataset.shape, dataset.wcs, box)
        #pbox = np.round(enmap.sky2pix(dataset.shape, dataset.wcs, box.T).T).astype(int)
        pbox[0] -= pad
        pbox[1] += pad
        psize = pbox[1] - pbox[0]
        ffpad = np.array(
            [fft.fft_len(s, direction="above") - s for s in psize])
        pbox[1] += ffpad

        dataset.pbox = pbox
        osplits = []
        for split in dataset.splits:
            split = split.copy()
            if verbose: print "Reading %s" % split.map
            try:
                map = read_map(split.map,
                               pbox,
                               odir + "/" + os.path.basename(split.map),
                               read_cache=read_cache,
                               write_cache=write_cache)
                div = read_map(split.div,
                               pbox,
                               odir + "/" + os.path.basename(split.div),
                               read_cache=read_cache,
                               write_cache=write_cache).preflat[0]
            except IOError:
                continue
            map *= dataset.gain
            div *= dataset.gain**-2
            # Sanitize div and map, so that they don't contain unreasonable
            # values. After this, the rest of the code doesn't need to worry
            # about that.
            div[~np.isfinite(div)] = 0
            map[~np.isfinite(map)] = 0
            div = np.maximum(0, np.minimum(div_max, div))
            div[div < div_unhit] = 0
            #print "moo"
            #div = smooth_pix(div, 100)
            map = np.maximum(-map_max, np.minimum(map_max, map))

            if np.any(div > 0): ref_val = np.mean(div[div > 0]) * args.apod_val
            else: ref_val = 1.0
            apod = np.minimum(div / ref_val, 1.0)**args.apod_alpha
            apod = apod.apod(args.apod_edge)
            #opre  = odir + "/" + os.path.basename(split.map)[:-5]
            #enmap.write_map(opre + "_apod.fits", apod)
            #enmap.write_map(opre + "_amap.fits", apod*map)
            #enmap.write_map(opre + "_adiv.fits", apod*div)
            div *= apod
            if np.all(div == 0): continue
            split.data = bunch.Bunch(map=map,
                                     div=div,
                                     H=div**0.5,
                                     empty=np.all(div > 0))
            osplits.append(split)
        if len(osplits) < 2: continue
        dataset.splits = osplits
        odatasets.append(dataset)
    return odatasets, ffpad
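The apodization block in the middle of read_data smoothly downweights poorly hit pixels: the weight saturates at 1 once div reaches a fraction apod_val of the mean hit depth and falls off with power apod_alpha below that. A sketch with explicit stand-in values for the args.* parameters, which come from the surrounding script's argument parser; the extra edge taper the original applies afterwards is omitted here:

import numpy as np

apod_val, apod_alpha = 0.2, 4.0                # stand-ins for args.apod_val, args.apod_alpha
div = np.random.uniform(0.0, 1.0, (100, 100))  # toy inverse-variance (hit depth) map
ref_val = np.mean(div[div > 0]) * apod_val if np.any(div > 0) else 1.0
apod = np.minimum(div / ref_val, 1.0) ** apod_alpha
div *= apod                                    # well-hit pixels keep full weight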
Example #9
from pyfftw.interfaces.scipy_fftpack import fft2 as psfft2
from pyfftw.interfaces.numpy_fft import fft2 as pnfft2

import numpy as np
import time
import multiprocessing
from enlib import fft as fftfast

import sys

Nyorig = int(sys.argv[1])
Nxorig = int(sys.argv[2])
N = int(sys.argv[3])

Nxdown = fftfast.fft_len(Nxorig, direction="below")
Nydown = fftfast.fft_len(Nyorig, direction="below")
Nxup = fftfast.fft_len(Nxorig, direction="above")
Nyup = fftfast.fft_len(Nyorig, direction="above")

nthread_fft = multiprocessing.cpu_count()
print "Number of threads: ", nthread_fft

print "Starting benchmarks..."

i = 0
for Ny, Nx, label in [(Nyorig, Nxorig, "original"),
                      (Nydown, Nxdown, "smaller"), (Nyup, Nxup, "larger")]:

    print "==============================="
    print label, " length (", Ny, ",", Nx, ")"