Example #1
def read_sdata(ifile):
    # Read the per-source thumbnail data for this tod
    with h5py.File(ifile, "r") as hfile:
        sdata = [None for key in hfile]
        for key in hfile:
            ind = int(key)
            g = hfile[key]
            sdat = bunch.Bunch()
            # First parse the wcs
            hwcs = g["wcs"]
            header = astropy.io.fits.Header()
            for key in hwcs:
                header[key] = hwcs[key].value
            wcs = wcsutils.WCS(header).sub(2)
            # Then get the site
            sdat.site = bunch.Bunch(
                **{key: g["site/" + key].value
                   for key in g["site"]})
            # And the rest
            for key in [
                    "map", "div", "srcpos", "sid", "vel", "fknee", "alpha",
                    "id", "ctime", "dur", "el", "az", "off"
            ]:
                sdat[key] = g[key].value
            sdat.map = enmap.ndmap(fixorder(sdat.map), wcs)
            sdat.div = enmap.ndmap(fixorder(sdat.div), wcs)
            sdata[ind] = sdat
    return sdata
Example #2
def calc_az_sweep(pattern, offset, site, pad=2.0, subsample=1.0):
    """Helper function. Given a pattern and mean focalplane offset,
	computes the shape of an azimuth sweep on the sky."""
    el1 = pattern[0] + offset[0]
    az1 = pattern[1] + offset[1] - pad
    az2 = pattern[2] + offset[1] + pad
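    # Note: rhs and ref_time below are free variables, presumably module-level
    # globals (a reference map and reference time) in the original file.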
    daz = rhs.pixshape()[0] / np.cos(el1) / subsample
    naz = int(np.ceil((az2 - az1) / daz))
    naz = fft.fft_len(naz, "above", [2, 3, 5, 7])
    # Simulate a single sweep at arbitrary time
    sweep_az = np.arange(naz) * daz + az1
    sweep_el = np.full(naz, el1)
    sweep_cel = coordinates.transform("hor",
                                      "cel",
                                      np.array([sweep_az, sweep_el]),
                                      time=ref_time,
                                      site=site)
    # Make ra safe
    sweep_cel = utils.unwind(sweep_cel)
    return bunch.Bunch(sweep_cel=sweep_cel,
                       sweep_hor=np.array([sweep_az, sweep_el]),
                       el=el1,
                       az1=az1,
                       az2=az2,
                       naz=naz,
                       daz=daz)
Example #3
 def solve(self, maxiter=100, cg_tol=1e-7, verbose=False, dump_dir=None):
     if np.sum(self.highres_mask) == 0: return None
     solver = cg.CG(self.A, self.rhs.reshape(-1), M=self.M)
     for i in range(maxiter):
         t1 = time.time()
         solver.step()
         t2 = time.time()
         if verbose:
             print "%5d %15.7e %5.2f" % (solver.i, solver.err, t2 - t1)
         if dump_dir is not None and solver.i in [1, 2, 5, 10, 20, 50] + list(range(100, 10000, 100)):
             m = enmap.ndmap(solver.x.reshape(self.shape), self.wcs)
             enmap.write_map(dump_dir + "/step%04d.fits" % solver.i, m)
         if solver.err < cg_tol:
             if dump_dir is not None:
                 m = enmap.ndmap(solver.x.reshape(self.shape), self.wcs)
                 enmap.write_map(dump_dir + "/step_final.fits", m)
             break
     tot_map = self.highres_mask * solver.x.reshape(self.shape)
     tot_div = self.highres_mask * self.tot_div
     # Get rid of the fourier padding
     ny, nx = tot_map.shape[-2:]
     tot_map = tot_map[..., :ny - self.ffpad[0], :nx - self.ffpad[1]]
     tot_div = tot_div[..., :ny - self.ffpad[0], :nx - self.ffpad[1]]
     return bunch.Bunch(map=tot_map, div=tot_div)
Example #4
 def eval(self, dpos):
     res = bunch.Bunch(chisq=0,
                       chisq0=0,
                       marg=0,
                       amps=[],
                       vamps=[],
                       poss=[],
                       models=[],
                       dpos=dpos)
     for lik, trf, s in zip(self.liks, self.trfs, self.sdata):
         dpos_cel = trf.foc2cel(dpos)
         sub = lik.eval(s.srcpos + dpos_cel)
         res.chisq += sub.chisq
         res.chisq0 += sub.chisq0
         res.marg += sub.marg
         res.amps.append(sub.amp)
         res.vamps.append(sub.vamp)
         res.poss.append(sub.pos)
         res.models.append(sub.model)
     res.amps = np.array(res.amps)
     res.npix = self.npix
     res.vamps = np.array(res.vamps)
     # Add prior
     rrel = np.sum(dpos**2)**0.5 / self.rmax
     if rrel > 1:
         penalty = (20 * (rrel - 1))**2
         res.chisq += penalty
         res.marg += penalty
     return res
Example #5
def scan_ceslike(nsamp, box, sys="equ", srate=100, azrate=0.123):
	t = np.arange(nsamp,dtype=float)/srate
	boresight = np.zeros([nsamp,3])
	boresight[:,0] = t
	boresight[:,1] = box[0,1]+(box[1,1]-box[0,1])*(1+np.cos(2*np.pi*t*azrate))/2
	boresight[:,2] = box[0,0]+(box[1,0]-box[0,0])*np.arange(nsamp)/nsamp
	return bunch.Bunch(boresight=boresight, sys=sys,mjd0=55500,site=scan.default_site)
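
A minimal usage sketch (the box layout is inferred from the indexing above; rows are [coord0, coord1] corners in radians, e.g. [dec, ra] for sys="equ"):

	import numpy as np
	box = np.array([[0.9, -0.2], [1.0, 0.2]])   # illustrative corners
	sim = scan_ceslike(10000, box)
	# sim.boresight: [nsamp,3] = time, cosine sweep over box[:,1], slow linear drift over box[:,0]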
Example #6
def get_coadded_tile(mapinfo, box, obeam=None, ncomp=1, dump_dir=None, verbose=False):
	if not overlaps_any(box, boxes): return None
	mapset = mapinfo.read(box, pad=pad, dtype=dtype, verbose=verbose, ncomp=ncomp)
	if mapset is None: return None
	if all([d.insufficient for d in mapset.datasets]): return None
	jointmap.sanitize_maps(mapset)
	jointmap.build_noise_model(mapset)
	if len(mapset.datasets) == 0: return None
	if all([d.insufficient for d in mapset.datasets]): return None
	jointmap.setup_beams(mapset)
	jointmap.setup_target_beam(mapset, obeam)
	jointmap.setup_filter(mapset, mode=args.filter_mode)
	jointmap.setup_background_spectrum(mapset)
	mask    = jointmap.get_mask_insufficient(mapset)
	if args.wiener: coadder = jointmap.Wiener(mapset)
	else:           coadder = jointmap.Coadder(mapset)
	rhs     = coadder.calc_rhs()
	if dump_dir:
		enmap.write_map(dump_dir + "/rhs.fits", rhs)
		enmap.write_map(dump_dir + "/ps_rhs.fits", np.abs(enmap.fft(rhs.preflat[0]))**2)
	map     = coadder.calc_map(rhs, dump_dir=dump_dir, verbose=verbose, cg_tol=args.cg_tol)#, maxiter=1)
	if dump_dir:
		enmap.write_map(dump_dir + "/ps_map.fits", np.abs(enmap.fft(mapdiag(map)))**2)
	div     = coadder.tot_div
	#C       = 1/mapset.datasets[0].iN
	res = bunch.Bunch(rhs=rhs*mask, map=map*mask, div=div*mask)#, C=C)
	#res = bunch.Bunch(rhs=rhs, map=map, div=div)#, C=C)
	return res
Example #7
def merge_tod_stats(statlist):
    keys = np.unique(np.concatenate([list(stat.keys()) for stat in statlist]))
    res = bunch.Bunch()
    for key in keys:
        res[key] = np.array([(stat[key] if key in stat else False)
                             for stat in statlist])
    return res
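
A minimal usage sketch (the stat names are made-up placeholders): merging aligns all bunches on the union of their keys, filling gaps with False.

    stats = [bunch.Bunch(planet=True, cut_frac=0.13),
             bunch.Bunch(cut_frac=0.07)]
    merged = merge_tod_stats(stats)
    # merged.cut_frac -> array([0.13, 0.07])
    # merged.planet   -> array([ True, False])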
Example #8
	def __init__(self, data, srcpos, srcamp, perdet=False, ncomp=1, thumbs=False, N=None, method="fixamp", filter=None):
		# Set up fiducial source model. These source parameters
		# are not the same as those we will be optimizing.
		with bench.show("PmatTot"):
			self.P = PmatTot(data, srcpos, perdet=perdet)
		with bench.show("NmatTot"):
			self.N = N if N else NmatTot(data, filter=filter)
		self.tod  = data.tod # might only need the one below
		with bench.show("Nmat apply"):
			self.Nd   = self.N.apply(self.tod.copy())
		self.i    = 0
		# Initial values. We have room for 3 stokes parameters, but for now the input
		# amplitudes are always T-only
		self.amp0   = np.zeros(self.P.params.shape[:-1]+(ncomp,))
		self.amp0[...,0] = srcamp[:,None,None]
		self.off0   = self.P.off0
		self.chisq0 = None
		# These are for internal mapmaking
		self.thumb_mapper = None
		if thumbs:
			with bench.show("ThumbMapper"):
				self.thumb_mapper = ThumbMapper(data, srcpos, self.P.pcut, self.N, perdet=perdet)
		self.amp_unit, self.off_unit = 1e3, utils.arcmin
		# Save samples from the wrapper, so we can use them to estimate uncertainty
		self.samples = bunch.Bunch(offs=[], amps=[], aicovs=[], chisqs=[])
		self.method  = method
		self.ncomp   = ncomp
Example #9
    @classmethod
    def read(cls,
             datasets,
             box,
             pad=0,
             verbose=False,
             cache_dir=None,
             dtype=None,
             div_unhit=1e-7,
             read_cache=False,
             ncomp=None,
             *args,
             **kwargs):
        odatasets = []
        for dataset in datasets:
            dataset = dataset.copy()
            pbox = calc_pbox(dataset.shape, dataset.wcs, box)
            #pbox = np.round(enmap.sky2pix(dataset.shape, dataset.wcs, box.T).T).astype(int)
            pbox[0] -= pad
            pbox[1] += pad
            psize = pbox[1] - pbox[0]
            ffpad = np.array(
                [fft.fft_len(s, direction="above") - s for s in psize])
            pbox[1] += ffpad

            dataset.pbox = pbox
            osplits = []
            for split in dataset.splits:
                split = split.copy()
                if verbose: print "Reading %s" % split.map
                try:
                    map = read_map(split.map,
                                   pbox,
                                   name=os.path.basename(split.map),
                                   cache_dir=cache_dir,
                                   dtype=dtype,
                                   read_cache=read_cache)
                    div = read_map(split.div,
                                   pbox,
                                   name=os.path.basename(split.div),
                                   cache_dir=cache_dir,
                                   dtype=dtype,
                                   read_cache=read_cache).preflat[0]
                except IOError as e:
                    continue
                map *= dataset.gain
                div *= dataset.gain**-2
                div[~np.isfinite(div)] = 0
                map[~np.isfinite(map)] = 0
                div[div < div_unhit] = 0
                if np.all(div == 0): continue
                split.data = bunch.Bunch(map=map,
                                         div=div,
                                         empty=np.all(div == 0))
                osplits.append(split)
            if len(osplits) < 2: continue
            dataset.splits = osplits
            odatasets.append(dataset)
        if len(odatasets) == 0: return None
        return cls(odatasets, ffpad, ncomp=ncomp, *args, **kwargs)
Example #10
File: retile.py Project: jit9/enlib
def read_tileset_geometry(ipathfmt, itile1=(None,None), itile2=(None,None)):
	itile1, itile2 = find_tile_range(ipathfmt, itile1, itile2)
	m1 = enmap.read_map(ipathfmt % {"y":itile1[0],"x":itile1[1]})
	m2 = enmap.read_map(ipathfmt % {"y":itile2[0]-1,"x":itile2[1]-1})
	wy,wx  = m1.shape[-2:]
	oshape = tuple(np.array(m1.shape[-2:])*(itile2-itile1-1) + np.array(m2.shape[-2:]))
	return bunch.Bunch(shape=m1.shape[:-2]+oshape, wcs=m1.wcs, dtype=m1.dtype,
			tshape=m1.shape[-2:])
Example #11
def read_array_info(fname):
	"""Read the array info, which contains the id, row, column, frequency,
	wafer, pairing, etc. info."""
	info = astropy.io.fits.open(fname)[1].data
	nrow = np.max(info.row)+1
	ncol = np.max(info.col)+1
	ndet = len(info)
	return bunch.Bunch(info=info, ndet=ndet, nrow=nrow, ncol=ncol)
Example #12
def get_coadded_tile(mapinfo,
                     box,
                     obeam=None,
                     ncomp=1,
                     dump_dir=None,
                     verbose=False):
    if not overlaps_any(np.sort(box, 0), boxes): return None
    mapset = mapinfo.read(box,
                          pad=pad,
                          dtype=dtype,
                          verbose=verbose,
                          ncomp=ncomp)
    if mapset is None: return None
    if all([d.insufficient for d in mapset.datasets]): return None
    jointmap.sanitize_maps(mapset, detrend=args.detrend)
    jointmap.build_noise_model(mapset)
    if len(mapset.datasets) == 0: return None
    if all([d.insufficient for d in mapset.datasets]): return None
    jointmap.setup_beams(mapset)
    jointmap.setup_target_beam(mapset, obeam)
    jointmap.setup_filter(mapset, mode=args.filter_mode)
    jointmap.setup_background_spectrum(mapset)
    mask = jointmap.get_mask_insufficient(mapset)
    if args.wiener: coadder = jointmap.Wiener(mapset)
    else: coadder = jointmap.Coadder(mapset)
    rhs = coadder.calc_rhs()
    if dump_dir:
        enmap.write_map(dump_dir + "/rhs.fits", rhs)
        enmap.write_map(dump_dir + "/ps_rhs.fits",
                        np.abs(enmap.fft(rhs.preflat[0]))**2)
        with open(dump_dir + "/names.txt", "w") as nfile:
            for name in coadder.names:
                nfile.write(name + "\n")
        ls, weights = coadder.calc_debug_weights()
        np.savetxt(
            dump_dir + "/weights_1d.txt",
            np.concatenate(
                [ls[None], weights.reshape(-1, weights.shape[-1])], 0).T,
            fmt="%15.7e")
        ls, noisespecs = coadder.calc_debug_noise()
        np.savetxt(
            dump_dir + "/noisespecs_1d.txt",
            np.concatenate(
                [ls[None],
                 noisespecs.reshape(-1, noisespecs.shape[-1])], 0).T,
            fmt="%15.7e")
    map = coadder.calc_map(rhs,
                           dump_dir=dump_dir,
                           verbose=verbose,
                           cg_tol=args.cg_tol)  #, maxiter=1)
    if dump_dir:
        enmap.write_map(dump_dir + "/ps_map.fits",
                        np.abs(enmap.fft(mapdiag(map)))**2)
    div = coadder.tot_div
    #C       = 1/mapset.datasets[0].iN
    res = bunch.Bunch(rhs=rhs * mask, map=map * mask, div=div * mask)  #, C=C)
    #res = bunch.Bunch(rhs=rhs, map=map, div=div)#, C=C)
    return res
Example #13
 def query(self, id):
     globs, locs = {"id": id}, {}
     exec(self.vars_code, {}, globs)
     exec(self.db_code, globs, locs)
     globs.update(locs)
     locs = recursive_format(locs, globs)
     for key in globs["export"]:
         locs[key] = globs[key]
     return bunch.Bunch(locs)
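
To make the control flow concrete, a hypothetical pair of code strings (recursive_format is assumed to expand {name} templates against the globs dict):

    vars_code = "basedir = '/data/season1'\nexport = ['basedir']"
    db_code   = "tod = '{basedir}/' + id + '.hdf'"
    # query("tod123") -> Bunch(tod='/data/season1/tod123.hdf', basedir='/data/season1')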
Example #14
File: scan.py Project: Nium14/enlib
	def __init__(self, fname):
		self.fname = fname
		with h5py.File(fname, "r") as hfile:
			for k in ["boresight","offsets","comps","sys","mjd0","dets"]:
				setattr(self, k, hfile[k].value)
			n = self.boresight.shape[0]
			neach = hfile["cut/neach"].value
			flat  = hfile["cut/flat"].value
			self.cut  = sampcut.Sampcut(flat, utils.cumsum(neach, endpoint=True), n)
			self.cut_noiseest = self.cut.copy()
			self.noise= nmat.read_nmat(hfile, "noise")
			self.site = bunch.Bunch({k:hfile["site/"+k].value for k in hfile["site"]})
			self.subdets = np.arange(self.ndet)
			self.hwp = np.zeros(n)
			self.hwp_phase = np.zeros([n,2])
			self.sampslices = []
			self.id = os.path.basename(fname)
			self.entry = bunch.Bunch(id=self.id)
Example #15
def read_site(fname):
	"""Given a filename or file, parse a file with key = value information and return
	it as a Bunch."""
	res = bunch.Bunch()
	for line in utils.lines(fname):
		line = line.strip()
		if len(line) == 0 or line.startswith("#"): continue
		a = ast.parse(line)
		id = a.body[0].targets[0].id
		res[id] = ast.literal_eval(a.body[0].value)
	return res
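
A sketch of the kind of file this parses (contents hypothetical): one Python-style "name = literal" assignment per line, with # comments allowed.

	# site.txt:
	#   lat = -22.9586
	#   lon = -67.7875
	#   alt = 5188.0
	site = read_site("site.txt")   # -> Bunch(lat=-22.9586, lon=-67.7875, alt=5188.0)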
Example #16
def dets_scattered(nmul, nper=3, rad=0.5*np.pi/180):
	ndet = nmul*nper
	offsets = np.repeat(np.random.uniform(size=[nmul,3])*rad, nper,0)
	offsets[:,0] = 0
	# T,Q,U sensitivity
	angles = np.arange(ndet)*np.pi/nmul
	comps     = np.zeros([ndet,4])
	comps[:,0] = 1
	comps[:,1] = np.cos(2*angles)
	comps[:,2] = np.sin(2*angles)
	return bunch.Bunch(comps=comps, offsets=offsets)
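
Usage sketch (illustrative numbers):

	dets = dets_scattered(4, nper=3)   # 12 detectors in 4 co-located triplets
	# dets.offsets: [12,3] pointing offsets (column 0 zeroed)
	# dets.comps:   [12,4] response: T=1, Q/U set by the detector angle, last column unused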
Example #17
def make_dummy_tile(shape, wcs, box, pad=0):
    pbox = calc_pbox(shape, wcs, box)
    if pad:
        pbox[0] -= pad
        pbox[1] += pad
    shape2, wcs2 = enmap.slice_wcs(
        shape, wcs,
        (slice(pbox[0, 0], pbox[1, 0]), slice(pbox[0, 1], pbox[1, 1])))
    shape2 = tuple(pbox[1] - pbox[0])
    map = enmap.zeros(shape2, wcs2, dtype)
    div = enmap.zeros(shape2[-2:], wcs2, dtype)
    return bunch.Bunch(map=map, div=div)
Example #18
 def eval(self, pos):
     res = bunch.Bunch()
     res.profile = self.calc_profile(pos)
     res.amp, res.vamp = self.calc_amp(res.profile)
     res.model = res.profile * res.amp
     res.resid = self.map - res.model
     res.chisq = np.sum(res.resid**2 * self.div)
     res.chisq0 = self.chisq0
     res.npix = self.npix
     res.marg = -res.amp**2 / res.vamp - np.sum(np.log(res.vamp))
     res.pos = pos
     return res
Example #19
    def summarize(self, chain):
        """Given a chain of lpars, compute a summary in the
		same format as returned by SrcFitterML."""
        dposs = np.array([c.dpos for c in chain])
        poss_cel = np.array([c.poss for c in chain])
        poss_hor = poss_cel * 0
        for i in range(len(self.sdata)):
            poss_hor[:, i] = coordinates.transform("cel",
                                                   "hor",
                                                   poss_cel[:, i].T,
                                                   time=utils.ctime2mjd(
                                                       self.sdata[i].ctime),
                                                   site=self.sdata[i].site).T
        ampss = np.array([c.amps for c in chain])
        vampss = np.array([c.vamps for c in chain])
        chisq0 = chain[0].chisq0
        chisq = np.mean([c.chisq for c in chain])
        # Compute means
        dpos = np.mean(dposs, 0)
        pos_cel = np.mean(poss_cel, 0)
        pos_hor = np.mean(poss_hor, 0)
        amps = np.mean(ampss, 0)
        # And uncertainties
        dpos_cov = np.cov(dposs.T)
        ddpos = np.diag(dpos_cov)**0.5
        pcorr = dpos_cov[0, 1] / ddpos[0] / ddpos[1]
        # mean([da0_i + da_ij]**2). Uncorrelated, so this is
        # mean(da0**2) + mean(da**2)
        arel = ampss + np.random.standard_normal(ampss.shape) * vampss**0.5
        amps = np.mean(arel, 0)
        vamps = np.var(arel, 0)
        #vamps   = np.var(ampss,0)+np.mean(vampss,0)
        damps = vamps**0.5
        models = self.lik.eval(dpos).models
        nsigma = (chisq0 - chisq)**0.5
        # We want how much to offset detector by, not how much to offset
        # source by
        res = bunch.Bunch(dpos=dpos,
                          poss_cel=pos_cel,
                          poss_hor=pos_hor,
                          ddpos=ddpos,
                          amps=amps,
                          damps=damps,
                          pcorr=pcorr,
                          nsrc=len(self.sdata),
                          models=models,
                          nsigma=nsigma,
                          chisq0=chisq0,
                          chisq=chisq,
                          npix=self.lik.npix)
        return res
Example #20
 def load(self, data, funcs={}):
     self.rules = []
     self.static = bunch.Bunch()
     for line in data.splitlines():
         line = line.strip()
         if len(line) < 1 or line[0] == "#": continue
         # Ignore part after hash
         line = line.split("#")[0]
         # Split into part before first : and the rest
         toks = pre_split(line)
         if len(toks) == 1: toks = toks + [""]
         # There may be multiple formats on the same line, pipe-separated
         name, format = toks[0], toks[1:]
         self.rules.append({"name": name, "format": format})
         self.static[name] = format
Example #21
def read_thumb_data(fname):
	res    = bunch.Bunch()
	hdus   = astropy.io.fits.open(fname)
	header = hdus[0].header
	with warnings.catch_warnings():
		warnings.simplefilter("ignore")
		wcs = wcsutils.WCS(header).sub(2)
	res.rhs, res.div, res.corr = enmap.fix_endian(enmap.ndmap(hdus[0].data, wcs))
	res.srcinfo = hdus[1].data
	res.detinfo = hdus[2].data
	res.id  = header["id"]
	res.off = np.array([float(header["off_x"]),float(header["off_y"])])*utils.arcmin
	for key in ["bore_az1","bore_az2","bore_el"]:
		res[key] = float(header[key])*utils.degree
	res.ctime = float(header["ctime"])
	return res
Example #22
def build_rangedata(tod, rcut, d, ivar):
    nmax = np.max(rcut.ranges[:, 1] - rcut.ranges[:, 0])
    nrange = rcut.nrange

    rdata = bunch.Bunch()
    rdata.detmap = np.zeros(nrange, int)
    rdata.tod = np.zeros([nrange, nmax], dtype)
    rdata.pos = np.zeros([nrange, nmax, 2])
    rdata.ivar = np.zeros([nrange, nmax], dtype)
    # Build our detector mapping
    for di in range(rcut.ndet):
        rdata.detmap[rcut.detmap[di]:rcut.detmap[di + 1]] = di
    rdata.n = rcut.ranges[:, 1] - rcut.ranges[:, 0]
    # Extract our tod samples and coordinates
    for i, r in enumerate(rcut.ranges):
        di = rdata.detmap[i]
        rn = r[1] - r[0]
        rdata.tod[i, :rn] = tod[di, r[0]:r[1]]
        bore = d.boresight[:, r[0]:r[1]]
        mjd = utils.ctime2mjd(bore[0])
        pos_hor = bore[1:] + d.point_offset[di, :, None]
        pos_rel = coordinates.transform(tod_sys,
                                        sys,
                                        pos_hor,
                                        time=mjd,
                                        site=d.site)
        rdata.pos[i, :rn] = pos_rel.T
        # Expand noise ivar too, including the effect of our normal data cut
        rdata.ivar[i, :rn] = ivar[di] * (1 - d.cut[di:di + 1, r[0]:r[1]].to_mask()[0])

    # Precompute our fourier space units
    rdata.freqs = fft.rfftfreq(nmax, 1 / d.srate)
    # And precompute our butterworth filter
    rdata.butter = filters.mce_filter(rdata.freqs, d.mce_fsamp, d.mce_params)
    # Store the fiducial time constants for reference
    rdata.tau = d.tau
    # These are also nice to have
    rdata.dsens = ivar**-0.5 / d.srate**0.5
    rdata.asens = np.sum(ivar)**-0.5 / d.srate**0.5
    rdata.srate = d.srate
    rdata.dets = d.dets
    rdata.beam = d.beam
    rdata.id = d.entry.id
    return rdata
Example #23
def read_layout(fname):
	"""Read the detector layout, returning a Bunch of with
	ndet, nrow, ncol, rows, cols, darksquid, pcb."""
	rows, cols, dark, pcb = [], [], [], []
	with open(fname,"r") as f:
		for line in f:
			if line.startswith("#"): continue
			toks = line.split()
			r, c, d, p = int(toks[1]), int(toks[2]), int(toks[3])>0, toks[4]
			rows.append(r)
			cols.append(c)
			dark.append(d)
			pcb.append(p)
	rows = np.array(rows)
	cols = np.array(cols)
	dark = np.array(dark)
	pcb  = np.array(pcb)
	return bunch.Bunch(rows=rows, cols=cols, dark=dark, pcb=pcb, nrow=np.max(rows)+1, ncol=np.max(cols)+1, ndet=len(rows))
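
The expected file layout (columns inferred from the token indices above; token 0, e.g. a detector id, is skipped):

	# id row col dark pcb
	#  0   0   0    0  A1
	#  1   0   1    1  A1
	layout = read_layout("layout.txt")
	# layout.dark is a boolean mask (token 3 > 0); layout.nrow/ncol are max index + 1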
Example #24
def rand_srcs(box, nsrc, amp, fwhm, rand_fwhm=False):
	pos  = np.array([np.random.uniform(box[0,1],box[1,1],nsrc),np.random.uniform(box[0,0],box[1,0],nsrc)]).T
	amps = np.random.exponential(scale=amp, size=nsrc)
	amps *= 2*np.random.randint(low=0,high=2,size=nsrc)-1 # both sign sources for fun
	pos_angs = np.random.uniform(0, np.pi, nsrc)
	pos_fracs = np.random.uniform(0, 1, nsrc)
	pos_comps = np.zeros([nsrc,4])
	pos_comps[:,0] = 1
	pos_comps[:,1] = np.cos(2*pos_angs)*pos_fracs
	pos_comps[:,2] = np.sin(2*pos_angs)*pos_fracs
	amps = amps[:,None]*pos_comps
	if rand_fwhm:
		skew = 2
		ofwhm = np.random.exponential(scale=fwhm**(1.0/skew), size=nsrc)**skew
		amps *= ((fwhm/ofwhm)**1)[:,None]
	else:
		ofwhm = np.zeros([nsrc]) + fwhm
	return bunch.Bunch(pos=pos,amps=amps,beam=ofwhm/(8*np.log(2))**0.5)
Example #25
	def build_stats(self):
		nburn = int(self.nsamp*self.burn_frac)
		for i in range(nburn):
			self.draw_sample()
		poss  = np.zeros([self.nsamp, self.ndim])
		amps  = np.zeros([self.nsamp, self.nsrc])
		adivs = np.zeros([self.nsamp, self.nsrc])
		for i in range(self.nsamp):
			poss[i], amps[i], adivs[i] = self.draw_sample()
		pos_mean = np.mean(poss,0)
		amp_mean = np.mean(amps,0)
		pos_dev  = np.std(poss,0)
		# Because we're not fully sampling the amplitude, we can't just do np.std(amps,0)
		# here. We need to take into account adiv too. Conceptually we should
		# sample from N(amp,1/adiv) for each amp,adiv, and calculate the stats based
		# on all those. The mean of that will be mean(amp), but the variance will
		# be var(amp) + mean(vars).
		amp_dev  = (np.var(amps,0) + np.mean(1/adivs,0))**0.5
		return bunch.Bunch(pos=pos_mean, dpos=pos_dev, amp=amp_mean, damp=amp_dev)
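
A quick standalone check of the variance argument in the comment above (illustrative numbers):

	import numpy as np
	amps  = 5 + 2*np.random.standard_normal(100000)        # var(amps) = 4
	adivs = np.full(100000, 1/3.0)                         # mean(1/adivs) = 3
	draws = amps + np.random.standard_normal(100000)/np.sqrt(adivs)
	print(np.var(draws), np.var(amps) + np.mean(1/adivs))  # both close to 7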
Example #26
def scan_grid(box, res, sys="equ", dir=0, margin=0):
	box[np.argmin(box,0)] += margin
	box[np.argmax(box,0)] -= margin
	n = np.round(np.asarray(box[1]-box[0])/res).astype(int)
	dec = np.linspace(box[0,0],box[1,0],n[0],endpoint=False) + res/2
	ra  = np.linspace(box[0,1],box[1,1],n[1],endpoint=False) + res/2
	if dir % 2 == 0:
		decra = np.empty([2,dec.size,ra.size])
		decra[0] = dec[:,None]
		decra[1] = ra [None,:]
	else:
		decra = np.empty([2,ra.size,dec.size])
		decra[0] = dec[None,:]
		decra[1] = ra [:,None]
	decra = decra.reshape(2,-1)
	t = np.arange(decra.shape[1])*1e3/decra.shape[1]
	boresight = np.empty([t.size,3])
	boresight[:,0] = t
	boresight[:,1:] = decra.T
	return bunch.Bunch(boresight=boresight, sys=sys, mjd0=55500,site=scan.default_site)
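
Usage sketch (note that scan_grid modifies box in place when margin is nonzero, hence the copy):

	box = np.array([[-0.1, 0.2], [0.1, 0.5]])   # [[dec1,ra1],[dec2,ra2]] in radians
	sim = scan_grid(box.copy(), res=0.01)
	# sim.boresight visits every grid point once; dir flips which axis is traversed fastest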
Example #27
File: scan.py Project: Nium14/enlib
def build_hwp_sample_mapping(hwp, quantile=0.1):
	"""Given a HWP angle, return an array with shape [nout] containing
	the original sample index (float) corresponding to each sample in the
	remapped array, along with the resulting hwp sample rate.
	The remapping also truncates the end to ensure that
	there is an integer number of HWP rotations in the data."""
	# Make sure there are no angle wraps in the hwp
	hwp = utils.unwind(hwp)
	# interp requires hwp to be monotonically increasing. In practice
	# it could be monotonically decreasing too, but our final result
	# does not depend on the direction of rotation, so we simply flip it here
	# if necessary
	hwp = np.abs(hwp)
	# Find the target hwp speed
	speed = np.percentile(hwp[1:]-hwp[:-1], 100*quantile)
	# We want a whole number of samples per revolution, and
	# a whole number of revolutions in the whole tod
	a    = hwp - hwp[0]
	nrev = int(np.floor(a[-1]/(2*np.pi)))
	nper = utils.nint(2*np.pi/speed)
	# Make each of these numbers fourier-friendly
	nrev = fft.fft_len(nrev, "below")
	nper = fft.fft_len(nper, "above")
	# Set up our output samples
	speed = 2*np.pi/nper
	nout  = nrev*nper
	ohwp  = hwp[0] + np.arange(nout)*speed
	# Find the input sample for each output sample
	res = bunch.Bunch()
	res.oimap = np.interp(ohwp, hwp, np.arange(len(hwp)))
	# Find the output sample for each input sample too. Because of
	# cropping, the last of these will be invalid
	res.iomap = np.interp(np.arange(len(hwp)), res.oimap, np.arange(len(res.oimap)))
	# Find the average sampling rate change fsamp_rel = fsamp_out/fsamp_in
	res.fsamp_rel = 1/np.mean(res.oimap[1:]-res.oimap[:-1])
	res.insamp = len(hwp)
	res.onsamp = nout
	res.nrev   = nrev
	res.nper   = nper
	return res
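
Usage sketch (assuming the enlib utils/fft helpers imported by the original file are available):

	import numpy as np
	hwp = np.linspace(0, 40*np.pi, 20000)   # 20 uniform rotations, illustrative
	m   = build_hwp_sample_mapping(hwp)
	# m.nper: fourier-friendly samples per rotation; m.nrev: whole rotations kept
	# m.oimap[j]: fractional input index of output sample j (len m.onsamp)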
Example #28
 def query(self, id, multi=True):
     # Split the id argument into the actual id plus a tag list
     toks = id.split(":")
     id = toks[0]
     tag = toks[1] if len(toks) > 1 else None
     info = {name: fun(id) for name, fun in self.funcs}
     res = bunch.Bunch()
     selected = [True]
     for rule in self.rules:
         name, format = rule["name"], rule["format"]
         if name[0] == "@":
             # In this case, format is actually the conditional in the selector
             if name == "@end":
                 selected.pop()
             elif name == "@else":
                 selected[-1] = not selected[-1]
             else:
                 # General format @var:case case case ...
                 match = False
                 for case in format:
                     match |= ("{%s}" % name[1:]).format(**info) == case
                 selected.append(match)
                 #selected.append(("{%s}"%name[1:]).format(**info) == format[0])
         elif len(format) == 0 or len(format[0]) == 0:
             # Handle variable assignment. Avoids repeating the same long paths over and over again
             vname, vval = re.split(r"\s*=\s*", name)
             info[vname] = vval
         elif all(selected):
             tmp = [fmt.format(**info) for fmt in rule["format"]]
             res[rule["name"]] = tmp if multi else tmp[0]
     res.id = id
     res.tag = tag
     # Apply override if specified:
     if self.override and self.override != "none":
         for tok in self.override.split(","):
             name, val = tok.split(":")
             val = val.format(**info)
             res[name] = [val] if multi else val
     return res
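
A hypothetical rules file exercising the constructs handled above (assuming pre_split splits on the first colon and then on pipes, and that self.funcs supplies {id} and {season}):

    root=/archive
    @season:s16|s17
    tod: {root}/{season}/{id}.hdf
    @else
    tod: {root}/legacy/{id}.hdf
    @end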
Example #29
    def calc_chisqs_amps_cached(self, poss, taus):
        """Calc the total chisquare for all detectos, but reuse contributions
		from unchanged detectors."""
        if self.cache is None:
            chisqs, amps, adiv = self.calc_chisqs_amps(poss, taus)
            self.cache = bunch.Bunch(poss=poss.copy(),
                                     taus=taus.copy(),
                                     chisqs=chisqs,
                                     amps=amps,
                                     adiv=adiv)
        else:
            # Find the set of changed detectors
            changed = np.any(poss != self.cache.poss,
                             1) | (taus != self.cache.taus)
            dets = np.where(changed)[0]
            if len(dets) > 0:
                #print "cached call"
                #print " with dets", dets
                #print " with poss", poss[dets]
                #print " with taus", taus[dets]
                chisqs, amps, adiv = self.calc_chisqs_amps(
                    poss[dets], taus[dets], dets)
                self.cache.chisqs[dets] = chisqs
                self.cache.amps[dets] = amps
                self.cache.adiv[dets] = adiv
                self.cache.poss[dets] = poss[dets]
                self.cache.taus[dets] = taus[dets]
            #print "raw call"
            #print " with poss", poss
            #print " with taus", taus
            #chisqs2, amps2, adiv2 = self.calc_chisqs_amps(poss, taus)
            ## Find actual changes
            #changed2 = chisqs2 != self.cache.chisqs
            #print np.sum(self.cache.chisqs), np.sum(chisqs2)
            #print "A", np.where(changed), np.where(changed2)
            #print "B", self.cache.chisqs[changed], chisqs2[changed2]
        return self.cache.chisqs.copy(), self.cache.amps.copy(), self.cache.adiv.copy()
Example #30
args = parser.parse_args()

if args.seed: np.random.seed(args.seed)

comm = mpi4py.MPI.COMM_WORLD
ncomp = args.ncomp
dtype = np.float64
d2r = np.pi / 180
m2r = np.pi / 180 / 60
b2r = np.pi / 180 / 60 / (8 * np.log(2))**0.5

try:
    # Beam in format [r,val], where r is equispaced starting at 0, in units of degrees
    # and val has a max value of 1
    b = np.loadtxt(args.beam)
    beam = bunch.Bunch(profile=b[:, 1], rmax=b[1, 0] * len(b) * utils.degree)
except IOError:
    # Assume beam is gaussian
    b = float(args.beam) * utils.arcmin * utils.fwhm
    r = np.linspace(0, 10, 1000) * b
    beam = bunch.Bunch(profile=np.exp(-0.5 * (r / b)**2), rmax=10 * b)
    beam_global = b
if not args.oldformat:
    beam_global = 1.0
    print "Using new model"
else:
    print "Using old model"
# prior on beam deformations
beam_rel_min = 0.5
beam_rel_max = 2.0
beam_ratio_max = 3.0
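
A sketch of a beam file accepted by the np.loadtxt branch above (equispaced r starting at 0, in degrees, with a peak value of 1):

    import numpy as np
    r   = np.linspace(0, 0.5, 100)           # degrees, equispaced from 0
    val = np.exp(-0.5*(r/0.02)**2)           # peak-normalized profile
    np.savetxt("beam.txt", np.array([r, val]).T)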