Example #1
def classify_scanning_patterns(myscans, tol=0.5*utils.degree, comm=None):
	"""Classify scans into scanning patterns based on [az,el] bounds.
	Returns patterns[:,{from,to},{el,az}] and pids[len(myscans)], where
	pids contains the index of each myscan into patterns."""
	boxes = get_scan_bounds(myscans)
	if comm is not None:
		# Only touch comm.rank when we actually have a communicator
		rank  = utils.allgatherv(np.full(len(boxes), comm.rank), comm)
		boxes = utils.allgatherv(boxes, comm)
	pids = utils.label_unique(boxes, axes=(1,2), atol=tol)
	npattern = np.max(pids)+1
	# For each scanning pattern, define a bounding box
	pboxes = np.array([utils.bounding_box(boxes[pids==pid]) for pid in range(npattern)])
	# Get the ids for the scans that we have
	if comm is not None:
		pids = pids[rank==comm.rank]
	return pboxes, pids
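The shape of this routine recurs throughout these examples: gather per-task rows with allgatherv, label or reduce them globally, then slice back to the local rows. A self-contained sketch of that round trip, with mpi4py and numpy standing in for the tenki helpers (an assumption; utils.allgatherv concatenates along axis 0):

import numpy as np
from mpi4py import MPI

comm  = MPI.COMM_WORLD
rows  = np.full((2, 1), comm.rank, float)        # stand-in for per-task boxes
rank  = np.full(len(rows), comm.rank)
grows = np.concatenate(comm.allgather(rows))     # ~ utils.allgatherv
grank = np.concatenate(comm.allgather(rank))
# ~ utils.label_unique: identical rows share a label
labels   = np.unique(grows, axis=0, return_inverse=True)[1].ravel()
mylabels = labels[grank == comm.rank]            # back to this task's rows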
Example #2
File: tod2map2.py Project: amaurea/tenki
def build_noise_stats(myscans, comm):
	ids    = utils.allgatherv([scan.id    for scan in myscans], comm)
	ndets  = utils.allgatherv([scan.ndet  for scan in myscans], comm)
	srates = utils.allgatherv([scan.srate for scan in myscans], comm)
	gdets  = utils.allgatherv(safe_concat([scan.dets       for scan in myscans],int),   comm)
	ivars  = utils.allgatherv(safe_concat([scan.noise.ivar for scan in myscans],float), comm)
	offs   = utils.cumsum(ndets, endpoint=True)
	res    = []
	for i, id in enumerate(ids):
		o1, o2 = offs[i], offs[i+1]
		dsens = (ivars[o1:o2]*srates[i])**-0.5
		asens = (np.sum(ivars[o1:o2])*srates[i])**-0.5
		dets  = gdets[o1:o2]
		# We want sorted dets
		inds  = np.argsort(dets)
		dets, dsens = dets[inds], dsens[inds]
		line = {"id": id, "asens": asens, "dsens": dsens, "dets": dets}
		res.append(line)
	inds = np.argsort(ids)
	res = [res[ind] for ind in inds]
	return res
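The offset bookkeeping is the load-bearing trick here: utils.cumsum(ndets, endpoint=True) evidently yields [0, n0, n0+n1, ..., ntot], so consecutive pairs bracket each scan's slice of the flattened, gathered detector arrays. A pure-numpy sketch of that indexing (the endpoint form is inferred from how offs is used above):

import numpy as np
ndets = np.array([3, 2, 4])                      # per-scan detector counts
offs  = np.concatenate([[0], np.cumsum(ndets)])  # ~ utils.cumsum(ndets, endpoint=True)
flat  = np.arange(offs[-1]) * 10.0               # stand-in flattened per-detector values
per_scan = [flat[offs[i]:offs[i+1]] for i in range(len(ndets))]
# per_scan[1] -> [30., 40.]: scan 1's block of the concatenated array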
Example #3
for ind in range(comm.rank, len(ids), comm.size):
    id = ids[ind]
    entry = file_db[id]
    try:
        stats.append(todinfo.build_tod_stats(entry))
    except (errors.DataMissing, AttributeError) as e:
        print "Skipping %s (%s)" % (id, e.message)
        continue
    print "%3d %4d/%d %5.1f%% %s" % (comm.rank, ind + 1, len(ids),
                                     (ind + 1) / float(len(ids)) * 100, id)
stats = todinfo.merge_tod_stats(stats)

if comm.rank == 0: print("Reducing")
comm.Barrier()
for key in stats:
    stats[key] = utils.allgatherv(stats[key], comm)

# Sort by id and move id index last
inds = np.argsort(stats["id"])
for key in stats:
    stats[key] = utils.moveaxis(stats[key][inds], 0, -1)
stat_db = todinfo.Todinfo(stats)
# Merge with original tags. Rightmost overrides for overridable fields.
# For tags, we get the union. This means that stat_db can't override incorrect
# tags in scan_db, just add to them.
stat_db = scan_db + stat_db

if comm.rank == 0:
    print "Writing"
    stat_db.write(args.ofile)
    print "Done"
Example #4
    def __init__(self,
                 shape,
                 wcs=None,
                 bbpix=None,
                 tshape=None,
                 dtype=None,
                 comm=None,
                 bbox=None,
                 pre=None):
        """DGeometry(shape, wcs, bbpix, tshape) or
        DGeometry(dgeometry)

        Construct a DMap geometry. When constructed explicitly from shape, wcs, etc.,
        it is a relatively expensive operation, as MPI communication is necessary.
        Constructing one based on an existing DGeometry is fast.

        bbox indicates the bounds [{from,to},{lat,lon}] of the area of the map of
        interest to each mpi task, and will in general be different for each task.
        bbpix is the same as bbox, but expressed in units of pixels.
        It is allowed to pass a list of bounding boxes, in which case each mpi task
        will have multiple work spaces.

        tshape specifies the tiling scheme to use. The global geometry will be
        split into tiles of tshape dimensions (except at the edges, as no tile
        will extend beyond the edge of the full map), and tiles will be stored
        in a distributed fashion between mpi tasks based on the degree of overlap
        with their workspaces."""
        try:
            # We are pretty lightweight, so copy everything properly. This is
            # less error prone than having changes in one object suddenly affect
            # another. comm must *not* be deepcopied, as that breaks it.
            for key in ["shape", "tshape", "comm", "dtype"]:
                setattr(self, key, getattr(shape, key))
            for key in [
                    "wcs", "tile_pos", "tile_boxes", "tile_geometry",
                    "work_geometry", "tile_ownership", "tile_glob2loc",
                    "tile_loc2glob", "tile_bufinfo", "work_bufinfo"
            ]:
                setattr(self, key, copy.deepcopy(getattr(shape, key)))
        except AttributeError:
            # 1. Set up basic properties
            assert shape is not None
            if wcs is None:
                _, wcs = enmap.geometry(pos=np.array([[-1, -1], [1, 1]]) * 5 *
                                        np.pi / 180,
                                        shape=shape[-2:])
            if comm is None: comm = mpi.COMM_WORLD
            if tshape is None: tshape = (720, 720)
            if dtype is None: dtype = np.float64
            if bbpix is None:
                if bbox is None: bbpix = [[0, 0], shape[-2:]]
                else: bbpix = box2pix(shape, wcs, bbox)
            nphi = int(np.round(np.abs(360 / wcs.wcs.cdelt[0])))
            # Reorder from/to if necessary, and expand to [:,2,2]
            bbpix = sanitize_pixbox(bbpix, shape)
            dtype = np.dtype(dtype)
            self.shape, self.wcs, self.bbpix = tuple(
                shape), wcs.deepcopy(), np.array(bbpix)
            self.tshape, self.dtype, self.comm = tuple(tshape), dtype, comm

            # 2. Set up workspace descriptions
            work_geometry = [
                enmap.slice_geometry(
                    shape,
                    wcs, (slice(b[0, 0], b[1, 0]), slice(b[0, 1], b[1, 1])),
                    nowrap=True) for b in bbpix
            ]
            # 3. Define global workspace ownership
            nwork = utils.allgather([len(bbpix)], comm)
            wown = np.concatenate(
                [np.full(n, i, dtype=int) for i, n in enumerate(nwork)])
            # 4. Define tiling. Each tile has shape tshape, starting from the (0,0) corner
            #    of the full map. Tiles at the edge are clipped, as pixels beyond the edge
            #    of the full map may have undefined wcs positions.
            tbox = build_tiles(shape, tshape)
            bshape = tbox.shape[:2]
            tbox = tbox.reshape(-1, 2, 2)
            ntile = len(tbox)
            tile_indices = np.array(
                [np.arange(ntile) // bshape[1],
                 np.arange(ntile) % bshape[1]]).T
            # 5. Define tile ownership.
            # a) For each task compute the overlap of each tile with its workspaces, and
            #    concatenate across tasks to form a [nworktot,ntile] array.
            wslices = select_nonempty(  # slices into work
                utils.allgatherv(utils.box_slice(bbpix, tbox), comm,
                                 axis=0),  # normal
                utils.allgatherv(utils.box_slice(bbpix + [0, nphi], tbox),
                                 comm,
                                 axis=0))  # wrapped
            tslices = select_nonempty(  # slices into tiles
                utils.allgatherv(utils.box_slice(tbox, bbpix), comm,
                                 axis=1),  # normal
                utils.allgatherv(utils.box_slice(tbox, bbpix + [0, nphi]),
                                 comm,
                                 axis=1))  # wrapped
            # b) Compute the total overlap each mpi task has with each tile, and use this
            # to decide who should get which tiles
            overlaps = utils.box_area(wslices)
            overlaps = utils.sum_by_id(overlaps, wown, 0)
            town = assign_cols_round_robin(overlaps)
            # Map tile indices from local to global and back
            tgmap = [[] for i in range(comm.size)]
            tlmap = np.zeros(ntile, dtype=int)
            for ti, id in enumerate(town):
                tlmap[ti] = len(tgmap[id])  # glob 2 loc
                tgmap[id].append(ti)  # loc  2 glob
            # 6. Define tiles
            tile_geometry = [
                enmap.slice_geometry(
                    shape, wcs,
                    (slice(tb[0, 0], tb[1, 0]), slice(tb[0, 1], tb[1, 1])))
                for tb in tbox
            ]
            # 7. Define mapping between work<->wbuf and tiles<->tbuf
            wbufinfo = np.zeros([2, comm.size], dtype=int)
            tbufinfo = np.zeros([2, comm.size], dtype=int)
            winfo, tinfo = [], []
            woff, toff = 0, 0
            prelen = np.prod(shape[:-2])
            for id in range(comm.size):
                ## Buffer info to send to alltoallv
                wbufinfo[1, id] = woff
                tbufinfo[1, id] = toff
                # Slices for transfering to and from w buffer. Loop over all of my
                # workspaces and determine the slices into them and how much we need
                # to send.
                for tloc, tglob in enumerate(np.where(town == id)[0]):
                    for wloc, wglob in enumerate(
                            np.where(wown == comm.rank)[0]):
                        ws = wslices[wglob, tglob]
                        wlen = utils.box_area(ws) * prelen
                        work_slice = (Ellipsis, slice(ws[0, 0], ws[1, 0]),
                                      slice(ws[0, 1], ws[1, 1]))
                        wbuf_slice = slice(woff, woff + wlen)
                        winfo.append((wloc, work_slice, wbuf_slice))
                        woff += wlen
                # Slices for transferring to and from t buffer. Loop over all
                # my tiles, and determine how much I have to receive from each
                # workspace of each task.
                for tloc, tglob in enumerate(np.where(town == comm.rank)[0]):
                    for wloc, wglob in enumerate(np.where(wown == id)[0]):
                        ts = tslices[tglob, wglob]
                        tlen = utils.box_area(ts) * prelen
                        tile_slice = (Ellipsis, slice(ts[0, 0], ts[1, 0]),
                                      slice(ts[0, 1], ts[1, 1]))
                        tbuf_slice = slice(toff, toff + tlen)
                        tinfo.append((tloc, tile_slice, tbuf_slice))
                        toff += tlen
                wbufinfo[0, id] = woff - wbufinfo[1, id]
                tbufinfo[0, id] = toff - tbufinfo[1, id]
            wbufinfo, tbufinfo = tuple(wbufinfo), tuple(tbufinfo)
            # TODO: store tbox? loc_geometry vs. tile_geometry, etc.
            # 8. Store
            # [ntile,2]: position of each (global) tile in grid
            self.tile_pos = tile_indices
            # [ntile,2,2]: pixel box for each (global) tile
            self.tile_boxes = tbox
            # [ntile,(shape,wcs)]: geometry of each (global) tile
            self.tile_geometry = tile_geometry
            # [nwork,(shape,wcs)]: geometry of each local workspace
            self.work_geometry = work_geometry
            # [ntile]: rank of owner of each tile
            self.tile_ownership = town
            # [ntile]: local index of each (global) tile
            self.tile_glob2loc = tlmap
            # [nloc]:  global index of each local tile
            self.tile_loc2glob = tgmap[comm.rank]
            # Communication buffers
            self.tile_bufinfo = Bufmap(tinfo, tbufinfo, toff)
            self.work_bufinfo = Bufmap(winfo, wbufinfo, woff)
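A construction sketch following the docstring; the module layout is an assumption (in enlib this class lives in dmap), and the numbers are placeholders:

import numpy as np
from enlib import dmap, mpi   # assumed module layout

comm  = mpi.COMM_WORLD
shape = (3, 2160, 4320)       # [ncomp, ny, nx]
# Each task passes the sky region it will work on: [{from,to},{lat,lon}] in radians
bbox  = np.array([[-0.05, -0.10], [0.05, 0.10]]) + 0.01 * comm.rank
geo   = dmap.DGeometry(shape, tshape=(720, 720), dtype=np.float32,
                       comm=comm, bbox=bbox)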
Example #5
L = log.init(level=log_level, rank=comm.rank)
tshape = (720, 720)

# Read in all our scans
L.info("Reading %d scans" % len(ids))
myinds = np.arange(len(ids))[comm.rank::comm.size]
myinds, myscans = scanutils.read_scans(ids,
                                       myinds,
                                       actscan.ACTScan,
                                       filedb.data,
                                       dets=args.dets,
                                       downsample=config.get("downsample"))
myinds = np.array(myinds, int)

# Collect scan info. This currently fails if any task has empty myinds
read_ids = [ids[ind] for ind in utils.allgatherv(myinds, comm)]
read_ntot = len(read_ids)
L.info("Found %d tods" % read_ntot)
if read_ntot == 0:
    L.info("Giving up")
    sys.exit(1)
# Prune fully autocut scans
mydets = [len(scan.dets) for scan in myscans]
myinds = [ind for ind, ndet in zip(myinds, mydets) if ndet > 0]
myscans = [scan for scan, ndet in zip(myscans, mydets) if ndet > 0]
L.info("Pruned %d fully autocut tods" %
       (read_ntot - comm.allreduce(len(myscans))))

# Try to get about the same amount of data for each mpi task.
# If we use distributed maps, we also try to make things as local as possible
mycosts = [s.nsamp * s.ndet for s in myscans]
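The balancing the last comment describes (the script hands mycosts to a scanutils helper next, not shown here) can be sketched generically: hand each scan, heaviest first, to the currently lightest task. A standalone illustration, not the tenki implementation:

import numpy as np

def greedy_assign(costs, ntask):
    """Assign each item to a task, heaviest first, always to the lightest task."""
    order = np.argsort(costs)[::-1]   # heaviest first
    loads = np.zeros(ntask)
    owner = np.zeros(len(costs), int)
    for i in order:
        t = np.argmin(loads)          # lightest task so far
        owner[i] = t
        loads[t] += costs[i]
    return owner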
Example #6
    el = np.mean(d.boresight[:, 2])
    az1, az2 = utils.minmax(d.boresight[:, 1])
    pat = np.array([el, az1, az2]) / utils.degree
    pat = tuple(np.round(pat / args.bsize) * args.bsize)
    ndet_array = d.layout.ndet
    # And record it
    if pat not in mypatids:
        mypatids[pat] = []
    mypatids[pat].append(id)
ndet_array = comm.allreduce(ndet_array, mpi.MAX)

# Get list of all patterns
L.info("Gathering pattern lists")
mypats = list(mypatids.keys())
if len(mypats) == 0: mypats = np.zeros([0, 3])
pats = utils.allgatherv(mypats, comm)
pats = sorted(set(tuple(pat) for pat in pats))
# Collect ids for each pattern
patids = {}
for pat in pats:
    pids = mypatids[pat] if pat in mypatids else []
    pids = comm.allreduce(list(pids))
    patids[pat] = pids

if comm.rank == 0:
    L.info("Found %d patterns" % len(pats))
    for i, pat in enumerate(pats):
        L.debug("%2d: el %s az %s %s" % (i, pat[0], pat[1], pat[2]))

# Now process each pattern, one by one. This
# means we only have to keep one in memory at a time.
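One subtlety above: comm.allreduce(list(pids)) relies on mpi4py's lowercase allreduce applying the reduction operator (SUM by default) to Python objects, and + on lists concatenates, so every rank ends up with the union of ids. A minimal sketch of that trick (assuming mpi4py semantics for the comm object):

from mpi4py import MPI
comm = MPI.COMM_WORLD
# Each rank contributes its own ids; SUM over lists == concatenation
mine  = ["id_%d_%d" % (comm.rank, i) for i in range(2)]
union = comm.allreduce(mine)   # same concatenated list on every rank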
Example #7
######## Filter parameters ########
filter_params = setup_params("filter", ["scan", "sub"], {"use": "no"})

# Read in all our scans
L.info("Reading %d scans" % len(filelist))
myinds = np.arange(len(filelist))[comm.rank::comm.size]
myinds, myscans = scanutils.read_scans(filelist,
                                       myinds,
                                       actscan.ACTScan,
                                       db,
                                       dets=args.dets,
                                       downsample=config.get("downsample"))

# Collect scan info. This currently fails if any task has empty myinds
read_ids = [filelist[ind] for ind in utils.allgatherv(myinds, comm)]
read_ntot = len(read_ids)
L.info("Found %d tods" % read_ntot)
if read_ntot == 0:
    L.info("Giving up")
    sys.exit(1)
read_ndets = utils.allgatherv([len(scan.dets) for scan in myscans], comm)
read_nsamp = utils.allgatherv(
    [scan.cut.size - scan.cut.sum() for scan in myscans], comm)
read_dets = utils.uncat(
    utils.allgatherv(np.concatenate([scan.dets for scan in myscans]), comm),
    read_ndets)
# Save accept list
if comm.rank == 0:
    with open(root + "accept.txt", "w") as f:
        for id, dets in zip(read_ids, read_dets):
Example #8
                                          mjd,
                                          site=scan.site)
    visible = np.any(object_pos[1] >= margin)
    if not visible:
        cut = rangelist.zeros((d.ndet, d.nsamp))
    else:
        pmap = pmat.PmatMap(scan, mask, sys="hor:%s" % args.objname)
        # Build a tod to project onto.
        tod = np.zeros((d.ndet, d.nsamp), dtype=dtype)
        # And project
        pmap.forward(tod, mask)
        # Any nonzero samples should be cut
        tod = np.rint(tod)
        cut = rangelist.Multirange([rangelist.Rangelist(t) for t in tod])
    print "%s %6.4f %d" % (id, float(cut.sum()) / cut.size, visible)
    mystats.append([ind, float(cut.sum()) / cut.size, visible])
    # Write cuts to output directory
    if args.persample:
        files.write_cut("%s/%s.cuts" % (args.odir, id),
                        d.dets,
                        cut,
                        nrow=d.array_info.nrow,
                        ncol=d.array_info.ncol)
mystats = np.array(mystats)
stats = utils.allgatherv(mystats, comm)
if comm.rank == 0:
    with open(args.odir + "/stats.txt", "w") as f:
        for stat in stats:
            f.write("%s %6.4f %d\n" %
                    (ids[int(stat[0])], stat[1], int(stat[2])))
Example #9
File: tod2map2.py Project: amaurea/tenki
	if sig["type"] in ["map", "dmap", "fmap", "fdmap"]:
		assert "value" in sig and sig["value"] is not None, "Map-type signals need a template map as argument. E.g. -S sky:foo.fits"

######## Filter parameters ########
filter_params    = setup_params("filter", ["scan","sub"], {"use":"no"})
mapfilter_params = setup_params("mapfilter", [], {"use":"no"})

# Read in all our scans
L.info("Reading %d scans" % len(filelist))
myinds = np.arange(len(filelist))[comm.rank::comm.size]
myinds, myscans = scanutils.read_scans(filelist, myinds, actscan.ACTScan,
		db, dets=args.dets, downsample=config.get("downsample"), hwp_resample=config.get("hwp_resample"))
myinds = np.array(myinds, int)

# Collect scan info. This currently fails if any task has empty myinds
read_ids  = [filelist[ind] for ind in utils.allgatherv(myinds, comm)]
read_ntot = len(read_ids)
L.info("Found %d tods" % read_ntot)
if read_ntot == 0:
	L.info("Giving up")
	sys.exit(1)
read_ndets= utils.allgatherv([len(scan.dets) for scan in myscans], comm)
read_nsamp= utils.allgatherv([scan.cut.size-scan.cut.sum() for scan in myscans], comm)
read_dets = utils.uncat(utils.allgatherv(
	np.concatenate([scan.dets for scan in myscans]) if len(myscans) > 0 else np.zeros(0,int)
	,comm), read_ndets)
ntod = np.sum(read_ndets>0)
ncut = np.sum(read_ndets==0)
# Save accept list
if comm.rank == 0:
	with open(root + "accept.txt", "w") as f:
Example #10
			cut = sampcut.from_mask(tod)
			del tod
		progress = 100.0*(ind-comm.rank*ntod//comm.size)/((comm.rank+1)*ntod//comm.size-comm.rank*ntod//comm.size)
		print("%3d %5.1f %s %6.4f %d  %8.3f %8.3f" % (comm.rank, progress, id, float(cut.sum())/cut.size, visible, memory.current()/1024.**3, memory.max()/1024.**3))
		mystats.append([ind, float(cut.sum())/cut.size, visible])
		# Add to my work file
		_, uids  = actdata.split_detname(d.dets)
		flags = flagrange.from_sampcut(cut, dets=uids)
		flags.write(hfile, group=id)

# Merge all the individual cut files into a single big one.
comm.Barrier()
if comm.rank == 0:
	with h5py.File(args.odir + "/cuts.hdf", "w") as ofile:
		for i in range(comm.size):
			print("Reducing %3d" % i)
			with h5py.File(args.odir + "/work_%03d.hdf" % i, "r") as ifile:
				for key in sorted(ifile.keys()):
					ifile.copy(key, ofile)
	print("Done")

# Output the overall statistics
if len(mystats) == 0: mystats = [[-1,0,0]] # placeholder row keeps the array [n,3] for allgatherv
mystats = np.array(mystats)
stats = utils.allgatherv(mystats, comm)
if comm.rank == 0:
	with open(args.odir + "/stats.txt","w") as f:
		for stat in stats:
			if stat[0] >= 0:
				f.write("%s %6.4f %d\n" % (ids[int(stat[0])], stat[1], int(stat[2])))
Example #11
if args.ntod: ids = ids[:args.ntod]

is_dmap = os.path.isdir(args.imap)
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, rank=comm.rank)
tshape= (720,720)

# Read in all our scans
L.info("Reading %d scans" % len(ids))
myinds = np.arange(len(ids))[comm.rank::comm.size]
myinds, myscans = scanutils.read_scans(ids, myinds, actscan.ACTScan,
		filedb.data, dets=args.dets, downsample=config.get("downsample"))
myinds = np.array(myinds, int)

# Collect scan info. This currently fails if any task has empty myinds
read_ids  = [ids[ind] for ind in utils.allgatherv(myinds, comm)]
read_ntot = len(read_ids)
L.info("Found %d tods" % read_ntot)
if read_ntot == 0:
	L.info("Giving up")
	sys.exit(1)
# Prune fully autocut scans
mydets  = [len(scan.dets) for scan in myscans]
myinds  = [ind  for ind, ndet in zip(myinds, mydets) if ndet > 0]
myscans = [scan for scan,ndet in zip(myscans,mydets) if ndet > 0]
L.info("Pruned %d fully autocut tods" % (read_ntot - comm.allreduce(len(myscans))))

# Try to get about the same amount of data for each mpi task.
# If we use distributed maps, we also try to make things as local as possible
mycosts = [s.nsamp*s.ndet for s in myscans]
if is_dmap: