Example #1
def get_tod_groups(ids, samelen=True):
	times = np.array([float(id[:id.index(".")]) for id in ids])
	labels = utils.label_unique(times, rtol=0, atol=10)
	nlabel = np.max(labels)+1
	groups = [ids[labels==label] for label in range(nlabel)]
	# Try to preserve original ordering
	first  = [group[0] for group in groups]
	orig_inds = utils.find(ids, first)
	order  = np.argsort(orig_inds)
	groups = [groups[i] for i in order]
	if samelen:
		nsub = np.max(np.bincount(labels))
		groups = [g for g in groups if len(g) == nsub]
	return groups
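
The heavy lifting in this grouping is done by utils.label_unique, which assigns the same integer label to values that agree within the given tolerances (here atol=10 seconds, rtol=0). As a rough illustration, here is a minimal 1-d stand-in for that behavior in plain NumPy; the real enlib implementation may well differ in its details:

import numpy as np

def label_unique_1d(vals, atol):
	# Toy version of utils.label_unique for 1-d data: a value closer
	# than atol to an earlier representative gets that representative's
	# label; otherwise it starts a new label.
	labels = np.full(len(vals), -1, int)
	reps   = []
	for i, v in enumerate(vals):
		for label, rep in enumerate(reps):
			if abs(v - rep) <= atol:
				labels[i] = label
				break
		else:
			labels[i] = len(reps)
			reps.append(v)
	return labels

# TOD ids whose ctimes differ by less than 10 s end up in one group
times = np.array([1500000000.0, 1500000003.0, 1500000400.0])
print(label_unique_1d(times, atol=10))  # -> [0 0 1]
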
Example #2
def classify_scanning_patterns(myscans, tol=0.5*utils.degree, comm=None):
	"""Classify scans into scanning patterns based on [az,el] bounds.
	Returns patterns[:,{from,to},{el,az}] and pids[len(myscans)], where
	pids contains the index of each myscan into patterns."""
	boxes = get_scan_bounds(myscans)
	if comm is not None:
		rank  = np.full(len(boxes), comm.rank)
		boxes = utils.allgatherv(boxes, comm)
		rank  = utils.allgatherv(rank,  comm)
	pids = utils.label_unique(boxes, axes=(1,2), atol=tol)
	npattern = np.max(pids)+1
	# For each scanning pattern, define a bounding box
	pboxes = np.array([utils.bounding_box(boxes[pids==pid]) for pid in range(npattern)])
	# Get the ids for the scans that we have
	if comm is not None:
		pids = pids[rank==comm.rank]
	return pboxes, pids
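
The structure here is: gather every rank's per-scan [az,el] bounding boxes, label boxes that agree within tol as the same pattern, then compute one enclosing box per pattern. Below is a self-contained sketch of the non-MPI core, with toy stand-ins for utils.label_unique (the 1-d version above extended to whole boxes) and utils.bounding_box; both are assumptions about what those utilities do:

import numpy as np

def label_boxes(boxes, atol):
	# Toy stand-in for utils.label_unique(boxes, axes=(1,2), atol=atol):
	# two boxes share a label if all their entries agree within atol
	labels = np.full(len(boxes), -1, int)
	reps   = []
	for i, box in enumerate(boxes):
		for label, rep in enumerate(reps):
			if np.all(np.abs(box - rep) <= atol):
				labels[i] = label
				break
		else:
			labels[i] = len(reps)
			reps.append(box)
	return labels

deg   = np.pi/180
# Three scans as [:,{from,to},{el,az}]: the first two share a pattern
boxes = np.array([[[50.0*deg, -10*deg], [50.0*deg, 10*deg]],
                  [[50.1*deg, -10*deg], [50.1*deg, 10*deg]],
                  [[60.0*deg, -10*deg], [60.0*deg, 10*deg]]])
pids     = label_boxes(boxes, atol=0.5*deg)
npattern = np.max(pids)+1
# One enclosing box per pattern, as utils.bounding_box presumably computes
pboxes   = np.array([[boxes[pids==pid][:,0].min(0), boxes[pids==pid][:,1].max(0)]
                     for pid in range(npattern)])
print(pids)  # -> [0 0 1]
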
Example #3
	except errors.DataMissing as e:
		L.debug("Skipped %s (%s)" % (ids[ind], str(e)))
		continue
	# Reorder from az,el to el,az
	boxes[ind] = [np.min(d.boresight[2:0:-1],1),np.max(d.boresight[2:0:-1],1)]
	L.info("%5d: %s" % (ind, id))
boxes = utils.allreduce(boxes, comm_world)

# Prune null boxes
usable = np.all(boxes!=0,(1,2))
ids, boxes = ids[usable], boxes[usable]

pattern_ids = utils.label_unique(boxes, axes=(1,2), atol=tol)
npattern = np.max(pattern_ids)+1
pboxes = np.array([utils.bounding_box(boxes[pattern_ids==pid]) for pid in range(npattern)])
pscans = [np.where(pattern_ids==pid)[0] for pid in range(npattern)]

L.info("Found %d scanning patterns" % npattern)

# Build the set of tasks we should go through. This effectively
# collapses these two loops, avoiding giving rank 0 much more
# to do than the last ranks.
tasks = []
for pid, group in enumerate(pscans):
	# Start at pattern p0
	if pid < args.p0: continue
	ngroup = (len(group)+tods_per_map-1)//tods_per_map
	for gind in range(ngroup):
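
The snippet is cut off mid-loop, but the comment states the intent: flatten the (pattern, group) double loop into a single tasks list so the work divides evenly over MPI ranks. A self-contained sketch of how such a list is built and consumed follows; the round-robin striping at the end is an assumption about how the original distributes the tasks:

import numpy as np

# Toy context: two patterns with 5 and 2 scans, 2 tods per output map
pscans       = [np.arange(5), np.arange(2)]
tods_per_map = 2
rank, nrank  = 0, 4  # stand-ins for comm.rank and comm.size

tasks = []
for pid, group in enumerate(pscans):
	ngroup = (len(group) + tods_per_map - 1) // tods_per_map  # ceil division
	for gind in range(ngroup):
		tasks.append((pid, gind))

# Stripe the flat list across ranks: rank r handles tasks[r::nrank]
for pid, gind in tasks[rank::nrank]:
	sub = pscans[pid][gind*tods_per_map:(gind+1)*tods_per_map]
	print("rank %d: pattern %d, group %d, tods %s" % (rank, pid, gind, sub))
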
Example #4
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("ofile")
parser.add_argument("--freqs", type=str, default="0,1,10,200")
parser.add_argument("--nbins", type=str, default="20,18,2")
parser.add_argument("--delay", type=int, default=0)
args = parser.parse_args()

filedb.init()
comm = mpi.COMM_WORLD
dtype = np.float32
delay = args.delay
# Group into ar1+ar2+... groups
ids = filedb.scans[args.sel]
times = np.array([float(id[:id.index(".")]) for id in ids])
labels = utils.label_unique(times, rtol=0, atol=10)
nlabel = np.max(labels) + 1
# We want to be consistent with how many tods are
# grouped together, so measure the largest group,
# and ignore any smaller group
nsub = np.max(np.bincount(labels))

# Define our bins
freqs = np.array([float(f) for f in args.freqs.split(",")])
nbins = np.array([int(n) for n in args.nbins.split(",")])
fbins = []
for i in range(len(nbins)):
    subfreqs = np.linspace(freqs[i], freqs[i + 1], nbins[i] + 1, endpoint=True)
    fbins.append(np.array([subfreqs[:-1], subfreqs[1:]]).T)
fbins = np.concatenate(fbins, 0)
nbin = len(fbins)
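
With the defaults above (--freqs 0,1,10,200 --nbins 20,18,2) this produces 20 bins between 0 and 1 (presumably Hz), 18 between 1 and 10, and 2 between 10 and 200: 40 bins in total, with the finest resolution at low frequency. The construction replayed in isolation:

import numpy as np

freqs = np.array([0.0, 1.0, 10.0, 200.0])
nbins = np.array([20, 18, 2])
fbins = []
for i in range(len(nbins)):
	subfreqs = np.linspace(freqs[i], freqs[i+1], nbins[i]+1, endpoint=True)
	fbins.append(np.array([subfreqs[:-1], subfreqs[1:]]).T)
fbins = np.concatenate(fbins, 0)
print(len(fbins))  # -> 40
print(fbins[0])    # -> [0.   0.05]  narrowest bin, at low frequency
print(fbins[-1])   # -> [105. 200.]  widest bin, at the top
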
Example #5
utils.mkdir(args.odir)
utils.mkdir(detdir)

# Determine which arrays we have. We can't process arrays independently,
# as they in principle have correlated noise. But we also want to distinguish
# between them
pre, _, anames = np.char.rpartition(ids,".").T
if args.mode == "crosslink":
	# Treat rising and setting as separate arrays
	rise = utils.rewind(db.data["baz"],0,360) > 0
	anames[rise]  = np.char.add(anames[rise], "r")
	anames[~rise] = np.char.add(anames[~rise],"s")
elif args.mode == "scanpat":
	# Treat each scanning pattern as a different array
	patterns = np.array([db.data["baz"],db.data["bel"],db.data["waz"]]).T
	pids     = utils.label_unique(patterns, axes=(1,), atol=1.0)
	npat     = np.max(pids)+1
	for pid in range(npat):
		anames[pids==pid] = np.char.add(anames[pids==pid], "p%d" % pid)

def ids2ctimes(ids): return np.char.partition(ids,".").T[0].astype(int)
def fix_aname(aname): return aname.replace("ar","pa").replace(":","_")
anames = np.array([fix_aname(aname) for aname in anames])
arrays, ais, nper = np.unique(anames, return_counts=True, return_inverse=True)
narray = len(arrays)
ctime  = ids2ctimes(pre)
sys.stderr.write("found arrays " + " ".join(arrays) + "\n")

# Get our block splitting parameters
toks = args.block.split(":")
block_mode = toks[0]
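
Both the crosslink and scanpat branches work by mangling the array names before the np.unique call, so that each (array, rising/setting) or (array, pattern) combination is treated as a separate array downstream. A toy run of the scanpat renaming with made-up names and labels (the dtype is widened so the suffix survives in-place assignment):

import numpy as np

anames = np.array(["pa1", "pa1", "pa2", "pa1"], dtype="U8")
pids   = np.array([0, 1, 0, 0])  # pretend utils.label_unique output
npat   = np.max(pids) + 1
for pid in range(npat):
	anames[pids==pid] = np.char.add(anames[pids==pid], "p%d" % pid)
print(anames)             # -> ['pa1p0' 'pa1p1' 'pa2p0' 'pa1p0']
print(np.unique(anames))  # -> ['pa1p0' 'pa1p1' 'pa2p0']
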
Example #6
					signal.post.append(mapmaking.PostAddMap(m, mul=-mul))
		elif param["name"] == "addphase" or param["name"] == "fitphase":
			if "map" not in param: raise ValueError("-F addphase/subphase/fitphase needs a phase dir to subtract. e.g. -F addphase:map=foo")
			mode, fname, mul, tmul = int(param["value"]), param["map"], float(param["mul"]), float(param["tmul"])
			tol = float(param["tol"])*utils.degree
			# Read the info file to see which scanning patterns were used in the phase dir
			phasemap = mapmaking.PhaseMap.read(fname)
			npat     = len(phasemap.patterns)
			# Find which phase map part each scan corresponds to. We get all the scan
			# boxes, and then add our existing scanning pattern boxes as references.
			# We can then just see which scans get grouped with which patterns.
			my_boxes = scanutils.get_scan_bounds(myscans)
			boxes = utils.allgatherv(my_boxes, comm)
			rank  = utils.allgatherv(np.full(len(my_boxes),comm.rank),      comm)
			boxes = np.concatenate([phasemap.patterns, boxes], 0)
			labels= utils.label_unique(boxes, axes=(1,2), atol=tol)
			if comm.rank == 0:
				print("labels")
				for b,l in zip(boxes, labels):
					print("%8.3f %8.3f %8.3f %5d" % (b[0,0]/utils.degree,b[0,1]/utils.degree,b[1,1]/utils.degree,l))
			pids  = utils.find(labels[:npat], labels[npat:])
			mypids= pids[rank==comm.rank]
			if param["name"] == "addphase":
				filter = mapmaking.FilterAddPhase(myscans, phasemap, mypids, mmul=mul, tmul=tmul)
			else:
				filter = mapmaking.FilterDeprojectPhase(myscans, phasemap, mypids, int(param["perdet"])>0, mmul=mul, tmul=tmul)
		elif param["name"] == "scale":
			value = float(param["value"])
			if value == 1: continue
			filter = mapmaking.FilterScale(value)
		elif param["name"] == "null":
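
The matching trick deserves spelling out: by prepending the npat known pattern boxes to the gathered scan boxes before labelling, any scan within tol of a pattern is guaranteed to receive that pattern's label, so assignment reduces to looking up each scan's label among the first npat labels. A toy stand-in for that utils.find step:

import numpy as np

npat   = 3                                # three reference patterns
labels = np.array([0, 1, 2, 1, 0, 0, 3])  # joint labels: patterns first, then scans
# Map each scan's label to the index of the matching pattern,
# with -1 where no reference pattern matched
lookup = {lab: i for i, lab in enumerate(labels[:npat])}
pids   = np.array([lookup.get(lab, -1) for lab in labels[npat:]])
print(pids)  # -> [ 1  0  0 -1]; the last scan matched no known pattern

As written in this example, utils.find without a default would have no answer for that last scan; the next example handles exactly that case.
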
Example #7
					signal.post.append(mapmaking.PostAddMap(m, mul=-mul))
		elif param["name"] == "addphase" or param["name"] == "fitphase":
			if "map" not in param: raise ValueError("-F addphase/subphase/fitphase needs a phase dir to subtract. e.g. -F addphase:map=foo")
			mode, fname, mul, tmul = int(param["value"]), param["map"], float(param["mul"]), float(param["tmul"])
			tol = float(param["tol"])*utils.degree
			# Read the info file to see which scanning patterns were used in the phase dir
			phasemap = mapmaking.PhaseMap.read(fname, rewind=True)
			npat     = len(phasemap.patterns)
			# Find which phase map part each scan corresponds to. We get all the scan
			# boxes, and then add our existing scanning pattern boxes as references.
			# We can then just see which scans get grouped with which patterns.
			my_boxes = scanutils.get_scan_bounds(myscans)
			boxes = utils.allgatherv(my_boxes, comm)
			rank  = utils.allgatherv(np.full(len(my_boxes),comm.rank),      comm)
			boxes = np.concatenate([phasemap.patterns, boxes], 0)
			labels= utils.label_unique(boxes, axes=(1,2), atol=tol)
			if comm.rank == 0:
				print("labels")
				for b,l in zip(boxes, labels):
					print("%8.3f %8.3f %8.3f %5d" % (b[0,0]/utils.degree,b[0,1]/utils.degree,b[1,1]/utils.degree,l))
			pids  = utils.find(labels[:npat], labels[npat:], default=-1)
			mypids= pids[rank==comm.rank]
			if np.any(mypids < 0):
				bad = np.where(mypids<0)[0]
				for bi in bad:
					print("Warning: No matching scanning pattern found for %s. Using pattern 0" % (myscans[bi].id))
					mypids[bi] = 0
			if param["name"] == "addphase":
				filter = mapmaking.FilterAddPhase(myscans, phasemap, mypids, mmul=mul, tmul=tmul)
			else:
				filter = mapmaking.FilterDeprojectPhase(myscans, phasemap, mypids, int(param["perdet"])>0, mmul=mul, tmul=tmul)
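
This variant differs from the previous example by passing default=-1 to utils.find and then patching unmatched scans to pattern 0 with a warning instead of failing. The guard in isolation:

import numpy as np

mypids = np.array([1, -1, 0, -1])  # -1: scan matched no reference pattern
bad = np.where(mypids < 0)[0]
for bi in bad:
	print("Warning: no matching scanning pattern for scan %d. Using pattern 0" % bi)
	mypids[bi] = 0
print(mypids)  # -> [1 0 0 0]
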
Example #8
optimize_subsets = (args.mode == "crosslink" or args.mode=="scanpat")
utils.mkdir(args.odir)

# Determine which arrays we have. We can't process arrays independently,
# as they in principle have correlated noise. But we also want to distinguish
# between them
pre, _, anames = np.char.rpartition(ids,".").T
if args.mode == "crosslink":
	# Treat rising and setting as separate arrays
	rise = utils.rewind(db.data["baz"],0,360) > 0
	anames[rise]  = np.char.add(anames[rise], "r")
	anames[~rise] = np.char.add(anames[~rise],"s")
elif args.mode == "scanpat":
	# Treat each scanning pattern as a different array
	patterns = np.array([db.data["baz"],db.data["bel"],db.data["waz"]]).T
	pids     = utils.label_unique(patterns, axes=(1,), atol=1.0)
	npat     = np.max(pids)+1
	for pid in range(npat):
		anames[pids==pid] = np.char.add(anames[pids==pid], "p%d" % pid)

def ids2ctimes(ids): return np.char.partition(ids,".").T[0].astype(int)
arrays, ais, nper = np.unique(anames, return_counts=True, return_inverse=True)
narray = len(arrays)
ctime  = ids2ctimes(pre)
sys.stderr.write("found arrays " + " ".join(arrays) + "\n")

# Get our block splitting parameters
toks = args.block.split(":")
block_mode = toks[0]
block_size = float(toks[1]) if len(toks) > 1 else 1
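
The --block argument is evidently a mode[:size] spec: a mode name, optionally followed by a numeric size defaulting to 1. The concrete mode names are not visible in this snippet, so "day" below is only a placeholder:

def parse_block(spec):
	# Parse a "mode[:size]" block spec, defaulting the size to 1
	toks = spec.split(":")
	return toks[0], float(toks[1]) if len(toks) > 1 else 1.0

print(parse_block("day"))    # -> ('day', 1.0)
print(parse_block("day:7"))  # -> ('day', 7.0)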