# Example 1
 def __call__(self, scan, tod):
     """Filter tod in place: remove a common mode per detector block.

     Detectors are grouped by equal values of scan.layout.pcb, and
     filter_common_blockwise is applied across each group."""
     det_groups = utils.find_equal_groups(scan.layout.pcb[scan.dets])
     todfilter.filter_common_blockwise(
         tod, det_groups, cuts=scan.cut, niter=self.niter, inplace=True)
# Example 2
 def __call__(self, scan, tod):
     """Filter tod in place with a blockwise phase filter.

     Detectors sharing the same scan.layout.pcb value form a block; each
     block is filtered against boresight coordinate 1 (presumably azimuth
     given the daz parameter — confirm against todfilter docs)."""
     det_groups = utils.find_equal_groups(scan.layout.pcb[scan.dets])
     coord = scan.boresight[:, 1]
     todfilter.filter_phase_blockwise(
         tod, det_groups, coord, daz=self.daz, cuts=scan.cut,
         niter=self.niter, inplace=True)
# Example 3
def get_groups(ids, mode):
	"""Group a list of tod ids according to mode.

	mode "tod":   every id is its own singleton group.
	mode "array": ids sharing the same prefix before ":" are grouped,
	              i.e. tods are grouped across arrays.
	Returns a list of groups, each a list of indices into ids.
	Raises ValueError for any other mode."""
	if   mode == "tod":
		# Each tod is its own group. The ids themselves are not needed here,
		# only their count. (Original shadowed the builtin `id` with an
		# unused loop variable.)
		return [[i] for i in range(len(ids))]
	elif mode == "array":
		# Group tods across arrays: the tod id is the part before the ":".
		# The array suffix (column 2 of partition) is not needed.
		ids  = np.asarray(ids)
		tids = np.char.partition(ids, ":")[:, 0]
		groups = utils.find_equal_groups(tids)
		return groups
	else:
		raise ValueError("Unknown group mode '%s'" % mode)
# Example 4
def read_buddies(fname, mode="auto"):
	"""Read a beam decomposition of the near-sidelobe "buddies".
	Each line should contain xi eta T Q U for one buddy, or
	det xi eta T Q U for the detector-dependent format. The result
	will be dets, [ndet][nbuddy,{xi,eta,T,Q,U}]. For the
	buddy-independent format, dets is None."""
	data = np.loadtxt(fname, ndmin=2)
	if data.size == 0:
		return None, data.reshape(-1, 5)
	# 5 columns means no leading detector column (unless mode forces it)
	uniform = mode == "uniform" or (mode == "auto" and data.shape[-1] == 5)
	if uniform:
		# detector-independent format: one shared buddy list
		return None, [data]
	# detector-dependent format: column 0 is the detector id
	groups = utils.find_equal_groups(data[:, 0])
	dets   = [int(data[g[0], 0]) for g in groups]
	buds   = np.array([data[g, 1:] for g in groups])
	return dets, buds
# Example 5
def read_buddies(fname, mode="auto"):
	"""Read a beam decomposition of the near-sidelobe "buddies".
	Each line should contain xi eta T Q U for one buddy, or
	det xi eta T Q U for the detector-dependent format. The result
	will be dets, [ndet][nbuddy,{xi,eta,T,Q,U}]. For the
	buddy-independent format, dets is None."""
	table = np.loadtxt(fname, ndmin=2)
	if table.size == 0:
		return None, table.reshape(-1, 5)
	if mode != "uniform" and not (mode == "auto" and table.shape[-1] == 5):
		# detector-dependent format: column 0 holds the detector id,
		# the remaining columns are {xi,eta,T,Q,U} per buddy
		groups  = utils.find_equal_groups(table[:, 0])
		det_ids = [int(table[g[0], 0]) for g in groups]
		per_det = np.array([table[g, 1:] for g in groups])
		return det_ids, per_det
	# detector-independent format: a single buddy list shared by all dets
	return None, [table]
# Example 6
def find_pairs_blind(det_pos, tol=0.2 * utils.arcmin):
    """Find detector pairs from positions alone: detectors whose positions
    agree within tol form a pair. Groups with any other multiplicity are
    discarded. Returns an (npair, 2) array of detector indices."""
    groups = utils.find_equal_groups(det_pos, tol=tol)
    # Only groups with exactly two members count as pairs
    return np.array([g for g in groups if len(g) == 2])
# Example 7
    # Parse the comma-separated --only argument into a list of ints (empty if unset)
    only = [int(word) for word in args.only.split(",")] if args.only else []

    # Should we use distributed maps?
    # Switch to distributed maps above ~5e7 pixels, presumably to bound
    # per-task memory — confirm threshold against map-maker docs
    npix = shape[-2] * shape[-1]
    use_dmap = npix > 5e7

    # Set up one log file per mpi task
    utils.mkdir(root + "log")
    logfile = root + "log/log%03d.txt" % comm.rank
    log_level = log.verbosity2level(config.get("verbosity"))
    L = log.init(level=log_level, file=logfile, rank=comm.rank)

    # Select the tods to process and chunk them in time: tods whose
    # mjd falls in the same bin of width args.dt (dt presumably in days,
    # given the mjd units — confirm) share a chunk; ids are sorted within
    # each chunk and chunks are ordered chronologically by first member.
    filedb.init()
    db = filedb.scans.select(args.sel)
    ids = db.ids
    mjd = utils.ctime2mjd(db.data["t"])
    chunks = utils.find_equal_groups(mjd // args.dt)
    chunks = [np.sort(chunk) for chunk in chunks]
    chunks = [chunks[i] for i in np.argsort([c[0] for c in chunks])]
    # Points on the map at which correlations will be evaluated
    corr_pos = planet9.choose_corr_points(shape, wcs,
                                          args.corr_spacing * utils.degree)

    # Optionally load parameters for injecting simulated signals
    if args.inject:
        inject_params = np.loadtxt(args.inject,
                                   ndmin=2)  # [:,{ra0,dec0,R,vx,vy,flux}]

    asteroids = planet9.get_asteroids(args.asteroid_file, args.asteroid_list)

    # How to parallelize? Could do it over chunks. Usually there will be more chunks than
    # mpi tasks. But there will still be many tods per chunk too (about 6 tods per hour
    # and 72 hours per chunk gives 432 tods per chunk). That's quite a bit for one mpi
    # task to do. Could paralellize over both... No, keep things simple. Parallelize over tods
# Example 8
def find_pairs_blind(det_pos, tol=0.2*utils.arcmin):
	"""Pair up detectors purely by position: positions equal within tol
	are grouped, and only groups of exactly two survive. Returns an
	(npair, 2) index array."""
	candidate_groups = utils.find_equal_groups(det_pos, tol=tol)
	pair_list = []
	for group in candidate_groups:
		# A pair must have exactly two members; singles and larger
		# clusters are dropped
		if len(group) == 2:
			pair_list.append(group)
	return np.array(pair_list)