Example 1
def get_datasets(config, sel=None):
    # Set up boolean arrays for queries
    all_tags = set()
    for dataset in config.datasets:
        all_tags |= dataset.tags
    flags = {
        flag: np.array([flag in dataset.tags for dataset in config.datasets],
                       bool)
        for flag in all_tags
    }
    # Extract the relevant datasets
    datasets = config.datasets
    if sel is not None:
        sel = "&".join(["(" + w + ")" for w in utils.split_outside(sel, ",")])
        selinds = np.where(eval(sel, flags))[0]
        datasets = [datasets[i] for i in selinds]
    # Make all paths relative to us instead of the config file
    cdir = os.path.dirname(config.path)
    for dataset in datasets:
        for split in dataset.splits:
            split.map = os.path.join(cdir, split.map)
            split.div = os.path.join(cdir, split.div)
    # Read the geometry from each dataset. In the tiled input format all of a
    # dataset's maps share the same geometry, so the first split suffices.
    for dataset in datasets:
        shape, wcs = read_geometry(dataset.splits[0].map)
        dataset.shape = shape
        dataset.wcs = wcs
        dataset.beam = setup_beam(dataset.beam_params)
    return datasets
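
A self-contained sketch of the tag-selection trick used above; the dataset
tags and the selection string are made up for illustration:

import numpy as np

# Each tag becomes a boolean mask over the datasets; the selection string
# is then evaluated directly against those masks.
tags_per_dataset = [{"deep56", "night"}, {"boss", "day"}, {"deep56", "day"}]
all_tags = set().union(*tags_per_dataset)
flags = {tag: np.array([tag in tags for tags in tags_per_dataset], bool)
         for tag in all_tags}
sel = "(deep56)&(day)"
print(np.where(eval(sel, flags))[0])  # -> [2]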
Example 2
def parse_desc(desc, default={}):
    res = default.copy()
    # Split into ,-separated tokens, keeping commas inside brackets intact
    for tok in utils.split_outside(desc, ",", "[({", "])}"):
        subtoks = tok.split("=", 1)  # split on the first "=" only, so values may contain "="
        if len(subtoks) == 1:
            res["value"] = subtoks[0]
        else:
            key, val = subtoks
            res[key] = val
    return res
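
utils.split_outside is not shown in these snippets. The minimal stand-in
below is an assumption about its behavior, inferred from how it is called:
it splits on the separator only at bracket depth zero, which is what
parse_desc relies on to keep bracketed values intact:

def split_outside(s, sep, opens="[({", closes="])}"):
    # Minimal stand-in for utils.split_outside (hypothetical): split on
    # `sep` only when we are not inside any of the given bracket pairs.
    toks, cur, depth = [], "", 0
    for ch in s:
        if ch in opens:    depth += 1
        elif ch in closes: depth -= 1
        if ch == sep and depth == 0:
            toks.append(cur)
            cur = ""
        else:
            cur += ch
    toks.append(cur)
    return toks

print(split_outside("gauss,fwhm=1.4,amp=[1,0,0]", ","))
# -> ['gauss', 'fwhm=1.4', 'amp=[1,0,0]']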
Example 3
    def query(self, query=None, apply_default_query=True):
        """Query the database. The query takes the form
        tag,tag,tag,...:sort[slice], where all tags must be satisfied for an id to
        be returned. More general syntax is also available. For example,
        (a+b>c)|foo&bar,cow. This follows standard python and numpy syntax,
        except that , is treated as a lower-priority version of &."""
        # Work on a copy of self.data so the query does not modify our own state
        data = self.data.copy()
        # First split off any sorting field or slice
        if query is None: query = ""
        toks = utils.split_outside(query, ":")
        query, rest = toks[0], ":".join(toks[1:])
        # Hack: Support id fields as tags, even if they contain
        # illegal characters.
        for id in data["id"]:
            if id not in query: continue
            query = re.sub(r"""(?<!['"])\b%s\b""" % re.escape(id),
                           "(id=='%s')" % id, query)
        # Split into ,-separated fields. Fields starting with a "+"
        # are taken to be tag markers, and are simply propagated to the
        # resulting ids.
        toks = utils.split_outside(query, ",")
        fields, subid = [], []
        override_ids = None
        for tok in toks:
            if len(tok) == 0: continue
            if tok.startswith("+"):
                # Tags starting with + will be interpreted as a subid specification
                subid.append(tok[1:])
            elif tok.startswith("/"):
                # Tags starting with / will be interpreted as special query flags
                if tok == "/all": apply_default_query = False
                else: raise ValueError("Unknown query flag '%s'" % tok)
            else:
                # Normal field. Perform a few convenience transformations first.
                if tok.startswith("@@"):
                    # Hack. *Force* the given ids to be returned, even if they aren't in the database.
                    override_ids = load_ids(tok[2:])
                    continue
                elif tok.startswith("@"):
                    # Restrict dataset to those in the given file
                    tok = "file_contains('%s',id)" % tok[1:]
                elif tok.startswith("~@"):
                    tok = "~file_contains('%s',id)" % tok[2:]
                fields.append(tok)
        if override_ids is not None:
            # Append subids to our ids, and return immediately. All other fields
            # and queries are ignored.
            subs = np.array(",".join(subid))
            subs = np.full(len(override_ids), subs, subs.dtype)
            return append_subs(override_ids, subs)
        # Apply our default queries here. These are things that we almost always
        # want in our queries, and that it's tedious to have to specify manually
        # each time. For example, this would be "selected" for act todinfo queries
        if apply_default_query:
            fields = fields + utils.split_outside(self.default_query, ",")
        subid = ",".join(subid)
        # Now evaluate our fields one by one. This is done so that
        # function fields can inspect the current state at that point
        for field in fields:
            scope = np.__dict__.copy()
            scope.update(data)
            for name, functor in self.functors.items():
                scope[name] = functor(data)
            with utils.nowarn():
                hits = eval(field, scope)
            # Restrict all fields to the result
            data = dslice(data, hits)
        # Split the rest into a sorting field and a slice
        toks = rest.split("[")
        if len(toks) == 1: sort, fsel, dsel = toks[0], "", ""
        elif len(toks) == 2: sort, fsel, dsel = toks[0], "", "[" + toks[1]
        else:
            sort, fsel, dsel = toks[0], "[" + toks[1], "[" + "[".join(toks[2:])
        if self.sort and not sort: sort = self.sort
        if sort:
            # Evaluate sorting field
            field = data[sort]
            field = eval("field" + fsel)
            data = dslice(data, np.argsort(field))
        # Finally apply the data slice
        inds = np.arange(len(data["id"]))
        inds = eval("inds" + dsel)
        data = dslice(data, inds)
        # Build our subid extensions and append them to ids
        subs = np.array([merge_subid(subid, sub) for sub in data["subids"]])
        ids = append_subs(data["id"], subs)
        return ids
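
The docstring above defines the query mini-language; the comment sketch
below illustrates the kinds of strings the parser accepts. The tag names,
ids and file names are hypothetical:

# db.query("deep56,night")               -> ids tagged both deep56 and night
# db.query("(pwv<2)|storm,ar2:t[:100]")  -> boolean expression, sorted by the
#                                           "t" field, keeping the first 100
# db.query("+planet9")                   -> propagate "planet9" as a subid tag
# db.query("@good_ids.txt")              -> keep only ids listed in the file
# db.query("@@forced_ids.txt")           -> force these ids, skip all filters
# db.query("/all")                       -> disable the default query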
Example 4
def autocut(d, turnaround=None, ground=None, sun=None, moon=None, max_frac=None, pickup=None):
	"""Apply automatic cuts to calibrated data."""
	if not config.get("autocut"): return d
	ndet, nsamp = d.ndet, d.nsamp
	if not ndet or not nsamp: return d
	# Insert a cut into d if necessary
	if "cut" not in d:
		d += dataset.DataField("cut", sampcut.empty(ndet,nsamp))
	# Insert an autocut datafield to keep track of how much data each
	# automatic cut costs us
	d += dataset.DataField("autocut", [])
	def addcut(label, dcut, targets="c"):
		# The ndet ratio allows broadcasting of cuts from 1-det to full-det
		dn = dcut.sum()*d.ndet/dcut.ndet if dcut is not None else 0
		if dn == 0: d.autocut.append([label,0,0])
		else:
			n0, dn = d.cut.sum(), dcut.sum()
			dn = dn*d.cut.ndet/dcut.ndet
			if "c" in targets: d.cut *= dcut
			if "n" in targets: d.cut_noiseest *= dcut
			if "b" in targets: d.cut_basic *= dcut
			d.autocut.append([ label, dn, d.cut.sum() - n0 ]) # name, mycut, myeffect
	if config.get("cut_tconst") and "tau" in d:
		addcut("tconst", cuts.tconst_cut(nsamp, d.tau, config.get("cut_tconst")))
	if config.get("cut_stationary") and "boresight" in d:
		addcut("stationary", cuts.stationary_cut(d.boresight[1]))
	if config.get("cut_tod_ends") and "srate" in d:
		addcut("tod_ends", cuts.tod_end_cut(nsamp, d.srate))
	if config.get("cut_turnaround", turnaround) and "boresight" in d:
		addcut("turnaround",cuts.turnaround_cut(d.boresight[1]))
	if config.get("cut_ground", ground) and "boresight" in d and "point_offset" in d:
		addcut("ground", cuts.ground_cut(d.boresight, d.point_offset))
	if config.get("cut_sun", sun) and "boresight" in d and "point_offset" in d and "site" in d:
		addcut("avoidance",cuts.avoidance_cut(d.boresight, d.point_offset, d.site, "Sun", config.get("cut_sun_dist")*np.pi/180))
	if config.get("cut_moon", moon) and "boresight" in d and "point_offset" in d and "site" in d:
		addcut("moon",cuts.avoidance_cut(d.boresight, d.point_offset, d.site, "Moon", config.get("cut_moon_dist")*np.pi/180))
	if config.get("cut_pickup", pickup) and "boresight" in d and "pickup_cut" in d:
		addcut("pickup",cuts.pickup_cut(d.boresight[1], d.dets, d.pickup_cut))
	if config.get("cut_mostly_cut"):
		addcut("mostly_cut", cuts.cut_mostly_cut_detectors(d.cut_quality))
	if config.get("cut_obj"):
		objs = utils.split_outside(config.get("cut_obj"),",")
		for obj in objs:
			toks = obj.split(":")
			objname = toks[0]
			if objname.startswith("["):
				objname = [float(w)*utils.degree for w in objname[1:-1].split(",")]
			dist    = 0.2*utils.degree
			if len(toks) > 1: dist = float(toks[1])*utils.degree
			# Hack: only cut for noise estimation purposes if dist is negative
			targets = "cnb" if dist > 0 else "n"
			addcut(obj, cuts.avoidance_cut(d.boresight, d.point_offset, d.site, objname, dist), targets=targets)
	if config.get("cut_srcs"):
		cpar  = [tok.split(":") for tok in config.get("cut_srcs").split(",")]
		names, lims = [], []
		for par in cpar:
			if par[0] in ["map","nmat"]:
				names.append(par[0])
				lims.append(float(par[1]))
		if any(lims):
			params = pointsrcs.src2param(d.pointsrcs)
			params[:,5:7] = 1
			params[:,7]   = 0
			# Only bother with sources that are actually strong enough
			maxlim = max(lims+[0])
			params = params[params[:,2]>maxlim]
			cutlist = cuts.point_source_cut(d, params, lims)
			for name, c in zip(names, cutlist):
				if   name == "map":  addcut("point_srcs_m", c, "c")
				elif name == "nmat": addcut("point_srcs_n", c, "n")
	if config.get("cut_extra_srcs"):
		srclist = np.loadtxt(config.get("cut_extra_srcs"), usecols=(0,1), ndmin=2)
		srclim  = float(config.get("cut_extra_lim"))
		params  = np.zeros([len(srclist),8])
		params[:,:2]  = srclist[:,1::-1]*utils.degree
		params[:,2]   = 1
		params[:,5:7] = 1
		c = cuts.point_source_cut(d, params, srclim)
		addcut("point_srcs", c, "nbc")

	# What fraction is cut?
	cut_fraction = float(d.cut.sum())/d.cut.size
	# Get rid of completely cut detectors
	keep = np.where(d.cut.sum(axis=1) < nsamp)[0]
	d.restrict(d.dets[keep])
	ndet, nsamp = d.ndet, d.nsamp

	def cut_all_if(label, condition):
		if condition: dcut = sampcut.full(d.ndet, nsamp)
		else: dcut = None
		addcut(label, dcut)
	cut_all_if("max_frac",   config.get("cut_max_frac", max_frac) < cut_fraction)
	if "srate" in d:
		cut_all_if("tod_mindur", config.get("cut_tod_mindur") > nsamp/d.srate/60)
	cut_all_if("tod_mindet", config.get("cut_tod_mindet") > ndet)
	# Get rid of completely cut detectors again
	keep = np.where(d.cut.sum(axis=1) < nsamp)[0]
	d.restrict(dets=d.dets[keep])

	return d
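
A usage sketch for the bookkeeping list built by addcut() above, assuming
`d` is a calibrated data set from the surrounding pipeline; the field layout
follows the d.autocut.append(...) calls in the function:

d = autocut(d)
for label, n_cut, n_net in d.autocut:
	# Each entry is [label, samples this cut removes, net change in total cut]
	print("%-12s cut %10d samples, net effect %10d" % (label, n_cut, n_net))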
Example 5
parser.add_argument("-a", "--apod-val", type=float, default=2e-1)
parser.add_argument("-A", "--apod-alpha", type=float, default=5)
parser.add_argument("-E", "--apod-edge", type=float, default=120)
parser.add_argument("--kxrad", type=float, default=20)
parser.add_argument("--kx-ymax-scale", type=float, default=1)
parser.add_argument("--highpass", type=float, default=200)
parser.add_argument("--cg-tol", type=float, default=1e-6)
parser.add_argument("--max-ps", type=float, default=0)
parser.add_argument("-F", "--filter", action="store_true")
parser.add_argument("-c", "--cont", action="store_true")
parser.add_argument("-v", "--verbose", action="store_true")
args = parser.parse_args()

enmap.extent_model.append("intermediate")

sel = "&".join(["(" + w + ")" for w in utils.split_outside(args.sel, ",")])
dtype = np.float32
comm = mpi.COMM_WORLD
utils.mkdir(args.odir)

# Set up configuration
config = imp.load_source("config", args.config)

# Set up boolean arrays for queries
all_tags = set()
for dataset in config.datasets:
    all_tags |= dataset.tags
flags = {
    flag: np.array([flag in dataset.tags for dataset in config.datasets], bool)
    for flag in all_tags
}
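
The sel normalization near the top of this example turns a comma-separated
list of clauses into a single AND of parenthesized terms. A self-contained
sketch with a made-up selection string:

parts = ["deep56", "night|day", "~pa3"]
sel = "&".join("(" + w + ")" for w in parts)
print(sel)  # -> (deep56)&(night|day)&(~pa3)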