Example #1
def gapfill_helper(tod, cut):
	method, context = config.get("gapfill"), config.get("gapfill_context")
	gapfiller = {
			"linear":gapfill.gapfill_linear,
			"joneig":gapfill.gapfill_joneig,
			}[method]
	gapfiller(tod, cut, inplace=True, overlap=context)
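Every example in this listing leans on the same idiom: config.get(name) returns the registered default for a parameter, while config.get(name, override) prefers the caller's override whenever it is not None. A minimal standalone sketch of that pattern (hypothetical helper names, not the actual enlib.config implementation):

_defaults = {}

def default(name, value, desc=""):
    # Register a default value (and description) for a configuration parameter.
    _defaults[name] = value

def get(name, override=None):
    # Return the caller's override if given, otherwise the registered default.
    return _defaults[name] if override is None else override

default("gapfill", "linear", "Gap-filling method: 'linear' or 'joneig'")
default("gapfill_context", 10, "Number of context samples used around each gap")

print(get("gapfill"))            # -> linear
print(get("gapfill", "joneig"))  # -> joneig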
Example #2
def build_interpol(transform, box, id="none", posunit=1.0, sys=None):
    sys = config.get("map_sys", sys)
    # We widen the bounding box slightly to avoid samples falling outside it
    # due to rounding errors.
    box = utils.widen_box(np.array(box), 1e-3)
    box[:, 1:] = utils.widen_box(box[:, 1:],
                                 config.get("pmat_interpol_pad") * utils.arcmin,
                                 relative=False)
    acc = config.get("pmat_accuracy")
    ip_size = config.get("pmat_interpol_max_size")
    ip_time = config.get("pmat_interpol_max_time")
    # Build pointing interpolator
    errlim = np.array(
        [1e-3 * posunit, 1e-3 * posunit, utils.arcmin, utils.arcmin]) * acc
    ipol, obox, ok, err = interpol.build(transform,
                                         interpol.ip_linear,
                                         box,
                                         errlim,
                                         maxsize=ip_size,
                                         maxtime=ip_time,
                                         return_obox=True,
                                         return_status=True)
    if not ok and np.any(err > errlim):
        print("Warning: Accuracy %g was specified, but only reached %g for tod %s" %
              (acc, np.max(err / errlim) * acc, id))
    return ipol, obox, err
Example #3
def detvecs_oof(fourier, srate, dets=None, type=None, nbin=None, nmin=None):
    nfreq = fourier.shape[1]
    ndet = fourier.shape[0]
    type = config.get("nmat_uncorr_type", type)
    nbin = config.get("nmat_uncorr_nbin", nbin)
    nmin = config.get("nmat_uncorr_nmin", nmin)
    fknee = config.get("nmat_oof_fknee")
    alpha = config.get("nmat_oof_alpha")

    if type == "exp":
        bins = utils.expbin(nfreq, nbin=nbin, nmin=nmin)
    elif type == "lin":
        bins = utils.linbin(nfreq, nbin=nbin, nmin=nmin)
    else:
        raise ValueError("No such power binning type '%s'" % type)
    nbin = bins.shape[0]  # expbin may not provide exactly what we want

    # Dummy detector correlation stuff, so we can reuse the sharedvecs stuff
    vecs = np.full([ndet, 0], 1)
    Nu = np.zeros([nbin, ndet])
    E = np.full([nbin, 0], 1e-10)
    V = [vecs]
    vinds = np.zeros(nbin, dtype=int)
    for bi, b in enumerate(bins):
        d = fourier[:, b[0]:b[1]]
        Nu[bi] = measure_power(d)
    # Replace Nu with functional form
    f = np.mean(bins, 1) * srate / 2 / nfreq
    Nu = np.mean(Nu**-1, 0)**-1 * (1 + (f / fknee)**alpha)[:, None]
    return prepare_sharedvecs(Nu, V, E, bins, srate, dets, vinds)
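The functional form substituted at the end is a standard 1/f-plus-white noise model: the mean white level is scaled by 1 + (f/fknee)**alpha, so with a negative alpha the excess grows towards low frequencies. A standalone numpy sketch with illustrative values (not the enlib defaults):

import numpy as np

def oof_model(f, nwhite, fknee, alpha):
    # White-noise floor times a 1/f-type excess: N(f) = nwhite * (1 + (f/fknee)**alpha)
    return nwhite * (1 + (f / fknee)**alpha)

f = np.array([0.01, 0.1, 1.0, 10.0, 100.0])  # Hz
print(oof_model(f, nwhite=1.0, fknee=1.0, alpha=-3.0))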
Example #4
def detvecs_scaled(ft,
                   srate,
                   dets=None,
                   bsize=None,
                   lim=None,
                   lim2=None,
                   spikecut=None):
    bsize = config.get("nmat_scaled_bsize", bsize)
    lim = config.get("nmat_scaled_spike", lim)
    lim2 = config.get("nmat_scaled_smooth", lim2)
    spikecut = config.get("nmat_scaled_spikecut", spikecut)
    # First set up our sample points
    mps = calc_mean_ps(ft)
    bins, bins_spike = build_spec_bins(mps, bsize=bsize, lim=lim, lim2=lim2)
    # Measure mean power per bin
    brms = np.zeros(ft.shape[:-1] + (len(bins), ), mps.dtype)
    for bi, b in enumerate(bins):
        brms[:, bi] = np.mean(np.abs(ft[:, b[0]:b[1]])**2)**0.5
        ft[:, b[0]:b[1]] /= brms[:, bi, None]
    # Build interior noise model
    if not spikecut: bins_spike = None
    nmat_inner = detvecs_jon(ft,
                             srate=srate,
                             dets=dets,
                             cut_bins=bins_spike,
                             cut_unit="inds")
    # Undo scaling of ft in case caller still needs it
    for bi, b in enumerate(bins):
        ft[:, b[0]:b[1]] *= brms[:, bi, None]
    return nmat.NmatScaled(brms, bins, nmat_inner)
Example #5
def gapfill(arr, ranges, inplace=False, overlap=None):
    gapfiller = {
        "linear": gapfill_linear,
        "joneig": gapfill_joneig,
    }[config.get("gapfill")]
    overlap = config.get("gapfill_context", overlap)
    return gapfiller(arr, ranges, inplace=inplace, overlap=overlap)
Example #6
File: pmat.py Project: Nium14/enlib
 def __init__(self,
              scan,
              template,
              beam_offs,
              beam_comps,
              sys=None,
              order=None):
     # beam_offs has format [nbeam,ndet,{dt,dra,ddec,}], which allows
     # each detector to have a separate beam. The dt part is pretty useless.
     # beam_comps has format [nbeam,ndet,{T,Q,U}].
     # Get the full box after taking all beam offsets into account
     ibox = np.array([
         np.min(scan.boresight, 0) + np.min(beam_offs, (0, 1)),
         np.max(scan.boresight, 0) + np.max(beam_offs, (0, 1))
     ])
     # Build our pointing interpolator
     sys = config.get("map_sys", sys)
     transform = pos2pix(scan, template, sys)
     ipol, obox, err = build_interpol(transform, ibox, id=scan.entry.id)
     self.rbox, self.nbox, self.yvals = extract_interpol_params(
         ipol, template.dtype)
     # And store our data
     self.beam_offs, self.beam_comps = beam_offs, beam_comps
     self.pixbox, self.nphi = build_pixbox(obox[:, :2], template)
     self.scan, self.dtype = scan, template.dtype
     self.order = config.get("pmat_map_order", order)
     self.err = err
Example #7
    def get_samples(self, verbose=False):
        """Return the actual detector samples. Slow! Data is read from disk and
		calibrated on the fly, so store the result if you need to reuse it."""
        # Because we've read the tod_shape field earlier, we know that reading tod
        # won't cause any additional truncation of the samples or detectors.
        # tags is only needed here for read_combo support, but that is mostly broken
        # anyway.
        t1 = time.time()
        self.d += actdata.read(self.entry,
                               fields=["tod", "tags"],
                               dets=self.d.dets)
        #if debug_inject is not None: self.d.tod += debug_inject
        t2 = time.time()
        if verbose: print("read  %-14s in %6.3f s" % ("tod", t2 - t1))
        if config.get("tod_skip_deconv"): ops = ["tod_real"]
        else: ops = ["tod"]
        actdata.calibrate(self.d, operations=ops, verbose=verbose)
        tod = self.d.tod
        # Remove tod from our local d, so we won't end up hauling it around forever
        del self.d.tod
        # HWP resample if needed
        if self.mapping is not None:
            tod = np.ascontiguousarray(
                utils.interpol(tod,
                               self.mapping.oimap[None],
                               order=1,
                               mask_nan=False))
        method = config.get("downsample_method")
        for s in self.sampslices:
            tod = scan.slice_tod_samps(tod, s, method=method)
        tod = np.ascontiguousarray(tod)
        return tod
Example #8
	def get_samples(self, verbose=False):
		"""Return the actual detector samples. Slow! Data is read from disk and
		calibrated on the fly, so store the result if you need to reuse it."""
		# Because we've read the tod_shape field earlier, we know that reading tod
		# won't cause any additional truncation of the samples or detectors.
		# tags is only needed here for read_combo support, but that is mostly broken
		# anyway.
		t1 = time.time()
		self.d += actdata.read(self.entry, fields=["tod", "tags"], dets=self.d.dets)
		t2 = time.time()
		if verbose: print "read  %-14s in %6.3f s" % ("tod", t2-t1)
		if config.get("tod_skip_deconv"): ops = ["tod_real"]
		else: ops = ["tod"]
		actdata.calibrate(self.d, operations=ops, verbose=verbose)
		tod = self.d.tod
		# Remove tod from our local d, so we won't end up hauling it around forever
		del self.d.tod
		# HWP resample if needed
		if self.mapping is not None:
			tod = np.ascontiguousarray(utils.interpol(tod, self.mapping.oimap[None], order=1, mask_nan=False))
		method = config.get("downsample_method")
		for s in self.sampslices:
			srange = slice(s.start, s.stop, np.sign(s.step) if s.step else None)
			tod = tod[:,srange]
			tod = resample.resample(tod, 1.0/np.abs(s.step or 1), method=method)
		tod = np.ascontiguousarray(tod)
		return tod
Example #9
def detvecs_jon(ft, srate, dets=None, shared=False, cut_bins=None, apodization=None, cut_unit="freq", verbose=False):
	"""Build a Detvecs noise matrix based on Jon's noise model.
	ft is the *normalized* fourier-transform of a TOD: ft = fft.rfft(d)/nsamp.
	srate is the sampling rate, dets is the list of detectors, shared specifies
	whether the Detvecs object should use the compressed "shared" layout or not,
	and cut_bins is a [nbin,{freq_from,freq_to}] array of frequencies
	to completely cut."""
	apodization = config.get("nmat_jon_apod", apodization) or None
	downweight  = config.get("nmat_jon_downweight")
	spike_suppression = config.get("nmat_spike_suppression")
	nfreq    = ft.shape[1]
	if cut_unit == "freq":
		cut_bins = freq2ind(cut_bins, srate, nfreq, rfun=np.round)
	# Construct our mode bins. Interestingly, we skip
	# the f < 0.25 Hz area.
	mbins = makebins([0.25, 4.0], srate, nfreq, 1000, rfun=np.round)[1:]
	amp_thresholds = config.get("nmat_jon_amp_threshold")
	amp_thresholds = extend_list([float(w) for w in amp_thresholds.split(",")], len(mbins))
	single_threshold = config.get("nmat_jon_single_threshold")
	# Ok, compute our modes, and then measure them in each bin.
	# When using apodization, the vecs are not necessarily orthogonal,
	# so don't rely on that.
	vecs, weights = find_modes_jon(ft, mbins, amp_thresholds, single_threshold, apodization=apodization, verbose=verbose)
	bin_edges = np.array([
			0.10, 0.25, 0.35, 0.45, 0.55, 0.70, 0.85, 1.00,
			1.20, 1.40, 1.70, 2.00, 2.40, 2.80, 3.40, 3.80,
			4.60, 5.00, 5.50, 6.00, 6.50, 7.00, 8.00, 9.00, 10.0, 11.0,
			12.0, 13.0, 14.0, 16.0, 18.0, 20.0, 22.0,
			24.0, 26.0, 28.0, 30.0, 32.0, 36.5, 41.0,
			45.0, 50.0, 55.0, 65.0, 70.0, 80.0, 90.0,
			100., 110., 120., 130., 140., 150., 160., 170.,
			180., 190.
		])
	# Cut bins that extend beyond our max frequency
	bin_edges = bin_edges[bin_edges < srate/2 * 0.99]
	bins = makebins(bin_edges, srate, nfreq, 2*vecs.shape[1], rfun=np.round)
	bins, iscut = add_cut_bins(bins, cut_bins)

	if downweight: white_scale = extend_list([1e-4, 0.25, 0.50, 1.00], len(bins))
	else: white_scale = [1]*len(bins)
	white_scale = np.asarray(white_scale)
	white_scale[iscut] *= spike_suppression

	if vecs.size == 0: raise errors.ModelError("Could not find any noise modes")
	# Sharedvecs supports different sets of vecs per bin. But we only
	# use a single group here. So every bin refers to the first group.
	V     = [vecs]
	vinds = np.zeros(len(bins),dtype=int)
	Nu, Nd, E = measure_detvecs_bin(ft, bins, vecs, mask=None, weights=weights)
	# Apply white noise scaling
	Nu /= white_scale[:,None]

	if shared:
		res = prepare_sharedvecs(Nu, V, E, bins, srate, dets, vinds)
	else:
		# Expand V so we have one block of vectors per bin
		V = [V[i] for i in vinds]
		res = prepare_detvecs(Nu, V, E, bins, srate, dets)
	return res
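makebins and freq2ind are not shown here, but their role follows from how they are called: frequencies in Hz are mapped to rfft bin indices via ind = f/(srate/2)*nfreq, and a list of edges becomes a [nbin,{from,to}] array of index ranges. A simplified standalone sketch of that assumed behavior (the real makebins also enforces a minimum bin size via its fourth argument, omitted here):

import numpy as np

def freq2ind_sketch(freqs, srate, nfreq, rfun=np.round):
    # Map frequencies in Hz to rfft bin indices; the nfreq rfft bins span [0, srate/2].
    return rfun(np.asarray(freqs) / (srate / 2.0) * nfreq).astype(int)

def makebins_sketch(edges_hz, srate, nfreq):
    # Turn bin edges in Hz into [nbin,{from,to}] index ranges covering the spectrum.
    inds = np.concatenate([[0], freq2ind_sketch(edges_hz, srate, nfreq), [nfreq]])
    inds = np.unique(np.clip(inds, 0, nfreq))
    return np.array([inds[:-1], inds[1:]]).T

print(makebins_sketch([0.25, 4.0], srate=400.0, nfreq=131072))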
Example #10
def parse_src_handling():
	res = {}
	res["mode"]   = config.get("src_handling")
	if res["mode"] == "none": return None
	res["amplim"] = config.get("src_handling_lim")
	res["srcs"]   = None
	srcfile = config.get("src_handling_list")
	if srcfile:
		res["srcs"] = pointsrcs.read(srcfile)
	return res
Example #11
	def __init__(self, data, model=None, window=None):
		model  = config.get("noise_model", model)
		window = config.get("tod_window", window)*data.srate
		nmat.apply_window(data.tod, window)
		self.nmat = nmat_measure.NmatBuildDelayed(model, cut=data.cut_noiseest, spikes=data.spikes[:2].T)
		self.nmat = self.nmat.update(data.tod, data.srate)
		nmat.apply_window(data.tod, window, inverse=True)
		self.model, self.window = model, window
		self.ivar = self.nmat.ivar
		self.cut  = data.cut
Example #12
    def __init__(self, scan, template, sys=None):
        sys = config.get("map_sys", sys)
        downsamp = config.get("pmat_moby_downsamp", 20)

        bore = scan.boresight.copy()
        bore[:, 0] += utils.mjd2ctime(scan.mjd0)
        opoint = get_moby_pointing(scan.entry,
                                   bore,
                                   scan.dets,
                                   downgrade=downsamp)
        # We will fit a polynomial to the pointing for each detector
        box = np.array([[np.min(a), np.max(a)] for a in scan.boresight.T]).T

        def scale(a, r):
            return 2 * (a - r[0]) / (r[1] - r[0]) - 1

        t = scale(scan.boresight[::downsamp, 0], box[:, 0])
        az = scale(scan.boresight[::downsamp, 1], box[:, 1])
        el = scale(scan.boresight[::downsamp, 2], box[:, 2])

        basis = np.array(
            [az**4, az**3, az**2, az**1, az**0, el**2, el, t**2, t, t * az]).T
        denom = basis.T.dot(basis)
        e, v = np.linalg.eigh(denom)
        if (np.min(e) < 1e-8 * np.max(e)):
            basis = np.concatenate([basis[:, :5], basis[:, 6:]], 1)
            denom = basis.T.dot(basis)
        # Convert from ra/dec to pixels, since we've confirmed that
        # our ra/dec is the same as ninkasi to 0.01".
        t1 = time.time()
        pix = template.sky2pix(opoint[1::-1], safe=True)
        t2 = time.time()
        #rafit  = np.linalg.solve(denom, basis.T.dot(opoint[0].T))
        #decfit = np.linalg.solve(denom, basis.T.dot(opoint[1].T))
        yfit = np.linalg.solve(denom, basis.T.dot(pix[0].T))
        xfit = np.linalg.solve(denom, basis.T.dot(pix[1].T))

        # Just use the same az and t as before for simplicity. The
        # coefficients will be different, but the result will be
        # the same.
        basis = np.array([az**0, az**1, az**2, az**3, t]).T
        denom = basis.T.dot(basis)

        cosfit = np.linalg.solve(denom, basis.T.dot(opoint[2].T))
        sinfit = np.linalg.solve(denom, basis.T.dot(opoint[3].T))

        # Parameters for pmat
        self.posfit = np.concatenate([yfit[:, :, None], xfit[:, :, None]], 2)
        self.polfit = np.concatenate([cosfit[:, :, None], sinfit[:, :, None]],
                                     2)
        self.box = box
        self.pixbox = np.array([[0, 0], template.shape[-2:]])
        self.scan = scan
        self.dtype = template.dtype
        self.core = get_core(self.dtype)
Example #13
def detvecs_simple(fourier,
                   srate,
                   dets=None,
                   type=None,
                   nbin=None,
                   nmin=None,
                   vecs=None,
                   eigs=None):
    nfreq = fourier.shape[1]
    ndet = fourier.shape[0]
    type = config.get("nmat_uncorr_type", type)
    nbin = config.get("nmat_uncorr_nbin", nbin)
    nmin = config.get("nmat_uncorr_nmin", nmin)
    whiten = config.get("nmat_uncorr_whiten")

    if type == "exp":
        bins = utils.expbin(nfreq, nbin=nbin, nmin=nmin)
    elif type == "lin":
        bins = utils.linbin(nfreq, nbin=nbin, nmin=nmin)
    else:
        raise ValueError("No such power binning type '%s'" % type)
    nbin = bins.shape[0]  # expbin may not provide exactly what we want

    if vecs is None: vecs = np.full([ndet, 0], 1)
    # Initialize our noise vectors with default values
    vecs = np.asarray(vecs)
    nvec = vecs.shape[-1]
    Nu = np.zeros([nbin, ndet])
    E = np.full([nbin, nvec], 1e-10)
    V = [vecs]
    vinds = np.zeros(nbin, dtype=int)
    for bi, b in enumerate(bins):
        d = fourier[:, b[0]:b[1]]
        if vecs.size > 0:
            # Measure amps when we have non-orthogonal vecs
            rhs = vecs.T.dot(d)
            div = vecs.T.dot(vecs)
            amps = np.linalg.solve(div, rhs)
            E[bi] = np.mean(np.abs(amps)**2, 1)
            # Project out modes for every frequency individually
            d -= vecs.dot(amps)
        Nu[bi] = measure_power(d)
    if whiten != 1:
        white = np.percentile(Nu, 25, 0)
        Nu = np.maximum((Nu - white) / whiten + white, white)
        #Nu    = (Nu/white)**whiten * white
    # Override eigenvalues if necessary. This is useful
    # for e.g. forcing total common mode subtraction.
    # eigs must be broadcastable to [nbin,ndet]
    if eigs is not None: E[:] = eigs
    #return prepare_detvecs(Nd, V, E, bins, srate, dets)
    return prepare_sharedvecs(Nu, V, E, bins, srate, dets, vinds)
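The per-bin loop above is an ordinary least-squares projection: with the mode vectors as columns of V, the best-fit amplitudes are a = (VᵀV)⁻¹Vᵀd, and the residual d − Va is what gets measured as uncorrelated power. A toy standalone check on synthetic data:

import numpy as np

rng = np.random.default_rng(0)
ndet, nsamp = 4, 10000
vecs = rng.standard_normal((ndet, 2))            # two non-orthogonal modes
d = vecs.dot(rng.standard_normal((2, nsamp)))    # correlated signal
d += 0.01 * rng.standard_normal((ndet, nsamp))   # plus white noise

rhs  = vecs.T.dot(d)                             # same solve as in the loop above
div  = vecs.T.dot(vecs)
amps = np.linalg.solve(div, rhs)
resid = d - vecs.dot(amps)
print(np.std(resid))  # far below the signal level: only white noise remains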
Example #14
File: pmat.py Project: jit9/enlib
 def __init__(self, scan, template, sys=None, order=None):
     sys = config.get("map_sys", sys)
     transform = pos2pix(scan, template, sys)
     ipol, obox = build_interpol(transform, scan.box, id=scan.entry.id)
     self.rbox, self.nbox, self.yvals = extract_interpol_params(
         ipol, template.dtype)
     # Use obox to extract a pixel bounding box for this scan.
     # These are the only pixels pmat needs to concern itself with.
     # Reducing the number of pixels makes us more memory efficient
     self.pixbox, self.nphi = build_pixbox(obox[:, :2], template)
     self.scan, self.dtype = scan, template.dtype
     self.core = get_core(self.dtype)
     self.order = config.get("pmat_map_order", order)
Example #15
def cut_mostly_cut_detectors(cuts, max_frac=None, max_nrange=None):
    """Mark detectors with too many cuts or too large cuts as completely cut."""
    max_frac = config.get("cut_mostly_cut_frac", max_frac)
    max_nrange = config.get("cut_mostly_cut_nrange", max_nrange)
    cut_samps = cuts.sum(axis=1)
    cut_nrange = cuts.nranges
    bad = (cut_samps > cuts.nsamp * max_frac)
    if max_nrange > 0:
        bad |= cut_nrange > max_nrange
    ocuts = []
    for b in bad:
        if b: ocuts.append(sampcut.full(1, cuts.nsamp))
        else: ocuts.append(sampcut.empty(1, cuts.nsamp))
    return sampcut.stack(ocuts)
Example #16
def ground_cut(bore, det_offs, az_ranges=None, el_ranges=None):
	az_ranges = np.array([[float(w) for w in tok.split(":")] for tok in config.get("cut_ground_az", az_ranges).split(",")])*np.pi/180
	el_ranges = np.array([[float(w) for w in tok.split(":")] for tok in config.get("cut_ground_el", el_ranges).split(",")])*np.pi/180
	n = bore.shape[1]
	cuts = []
	for di, doff in enumerate(det_offs):
		p = bore[1:]+doff[:,None]
		mask_az = np.full([n],False,dtype=bool)
		for ar in az_ranges:
			mask_az |= utils.between_angles(p[0], ar)
		mask_el = np.full([n],False,dtype=bool)
		for er in el_ranges:
			mask_el |= utils.between_angles(p[1], er)
		cuts.append(sampcut.from_mask(mask_az&mask_el))
	return sampcut.stack(cuts)
Example #17
def setup_filedb():
	"""Create a default filedb based on the root, dataset and filedb config
	variables. The result will be either a FormatDB or ExecDB based on the
	format of the filedb file."""
	override = config.get("file_override")
	if override == "none": override = None
	return execdb.ExecDB(cjoin(["root","dataset","filedb"]), cjoin(["root","filevars"]), override=override, root=cjoin(["root"]))
Example #18
 def __init__(self, scan, params=None):
     params = config.get("pmat_cut_type", params)
     # Extract the cut parameters. E.g. poly:foo_secs -> [4,foo_samps]
     par = np.array(self.parse_params(params, scan.srate))
     # Meaning of cuts array: [:,{dets,offset,length,out_length,type,args..}]
     self.cuts = np.zeros([scan.cut.nrange, 5 + len(par)], dtype=np.int32)
     # Detector each cut belongs to
     self.cuts[:, 0] = np.concatenate([
         np.full(nr, i, np.int32) for i, nr in enumerate(scan.cut.nranges)
     ])
     # Start of each cut
     self.cuts[:, 1] = scan.cut.ranges[:, 0]
     # Length of each cut
     self.cuts[:, 2] = scan.cut.ranges[:, 1] - scan.cut.ranges[:, 0]
     # Set up the parameter arguments
     self.cuts[:, 5:] = par[None, :]
     assert np.all(self.cuts[:, 2] > 0), "Empty cut range detected in %s" % scan.entry.id
     assert np.all(self.cuts[:, 1] >= 0) and np.all(scan.cut.ranges[:, 1] <= scan.nsamp), \
         "Out of bounds cut range detected in %s" % scan.entry.id
     if self.cuts.size > 0:
         get_core(np.float32).measure_cuts(self.cuts.T)
     self.cuts[:, 3] = utils.cumsum(self.cuts[:, 4])
     # njunk is the number of cut parameters for *this scan*
     self.njunk = np.sum(self.cuts[:, 4])
     self.params = params
     self.scan = scan
Example #19
    def __init__(self,
                 signal,
                 signal_cut,
                 scans,
                 weights,
                 noise=True,
                 hits=True):
        """Binned preconditioner: (P'W"P)", where W" is a white
		noise approximation of N". If noise=False, instead computes
		(P'P)". If hits=True, also computes a hitcount map."""
        ncomp = signal.area.shape[0]
        self.div = enmap.zeros((ncomp, ) + signal.area.shape, signal.area.wcs,
                               signal.area.dtype)
        calc_div_map(self.div, signal, signal_cut, scans, weights, noise=noise)
        self.idiv = array_ops.svdpow(self.div,
                                     -1,
                                     axes=[0, 1],
                                     lim=config.get("eig_limit"))
        #self.idiv[:] = np.eye(3)[:,:,None,None]
        if hits:
            # Build hitcount map too
            self.hits = signal.area.copy()
            self.hits = calc_hits_map(self.hits, signal, signal_cut, scans)
        else:
            self.hits = None
        self.signal = signal
Example #20
    def __init__(self,
                 signal,
                 signal_cut,
                 scans,
                 weights,
                 noise=True,
                 hits=True):
        """Binned preconditioner: (P'W"P)", where W" is a white
		noise approximation of N". If noise=False, instead computes
		(P'P)". If hits=True, also computes a hitcount map."""
        geom = signal.area.geometry.copy()
        geom.pre = (signal.area.shape[0], ) + geom.pre
        self.div = dmap.zeros(geom)
        calc_div_map(self.div, signal, signal_cut, scans, weights, noise=noise)
        self.idiv = self.div.copy()
        for dtile in self.idiv.tiles:
            dtile[:] = array_ops.svdpow(dtile,
                                        -1,
                                        axes=[0, 1],
                                        lim=config.get("eig_limit"))
        if hits:
            # Build hitcount map too
            self.hits = signal.area.copy()
            self.hits = calc_hits_map(self.hits, signal, signal_cut, scans)
        else:
            self.hits = None
        self.signal = signal
Example #21
def gapfill_linear(arr, ranges, inplace=False, overlap=None):
    """Returns arr with the ranges given by ranges, which can be [:,{from,to}] or
	a Rangelist, filled using linear interpolation."""
    ranges = Rangelist(ranges, len(arr), copy=False)
    overlap = config.get("gapfill_context", overlap)
    if not inplace: arr = np.array(arr)
    nr = len(ranges.ranges)
    n = ranges.n
    for i, (r1, r2) in enumerate(ranges.ranges):
        left = max(0 if i == 0 else ranges.ranges[i - 1, 1], r1 - overlap)
        right = min(n if i == nr - 1 else ranges.ranges[i + 1, 0],
                    r2 + overlap)
        # If the cut covers the whole array, fill with 0
        if r1 == 0 and r2 == len(arr):
            arr[r1:r2] = 0
        # If it goes all the way to one end, use the value from one side
        elif r1 == 0:
            arr[r1:r2] = np.mean(arr[r2:right])
        elif r2 == len(arr):
            arr[r1:r2] = np.mean(arr[left:r1])
        # Otherwise use linear interpolation
        else:
            arr[r1:r2] = np.linspace(np.mean(arr[left:r1]),
                                     np.mean(arr[r2:right]),
                                     r2 - r1 + 1,
                                     endpoint=False)[1:]
    return arr
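The interior branch is easy to reproduce: average `overlap` samples on each side of the gap and draw a straight line between the two means. A standalone toy version for a single gap (simplified; no Rangelist bookkeeping):

import numpy as np

def fill_gap_linear(arr, r1, r2, overlap=10):
    # Fill arr[r1:r2] with a line running from the mean of the `overlap`
    # samples before the gap to the mean of the `overlap` samples after it.
    left  = np.mean(arr[max(0, r1 - overlap):r1])
    right = np.mean(arr[r2:r2 + overlap])
    arr[r1:r2] = np.linspace(left, right, r2 - r1 + 1, endpoint=False)[1:]
    return arr

a = np.arange(20, dtype=float)
a[8:12] = 0                                  # simulate a cut
print(fill_gap_linear(a, 8, 12, overlap=3))  # gap is filled with a smooth ramp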
Example #22
 def __init__(self, scan, model=None, window=None, filter=None):
     model  = config.get("noise_model", model)
     window = config.get("tod_window", window)*scan.srate
     nmat.apply_window(scan.tod, window)
     self.nmat = nmat_measure.NmatBuildDelayed(model, cut=scan.cut_noiseest, spikes=scan.spikes)
     self.nmat = self.nmat.update(scan.tod, scan.srate)
     nmat.apply_window(scan.tod, window, inverse=True)
     self.model, self.window = model, window
     self.ivar = self.nmat.ivar
     self.cut  = scan.cut
     # Optional extra filter
     if filter:
         freq = fft.rfftfreq(scan.nsamp, 1/scan.srate)
         fknee, alpha = filter
         with utils.nowarn():
             self.filter = (1 + (freq/fknee)**-alpha)**-1
     else: self.filter = None
Example #23
def turnaround_cut(az, margin=None):
    margin = config.get("cut_turnaround_margin", margin) * utils.degree
    # Use percentile just in case there are some outliers (for example a scan
    # that's a bit higher than the others).
    az1 = np.percentile(az, 0.1)
    az2 = np.percentile(az, 99.9)
    mask = (az < az1) | (az > az2)
    cut = sampcut.from_mask(mask)
    return cut
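Note that margin is read from the config here but never used in this version: the cut is purely percentile-based, flagging everything outside the central 99.8% of the azimuth distribution. The masking step itself is simple to verify standalone:

import numpy as np

rng = np.random.default_rng(0)
t = np.arange(10000)
az = np.abs((t / 500.0) % 2 - 1) * 2 - 1   # triangle wave between -1 and 1
az += 1e-6 * rng.standard_normal(az.size)  # real pointing never repeats exactly
az1, az2 = np.percentile(az, [0.1, 99.9])
mask = (az < az1) | (az > az2)
print(mask.sum())  # ~20 samples flagged, all at the sweep turnarounds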
Example #24
def stationary_cut(az, tol=None):
    """Cut samples where the telescope isn't moving at the beginning
	and end of the tod."""
    tol = config.get("cut_stationary_tol", tol) * utils.degree
    b1 = np.where(np.abs(az - az[0]) > tol)[0]
    b2 = np.where(np.abs(az - az[-1]) > tol)[0]
    if len(b1) == 0 or len(b2) == 0:
        # Entire tod cut!
        return sampcut.full(1, len(az))
    else:
        return sampcut.from_list([[[0, b1[0]], [b2[-1], len(az)]]], len(az))
Example #25
def crop_fftlen(data, factors=None):
	"""Slightly crop samples in order to make ffts faster. This should
	be called at a point when the length won't be further cropped by other
	effects."""
	if data.nsamp in [0, None]: raise errors.DataMissing("nsamp")
	if data.nsamp < 0: raise errors.DataMissing("nsamp")
	factors = config.get("fft_factors", factors)
	if isinstance(factors, basestring): factors = [int(w) for w in factors.split(",")]
	ncrop = fft.fft_len(data.nsamp, factors=factors)
	data += dataset.DataField("fftlen", samples=[data.samples[0],data.samples[0]+ncrop])
	return data
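fft.fft_len is used here to find a slightly shorter sample count whose prime factors are restricted to the given list, so later FFTs stay fast. A standalone sketch of that assumed behavior:

def fft_len_sketch(n, factors=(2, 3, 5, 7)):
    # Largest m <= n whose prime factorization only uses the given factors.
    def smooth(m):
        for f in factors:
            while m % f == 0:
                m //= f
        return m == 1
    while n > 1 and not smooth(n):
        n -= 1
    return n

print(fft_len_sketch(1000003))  # -> 1000000 = 2**6 * 5**6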
Example #26
def setup_params(category, predefined, defparams):
    """Set up parameters of a given category. With the following
	convention.
	 1. If a name matches a default's name, unspecified properties use the default's
	 2. A name can be specified multiple times, e.g. -S sky:foo -S sky:bar. These
	    do not override each other - each will be used separately.
	 3. If a name that has a default is not specified manually, then a single
	    instance of that name is instantiated, with the default parameters."""
    params = []
    argdict = vars(args)
    overrides = argdict[category]
    counts = {}
    if overrides:
        for oval in overrides:
            m = re.match(r'([^,:]+):(.*)', oval)
            if m:
                name, rest = m.groups()
                desc = config.get("%s_%s_default" %
                                  (category, name)) + ",use=yes," + rest
            elif "," not in oval:
                desc = config.get("%s_%s_default" %
                                  (category, oval)) + ",use=yes"
            else:
                desc = "use=yes," + oval
            param = parse_desc(desc, default=defparams)
            name = param["name"]
            if name in counts: counts[name] += 1
            else: counts[name] = 1
            param["i"] = counts[name]
            params.append(param)
    # For each predefined param, add it only if none of that name already exist
    defaults = []
    for p in predefined:
        if not p in counts:
            defaults.append(
                parse_desc(config.get("%s_%s_default" % (category, p))))
            defaults[-1]["i"] = 0
    params = defaults + params
    # Kill irrelevant parameters (those not in use)
    params = [p for p in params if p["use"] != "no"]
    return params
Example #27
File: scan.py Project: Nium14/enlib
	def get_samples(self):
		"""Return the actual detector samples. Slow! Data is read from disk,
		so store the result if you need to reuse it."""
		with h5py.File(self.fname, "r") as hfile:
			tod = hfile["tod"].value[self.subdets]
		method = config.get("downsample_method")
		for s in self.sampslices:
			tod = resample.resample(tod, 1.0/np.abs(s.step or 1), method=method)
			s = slice(s.start, s.stop, np.sign(s.step) if s.step else None)
			tod = tod[:,s]
		res = np.ascontiguousarray(tod)
		return res
Example #28
    def get_samples(self, verbose=False):
        """Return the actual detector samples. Slow! Data is read from disk and
		calibrated on the fly, so store the result if you need to reuse it."""
        # Because we've read the tod_shape field earlier, we know that reading tod
        # won't cause any additional truncation of the samples or detectors.
        # tags is only needed here for read_combo support, but that is mostly broken
        # anyway.
        t1 = time.time()
        self.d += actdata.read(self.entry,
                               fields=["tod", "tags"],
                               dets=self.d.dets)
        t2 = time.time()
        if verbose: print("read  %-14s in %6.3f s" % ("tod", t2 - t1))
        if config.get("tod_skip_deconv"): ops = ["tod_real"]
        else: ops = ["tod"]
        actdata.calibrate(self.d, operations=ops, verbose=verbose)
        tod = self.d.tod
        # Remove tod from our local d, so we won't end up hauling it around forever
        del self.d.tod
        # HWP resample if needed
        if self.mapping is not None:
            tod = np.ascontiguousarray(
                utils.interpol(tod,
                               self.mapping.oimap[None],
                               order=1,
                               mask_nan=False))
        method = config.get("downsample_method")
        for s in self.sampslices:
            srange = slice(s.start, s.stop,
                           np.sign(s.step) if s.step else None)
            tod = tod[:, srange]
            # make sure we get exactly the same length the cuts will be expecting
            step = np.abs(s.step or 1)
            olen = (tod.shape[1] + step - 1) // step
            tod = resample.resample(tod,
                                    float(olen) / tod.shape[1],
                                    method=method)
        tod = np.ascontiguousarray(tod)
        return tod
Example #29
def detvecs_simple(fourier, srate, dets=None, type=None, nbin=None, nmin=None, vecs=None, eigs=None):
	nfreq = fourier.shape[1]
	ndet  = fourier.shape[0]
	type  = config.get("nmat_uncorr_type", type)
	nbin  = config.get("nmat_uncorr_nbin", nbin)
	nmin  = config.get("nmat_uncorr_nmin", nmin)

	if type == "exp":
		bins = utils.expbin(nfreq, nbin=nbin, nmin=nmin)
	elif type == "lin":
		bins = utils.linbin(nfreq, nbin=nbin, nmin=nmin)
	else: raise ValueError("No such power binning type '%s'" % type)
	nbin  = bins.shape[0] # expbin may not provide exactly what we want

	if vecs is None: vecs = np.full([ndet,0],1)
	# Initialize our noise vectors with default values
	vecs = np.asarray(vecs)
	nvec = vecs.shape[-1]
	Nu    = np.zeros([nbin,ndet])
	E     = np.full([nbin,nvec],1e-10)
	V     = [vecs]
	vinds = np.zeros(nbin,dtype=int)
	for bi, b in enumerate(bins):
		d     = fourier[:,b[0]:b[1]]
		if vecs.size > 0:
			# Measure amps when we have non-orthogonal vecs
			rhs  = vecs.T.dot(d)
			div  = vecs.T.dot(vecs)
			amps = np.linalg.solve(div,rhs)
			E[bi] = np.mean(np.abs(amps)**2,1)
			# Project out modes for every frequency individually
			d -= vecs.dot(amps)
		Nu[bi] = measure_power(d)
	# Override eigenvalues if necessary. This is useful
	# for e.g. forcing total common mode subtraction.
	# eigs must be broadcastable to [nbin,ndet]
	if eigs is not None: E[:] = eigs
	#return prepare_detvecs(Nd, V, E, bins, srate, dets)
	return prepare_sharedvecs(Nu, V, E, bins, srate, dets, vinds)
Example #30
def read_hwp(entry):
	dummy = dataset.DataSet([
		dataset.DataField("hwp", 0),
		dataset.DataField("hwp_id", "none"),
		dataset.DataField("hwp_source", "none")])
	epochs = try_read(files.read_hwp_epochs, "hwp_epochs", entry.hwp_epochs)
	t, _, ar = entry.id.split(".")
	t = float(t)
	if ar not in epochs: return dummy
	for epoch in epochs[ar]:
		if t >= epoch[0] and t < epoch[1]:
			# Ok, the HWP was active during this period. Try to read it. It can be in
			# several different formats.
			if   entry.hwp_format == "tod":
				# HWP angles in the tod, in the new, high-quality format
				hwp, flags = try_read(files.read_hwp_angle, "hwp_tod_angles", entry.tod)
				return dataset.DataSet([
					dataset.DataField("hwp", hwp, samples=[0, hwp.size], sample_index=0),
					dataset.DataField("hwp_id", epoch[2]),
					dataset.DataField("hwp_source", "tod")])
			elif entry.hwp_format == "raw":
				# HWP angles in the tod, in the old, inaccurate format
				hwp = try_read(files.read_hwp_raw, "hwp_raw_angles", entry.tod)
				return dataset.DataSet([
					dataset.DataField("hwp", hwp, samples=[0, hwp.size], sample_index=0),
					dataset.DataField("hwp_id", epoch[2]),
					dataset.DataField("hwp_source", "raw")])
			elif entry.hwp_format == "external":
				# HWP angles in external data files
				try:
					status = try_read(files.read_hwp_status, "hwp_status", entry.hwp_status)
				except errors.DataMissing as e:
					status = None
				# If there weren't any external files, possibly fall back to raw angles
				if status is None or get_dict_wild(status, entry.id, 0) != 1:
					if config.get("hwp_fallback") == "raw":
						hwp = try_read(files.read_hwp_raw, "hwp_raw_angles", entry.tod)
						return dataset.DataSet([
							dataset.DataField("hwp", hwp, samples=[0, hwp.size], sample_index=0),
							dataset.DataField("hwp_id", epoch[2]),
							dataset.DataField("hwp_source", "raw")])
					else:
						raise e if status is None else errors.DataMissing("Missing HWP angles!")
				# Try to read the angles themselves
				hwp = try_read(files.read_hwp_cleaned, "hwp_angles", entry.hwp)
				return dataset.DataSet([
					dataset.DataField("hwp", hwp, samples=[0,hwp.size], sample_index=0),
					dataset.DataField("hwp_id", epoch[2]),
					dataset.DataField("hwp_source","cleaned")])
	# Not in any epoch, so return 0 hwp angle (which effectively turns it off)
	return dummy
Example #31
 def __init__(self, scan, template, sys=None, order=None):
     self.sys = config.get("map_sys", sys)
     # Build the pointing interpolator
     self.trans = pos2pix(scan, template, self.sys)
     self.poly = PolyInterpol(self.trans, scan.boresight, scan.offsets)
     # Build the pixel shift information. This assumes ces-like scans in equ-like systems
     self.sdir = get_scan_dir(scan.boresight[:, 1])
     self.period = get_scan_period(scan.boresight[:, 1], scan.srate)
     self.wbox, self.wshift = build_work_shift(self.trans, scan.box,
                                               self.period)
     self.nphi = int(np.round(np.abs(360. / template.wcs.wcs.cdelt[0])))
     self.dtype = template.dtype
     self.core = get_core(self.dtype)
     self.scan = scan
     self.order = 0
Example #32
File: pmat.py Project: Nium14/enlib
    def __call__(self, ipos):
        """Transform ipos[{t,az,el},nsamp] into opix[{y,x,c,s},nsamp]."""
        shape = ipos.shape[1:]
        ipos = ipos.reshape(ipos.shape[0], -1)
        time = self.scan.mjd0 + ipos[0] / utils.day2sec
        # We support sidelobe mapping by passing the detector pointing "ipos" as the "boresight"
        # pointing, which is only used in boresight-relative coordinates or sidelobe mapping.
        # This actually results in a better coordinate system than if we had passed in the actual
        # boresight, since we don't really want boresight-centered coordinates, we want detector
        # centered coordinates.
        opos = coordinates.transform(self.scan.sys,
                                     self.sys,
                                     ipos[1:],
                                     time=time,
                                     site=self.scan.site,
                                     pol=True,
                                     bore=ipos[1:])
        # Parallax correction
        sundist = config.get("pmat_parallax_au")
        if sundist:
            # Transform to a sun-centered coordinate system, assuming all objects
            # are at a distance of sundist from the sun
            opos[1::-1] = parallax.earth2sun(opos[1::-1], self.scan.mjd0,
                                             sundist)

        opix = np.zeros((4, ) + ipos.shape[1:])
        if self.template is not None:
            opix[:2] = self.template.sky2pix(opos[1::-1], safe=2)
            # When mapping the full sky, angle wraps can't be hidden
            # outside the image. We must therefore unwind along each
            # interpolation axis to avoid discontinuous interpolation
            nx = int(np.round(np.abs(360 / self.template.wcs.wcs.cdelt[0])))
            opix[1] = utils.unwind(opix[1].reshape(shape),
                                   period=nx,
                                   axes=range(len(shape))).reshape(-1)
            # Prefer positive numbers
            opix[1] -= np.floor(opix[1].reshape(-1)[0] / nx) * nx
            # but not if they put everything outside our patch
            if np.min(opix[1]) > self.template.shape[-1]:
                opix[1] -= nx
        else:
            # If we have no template, output angles instead of pixels.
            # Make sure the angles don't have any jumps in them
            opix[:2] = opos[1::-1]  # output order is dec,ra
            opix[1] = utils.rewind(opix[1], self.ref_phi)
        opix[2] = np.cos(2 * opos[2])
        opix[3] = np.sin(2 * opos[2])
        return opix.reshape((opix.shape[0], ) + shape)
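utils.rewind and utils.unwind do the angle bookkeeping the comments refer to: rewind maps values into a one-period window around a reference, and unwind removes period-sized jumps between consecutive samples. Minimal standalone equivalents (assumed semantics, 1-D only):

import numpy as np

def rewind_sketch(a, ref=0, period=360):
    # Map a into the window [ref - period/2, ref + period/2).
    return ref + (a - ref + period / 2) % period - period / 2

def unwind_sketch(a, period=360):
    # Remove jumps larger than half a period between consecutive samples.
    steps = rewind_sketch(np.diff(a), 0, period)
    return np.concatenate([a[:1], a[:1] + np.cumsum(steps)])

print(rewind_sketch(np.array([359.0, 1.0])))              # -> [-1.  1.]
print(unwind_sketch(np.array([358.0, 359.0, 0.0, 1.0])))  # -> [358. 359. 360. 361.]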
Example #33
parser.add_argument("-n", "--nstep", type=int, default=50)
parser.add_argument("-m", "--method",type=str, default="messenger")
parser.add_argument(      "--ndet",  type=int, default=None)
parser.add_argument("-p", "--precompute", action="store_true")
parser.add_argument("-o", "--ostep", type=int, default=10)
args = parser.parse_args()

utils.mkdir(args.odir)
comm   = mpi.COMM_WORLD
dtype  = np.float32
ncomp  = 3
area   = enmap.read_map(args.area)
area   = enmap.zeros((ncomp,)+area.shape[-2:],area.wcs,dtype)
Tscale = 0.9
nstep  = args.nstep
downsample = config.get("downsample")

filedb.init()
ids   = filedb.scans[args.sel]
# Was 1e7
cooldown = sum([[10**j]*5 for j in range(6,0,-1)],[])+[1]

# Read my scans
njunk_tot = 0
cg_rhs    = area*0
cg_rjunk  = []
if args.precompute:
	prec_NNmap  = {lam: area*0 for lam in np.unique(cooldown)}
	prec_NNjunk = {lam: [] for lam in np.unique(cooldown)}
scans = []
for ind in range(comm.rank, len(ids), comm.size):
Example #34
parser.add_argument("--ntest", type=int, default=1000)
parser.add_argument("--interpolator", type=str, default="grad")
args = parser.parse_args()

# Hardcode an arbitrary site
site = bunch.Bunch(
	lat  = -22.9585,
	lon  = -67.7876,
	alt  = 5188.,
	T    = 273.15,
	P    = 550.,
	hum  = 0.2,
	freq = 150.,
	lapse= 0.0065)

acc = config.get("pmat_accuracy")
max_size = config.get("pmat_interpol_max_size")
max_time = config.get("pmat_interpol_max_time")

nel = int(np.rint((args.el2-args.el1)/args.delta_el))+1
naz = int(np.rint((args.az2-args.az1)/args.daz))+1

def hor2cel(hor):
	shape = hor.shape[1:]
	hor = hor.reshape(hor.shape[0],-1)
	tmp = coordinates.transform("hor", "cel", hor[1:], time=hor[0]+t_mid, site=site, pol=True)
	res = np.zeros((4,)+tmp.shape[1:])
	res[0] = utils.rewind(tmp[0], tmp[0,0])
	res[1] = tmp[1]
	res[2] = np.cos(2*tmp[2])
	res[3] = np.sin(2*tmp[2])
Example #35
config.default(
    "map_sys", "equ",
    "The coordinate system of the maps. Can be eg. 'hor', 'equ' or 'gal'.")
config.default("map_dist", False, "Whether to use distributed maps")

parser = config.ArgumentParser()
parser.add_argument("sel", help="TOD selction query")
parser.add_argument("area", help="Geometry to map")
parser.add_argument("odir", help="Output directory")
parser.add_argument("prefix", nargs="?", help="Output file name prefix")
parser.add_argument("--dets", type=str, default=0, help="Detector slice")
args = parser.parse_args()

utils.mkdir(args.odir)
comm = mpi.COMM_WORLD
dtype = np.float32 if config.get("map_bits") == 32 else np.float64
ncomp = 3
tsize = 720
root = args.odir + "/" + (args.prefix + "_" if args.prefix else "")
down = config.get("downsample")
# Set up logging
utils.mkdir(root + ".log")
logfile = root + ".log/log%03d.txt" % comm.rank
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, file=logfile, rank=comm.rank, shared=True)
# Set up our geometry
shape, wcs = enmap.read_map_geometry(args.area)
shape = (ncomp, ) + shape[-2:]
msys = config.get("map_sys")
dist = config.get("map_dist")
# Filter parameters
Example #36
from enact import actscan, nmat_measure, filedb, todinfo

config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("srcs")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("--nmax", type=int, default=10)
parser.add_argument("-s", "--src", type=int, default=None, help="Only analyze given source")
parser.add_argument("-c", "--cont", action="store_true")
args = parser.parse_args()

dtype = np.float32 if config.get("map_bits") == 32 else np.float64
comm  = mpi.COMM_WORLD
tcomm = mpi.COMM_SELF
nmax  = args.nmax
ncomp = 3
isys  = "hor"

utils.mkdir(args.odir)
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, rank=comm.rank, shared=False)

# Get the source positions
srcs = np.loadtxt(args.srcs).T

# Read our area. Should be centered on 0,0
area = enmap.read_map(args.area)
Example #37
	fields = ["gain","tconst","cut","tod","boresight"]
	if args.fields: fields = args.fields.split(",")
	d = actdata.read(entry, fields=fields)
	if absdets: d.restrict(dets=absdets)
	if subdets: d.restrict(dets=d.dets[subdets])
	if args.calib: d = actdata.calibrate(d, exclude=["autocut"])
	elif args.manual_calib:
		ops = args.manual_calib.split(",")
		if "safe" in ops: d.boresight[1:] = utils.unwind(d.boresight[1:], period=360)
		if "rad" in ops: d.boresight[1:] *= np.pi/180
		if "bgap" in ops:
			bad = (d.flags!=0)*(d.flags!=0x10)
			for b in d.boresight: gapfill.gapfill_linear(b, bad, inplace=True)
		if "gain" in ops: d.tod *= d.gain[:,None]
		if "tgap" in ops: 
			gapfiller = {"copy":gapfill.gapfill_copy, "linear":gapfill.gapfill_linear}[config.get("gapfill")]
			gapfiller(d.tod, d.cut, inplace=True)
		if "slope" in ops:
			utils.deslope(d.tod, w=8, inplace=True)
		if "deconv" in ops:
			d = actdata.calibrate(d, operations=["tod_fourier"])
	if args.bin > 1:
		d.tod = resample.downsample_bin(d.tod, steps=[args.bin])
		d.boresight = resample.downsample_bin(d.boresight, steps=[args.bin])
		d.flags = resample.downsample_bin(d.flags, steps=[args.bin])
	oname = args.ofile
	if len(ids) > 1: oname = "%s/%s.hdf" % (args.ofile, id)
	with h5py.File(oname, "w") as hfile:
		if "tod" in d: hfile["tod"] = d.tod
		if "boresight" in d:
			hfile["az"]  = d.boresight[1]
Example #38
def calibrate_cut(data, n=None):
	n = [int(w) for w in config.get("pad_cuts", n).split(":")]
	for name in ["cut","cut_basic","cut_noiseest","cut_quality"]:
		if name in data:
			data[name] = data[name].widen(n)
	return data
Example #39
import numpy as np, argparse, os, sys
from enlib import enmap, pmat, config
from enact import filedb, data
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("id")
parser.add_argument("area")
parser.add_argument("--di", type=int, default=0, help="Index into array of accepted detectors to use.")
args = parser.parse_args()
dtype = np.float64

eqsys = config.get("map_eqsys")

area  = enmap.read_map(args.area).astype(dtype)
area  = enmap.zeros((3,)+area.shape[-2:], area.wcs, dtype)
entry = filedb.data[args.id]

# First get the raw samples
d        = data.read(entry, subdets=[args.di])
raw_tod  = d.tod[0,d.sample_offset:d.cutafter].copy()
raw_bore = d.boresight[:,d.sample_offset:d.cutafter].T
# Then some calibrated samples
d        = data.calibrate(d)
cal_tod  = d.tod[0]
cal_bore = d.boresight.T
# Apply fourier-truncation to raw data
raw_tod = raw_tod[:cal_tod.shape[0]]
raw_bore = raw_bore[:cal_bore.shape[0]]

# And a proper ACTScan
scan = data.ACTScan(entry, subdets=[args.di])
# Detector pointing
Example #40
config.default("filedb", "filedb.txt", "File describing the location of the TOD and their metadata")
config.default("map_bits", 32, "Bit-depth to use for maps and TOD")
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("filelist")
parser.add_argument("srcs")
parser.add_argument("odir")
parser.add_argument("--ncomp",      type=int,   default=3)
parser.add_argument("--ndet",       type=int,   default=0)
parser.add_argument("--minamp",     type=float, default=100)
parser.add_argument("-c",           action="store_true")
parser.add_argument("--oldformat",  action="store_true")
args = parser.parse_args()

dtype = np.float32 if config.get("map_bits") == 32 else np.float64
comm  = mpi.COMM_WORLD
myid  = comm.rank
nproc = comm.size

filedb.init()
db = filedb.data
filelist = todinfo.get_tods(args.filelist, filedb.scans)

def compress_beam(sigma, phi):
	c,s=np.cos(phi),np.sin(phi)
	R = np.array([[c,-s],[s,c]])
	C = np.diag(sigma**-2)
	C = R.dot(C).dot(R.T)
	return np.array([C[0,0],C[1,1],C[0,1]])
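compress_beam packs what is presumably an elliptical-Gaussian beam's inverse covariance into its three independent entries: C = R diag(sigma**-2) R.T is a symmetric 2x2 matrix, so C[0,0], C[1,1] and C[0,1] describe it completely. A quick standalone check:

import numpy as np

def compress_beam(sigma, phi):
    c, s = np.cos(phi), np.sin(phi)
    R = np.array([[c, -s], [s, c]])
    C = R.dot(np.diag(sigma**-2)).dot(R.T)  # rotate the principal-axis widths
    return np.array([C[0, 0], C[1, 1], C[0, 1]])

# Three numbers fully describe the symmetric 2x2 matrix
print(compress_beam(np.array([1.0, 2.0]), np.pi / 4))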
Example #41
parser.add_argument("--daz", type=float, default=3.0)
parser.add_argument("--nt", type=int, default=10)
parser.add_argument("--dets", type=str, default=0)
parser.add_argument("--ntod", type=int, default=0)
parser.add_argument("-w", "--weighted", type=int, default=1)
parser.add_argument("-D", "--deslope", type=int, default=0)
args = parser.parse_args()

comm = mpi.COMM_WORLD
filedb.init()

ids = [line.split()[0] for line in open(args.idlist, "r")]
if args.ntod: ids = ids[:args.ntod]

is_dmap = os.path.isdir(args.imap)
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, rank=comm.rank)
tshape = (720, 720)

# Read in all our scans
L.info("Reading %d scans" % len(ids))
myinds = np.arange(len(ids))[comm.rank::comm.size]
myinds, myscans = scanutils.read_scans(ids,
                                       myinds,
                                       actscan.ACTScan,
                                       filedb.data,
                                       dets=args.dets,
                                       downsample=config.get("downsample"))
myinds = np.array(myinds, int)

# Collect scan info. This currently fails if any task has empty myinds
Example #42
import numpy as np, os, h5py
from enlib import config, mpi, coordinates, utils, errors, tagdb
from enact import filedb, actdata, todinfo
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("tagfile")
parser.add_argument("sel", nargs="?", default="")
parser.add_argument("ofile")
args = parser.parse_args()

file_db = filedb.setup_filedb()
scan_db = todinfo.read(args.tagfile, vars={"root":config.get("root")})
comm    = mpi.COMM_WORLD

# We want to process *all* tods, not just selected ones. Could also
# achieve this by adding /all to the selector.
scan_db = scan_db.select(scan_db.query(args.sel, apply_default_query=False))
ids     = scan_db.ids

stats = []
for ind in range(comm.rank, len(ids), comm.size):
	id    = ids[ind]
	entry = file_db[id]
	try:
		stats.append(todinfo.build_tod_stats(entry))
	except (errors.DataMissing,AttributeError) as e:
		print "%3d %4d/%d %5.1f%% Skipping %s (%s)" % (comm.rank, ind+1, len(ids), (ind+1)/float(len(ids))*100, id, str(e))
		continue
	print "%3d %4d/%d %5.1f%% %s" % (comm.rank, ind+1, len(ids), (ind+1)/float(len(ids))*100, id)
stats = todinfo.merge_tod_stats(stats)

if comm.rank == 0: print "Reducing"
Example No. 52
parser.add_argument("-q", "--quiet",   action="count", default=0)
parser.add_argument("-c", "--cont",    action="store_true")
args = parser.parse_args()

comm = mpi.COMM_WORLD
filedb.init()
ids  = filedb.scans[args.sel]
R    = args.dist * utils.degree
csize= 100
verbose = args.verbose - args.quiet > 0

dtype= np.float32
model_fknee = 10
model_alpha = 10
sys = "hor:"+args.planet+"/0_0"
tod_sys = config.get("tod_sys")
utils.mkdir(args.odir)
prefix = args.odir + "/"

show = bench.show if verbose else bench.dummy

def estimate_ivar(tod):
	"""Estimate per-detector inverse white-noise variance from the mean square
	of adjacent-sample differences, using a median over csize-sample blocks
	for robustness against glitches and drifts."""
	tod -= np.mean(tod,1)[:,None]
	tod  = tod.astype(dtype)
	diff = tod[:,1:]-tod[:,:-1]
	# Truncate to a whole number of csize blocks before reshaping
	diff = diff[:,:diff.shape[-1]/csize*csize].reshape(tod.shape[0],-1,csize)
	ivar = 1/(np.median(np.mean(diff**2,-1),-1)/2**0.5)
	return ivar
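
A quick numeric check of estimate_ivar (a sketch, assuming only numpy plus the csize and dtype defined above): for white noise of standard deviation sigma, E[diff**2] = 2*sigma**2, so with the 2**0.5 normalization used here the function should return roughly 1/(2**0.5*sigma**2) for every detector.

np.random.seed(0)
sigma = 2.0
sim   = sigma*np.random.standard_normal((4, 10000))
print("measured %g, expected %g" % (np.mean(estimate_ivar(sim)), 1/(2**0.5*sigma**2)))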

def estimate_atmosphere(tod, region_cut, srate, fknee, alpha):
	model = gapfill.gapfill_joneig(tod, region_cut, inplace=False)
Example No. 53
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("filelist")
parser.add_argument("srcs")
parser.add_argument("odir")
parser.add_argument("-R", "--radius", type=float, default=5.0)
parser.add_argument("-r", "--resolution", type=float, default=0.25)
args = parser.parse_args()

comm  = mpi4py.MPI.COMM_WORLD
myid  = comm.rank
nproc = comm.size

log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, rank=myid)

# Allow filelist to take the format filename:[slice], e.g. mylist.txt:[0:100]
toks = args.filelist.split(":")
filelist, fslice = toks[0], ":".join(toks[1:])
filelist = [line.split()[0] for line in open(filelist,"r") if line[0] != "#"]
filelist = eval("filelist"+fslice)

utils.mkdir(args.odir)
srcs = np.loadtxt(args.srcs).T

# Create minimaps around each source
nsrc  = srcs.shape[1]
ncomp = 1
n = int(np.round(2*args.radius/args.resolution))
Example No. 54
def tod_end_cut(nsamp, srate, cut_secs=None):
	"""Cut cut_secs seconds of data at each end of the tod"""
	ncut = int(config.get("cut_tod_ends_nsec",cut_secs)*srate)
	return sampcut.from_list([[[0,ncut],[nsamp-ncut,nsamp]]], nsamp)
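
For concreteness, a worked instance of the arithmetic above (a sketch; the numbers are illustrative, and sampcut.from_list is the same call the function itself uses):

nsamp, srate, cut_secs = 1000, 400.0, 0.5
ncut = int(cut_secs*srate)                        # 200 samples cut per end
cut  = sampcut.from_list([[[0,ncut],[nsamp-ncut,nsamp]]], nsamp)  # cuts [0,200) and [800,1000)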
Example No. 55
parser.add_argument("-S", "--signal",    action="append", help="Signals to solve for. For example -S sky:area.fits -S scan would solve for the sky map and scan pickup maps jointly, using area.fits as the map template.")
parser.add_argument("-F", "--filter",    action="append")
parser.add_argument("-M", "--mapfilter", action="append")
parser.add_argument("--group-tods", action="store_true")
parser.add_argument("--individual", action="store_true")
parser.add_argument("--group",      type=int, default=0)
parser.add_argument("--tod-debug",  action="store_true")
parser.add_argument("--prepost",    action="store_true")
parser.add_argument("--define-planets", type=str, default=None)
args = parser.parse_args()

if args.dump_config:
	print config.to_str()
	sys.exit(0)

dtype = np.float32 if config.get("map_bits") == 32 else np.float64
comm  = mpi.COMM_WORLD
nmax  = config.get("map_cg_nmax")
ext   = config.get("map_format")
tshape= (720,720)
#tshape= (100,100)
resume= config.get("resume")

if args.define_planets:
	for pdesc in args.define_planets.split(","):
		name, elemfile = pdesc.split(":")
		ephemeris.register_object(name, ephemeris.read_object(elemfile))

def parse_src_handling():
	res = {}
	res["mode"]   = config.get("src_handling")
Example No. 56
config.default("verbosity", 1, "Verbosity for output. Higher means more verbose. 0 outputs only errors etc. 1 outputs INFO-level and 2 outputs DEBUG-level messages.")
config.default("map_format", "fits", "File format to use when writing maps. Can be 'fits', 'fits.gz' or 'hdf'.")
config.default("tod_window", 5.0, "Number of samples to window the tod by on each end")

parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("sel")
parser.add_argument("area")
parser.add_argument("odir")
parser.add_argument("prefix",nargs="?")
parser.add_argument("--ndet",       type=int, default=0,  help="Max number of detectors")
args = parser.parse_args()
filedb.init()

utils.mkdir(args.odir)
root      = args.odir + "/" + (args.prefix + "_" if args.prefix else "")
log_level = log.verbosity2level(config.get("verbosity"))
dtype     = np.float32 if config.get("map_bits") == 32 else np.float64
area      = enmap.read_map(args.area)
comm      = mpi.COMM_WORLD
ids       = filedb.scans[args.sel]
L = log.init(level=log_level, rank=comm.rank)

# Set up our output map.
osig = enmap.zeros((1,)+area.shape[-2:], area.wcs, dtype)
odiv = osig*0
sig_all = np.zeros(len(ids))
sig_med = sig_all*0
div_all, div_med = sig_all*0, sig_med*0

# Read in all our scans
for ind in range(comm.rank, len(ids), comm.size):
Example No. 57
    def __init__(self, scan, srcs, sys=None, tmul=None, pmul=None):
        # We support srcs of shape [nsrc,nparam], [nsrc,ndir,nparam] or
        # [nsrc,ndir,ndet,nparam], where ndir is either 1 or 2 depending on
        # whether one wants to separate the two scanning directions.
        srcs = np.array(srcs)
        while srcs.ndim < 4:
            srcs = srcs[:, None]
        # srcs is [nsrc,ndir,ndet_or_1,{dec,ra,T,Q,U,ibeams}]
        sys = config.get("map_sys", sys)
        cres = config.get("pmat_ptsrc_cell_res") * utils.arcmin
        nsrc, ndir, src_ndet = srcs.shape[:3]
        self.scan = scan
        maxcell = 50  # max number of sources per cell

        # Compute parallax displacement if necessary
        sundist = config.get("pmat_parallax_au")
        self.dpos = 0
        if sundist:
            # Transformation to a sun-centered system
            self.dpos = parallax.earth2sun(srcs.T[:2],
                                           self.scan.mjd0,
                                           sundist,
                                           diff=True).T
        srcs[..., :2] += self.dpos

        # Investigate the beam to find the max relevant radius
        sigma_lim = config.get("pmat_ptsrc_rsigma")
        value_lim = np.exp(-0.5 * sigma_lim**2)
        rmax = np.where(scan.beam[1] >= value_lim)[0][-1] * scan.beam[0, 1]
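        # rmax is thus the radius where the unit-peak beam profile falls below
        # the value of a 1D Gaussian at sigma_lim sigmas; rmul below rescales it
        # by the largest per-source beam scale returned by utils.expand_beam.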
        rmul = max([
            utils.expand_beam(src[-3:])[0][0]
            for src in srcs.reshape(-1, srcs.shape[-1])
        ])
        rmax *= rmul

        # Build interpolator (dec,ra output ordering)
        transform = build_pos_transform(scan, sys=config.get("map_sys", sys))
        ipol, obox, err = build_interpol(transform,
                                         scan.box,
                                         scan.entry.id,
                                         posunit=0.5 * utils.arcsec)
        self.rbox, self.nbox, self.yvals = extract_interpol_params(
            ipol, srcs.dtype)

        self.cbox = obox[:, :2]
        self.ref = np.mean(self.cbox, 0)
        self.ncell, self.cells = pointsrcs.build_src_cells(self.cbox,
                                                           srcs[..., :2],
                                                           cres,
                                                           unwind=True)

        ## Build source hit grid
        #cbox    = obox[:,:2]
        #cshape  = tuple(np.ceil(((cbox[1]-cbox[0])/cres)).astype(int))
        #self.ref = np.mean(cbox,0)
        #srcs[...,:2] = utils.rewind(srcs[...,:2], self.ref)

        ## A cell is hit if it overlaps both horizontally and vertically
        ## with the point source +- rmax
        ## ncell is [ndir,nsrc_det,ncy,ncx]
        #ncell = np.zeros((ndir,src_ndet)+cshape,dtype=np.int32)
        ## cells is [ndir,nsrc_det,ncy,ncx,maxcell]
        #cells = np.zeros((ndir,src_ndet)+cshape+(maxcell,),dtype=np.int32)
        #c0 = cbox[0]; inv_dc = cshape/(cbox[1]-cbox[0])
        #for si in range(nsrc):
        #	for sdir in range(ndir):
        #		for sdi in range(src_ndet):
        #			src = srcs[si,sdir,sdi]
        #			i1 = (src[:2]-rmax-c0)*inv_dc
        #			i2 = (src[:2]+rmax-c0)*inv_dc+1 # +1 because this is a half-open interval
        #			# Truncate to edges - any source outside of our region
        #			# will be put on one of the edge cells
        #			i1 = np.maximum(i1.astype(int), 0)
        #			i2 = np.minimum(i2.astype(int), np.array(cshape)-1)
        #			#print si, sdir, i1, i2, cshape
        #			if np.any(i1 >= cshape) or np.any(i2 < 0): continue
        #			sel= (sdir,sdi,slice(i1[0],i2[0]),slice(i1[1],i2[1]))
        #			cells[sel][:,:,ncell[sel]] = si
        #			ncell[sel] += 1

        #self.cells, self.ncell = cells, ncell
        #print self.cells.shape, self.ncell.shape

        self.rmax = rmax
        self.tmul = 1 if tmul is None else tmul
        self.pmul = 1 if pmul is None else pmul
        self.err = err
Example No. 58
	def __init__(self, entry, subdets=None, d=None, verbose=False, dark=False):
		self.fields = ["gain","mce_filter","tags","polangle","tconst","hwp","cut","point_offsets","boresight","site","tod_shape","array_info","beam","pointsrcs", "buddies"]
		if dark: self.fields += ["dark"]
		if config.get("noise_model") == "file":
			self.fields += ["noise"]
		else:
			if config.get("cut_noise_whiteness"):
				self.fields += ["noise_cut"]
			if config.get("cut_spikes"):
				self.fields += ["spikes"]
		if d is None:
			d = actdata.read(entry, self.fields, verbose=verbose)
			d = actdata.calibrate(d, verbose=verbose)
			if subdets is not None:
				d.restrict(dets=d.dets[subdets])
		if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("No data in scan")
		ndet = d.ndet
		# Necessary components for Scan interface
		self.mjd0      = utils.ctime2mjd(d.boresight[0,0])
		self.boresight = np.ascontiguousarray(d.boresight.T.copy()) # [nsamp,{t,az,el}]
		self.boresight[:,0] -= self.boresight[0,0]
		self.offsets   = np.zeros([ndet,self.boresight.shape[1]])
		self.offsets[:,1:] = d.point_offset
		self.cut       = d.cut.copy()
		self.cut_noiseest = d.cut_noiseest.copy()
		self.comps     = np.zeros([ndet,4])
		self.beam      = d.beam
		self.pointsrcs = d.pointsrcs
		self.comps     = d.det_comps
		self.hwp = d.hwp
		self.hwp_phase = d.hwp_phase
		self.dets  = d.dets
		self.dgrid = (d.array_info.nrow, d.array_info.ncol)
		self.array_info = d.array_info
		self.sys = config.get("tod_sys")
		self.site = d.site
		self.speed = d.speed
		if "noise" in d:
			self.noise = d.noise
		else:
			spikes = d.spikes[:2].T if "spikes" in d else None
			self.noise = nmat_measure.NmatBuildDelayed(model = config.get("noise_model"), spikes=spikes,
					cut=self.cut_noiseest)
		if "dark_tod" in d:
			self.dark_tod = d.dark_tod
		if "dark_cut" in d:
			self.dark_cut = d.dark_cut
		if "buddy_comps" in d:
			# Expand buddy_offs to {dt,daz,ddec}
			self.buddy_comps = d.buddy_comps
			self.buddy_offs  = np.concatenate([d.buddy_offs[...,:1]*0,d.buddy_offs],-1)
		self.autocut = d.autocut if "autocut" in d else []
		# Implementation details. d is our DataSet, which we keep around
		# because we need it to read the tod consistently later. It will *not*
		# take part in any sample slicing operations, as that might make the
		# delayed tod read inconsistent with the rest. It could take part in
		# detector slicing as long as calibrate_tod operates on each detector
		# independently. This is true now, but would not be so if we did stuff
		# like common mode subtraction there. On the other hand, not doing this
		# would prevent slicing before reading from giving any speedup or memory
		# savings. I don't think allowing this should be a serious problem.
		self.d = d
		self.entry = entry
		def fmt_id(entry):
			if isinstance(entry, list): return "+".join([fmt_id(e) for e in entry])
			else:
				if entry.tag: return entry.id + ":" + entry.tag
				else: return entry.id
		self.id = fmt_id(entry)
		self.sampslices = []
		self.mapping = None

		# FIXME: debug test
		if config.get("dummy_cut") > 0:
			nmax  = int(config.get("dummy_cut_len"))
			# Power law between 1 and nmax, with slope -1.
			# C(w) = log(w)/log(nmax)
			# P(w) = w**-1/log(nmax)
			# w(C) = nmax**C
			# Mean: (nmax-1)/log(nmax)  (see the sanity check after this example)
			nmean = (nmax-1)/np.log(nmax)
			ncut = int(self.nsamp * config.get("dummy_cut") / nmean)
			cut_ranges = np.zeros([self.ndet, ncut, 2],int)
			w = (nmax**np.random.uniform(0, 1, size=[self.ndet, ncut])).astype(int)
			w = np.clip(w, 1, nmax)  # np.clip is not in-place; keep the result
			cut_ranges[:,:,0] = np.random.uniform(0, self.nsamp, size=[self.ndet, ncut]).astype(int)
			cut_ranges[:,:,0] = np.sort(cut_ranges[:,:,0],1)
			cut_ranges[:,:,1] = cut_ranges[:,:,0] + w
			cut_ranges[:,:,1] = np.clip(cut_ranges[:,:,1], 0, self.nsamp)
			cut_dummy = sampcut.from_list(cut_ranges, self.nsamp)
			print np.mean(w), nmean, nmax, ncut
			print "cut fraction before", float(self.cut.sum())/self.cut.size
			self.cut *= cut_dummy
			print "cut fraction after", float(self.cut.sum())/self.cut.size
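
A sanity check of the power-law sampling above (a sketch, assuming only numpy): w = nmax**uniform(0,1) is inverse-CDF sampling of P(w) ~ 1/w on [1,nmax], whose mean is (nmax-1)/log(nmax).

nmax = 100
w = nmax**np.random.uniform(0, 1, size=100000)
print("empirical mean %g, expected %g" % (w.mean(), (nmax-1)/np.log(nmax)))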
Example No. 59
parser.add_argument("--p0", type=int, default=0)
parser.add_argument("-g,", "--group", type=int, default=1)
parser.add_argument("--dedark",     action="store_true")
parser.add_argument("--demode",     action="store_true")
parser.add_argument("--decommon",   action="store_true")
parser.add_argument("-c", "--cont", action="store_true")
args = parser.parse_args()
filedb.init()

comm_world = mpi.COMM_WORLD
comm_group = comm_world.Split(comm_world.rank%args.nsub, comm_world.rank/args.nsub)
comm_sub   = comm_world.Split(comm_world.rank/args.nsub, comm_world.rank%args.nsub)
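# Together these splits form a process grid: comm_sub groups consecutive blocks
# of nsub ranks (e.g. nsub=2 with 6 ranks gives {0,1},{2,3},{4,5}), while
# comm_group connects corresponding members across the blocks ({0,2,4},{1,3,5}).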
ids  = todinfo.get_tods(args.sel, filedb.scans)
tol  = args.tol*utils.arcmin
daz  = args.daz*utils.arcmin
dtype = np.float32 if config.get("map_bits") == 32 else np.float64
tods_per_map = args.group

utils.mkdir(args.odir)
root = args.odir + "/" + (args.prefix + "_" if args.prefix else "")
# Set up logging
utils.mkdir(root + "log")
logfile   = root + "log/log%03d.txt" % comm_world.rank
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, file=logfile, rank=comm_world.rank, shared=False)

# Run through all tods to determine the scanning patterns
L.info("Detecting scanning patterns")
boxes = np.zeros([len(ids),2,2])
for ind in range(comm_world.rank, len(ids), comm_world.size):
	id    = ids[ind]
Example No. 60
def autocut(d, turnaround=None, ground=None, sun=None, moon=None, max_frac=None, pickup=None):
	"""Apply automatic cuts to calibrated data."""
	if not config.get("autocut"): return d
	ndet, nsamp = d.ndet, d.nsamp
	if not ndet or not nsamp: return d
	# Insert a cut into d if necessary
	if "cut" not in d:
		d += dataset.DataField("cut", sampcut.empty(ndet,nsamp))
	# Insert an autocut datafield, to keep track of how much data each
	# automatic cut costs us
	d += dataset.DataField("autocut", [])
	def addcut(label, dcut, targets="c"):
		# The ndet ratio here broadcasts cuts measured on one detector to the full detector set
		dn = dcut.sum()*d.ndet/dcut.ndet if dcut is not None else 0
		if dn == 0: d.autocut.append([label,0,0])
		else:
			n0, dn = d.cut.sum(), dcut.sum()
			dn = dn*d.cut.ndet/dcut.ndet
			if "c" in targets: d.cut *= dcut
			if "n" in targets: d.cut_noiseest *= dcut
			if "b" in targets: d.cut_basic *= dcut
			d.autocut.append([ label, dn, d.cut.sum() - n0 ]) # name, mycut, myeffect
	if config.get("cut_tconst") and "tau" in d:
		addcut("tconst", cuts.tconst_cut(nsamp, d.tau, config.get("cut_tconst")))
	if config.get("cut_stationary") and "boresight" in d:
		addcut("stationary", cuts.stationary_cut(d.boresight[1]))
	if config.get("cut_tod_ends") and "srate" in d:
		addcut("tod_ends", cuts.tod_end_cut(nsamp, d.srate))
	if config.get("cut_turnaround", turnaround) and "boresight" in d:
		addcut("turnaround",cuts.turnaround_cut(d.boresight[1]))
	if config.get("cut_ground", ground) and "boresight" in d and "point_offset" in d:
		addcut("ground", cuts.ground_cut(d.boresight, d.point_offset))
	if config.get("cut_sun", sun) and "boresight" in d and "point_offset" in d and "site" in d:
		addcut("avoidance",cuts.avoidance_cut(d.boresight, d.point_offset, d.site, "Sun", config.get("cut_sun_dist")*np.pi/180))
	if config.get("cut_moon", moon) and "boresight" in d and "point_offset" in d and "site" in d:
		addcut("moon",cuts.avoidance_cut(d.boresight, d.point_offset, d.site, "Moon", config.get("cut_moon_dist")*np.pi/180))
	if config.get("cut_pickup", pickup) and "boresight" in d and "pickup_cut" in d:
		addcut("pickup",cuts.pickup_cut(d.boresight[1], d.dets, d.pickup_cut))
	if config.get("cut_mostly_cut"):
		addcut("mostly_cut", cuts.cut_mostly_cut_detectors(d.cut_quality))
	if config.get("cut_obj"):
		objs = utils.split_outside(config.get("cut_obj"),",")
		for obj in objs:
			toks = obj.split(":")
			objname = toks[0]
			if objname.startswith("["):
				objname = [float(w)*utils.degree for w in objname[1:-1].split(",")]
			dist    = 0.2*utils.degree
			if len(toks) > 1: dist = float(toks[1])*utils.degree
			# Hack: only cut for noise estimation purposes if dist is negative
			targets = "cnb" if dist > 0 else "n"
			addcut(obj, cuts.avoidance_cut(d.boresight, d.point_offset, d.site, objname, dist), targets=targets)
	if config.get("cut_srcs"):
		cpar  = [tok.split(":") for tok in config.get("cut_srcs").split(",")]
		names, lims = [], []
		for par in cpar:
			if par[0] in ["map","nmat"]:
				names.append(par[0])
				lims.append(float(par[1]))
		if any(lims):
			params = pointsrcs.src2param(d.pointsrcs)
			params[:,5:7] = 1
			params[:,7]   = 0
			# Only bother with sources that are actually strong enough
			maxlim = max(lims+[0])
			params = params[params[:,2]>maxlim]
			cutlist = cuts.point_source_cut(d, params, lims)
			for name, c in zip(names, cutlist):
				if   name == "map":  addcut("point_srcs_m", c, "c")
				elif name == "nmat": addcut("point_srcs_n", c, "n")
	if config.get("cut_extra_srcs"):
		srclist = np.loadtxt(config.get("cut_extra_srcs"), usecols=(0,1), ndmin=2)
		srclim  = float(config.get("cut_extra_lim"))
		params  = np.zeros([len(srclist),8])
		params[:,:2]  = srclist[:,1::-1]*utils.degree
		params[:,2]   = 1
		params[:,5:7] = 1
		c = cuts.point_source_cut(d, params, srclim)
		addcut("point_srcs", c, "nbc")

	# What fraction is cut?
	cut_fraction = float(d.cut.sum())/d.cut.size
	# Get rid of completely cut detectors
	keep = np.where(d.cut.sum(axis=1) < nsamp)[0]
	d.restrict(d.dets[keep])
	ndet, nsamp = d.ndet, d.nsamp

	def cut_all_if(label, condition):
		if condition: dcut = sampcut.full(d.ndet, nsamp)
		else: dcut = None
		addcut(label, dcut)
	cut_all_if("max_frac",   config.get("cut_max_frac", max_frac) < cut_fraction)
	if "srate" in d:
		cut_all_if("tod_mindur", config.get("cut_tod_mindur") > nsamp/d.srate/60)
	cut_all_if("tod_mindet", config.get("cut_tod_mindet") > ndet)
	# Get rid of completely cut detectors again
	keep = np.where(d.cut.sum(axis=1) < nsamp)[0]
	d.restrict(dets=d.dets[keep])

	return d
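
Each entry appended to d.autocut above is [label, samples_cut_by_this_cut, net_change_in_total_cut]. A minimal reporting sketch (report_autocut is a hypothetical helper, not part of enact):

def report_autocut(d):
	"""Print one line per automatic cut with its net effect as a percentage."""
	tot = float(d.cut.size)
	for label, dn, deff in d.autocut:
		print("%-14s cut %10d samples, net effect %6.2f%%" % (label, dn, 100.0*deff/tot))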