Example 1
def scan_iterator(filelist, inds, reader, db=None, dets=None, quiet=False, downsample=1, hwp_resample=False):
	"""Given a set of ids/files and a set of indices into that list. Try
	to read each of these scans. Returns a list of successfully read scans
	and a list of their indices."""
	for ind in inds:
		try:
			if isinstance(filelist[ind],list): raise IOError
			d = enscan.read_scan(filelist[ind])
			actdata.read(filedb.data[filelist[ind]])
		except IOError:
			try:
				if isinstance(filelist[ind],list):
					entry = [db[id] for id in filelist[ind]]
				else:
					entry = db[filelist[ind]]
				d = reader(entry)
				if d.ndet == 0 or d.nsamp == 0:
					raise errors.DataMissing("Tod contains no valid data")
			except errors.DataMissing as e:
				if not quiet: L.debug("Skipped %s (%s)" % (str(filelist[ind]), e))
				continue
		if dets:
			if dets.startswith("@"):
				uids = [int(line.split()[0]) for line in open(dets[1:],"r")]
				_,det_inds = utils.common_inds([uids,d.dets])
				d = d[det_inds]
			else:
				d = eval("d[%s]" % dets)
		hwp_active = np.any(d.hwp_phase[0] != 0)
		if hwp_resample and hwp_active:
			mapping = enscan.build_hwp_sample_mapping(d.hwp)
			d = d.resample(mapping)
		d = d[:,::downsample]
		if not quiet: L.debug("Read %s" % str(filelist[ind]))
		yield ind, d
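
# Usage sketch (not part of the original source): assumes filedb has been
# initialized and that ACTScan is a Scan subclass like the one whose
# __init__ appears in Example 19; the selector string is illustrative.
filedb.init()
ids = filedb.scans["deep56"]
for ind, scan in scan_iterator(ids, range(len(ids)), ACTScan, db=filedb.data, downsample=2):
	print("scan %d: %d dets, %d samples" % (ind, scan.ndet, scan.nsamp))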
Example 2
    def get_samples(self, verbose=False):
        """Return the actual detector samples. Slow! Data is read from disk and
        calibrated on the fly, so store the result if you need to reuse it."""
        # Because we've read the tod_shape field earlier, we know that reading tod
        # won't cause any additional truncation of the samples or detectors.
        # tags is only needed here for read_combo support, but that is mostly broken
        # anyway.
        t1 = time.time()
        self.d += actdata.read(self.entry,
                               fields=["tod", "tags"],
                               dets=self.d.dets)
        #if debug_inject is not None: self.d.tod += debug_inject
        t2 = time.time()
        if verbose: print("read  %-14s in %6.3f s" % ("tod", t2 - t1))
        if config.get("tod_skip_deconv"): ops = ["tod_real"]
        else: ops = ["tod"]
        actdata.calibrate(self.d, operations=ops, verbose=verbose)
        tod = self.d.tod
        # Remove tod from our local d, so we won't end up hauling it around forever
        del self.d.tod
        # HWP resample if needed
        if self.mapping is not None:
            tod = np.ascontiguousarray(
                utils.interpol(tod,
                               self.mapping.oimap[None],
                               order=1,
                               mask_nan=False))
        method = config.get("downsample_method")
        for s in self.sampslices:
            tod = scan.slice_tod_samps(tod, s, method=method)
        tod = np.ascontiguousarray(tod)
        return tod
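
# Usage sketch (assumes `scan` is an instance of the class this method belongs
# to, e.g. built as in Example 19). get_samples re-reads and re-calibrates on
# every call, so cache the result instead of calling it repeatedly.
tod = scan.get_samples(verbose=True)  # (ndet, nsamp) array
tod -= np.mean(tod, 1)[:, None]       # e.g. remove the per-detector mean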
Example 3
def get_rangedata(id):
    entry = filedb.data[id]
    # Read the tod as usual
    with show("read"):
        d = actdata.read(entry)
    with show("calibrate"):
        # Don't apply time constant (and hence butterworth) deconvolution since we
        # will fit these ourselves
        d = actdata.calibrate(d, exclude=["autocut", "tod_fourier"])
    if d.ndet == 0 or d.nsamp < 2: raise errors.DataMissing("no data in tod")
    tod = d.tod
    del d.tod
    # Very simple white noise model
    with show("noise"):
        ivar = estimate_ivar(tod)
        asens = np.sum(ivar)**-0.5 / d.srate**0.5
    with show("planet mask"):
        # Generate planet cut
        planet_cut = cuts.avoidance_cut(d.boresight, d.point_offset, d.site,
                                        args.planet, R)
    with show("atmosphere"):
        # Subtract atmospheric model
        tod -= estimate_atmosphere(tod, planet_cut, d.srate, model_fknee,
                                   model_alpha)
        tod = tod.astype(dtype, copy=False)
    with show("extract"):
        # Should now be reasonably clean of correlated noise. Extract our range data
        rdata = build_rangedata(tod, planet_cut, d, ivar)
    return rdata
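
# Usage sketch (hypothetical driver, mirroring the MPI pattern used in the
# other examples; ids and comm are assumed to be set up as elsewhere):
for ind in range(comm.rank, len(ids), comm.size):
    try:
        rdata = get_rangedata(ids[ind])
    except errors.DataMissing as e:
        print("Skipping %s (%s)" % (ids[ind], e))
        continue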
Example 4
	def get_samples(self, verbose=False):
		"""Return the actual detector samples. Slow! Data is read from disk and
		calibrated on the fly, so store the result if you need to reuse it."""
		# Because we've read the tod_shape field earlier, we know that reading tod
		# won't cause any additional truncation of the samples or detectors.
		# tags is only needed here for read_combo support, but that is mostly broken
		# anyway.
		t1 = time.time()
		self.d += actdata.read(self.entry, fields=["tod", "tags"], dets=self.d.dets)
		t2 = time.time()
		if verbose: print "read  %-14s in %6.3f s" % ("tod", t2-t1)
		if config.get("tod_skip_deconv"): ops = ["tod_real"]
		else: ops = ["tod"]
		actdata.calibrate(self.d, operations=ops, verbose=verbose)
		tod = self.d.tod
		# Remove tod from our local d, so we won't end up hauling it around forever
		del self.d.tod
		# HWP resample if needed
		if self.mapping is not None:
			tod = np.ascontiguousarray(utils.interpol(tod, self.mapping.oimap[None], order=1, mask_nan=False))
		method = config.get("downsample_method")
		for s in self.sampslices:
			srange = slice(s.start, s.stop, np.sign(s.step) if s.step else None)
			tod = tod[:,srange]
			tod = resample.resample(tod, 1.0/np.abs(s.step or 1), method=method)
		tod = np.ascontiguousarray(tod)
		return tod
Example 5
def get_rangedata(id):
	entry = filedb.data[id]
	# Read the tod as usual
	with show("read"):
		d = actdata.read(entry)
	with show("calibrate"):
		# Don't apply time constant (and hence butterworth) deconvolution since we
		# will fit these ourselves
		d = actdata.calibrate(d, exclude=["autocut","tod_fourier"])
	if d.ndet == 0 or d.nsamp < 2: raise errors.DataMissing("no data in tod")
	tod = d.tod; del d.tod
	# Very simple white noise model
	with show("noise"):
		ivar  = estimate_ivar(tod)
		asens = np.sum(ivar)**-0.5 / d.srate**0.5
	with show("planet mask"):
		# Generate planet cut
		planet_cut = cuts.avoidance_cut(d.boresight, d.point_offset, d.site, args.planet, R)
	with show("atmosphere"):
		# Subtract atmospheric model
		tod -= estimate_atmosphere(tod, planet_cut, d.srate, model_fknee, model_alpha)
		tod  = tod.astype(dtype, copy=False)
	with show("extract"):
		# Should now be reasonably clean of correlated noise. Extract our range data
		rdata = build_rangedata(tod, planet_cut, d, ivar)
	return rdata
Example 6
def read_metadata(entry):
    '''
    Parameters
    ----------
    entry : filedb.data object

    Returns
    -------
    data : enlib.dataset.DataSet instance
    '''

    data = actdata.read(entry, exclude=['tod'])
    data = actdata.calibrate(data, exclude=['autocut'])
    if data.ndet == 0 or data.nsamp == 0:
        raise errors.DataMissing("No data in tod")
    return data
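
# Usage sketch (assumes filedb has been initialized; the selector string is
# illustrative):
entry = filedb.data[filedb.scans["deep56"][0]]
meta = read_metadata(entry)
print(meta.ndet, meta.nsamp)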
Example 7
    def get_samples(self, verbose=False):
        """Return the actual detector samples. Slow! Data is read from disk and
        calibrated on the fly, so store the result if you need to reuse it."""
        # Because we've read the tod_shape field earlier, we know that reading tod
        # won't cause any additional truncation of the samples or detectors.
        # tags is only needed here for read_combo support, but that is mostly broken
        # anyway.
        t1 = time.time()
        self.d += actdata.read(self.entry,
                               fields=["tod", "tags"],
                               dets=self.d.dets)
        t2 = time.time()
        if verbose: print("read  %-14s in %6.3f s" % ("tod", t2 - t1))
        if config.get("tod_skip_deconv"): ops = ["tod_real"]
        else: ops = ["tod"]
        actdata.calibrate(self.d, operations=ops, verbose=verbose)
        tod = self.d.tod
        # Remove tod from our local d, so we won't end up hauling it around forever
        del self.d.tod
        # HWP resample if needed
        if self.mapping is not None:
            tod = np.ascontiguousarray(
                utils.interpol(tod,
                               self.mapping.oimap[None],
                               order=1,
                               mask_nan=False))
        method = config.get("downsample_method")
        for s in self.sampslices:
            srange = slice(s.start, s.stop,
                           np.sign(s.step) if s.step else None)
            tod = tod[:, srange]
            # make sure we get exactly the same length the cuts will be expecting
            step = np.abs(s.step or 1)
            olen = (tod.shape[1] + step - 1) // step
            tod = resample.resample(tod,
                                    float(olen) / tod.shape[1],
                                    method=method)
        tod = np.ascontiguousarray(tod)
        return tod
Example 8
def to_pairdiff(d, pairs):
	dtod = d.tod[pairs[:,0]]-d.tod[pairs[:,1]]
	dcuts = {key: d[key][pairs[:,0]]+d[key][pairs[:,1]] for key in ["cut","cut_basic","cut_noiseest"]}
	d = d.restrict(dets=d.dets[pairs[:,0]])
	d.tod = dtod
	for key in dcuts:
		d[key] = dcuts[key]
	return d

# Loop through and analyse each tod-group
for ind in range(comm.rank, len(groups), comm.size):
	ids     = groups[ind]
	entries = [filedb.data[id] for id in ids]
	try:
		d = actdata.read(entries, verbose=args.verbose)
		d = actdata.calibrate(d,  verbose=args.verbose, exclude=["autocut"])
		if args.pairdiff:
			pairs = find_pairs_blind(d.point_template)
			d = to_pairdiff(d, pairs)
		if d.ndet < 2 or d.nsamp < 2: raise errors.DataMissing("No data in tod")
	except (errors.DataMissing, AssertionError, IndexError) as e:
		print "Skipping %s (%s)" % (str(ids), e.message)
		continue
	print "Processing %s: %4d %6d" % (str(ids), d.ndet, d.nsamp)
	tod  = d.tod
	del d.tod
	tod -= np.mean(tod,1)[:,None]
	tod  = tod.astype(dtype)
	ft   = fft.rfft(tod)
	nfreq= ft.shape[-1]
Example 9
from enact import actdata, filedb
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("pickup_map")
parser.add_argument("template")
parser.add_argument("sel_repr")
parser.add_argument("el", type=float)
parser.add_argument("ofile")
args = parser.parse_args()

filedb.init()
nrow, ncol = 33, 32
# Read our template, which represents the output horizontal coordinates
template = enmap.read_map(args.template)
# Use our representative selector to get focalplane offsets and polangles
entry = filedb.data[filedb.scans[args.sel_repr][0]]
d = actdata.read(entry, ["boresight", "point_offsets", "polangle"])
d.boresight[2] = args.el  # In degrees, calibrated in next step
d = actdata.calibrate(d, exclude=["autocut"])


def reorder(map, nrow, ncol, dets):
    return enmap.samewcs(map[utils.transpose_inds(dets, nrow, ncol)], map)


# Read our map, and give each row a weight
pickup = enmap.read_map(args.pickup_map)
pickup = reorder(pickup, nrow, ncol, d.dets)
weight = np.median((pickup[:, 1:] - pickup[:, :-1])**2, -1)
weight[weight > 0] = 1 / weight[weight > 0]

# Find the output pixel for each input pixel
Example 10
srates = np.zeros([nctod], dtype=dtype)
mce_fsamps = np.zeros([nctod], dtype=dtype)
mce_params = np.zeros([nctod, 4], dtype=dtype)
for ind in range(ind1 + comm.rank, ind2, comm.size):
    i = ind - ind1
    id = ids[ind]
    entry = filedb.data[id]
    try:
        # Do not apply time constants. We want raw spectra so that we can
        # use them to estimate time constants ourselves.
        fields = [
            "array_info", "tags", "site", "mce_filter", "gain", "cut",
            "tod", "boresight"
        ]
        if args.tconst: fields.append("tconst")
        d = actdata.read(entry, fields=fields)
        d = actdata.calibrate(
            d, exclude=(["autocut"] if not args.no_autocut else []))
        if d.ndet == 0 or d.nsamp == 0:
            raise errors.DataMissing("empty tod")
    except (IOError, OSError, errors.DataMissing) as e:
        print("Skipped (%s)" % str(e))
        continue
    print("Processing %s" % id)
    srates[i] = d.srate
    mce_fsamps[i] = d.mce_fsamp
    mce_params[i] = d.mce_params[:4]
    # Compute the power spectrum
    d.tod = d.tod.astype(dtype)
    nsamp = d.nsamp
    srate = d.srate
Example 11
	if len(sids) == 0:
		print("%s has 0 srcs: skipping" % id)
		continue
	try:
		nsrc = len(sids)
		print("%s has %d srcs: %s" % (id,nsrc,", ".join(["%d (%.1f)" % (i,a) for i,a in zip(sids,amps[sids])])))
	except TypeError as e:
		print("Weird: %s" % e)
		print(sids)
		print(amps)
		continue

	# Read the data
	entry = filedb.data[id]
	try:
		data = actdata.read(entry, exclude=["tod"], verbose=verbose)
		data+= actdata.read_tod(entry)
		data = actdata.calibrate(data, verbose=verbose)
		#print("fixme") # FIXME
		#data.restrict(dets=data.dets[100:150])
		# Avoid planets while building noise model
		if planet is not None:
			data.cut_noiseest *= actdata.cuts.avoidance_cut(data.boresight, data.point_offset, data.site, planet, R)
		if data.ndet < 2 or data.nsamp < 1: raise errors.DataMissing("no data in tod")
	except errors.DataMissing as e:
		print("%s skipped: %s" % (id, e))
		continue
	# Prepare our samples
	#data.tod -= np.mean(data.tod,1)[:,None]
	data.tod -= data.tod[:,None,0].copy()
	data.tod  = data.tod.astype(dtype)
Example 12
    filedb.init()

    min_samps = 20e3
    log_level = log.verbosity2level(config.get("verbosity"))
    L = log.init(level=log_level, rank=comm.rank)
    tagger = WorkspaceTagger()

    ids = filedb.scans[args.sel]
    for ind in range(comm.rank, len(ids), comm.size):
        id = ids[ind]
        entry = filedb.data[id]
        try:
            # We need the tod and all its dependencies to estimate which noise
            # category the tod falls into. But we don't need all the dets.
            # Speed things up by only reading 25% of them.
            d = actdata.read(entry, ["boresight", "point_offsets", "site"])
            d = actdata.calibrate(d, exclude=["autocut"])
            if d.ndet == 0 or d.nsamp == 0:
                raise errors.DataMissing("Tod contains no valid data")
            if d.nsamp < min_samps:
                raise errors.DataMissing("Tod is too short")
        except errors.DataMissing as e:
            L.debug("Skipped %s (%s)" % (id, e.message))
            continue
        L.debug(id)

        # Get the scan el and az bounds
        az1 = np.min(d.boresight[1])
        az2 = np.max(d.boresight[1])
        el = np.mean(d.boresight[2])
Example 13
utils.mkdir(args.odir)
root = args.odir + "/" + (args.prefix + "_" if args.prefix else "")
# Set up logging
utils.mkdir(root + "log")
logfile = root + "log/log%03d.txt" % comm_world.rank
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, file=logfile, rank=comm_world.rank, shared=False)

# Run through all tods to determine the scanning patterns
L.info("Detecting scanning patterns")
boxes = np.zeros([len(ids), 2, 2])
for ind in range(comm_world.rank, len(ids), comm_world.size):
    id = ids[ind]
    entry = filedb.data[id]
    try:
        d = actdata.read(entry, ["boresight", "tconst", "cut", "cut_noiseest"])
        d = actdata.calibrate(d, exclude=["autocut"])
        if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("no data")
    except errors.DataMissing as e:
        L.debug("Skipped %s (%s)" % (ids[ind], str(e)))
        continue
    # Reorder from az,el to el,az
    boxes[ind] = [
        np.min(d.boresight[2:0:-1], 1),
        np.max(d.boresight[2:0:-1], 1)
    ]
    L.info("%5d: %s" % (ind, id))
boxes = utils.allreduce(boxes, comm_world)

# Prune null boxes
usable = np.all(boxes != 0, (1, 2))
Example 14
fbins = np.concatenate(fbins, 0)
nbin = len(fbins)
del freqs, nbins

corr = None
pos = None

for ind in range(comm.rank, nlabel, comm.size):
    id = ids[labels == ind]
    order = np.argsort([i[-1] for i in id])
    id = id[order]
    if len(id) != nsub: continue
    print "Processing " + "+".join(id)
    entries = [filedb.data[i] for i in id]
    try:
        d = actdata.read(entries)
        d = actdata.calibrate(d, exclude=["autocut"])
    except errors.DataMissing as e:
        print "Skipping %s: %s" % (id, str(e))
        continue
    tod = d.tod.astype(dtype)
    del d.tod
    ft = fft.rfft(tod)
    del tod

    if corr is None:
        ndet = d.array_info.ndet
        corr = np.zeros((nbin, ndet, ndet), dtype=dtype)
        hits = np.zeros((nbin, ndet, ndet), dtype=int)
        var = np.zeros((nbin, ndet))
        pos = np.zeros((ndet, 2))
Example 15
    help=
    "det: output will be in det-seconds per square arcmin. arr: arr-seconds per square arcmin"
)
args = parser.parse_args()

filedb.init()
ids = filedb.scans[args.sel]
db = filedb.scans.select(ids)

comm = mpi.COMM_WORLD
shape, wcs = enmap.read_map_geometry(args.template)

# We assume that site and pointing offsets are the same for all tods,
# so get them based on the first one
entry = filedb.data[ids[0]]
site = actdata.read(entry, ["site"]).site

omap = fastweight.fastweight(shape,
                             wcs,
                             db,
                             weight=args.weight,
                             array_rad=args.rad * utils.degree,
                             comm=comm,
                             dtype=np.float64,
                             daz=args.daz * utils.degree,
                             chunk_size=args.chunk_size,
                             site=site,
                             verbose=True)

if comm.rank == 0:
    enmap.write_map(args.omap, omap)
Example 16
		res[i] = len(np.unique(tod[i]))
	return res

def write_cuts(ofile, cuts, array_info, dets):
	with open(ofile, "w") as f:
		f.write("""format = 'TODCuts'
format_version = 2
n_det = %d
n_samp = %d
samp_offset = 0\n""" % (array_info.ndet, cuts.shape[1]))
		for ind, det in enumerate(dets):
			rstr = " ".join(["(%d,%d)" % tuple(rn) for rn in cuts[ind].ranges])
			f.write("%4d: %s\n" % (det, rstr))

for ind in range(comm.rank, len(ids), comm.size):
	id = ids[ind]
	print(id)
	entry = filedb.data[id]
	# Read uncalibrated data
	d = actdata.read(entry, fields=["tod","array_info"], verbose=True)
	dmask = np.zeros(d.ndet, int)
	quant = measure_quant(d.tod)
	dmask |= quant < 1000
	cuts  = det_mask_to_cuts(dmask, d.nsamp)
	#cuts_null = find_null(d.tod)
	#cuts_jump = find_jumps(d.tod)
	#cuts = cuts_null # cuts_jump + cuts_null
	# Write cut file
	dets = np.arange(cuts.shape[0])
	write_cuts(args.odir + "/%s.cuts" % id, cuts, d.array_info, dets)
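
# For illustration (not from the original source): the file written by
# write_cuts above starts with the header built in the f.write call, followed
# by one line per detector listing its cut ranges. With made-up numbers:
#
#   format = 'TODCuts'
#   format_version = 2
#   n_det = 1056
#   n_samp = 200000
#   samp_offset = 0
#      0: (0,200000)
#      1: (1000,1052) (8000,8013)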
Example 17
bin_freqs = parse_bin_freqs(args.bins)
bin_freqs_status = parse_bin_freqs(args.bins_status)
nbin  = len(bin_freqs)
stats = np.zeros([3,ntod,nbin])
examples = np.zeros([ntod,nex,nbin])
nchunk= (ntod+chunk_size-1)//chunk_size

for chunk in range(nchunk):
	ind1 = chunk*chunk_size
	ind2 = min(ind1+chunk_size, ntod)
	for ind in range(ind1+comm.rank, ind2, comm.size):
		id    = ids[ind]
		entry = filedb.data[id]
		try:
			d     = actdata.read(entry, fields=["gain","tconst","cut","tod","boresight","apex"])
			d     = actdata.calibrate(d, exclude=["autocut"])
			if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("empty tod")
		except (IOError, errors.DataMissing) as e:
			print("Skipped (%s)" % (e))
			continue
		# Compute the power spectrum
		nsamp = d.nsamp
		srate = d.srate
		ft    = fft.rfft(d.tod)
		del d.tod
		ps    = np.abs(ft)**2/(nsamp*srate)
		del ft
		# Choose a random set of detectors to be examples.
		iex = np.random.permutation(len(ps))[:nex]
		examples[ind] = ps[iex]
Example 18
	ind_ownership   = match_existing(aset, ctime)
	block_ownership = get_block_ownership(ind_ownership, block_inds)
else:
	block_ownership = np.full(len(block_inds),-1,int)
fixed_blocks = np.where(block_ownership>=0)[0]
free_blocks  = np.where(block_ownership<0)[0]
nfixed = len(fixed_blocks)
nfree  = len(free_blocks)

sys.stderr.write("splitting %d:[%s] tods into %d splits via %d blocks%s" % (
	ntod, atolist(nper), nsplit, nblock, (" with %d:%d free:fixed" % (nfree,nfixed)) if nfixed > 0 else "") + "\n")

# We assume that site and pointing offsets are the same for all tods,
# so get them based on the first one
entry = filedb.data[ids[0]]
site  = actdata.read(entry, ["site"]).site

# Determine the bounding box of our selected data
bounds    = db.data["bounds"].reshape(2,-1).copy()
bounds[0] = utils.rewind(bounds[0], bounds[0,0], 360)
box = utils.widen_box(utils.bounding_box(bounds.T), 4*args.rad, relative=False)
waz, wel = box[1]-box[0]
# Use fullsky horizontally if we wrap too far
if waz <= 180:
	shape, wcs = enmap.geometry(pos=box[:,::-1]*utils.degree, res=args.res*utils.degree, proj="car", ref=(0,0))
else:
	shape, wcs = enmap.fullsky_geometry(res=args.res*utils.degree)
	y1, y2 = np.sort(enmap.sky2pix(shape, wcs, [box[:,1]*utils.degree,[0,0]])[0].astype(int))
	shape, wcs = enmap.slice_geometry(shape, wcs, (slice(y1,y2),slice(None)))

sys.stderr.write("using %s workspace with resolution %.2f deg" % (str(shape), args.res) + "\n")
Example 19
    def __init__(self, entry, subdets=None, d=None, verbose=False, dark=False):
        self.fields = [
            "gain", "mce_filter", "tags", "polangle", "tconst", "hwp", "cut",
            "point_offsets", "boresight", "site", "tod_shape", "array_info",
            "beam", "pointsrcs", "buddies"
        ]
        if dark: self.fields += ["dark"]
        if config.get("noise_model") == "file":
            self.fields += ["noise"]
        else:
            if config.get("cut_noise_whiteness"):
                self.fields += ["noise_cut"]
            if config.get("cut_spikes"):
                self.fields += ["spikes"]
        if d is None:
            d = actdata.read(entry, self.fields, verbose=verbose)
            d = actdata.calibrate(d, verbose=verbose)
            if subdets is not None:
                d.restrict(dets=d.dets[subdets])
        if d.ndet == 0 or d.nsamp == 0:
            raise errors.DataMissing("No data in scan")
        ndet = d.ndet
        # Necessary components for Scan interface
        self.mjd0 = utils.ctime2mjd(d.boresight[0, 0])
        self.boresight = np.ascontiguousarray(
            d.boresight.T.copy())  # [nsamp,{t,az,el}]
        self.boresight[:, 0] -= self.boresight[0, 0]
        self.offsets = np.zeros([ndet, self.boresight.shape[1]])
        self.offsets[:, 1:] = d.point_offset
        self.cut = d.cut.copy()
        self.cut_noiseest = d.cut_noiseest.copy()
        self.comps = np.zeros([ndet, 4])
        self.beam = d.beam
        self.pointsrcs = d.pointsrcs
        self.comps = d.det_comps
        self.hwp = d.hwp
        self.hwp_phase = d.hwp_phase
        self.dets = d.dets
        self.dgrid = (d.array_info.nrow, d.array_info.ncol)
        self.array_info = d.array_info
        self.sys = config.get("tod_sys",
                              entry.tod_sys if "tod_sys" in entry else None)
        self.site = d.site
        self.speed = d.speed
        if "noise" in d:
            self.noise = d.noise
        else:
            spikes = d.spikes[:2].T if "spikes" in d else None
            self.noise = nmat_measure.NmatBuildDelayed(
                model=config.get("noise_model"),
                spikes=spikes,
                cut=self.cut_noiseest)
        if "dark_tod" in d:
            self.dark_tod = d.dark_tod
        if "dark_cut" in d:
            self.dark_cut = d.dark_cut
        if "buddy_comps" in d:
            # Expand buddy_offs to {dt,daz,ddec}
            self.buddy_comps = d.buddy_comps
            self.buddy_offs = np.concatenate(
                [d.buddy_offs[..., :1] * 0, d.buddy_offs], -1)
        self.autocut = d.autocut if "autocut" in d else []
        # Implementation details. d is our DataSet, which we keep around
        # because we need it to read tod consistently later. It will *not*
        # take part in any sample slicing operations, as that might make the
        # delayed tod read inconsistent with the rest. It could take part in
        # detector slicing as long as calibrate_tod operates on each detector
        # independently. This is true now, but would not be so if we did stuff
        # like common mode subtraction there. On the other hand, not doing this
        # would prevent slicing before reading from giving any speedup or memory
        # savings. I don't think allowing this should be a serious problem.
        self.d = d
        self.entry = entry

        def fmt_id(entry):
            if isinstance(entry, list):
                return "+".join([fmt_id(e) for e in entry])
            else:
                if entry.tag: return entry.id + ":" + entry.tag
                else: return entry.id

        self.id = fmt_id(entry)
        self.sampslices = []
        self.mapping = None

        # FIXME: debug test
        if config.get("dummy_cut") > 0:
            nmax = int(config.get("dummy_cut_len"))
            # Power law between 1 and nmax, with slope -1.
            # C(w) = log(w)/log(nmax)
            # P(w) = w**-1/log(nmax)
            # w(C) = n**C
            # Mean: (nmax-1)/log(nmax)
            nmean = (nmax - 1) / np.log(nmax)
            ncut = int(self.nsamp * config.get("dummy_cut") / nmean)
            cut_ranges = np.zeros([self.ndet, ncut, 2], int)
            w = (nmax**np.random.uniform(0, 1, size=[self.ndet,
                                                     ncut])).astype(int)
            w = np.clip(w, 1, nmax)
            cut_ranges[:, :, 0] = np.random.uniform(0,
                                                    self.nsamp,
                                                    size=[self.ndet,
                                                          ncut]).astype(int)
            cut_ranges[:, :, 0] = np.sort(cut_ranges[:, :, 0], 1)
            cut_ranges[:, :, 1] = cut_ranges[:, :, 0] + w
            cut_ranges[:, :, 1] = np.clip(cut_ranges[:, :, 1], 0, self.nsamp)
            cut_dummy = sampcut.from_list(cut_ranges, self.nsamp)
            print(np.mean(w), nmean, nmax, ncut)
            print("cut fraction before", float(self.cut.sum()) / self.cut.size)
            self.cut *= cut_dummy
            print("cut fraction after", float(self.cut.sum()) / self.cut.size)
Example 20
	# We can't allocate the full buffer, since we don't know the
	# length of each tod a priori
	ind1   = chunk*chunk_size
	ind2   = min(ind1+chunk_size,ntod)
	cntod  = ind2-ind1
	lens   = np.zeros(cntod,int)
	mystats= []
	myinds = []
	for ind in range(ind1+comm.rank, ind2, comm.size):
		i     = ind-ind1
		id    = ids[ind]
		entry = filedb.data[id]
		try:
			# Get completely raw tods. No cut gapfilling. No gains. Output
			# will be in ADC units.
			d = actdata.read(entry, fields=["tod"])
			if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("empty tod")
		except (IOError, errors.DataMissing) as e:
			print "Skipped %s [%3d/%d] (%s)" % (id, i+1, ind2-ind1, e.message)
			continue
		print "Processing %s [%3d/%d]" % (id, i+1, ind2-ind1)
		# Get rid of non-data bits
		d.tod //= 128
		nsamp = d.nsamp
		# Find the number of segments in this tod. We only want whole segments
		nseg  = nsamp//seg_size
		if nseg < 1:
			print("Skipped %s: Too short tod" % id)
			continue
		tod   = d.tod[:,:nseg*seg_size]
		stat  = np.zeros([2,nstat,ndet,nseg],dtype=dtype)
Example 21
mask = enmap.zeros((3,)+imask.shape[-2:], imask.wcs, dtype)
mask[0] = imask.reshape((-1,)+imask.shape[-2:])[0]
del imask

utils.mkdir(args.odir)

# Each mpi tasks opens its own work file
wfname  = args.odir + "/work_%03d.hdf" % comm.rank
mystats = []
with h5py.File(wfname, "w") as hfile:
	for ind in range(comm.rank*ntod//comm.size, (comm.rank+1)*ntod//comm.size):
		id    = ids[ind]
		entry = filedb.data[id]
		# We only need pointing to build this cut
		try:
			d = actdata.read(entry, ["point_offsets","boresight","site","array_info"])
			d = actdata.calibrate(d, exclude=["autocut"])
		except (errors.DataMissing, AttributeError) as e:
			print("Skipping %s (%s)" % (id, e))
			continue
		# Build a projector between samples and mask. This
		# requires us to massage d into scan form. It's getting
		# annoying that scan and data objects aren't compatible.
		bore = d.boresight.T.copy()
		bore[:,0] -= bore[0,0]
		scan = enscan.Scan(
			boresight = bore,
			offsets = np.concatenate([np.zeros(d.ndet)[:,None],d.point_offset],1),
			comps = np.concatenate([np.ones(d.ndet)[:,None],np.zeros((d.ndet,3))],1),
			mjd0 = utils.ctime2mjd(d.boresight[0,0]),
			sys = "hor", site = d.site)
Example 22
fbins = np.concatenate(fbins,0)
nbin  = len(fbins)
del freqs, nbins

corr = None
pos  = None

for ind in range(comm.rank, nlabel, comm.size):
	id = ids[labels==ind]
	order = np.argsort([i[-1] for i in id])
	id = id[order]
	if len(id) != nsub: continue
	print "Processing " + "+".join(id)
	entries = [filedb.data[i] for i in id]
	try:
		d = actdata.read(entries)
		d = actdata.calibrate(d, exclude=["autocut"])
	except errors.DataMissing as e:
		print "Skipping %s: %s" % (id,e.message)
		continue
	tod = d.tod.astype(dtype)
	del d.tod
	ft = fft.rfft(tod)
	del tod

	if corr is None:
		ndet = d.array_info.ndet
		corr = np.zeros((nbin,ndet,ndet),dtype=dtype)
		hits = np.zeros((nbin,ndet,ndet),dtype=int)
		var  = np.zeros((nbin,ndet))
		pos  = np.zeros((ndet,2))
Example 23
	filedb.init()

	min_samps = 20e3
	log_level = log.verbosity2level(config.get("verbosity"))
	L = log.init(level=log_level, rank=comm.rank)
	tagger = WorkspaceTagger()

	ids = filedb.scans[args.sel]
	for ind in range(comm.rank, len(ids), comm.size):
		id    = ids[ind]
		entry = filedb.data[id]
		try:
			# We need the tod and all its dependencies to estimate which noise
			# category the tod falls into. But we don't need all the dets.
			# Speed things up by only reading 25% of them.
			d = actdata.read(entry, ["boresight","point_offsets","site"])
			d = actdata.calibrate(d, exclude=["autocut"])
			if d.ndet == 0 or d.nsamp == 0:
				raise errors.DataMissing("Tod contains no valid data")
			if d.nsamp < min_samps:
				raise errors.DataMissing("Tod is too short")
		except errors.DataMissing as e:
			L.debug("Skipped %s (%s)" % (id, e.message))
			continue
		L.debug(id)

		# Get the scan el and az bounds
		az1 = np.min(d.boresight[1])
		az2 = np.max(d.boresight[1])
		el  = np.mean(d.boresight[2])
Example 24
					nsigma = (amps[i,0]**2*aicov[i,0])**0.5
					msg += " %7.3f %4.1f" % (amps[i,0]/self.amp_unit, nsigma)
				msg += " %12.5e %7.2f" % (self.chisq0-chisq, t2-t1)
				print(msg)
			self.i += 1
			return chisq
		return wrapper

for ind in range(comm.rank, len(ids), comm.size):
	id    = ids[ind]
	bid   = id.replace(":","_")
	entry = filedb.data[id]
	# Read the tod as usual
	try:
		with bench.show("read"):
			d = actdata.read(entry)
		with bench.show("calibrate"):
			d = actdata.calibrate(d, exclude=["autocut"])
		if d.ndet == 0 or d.nsamp < 2: raise errors.DataMissing("no data in tod")
	except errors.DataMissing as e:
		print("Skipping %s (%s)" % (id, e))
		continue
	print("Processing %s" % id)
	# Very simple white noise model
	with bench.show("ivar"):
		tod  = d.tod
		del d.tod
		tod -= np.mean(tod,1)[:,None]
		tod  = tod.astype(dtype)
		diff = tod[:,1:]-tod[:,:-1]
		diff = diff[:,:diff.shape[-1]//csize*csize].reshape(d.ndet,-1,csize)
Example 25
def build_tod_stats(entry, Naz=8, Nt=2):
    """Collect summary information for the tod in the given entry, returning
    it as a bunch. If some information can't be found, then those fields will
    be set to a placeholder value (usually NaN), but the fields will still all
    be present."""
    # At the very least we need the pointing, so no try/except around this
    d = actdata.read(entry, ["boresight", "site"])
    d += actdata.read_point_offsets(entry, no_correction=True)
    d = actdata.calibrate(d, exclude=["autocut"])

    # Get the array center and radius
    acenter = np.mean(d.point_offset, 0)
    arad = np.mean((d.point_offset - acenter)**2, 0)**0.5

    t, baz, bel = 0.5 * (np.min(d.boresight, 1) + np.max(d.boresight, 1))
    #t, baz, bel = np.mean(d.boresight,1)
    az = baz + acenter[0]
    el = bel + acenter[1]
    dur, waz, wel = np.max(d.boresight, 1) - np.min(d.boresight, 1)
    if waz > 180 * utils.degree:
        print("bad waz %8.3f for %s" % (waz / utils.degree, entry.id))
    mjd = utils.ctime2mjd(t)
    hour = t / 3600. % 24
    day = hour >= day_range[0] and hour < day_range[1]
    night = not day
    jon = (t - jon_ref) / (3600 * 24)

    ra, dec = coordinates.transform(tsys, "cel", [az, el], mjd, site=d.site)
    # Get the array center bounds on the sky, assuming constant elevation
    ts = utils.ctime2mjd(t + dur / 2 * np.linspace(-1, 1, Nt))
    azs = az + waz / 2 * np.linspace(-1, 1, Naz)
    E1 = coordinates.transform(tsys,
                               "cel", [azs, [el] * Naz],
                               time=[ts[0]] * Naz,
                               site=d.site)[:, 1:]
    E2 = coordinates.transform(tsys,
                               "cel", [[azs[-1]] * Nt, [el] * Nt],
                               time=ts,
                               site=d.site)[:, 1:]
    E3 = coordinates.transform(tsys,
                               "cel", [azs[::-1], [el] * Naz],
                               time=[ts[-1]] * Naz,
                               site=d.site)[:, 1:]
    E4 = coordinates.transform(tsys,
                               "cel", [[azs[0]] * Nt, [el] * Nt],
                               time=ts[::-1],
                               site=d.site)[:, 1:]
    bounds = np.concatenate([E1, E2, E3, E4], 1)
    bounds[0] = utils.rewind(bounds[0])
    ## Grow bounds by array radius
    #bmid = np.mean(bounds,1)
    #for i in range(2):
    #	bounds[i,bounds[i]<bmid[i]] -= arad[i]
    #	bounds[i,bounds[i]>bmid[i]] += arad[i]
    tot_id = entry.id + (":" + entry.tag if entry.tag else "")
    res = bunch.Bunch(id=tot_id,
                      nsamp=d.nsamp,
                      t=t,
                      mjd=mjd,
                      jon=jon,
                      hour=hour,
                      day=day,
                      night=night,
                      dur=dur,
                      az=az / utils.degree,
                      el=el / utils.degree,
                      baz=baz / utils.degree,
                      bel=bel / utils.degree,
                      waz=waz / utils.degree,
                      wel=wel / utils.degree,
                      ra=ra / utils.degree,
                      dec=dec / utils.degree,
                      bounds=bounds / utils.degree)

    if "gseason" in entry:
        res[entry.gseason] = True

    # Planets
    for obj in [
            "Sun", "Moon", "Mercury", "Venus", "Mars", "Jupiter", "Saturn",
            "Uranus", "Neptune"
    ]:
        res[obj] = coordinates.ephem_pos(obj,
                                         utils.ctime2mjd(t)) / utils.degree

    # Get our weather information, if available
    try:
        d += actdata.read(entry, ["apex"])
        d = actdata.calibrate_apex(d)
        res["pwv"] = d.apex.pwv
        res["wx"] = d.apex.wind[0]
        res["wy"] = d.apex.wind[1]
        res["wind_speed"] = d.apex.wind_speed
        res["T"] = d.apex.temperature
    except errors.DataMissing:
        res["pwv"] = np.NaN
        res["wx"] = np.NaN
        res["wy"] = np.NaN
        res["wind_speed"] = np.NaN
        res["T"] = np.NaN

    # Try to get our cut info, so that we can select on
    # number of detectors and cut fraction
    try:
        npre = d.nsamp * d.ndet
        d += actdata.read(entry, ["cut"])
        res["ndet"] = d.ndet
        res["cut"] = 1 - d.nsamp * d.ndet / float(npre)
    except errors.DataMissing:
        res["ndet"] = 0
        res["cut"] = 1.0

    # Try to get hwp info
    res["hwp"] = False
    res["hwp_name"] = "none"
    try:
        epochs = actdata.try_read(files.read_hwp_epochs, "hwp_epochs",
                                  entry.hwp_epochs)
        t, _, ar = entry.id.split(".")
        t = float(t)
        if ar in epochs:
            for epoch in epochs[ar]:
                if t >= epoch[0] and t < epoch[1]:
                    res["hwp"] = True
                    res["hwp_name"] = epoch[2]
    except errors.DataMissing:
        pass

    return res
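
# Usage sketch (hypothetical: gather stats for a selection and filter on the
# fields set above):
stats = []
for id in filedb.scans["deep56"]:
    try:
        stats.append(build_tod_stats(filedb.data[id]))
    except errors.DataMissing:
        continue
good = [s for s in stats if s.cut < 0.3 and s.ndet > 100]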
Example 26

def write_cuts(ofile, cuts, array_info, dets):
    with open(ofile, "w") as f:
        f.write("""format = 'TODCuts'
format_version = 2
n_det = %d
n_samp = %d
samp_offset = 0\n""" % (array_info.ndet, cuts.shape[1]))
        for ind, det in enumerate(dets):
            rstr = " ".join(["(%d,%d)" % tuple(rn) for rn in cuts[ind].ranges])
            f.write("%4d: %s\n" % (det, rstr))


for ind in range(comm.rank, len(ids), comm.size):
    id = ids[ind]
    print(id)
    entry = filedb.data[id]
    # Read uncalibrated data
    d = actdata.read(entry, fields=["tod", "array_info"], verbose=True)
    dmask = np.zeros(d.ndet, int)
    quant = measure_quant(d.tod)
    dmask |= quant < 1000
    cuts = det_mask_to_cuts(dmask, d.nsamp)
    #cuts_null = find_null(d.tod)
    #cuts_jump = find_jumps(d.tod)
    #cuts = cuts_null # cuts_jump + cuts_null
    # Write cut file
    dets = np.arange(cuts.shape[0])
    write_cuts(args.odir + "/%s.cuts" % id, cuts, d.array_info, dets)
Example 27
# We can't allocate the full buffer, since we don't know the
# length of each tod a priori
ind1 = chunk * chunk_size
ind2 = min(ind1 + chunk_size, ntod)
cntod = ind2 - ind1
lens = np.zeros(cntod, int)
mystats = []
myinds = []
for ind in range(ind1 + comm.rank, ind2, comm.size):
    i = ind - ind1
    id = ids[ind]
    entry = filedb.data[id]
    try:
        # Get completely raw tods. No cut gapfilling. No gains. Output
        # will be in ADC units.
        d = actdata.read(entry, fields=["tod"])
        if d.ndet == 0 or d.nsamp == 0:
            raise errors.DataMissing("empty tod")
    except (IOError, errors.DataMissing) as e:
        print("Skipped %s [%3d/%d] (%s)" % (id, i + 1, ind2 - ind1, e))
        continue
    print("Processing %s [%3d/%d]" % (id, i + 1, ind2 - ind1))
    # Get rid of non-data bits
    d.tod //= 128
    nsamp = d.nsamp
    # Find the number of segments in this tod. We only want whole segments
    nseg = nsamp // seg_size
    if nseg < 1:
        print("Skipped %s: Too short tod" % id)
        continue
Example 28
    return tod


abscal_dict_sub = {}  # For abscal values on this process.

for ind in range(comm.rank, len(ids), comm.size):
    id = ids[ind]
    bid = id.replace(":", "_")
    entry = filedb.data[id]
    if args.tags: entry.tag = args.tags

    # Read the tod as usual
    try:
        if not args.sim:
            with bench.show("read"):
                d = actdata.read(entry)
        else:
            sim_id = sim_ids[ind]
            sim_entry = filedb.data[sim_id]
            with bench.show("read"):
                d = actdata.read(entry, ["boresight"])
                d += actdata.read(sim_entry, exclude=["boresight"])

        # Store the abscal value.
        abscal_dict_sub[id] = d.gain_correction[entry.tag]
        abscal = d.gain_correction[entry.tag]

        if d.gain_mode == 'mce':
            abscal /= d.mce_gain
        elif d.gain_mode == 'mce_compat':
            abscal /= d.mce_gain * 1217.8583043
Example 29
	ft *= np.exp(-0.5*(sigma/skyspeed)**2*k**2)
	fft.ifft(ft, tod, normalize=True)

for ind in range(comm.rank, len(ids), comm.size):
	id    = ids[ind]
	bid   = id.replace(":","_")
	entry = filedb.data[id]
	oname = "%s%s_map.fits" % (prefix, bid)
	if args.cont and os.path.isfile(oname):
		print "Skipping %s (already done)" % (id)
		continue
	# Read the tod as usual
	try:
		if not args.sim:
			with bench.show("read"):
				d = actdata.read(entry)
		else:
			sim_id    = sim_ids[ind]
			sim_entry = filedb.data[sim_id]
			with bench.show("read"):
				d  = actdata.read(entry, ["boresight"])
				d += actdata.read(sim_entry, exclude=["boresight"])
		with bench.show("calibrate"):
			d = actdata.calibrate(d, exclude=["autocut"])
		if d.ndet == 0 or d.nsamp < 2: raise errors.DataMissing("no data in tod")
		# Select detectors if needed
		if dbox is not None:
			mid  = np.mean(utils.minmax(d.point_template, 0), 0)
			off  = d.point_template-mid
			good = np.all((off > dbox[0])&(off < dbox[1]),-1)
			d    = d.restrict(dets=d.dets[good])
Example 30
utils.mkdir(args.odir)
root = args.odir + "/" + (args.prefix + "_" if args.prefix else "")
# Set up logging
utils.mkdir(root + "log")
logfile   = root + "log/log%03d.txt" % comm_world.rank
log_level = log.verbosity2level(config.get("verbosity"))
L = log.init(level=log_level, file=logfile, rank=comm_world.rank, shared=False)

# Run through all tods to determine the scanning patterns
L.info("Detecting scanning patterns")
boxes = np.zeros([len(ids),2,2])
for ind in range(comm_world.rank, len(ids), comm_world.size):
	id    = ids[ind]
	entry = filedb.data[id]
	try:
		d = actdata.read(entry, ["boresight","tconst","cut","cut_noiseest"])
		d = actdata.calibrate(d, exclude=["autocut"])
		if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("no data")
	except errors.DataMissing as e:
		L.debug("Skipped %s (%s)" % (ids[ind], e.message))
		continue
	# Reorder from az,el to el,az
	boxes[ind] = [np.min(d.boresight[2:0:-1],1),np.max(d.boresight[2:0:-1],1)]
	L.info("%5d: %s" % (ind, id))
boxes = utils.allreduce(boxes, comm_world)

# Prune null boxes
usable = np.all(boxes!=0,(1,2))
moo = ids[usable]
cow = boxes[usable]
Example 31
        key: d[key][pairs[:, 0]] + d[key][pairs[:, 1]]
        for key in ["cut", "cut_basic", "cut_noiseest"]
    }
    d = d.restrict(dets=d.dets[pairs[:, 0]])
    d.tod = dtod
    for key in dcuts:
        d[key] = dcuts[key]
    return d


# Loop through and analyse each tod-group
for ind in range(comm.rank, len(groups), comm.size):
    ids = groups[ind]
    entries = [filedb.data[id] for id in ids]
    try:
        d = actdata.read(entries, verbose=args.verbose)
        d = actdata.calibrate(d, verbose=args.verbose, exclude=["autocut"])
        if args.pairdiff:
            pairs = find_pairs_blind(d.point_template)
            d = to_pairdiff(d, pairs)
        if d.ndet < 2 or d.nsamp < 2:
            raise errors.DataMissing("No data in tod")
    except (errors.DataMissing, AssertionError, IndexError) as e:
        print "Skipping %s (%s)" % (str(ids), str(e))
        continue
    print "Processing %s: %4d %6d" % (str(ids), d.ndet, d.nsamp)
    tod = d.tod
    del d.tod
    tod -= np.mean(tod, 1)[:, None]
    tod = tod.astype(dtype)
    ft = fft.rfft(tod)
Example 32
	def __init__(self, entry, subdets=None, d=None, verbose=False, dark=False):
		self.fields = ["gain","mce_filter","tags","polangle","tconst","hwp","cut","point_offsets","boresight","site","tod_shape","array_info","beam","pointsrcs", "buddies"]
		if dark: self.fields += ["dark"]
		if config.get("noise_model") == "file":
			self.fields += ["noise"]
		else:
			if config.get("cut_noise_whiteness"):
				self.fields += ["noise_cut"]
			if config.get("cut_spikes"):
				self.fields += ["spikes"]
		if d is None:
			d = actdata.read(entry, self.fields, verbose=verbose)
			d = actdata.calibrate(d, verbose=verbose)
			if subdets is not None:
				d.restrict(dets=d.dets[subdets])
		if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("No data in scan")
		ndet = d.ndet
		# Necessary components for Scan interface
		self.mjd0      = utils.ctime2mjd(d.boresight[0,0])
		self.boresight = np.ascontiguousarray(d.boresight.T.copy()) # [nsamp,{t,az,el}]
		self.boresight[:,0] -= self.boresight[0,0]
		self.offsets   = np.zeros([ndet,self.boresight.shape[1]])
		self.offsets[:,1:] = d.point_offset
		self.cut       = d.cut.copy()
		self.cut_noiseest = d.cut_noiseest.copy()
		self.comps     = np.zeros([ndet,4])
		self.beam      = d.beam
		self.pointsrcs = d.pointsrcs
		self.comps     = d.det_comps
		self.hwp = d.hwp
		self.hwp_phase = d.hwp_phase
		self.dets  = d.dets
		self.dgrid = (d.array_info.nrow, d.array_info.ncol)
		self.array_info = d.array_info
		self.sys = config.get("tod_sys")
		self.site = d.site
		self.speed = d.speed
		if "noise" in d:
			self.noise = d.noise
		else:
			spikes = d.spikes[:2].T if "spikes" in d else None
			self.noise = nmat_measure.NmatBuildDelayed(model = config.get("noise_model"), spikes=spikes,
					cut=self.cut_noiseest)
		if "dark_tod" in d:
			self.dark_tod = d.dark_tod
		if "dark_cut" in d:
			self.dark_cut = d.dark_cut
		if "buddy_comps" in d:
			# Expand buddy_offs to {dt,daz,ddec}
			self.buddy_comps = d.buddy_comps
			self.buddy_offs  = np.concatenate([d.buddy_offs[...,:1]*0,d.buddy_offs],-1)
		self.autocut = d.autocut if "autocut" in d else []
		# Implementation details. d is our DataSet, which we keep around
		# because we need it to read tod consistently later. It will *not*
		# take part in any sample slicing operations, as that might make the
		# delayed tod read inconsistent with the rest. It could take part in
		# detector slicing as long as calibrate_tod operates on each detector
		# independently. This is true now, but would not be so if we did stuff
		# like common mode subtraction there. On the other hand, not doing this
		# would prevent slicing before reading from giving any speedup or memory
		# savings. I don't think allowing this should be a serious problem.
		self.d = d
		self.entry = entry
		def fmt_id(entry):
			if isinstance(entry, list): return "+".join([fmt_id(e) for e in entry])
			else:
				if entry.tag: return entry.id + ":" + entry.tag
				else: return entry.id
		self.id = fmt_id(entry)
		self.sampslices = []
		self.mapping = None

		# FIXME: debug test
		if config.get("dummy_cut") > 0:
			nmax  = int(config.get("dummy_cut_len"))
			# Power law between 1 and nmax, with slope -1.
			# C(w) = log(w)/log(nmax)
			# P(w) = w**-1/log(nmax)
			# w(C) = n**C
			# Mean: (nmax-1)/log(nmax)
			nmean = (nmax-1)/np.log(nmax)
			ncut = int(self.nsamp * config.get("dummy_cut") / nmean)
			cut_ranges = np.zeros([self.ndet, ncut, 2],int)
			w = (nmax**np.random.uniform(0, 1, size=[self.ndet, ncut])).astype(int)
			w = np.clip(w, 1, nmax)
			cut_ranges[:,:,0] = np.random.uniform(0, self.nsamp, size=[self.ndet, ncut]).astype(int)
			cut_ranges[:,:,0] = np.sort(cut_ranges[:,:,0],1)
			cut_ranges[:,:,1] = cut_ranges[:,:,0] + w
			cut_ranges[:,:,1] = np.clip(cut_ranges[:,:,1], 0, self.nsamp)
			cut_dummy = sampcut.from_list(cut_ranges, self.nsamp)
			print(np.mean(w), nmean, nmax, ncut)
			print("cut fraction before", float(self.cut.sum())/self.cut.size)
			self.cut *= cut_dummy
			print("cut fraction after", float(self.cut.sum())/self.cut.size)
Example 33
if len(ids) > 1:
	# Will process multiple files
	utils.mkdir(args.ofile)
for id in ids:
	print(id)
	entry = filedb.data[id]
	subdets = None
	absdets = None
	if args.absdets is not None:
		absdets = [int(w) for w in args.absdets.split(",")]
	elif args.dets is not None:
		subdets = [int(w) for w in args.dets.split(",")]

	fields = ["gain","tconst","cut","tod","boresight"]
	if args.fields: fields = args.fields.split(",")
	d = actdata.read(entry, fields=fields)
	if absdets: d.restrict(dets=absdets)
	if subdets: d.restrict(dets=d.dets[subdets])
	if args.calib: d = actdata.calibrate(d, exclude=["autocut"])
	elif args.manual_calib:
		ops = args.manual_calib.split(",")
		if "safe" in ops: d.boresight[1:] = utils.unwind(d.boresight[1:], period=360)
		if "rad" in ops: d.boresight[1:] *= np.pi/180
		if "bgap" in ops:
			bad = (d.flags!=0)*(d.flags!=0x10)
			for b in d.boresight: gapfill.gapfill_linear(b, bad, inplace=True)
		if "gain" in ops: d.tod *= d.gain[:,None]
		if "tgap" in ops: 
			gapfiller = {"copy":gapfill.gapfill_copy, "linear":gapfill.gapfill_linear}[config.get("gapfill")]
			gapfiller(d.tod, d.cut, inplace=True)
		if "slope" in ops:
Example 34
	nctod  = ind2-ind1
	dspecs = np.zeros([nctod,ndet,args.nbin_det], dtype=dtype)
	dzooms = np.zeros([nctod,ndet,args.nbin_zoom],dtype=dtype)
	tspecs = np.zeros([5,nctod,args.nbin],dtype=dtype)
	tcorrs = np.zeros([nctod,args.nbin],dtype=dtype)
	srates = np.zeros([nctod],dtype=dtype)
	mce_fsamps = np.zeros([nctod],dtype=dtype)
	mce_params = np.zeros([nctod,4],dtype=dtype)
	for ind in range(ind1+comm.rank, ind2, comm.size):
		i     = ind-ind1
		id    = ids[ind]
		entry = filedb.data[id]
		try:
			# Do not apply time constants. We want raw spectra so that we can
			# use them to estimate time constants ourselves.
			d     = actdata.read(entry, fields=["array_info", "tags", "site", "mce_filter", "gain","cut","tod","boresight"])
			d     = actdata.calibrate(d, exclude=["tod_fourier"]+(["autocut"] if not args.no_autocut else []))
			if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("empty tod")
		except (IOError, errors.DataMissing) as e:
			print "Skipped (%s)" % (e.message)
			continue
		print "Processing %s" % id
		srates[i] = d.srate
		mce_fsamps[i] = d.mce_fsamp
		mce_params[i] = d.mce_params[:4]
		# Compute the power spectrum
		d.tod = d.tod.astype(dtype)
		nsamp = d.nsamp
		srate = d.srate
		ifmax = d.srate/2
		ft    = fft.rfft(d.tod) / (nsamp*srate)**0.5
Example 35
imask = enmap.read_map(args.mask)
# Expand to 3 components, as the pointing code expects that
mask = enmap.zeros((3, ) + imask.shape[-2:], imask.wcs, dtype)
mask[0] = imask.reshape((-1, ) + imask.shape[-2:])[0]
del imask

utils.mkdir(args.odir)

myinds = np.arange(comm.rank, len(ids), comm.size)
mystats = []
for ind in myinds:
    id = ids[ind]
    entry = filedb.data[id]
    # We only need pointing to build this cut
    try:
        d = actdata.read(entry,
                         ["point_offsets", "boresight", "site", "array_info"])
        d = actdata.calibrate(d, exclude=["autocut"])
    except (errors.DataMissing, AttributeError) as e:
        print "Skipping %s (%s)" % (id, e.message)
        continue
    # Build a projector between samples and mask. This
    # requires us to massage d into scan form. It's getting
    # annoying that scan and data objects aren't compatible.
    bore = d.boresight.T.copy()
    bore[:, 0] -= bore[0, 0]
    scan = enscan.Scan(
        boresight=bore,
        offsets=np.concatenate([np.zeros(d.ndet)[:, None], d.point_offset], 1),
        comps=np.concatenate([np.ones(d.ndet)[:, None],
                              np.zeros((d.ndet, 3))], 1),
        mjd0=utils.ctime2mjd(d.boresight[0, 0]),
Example 36
model_fknee = 10
model_alpha = 10
sys = "hor:"+args.planet
if args.equator: sys += "/0_0"
utils.mkdir(args.odir)
prefix = args.odir + "/"
if args.tag: prefix += args.tag + "_"

for ind in range(comm.rank, len(ids), comm.size):
	id    = ids[ind]
	bid   = id.replace(":","_")
	entry = filedb.data[id]
	# Read the tod as usual
	try:
		with bench.show("read"):
			d = actdata.read(entry)
		with bench.show("calibrate"):
			d = actdata.calibrate(d, exclude=["autocut"])
		if d.ndet == 0 or d.nsamp < 2: raise errors.DataMissing("no data in tod")
	except errors.DataMissing as e:
		print "Skipping %s (%s)" % (id, e.message)
		continue
	print "Processing %s" % id
	# Very simple white noise model
	with bench.show("ivar"):
		tod  = d.tod
		del d.tod
		tod -= np.mean(tod,1)[:,None]
		tod  = tod.astype(dtype)
		diff = tod[:,1:]-tod[:,:-1]
		diff = diff[:,:diff.shape[-1]//csize*csize].reshape(d.ndet,-1,csize)
Example 37
		if len(sids) == 0:
			print("%s has 0 srcs: skipping" % id)
			continue
		try:
			nsrc = len(sids)
			print("%s has %d srcs: %s" % (id,nsrc,", ".join(["%d (%.1f)" % (i,a) for i,a in zip(sids,amps[sids])])))
		except TypeError as e:
			print("Weird: %s" % e)
			print(sids)
			print(amps)
			continue

		# Read the data
		entry = filedb.data[id]
		try:
			data = actdata.read(entry, exclude=["tod"], verbose=verbose)
			data+= actdata.read_tod(entry)
			data = actdata.calibrate(data, verbose=verbose)
			#print("fixme") # FIXME
			#data.restrict(dets=data.dets[100:150])
			# Avoid planets while building noise model
			if planet is not None:
				data.cut_noiseest *= actdata.cuts.avoidance_cut(data.boresight, data.point_offset, data.site, planet, R)
			if data.ndet < 2 or data.nsamp < 1: raise errors.DataMissing("no data in tod")
		except errors.DataMissing as e:
			print("%s skipped: %s" % (id, e))
			continue
		# Prepare our samples
		#data.tod -= np.mean(data.tod,1)[:,None]
		data.tod -= data.tod[:,None,0].copy()
		data.tod  = data.tod.astype(dtype)
Example 38
filedb.init()
ids = filedb.scans[args.sel]
ntod = len(ids)

cuts = np.zeros([ntod, ndet], dtype=np.uint8)
stats = None
if args.full_stats: stats = np.zeros([ntod, ndet, 4])
for si in range(comm.rank, ntod, comm.size):
    try:
        id = ids[si]
        entry = filedb.data[id]
        ofile = "%s/%s.txt" % (args.odir, id)
        try:
            d = actdata.read(
                entry,
                fields=["gain", "tconst", "cut", "tod", "boresight", "hwp"])
            d = actdata.calibrate(d, exclude=["tod_fourier", "autocut"])
            if d.ndet == 0 or d.nsamp == 0:
                raise errors.DataMissing("empty tod")
        except (IOError, errors.DataMissing) as e:
            print "Skipped (%s)" % (e.message)
            continue
        print "Read %s" % id
        # Filter the HWP signal
        print "no hwp filter"
        #d.tod = todfilter.filter_poly_jon(d.tod, d.boresight[1], hwp=d.hwp)

        ft = fft.rfft(d.tod)
        ps = np.abs(ft)**2 / (d.tod.shape[1] * srate)
        inds = bins * ps.shape[1] / fmax
Example 39
from enact import actdata, filedb
parser = config.ArgumentParser(os.environ["HOME"] + "/.enkirc")
parser.add_argument("pickup_map")
parser.add_argument("template")
parser.add_argument("sel_repr")
parser.add_argument("el", type=float)
parser.add_argument("ofile")
args  = parser.parse_args()

filedb.init()
nrow, ncol = 33, 32
# Read our template, which represents the output horizontal coordinates
template = enmap.read_map(args.template)
# Use our representative selector to get focalplane offsets and polangles
entry = filedb.data[filedb.scans[args.sel_repr][0]]
d = actdata.read(entry, ["boresight", "point_offsets", "polangle"])
d.boresight[2] = args.el # In degrees, calibrated in next step
d = actdata.calibrate(d, exclude=["autocut"])

def reorder(map, nrow, ncol, dets):
	return enmap.samewcs(map[utils.transpose_inds(dets,nrow,ncol)],map)

# Read our map, and give each row a weight
pickup = enmap.read_map(args.pickup_map)
pickup = reorder(pickup, nrow, ncol, d.dets)
weight = np.median((pickup[:,1:]-pickup[:,:-1])**2,-1)
weight[weight>0] = 1/weight[weight>0]

# Find the output pixel for each input pixel
baz = pickup[:1].posmap()[1,0]
bel = baz*0 + args.el * utils.degree