def stationary_cut(az, tol=None):
    """Cut samples where the telescope isn't moving at the beginning and end of the tod."""
    tol = config.get("cut_stationary_tol", tol) * utils.degree
    b1  = np.where(np.abs(az - az[0])  > tol)[0]
    b2  = np.where(np.abs(az - az[-1]) > tol)[0]
    if len(b1) == 0 or len(b2) == 0:
        # Entire tod cut!
        return sampcut.full(1, len(az))
    else:
        return sampcut.from_list([[[0, b1[0]], [b2[-1], len(az)]]], len(az))
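# Illustrative sketch only (not part of the module): what stationary_cut does for
# a synthetic scan. The az values and tolerance are invented, and passing tol
# explicitly is assumed to override the configured cut_stationary_tol (in degrees).
def _stationary_cut_example():
    az = np.concatenate([
        np.full(100, 0.0),            # telescope parked at the start
        np.linspace(0.0, 0.5, 800),   # scanning in azimuth
        np.full(100, 0.5)])           # parked again at the end
    cut = stationary_cut(az, tol=0.05)
    # The cut covers a leading block until az has moved more than tol away from
    # az[0], and a similar trailing block where az has settled near az[-1].
    print("cut %d of %d samples" % (cut.sum(), cut.nsamp))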
def read_cut(fname, permissive=True):
    """Read the act cut format, returning ids, cuts, offset, where cuts is a Multirange object."""
    nsamp, ndet, offset = None, None, None
    dets, cuts = [], []
    for line in utils.lines(fname):
        if "=" in line:
            # Header key-value pair
            toks = line.split()
            if   toks[0] == "n_det":       ndet   = int(toks[2])
            elif toks[0] == "n_samp":      nsamp  = int(toks[2])
            elif toks[0] == "samp_offset": offset = int(toks[2])
            else: continue # Ignore others
        elif ":" in line:
            parts = line.split(":")
            uid   = int(parts[0].split()[0])
            if len(parts) > 1 and "(" in parts[1]:
                toks   = parts[1].split()
                ranges = np.array([[int(w) for w in tok[1:-1].split(",")] for tok in toks])
                ranges = np.minimum(ranges, nsamp)
                cuts.append(sampcut.from_list([ranges], nsamp))
            # Handle uncut detectors
            else:
                cuts.append(sampcut.empty(1, nsamp))
            dets.append(uid)
    # Add any missing detectors if we are in permissive mode
    if permissive:
        missing = set(range(ndet)) - set(dets)
        for uid in missing:
            dets.append(uid)
            cuts.append(sampcut.empty(1, nsamp))
    # Filter out fully cut tods
    odets, ocuts = [], []
    for det, cut in zip(dets, cuts):
        if cut.sum() < cut.nsamp:
            odets.append(det)
            ocuts.append(cut)
    if len(ocuts) == 0: ocuts = sampcut.full(0, nsamp)
    else:               ocuts = sampcut.stack(ocuts)
    return odets, ocuts, offset
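# Hedged sketch of the text format read_cut expects, inferred from the parser
# above (all numbers and the filename are invented). Header lines are
# whitespace-separated "key = value" triples; detector lines start with the
# detector uid, a colon, and zero or more "(start,end)" ranges. A detector line
# with no ranges counts as uncut.
#
#   n_det       = 1056
#   n_samp      = 260000
#   samp_offset = 100
#   12: (0,3500) (120000,120400)
#   13:
#
# dets, cuts, offset = read_cut("example.cuts")
# print(len(dets), cuts.sum(), offset)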
def tod_end_cut(nsamp, srate, cut_secs=None):
    """Cut cut_secs seconds of data at each end of the tod"""
    ncut = int(config.get("cut_tod_ends_nsec", cut_secs) * srate)
    return sampcut.from_list([[[0, ncut], [nsamp - ncut, nsamp]]], nsamp)
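# Hedged usage sketch (sample rate and duration invented). Following the pattern
# used elsewhere in this listing, an end cut would be merged into an existing cut
# with "*=", and float(cut.sum())/cut.size gives the resulting cut fraction;
# whether a one-row cut broadcasts across detectors is an assumption here.
def _end_cut_example(nsamp=260000, srate=400.0):
    ends = tod_end_cut(nsamp, srate, cut_secs=0.5)   # half a second at each end
    print("end-cut fraction", float(ends.sum()) / ends.nsamp)
    return ends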
def to_sampcut(self):
    # This could be optimized, since sampcuts and flagranges have some
    # similarities in the internal representation
    ranges = self.to_ranges()
    return sampcut.from_list(ranges, self.nsamp)
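# Small illustration (numbers invented) of the ranges layout sampcut.from_list
# takes here: one list of [start,end) pairs per detector, with the total sample
# count as the second argument. The end bounds look half-open, judging by how
# nsamp itself is used as an upper bound in tod_end_cut above.
#
#   ranges = [[[10, 20], [100, 150]],   # detector 0
#             [[0, 5]]]                 # detector 1
#   cut = sampcut.from_list(ranges, 200)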
def __init__(self, entry, subdets=None, d=None, verbose=False, dark=False):
    self.fields = ["gain", "mce_filter", "tags", "polangle", "tconst", "hwp", "cut",
                   "point_offsets", "boresight", "site", "tod_shape", "array_info",
                   "beam", "pointsrcs", "buddies"]
    if dark: self.fields += ["dark"]
    if config.get("noise_model") == "file":
        self.fields += ["noise"]
    else:
        if config.get("cut_noise_whiteness"): self.fields += ["noise_cut"]
        if config.get("cut_spikes"): self.fields += ["spikes"]
    if d is None:
        d = actdata.read(entry, self.fields, verbose=verbose)
        d = actdata.calibrate(d, verbose=verbose)
        if subdets is not None:
            d.restrict(dets=d.dets[subdets])
    if d.ndet == 0 or d.nsamp == 0:
        raise errors.DataMissing("No data in scan")
    ndet = d.ndet
    # Necessary components for Scan interface
    self.mjd0      = utils.ctime2mjd(d.boresight[0, 0])
    self.boresight = np.ascontiguousarray(d.boresight.T.copy())  # [nsamp,{t,az,el}]
    self.boresight[:, 0] -= self.boresight[0, 0]
    self.offsets   = np.zeros([ndet, self.boresight.shape[1]])
    self.offsets[:, 1:] = d.point_offset
    self.cut          = d.cut.copy()
    self.cut_noiseest = d.cut_noiseest.copy()
    self.comps      = np.zeros([ndet, 4])
    self.beam       = d.beam
    self.pointsrcs  = d.pointsrcs
    self.comps      = d.det_comps
    self.hwp        = d.hwp
    self.hwp_phase  = d.hwp_phase
    self.dets       = d.dets
    self.dgrid      = (d.array_info.nrow, d.array_info.ncol)
    self.array_info = d.array_info
    self.sys        = config.get("tod_sys", entry.tod_sys if "tod_sys" in entry else None)
    self.site       = d.site
    self.speed      = d.speed
    if "noise" in d:
        self.noise = d.noise
    else:
        spikes = d.spikes[:2].T if "spikes" in d else None
        self.noise = nmat_measure.NmatBuildDelayed(
            model=config.get("noise_model"), spikes=spikes, cut=self.cut_noiseest)
    if "dark_tod" in d: self.dark_tod = d.dark_tod
    if "dark_cut" in d: self.dark_cut = d.dark_cut
    if "buddy_comps" in d:
        # Expand buddy_offs to {dt,daz,ddec}
        self.buddy_comps = d.buddy_comps
        self.buddy_offs  = np.concatenate([d.buddy_offs[..., :1] * 0, d.buddy_offs], -1)
    self.autocut = d.autocut if "autocut" in d else []
    # Implementation details. d is our DataSet, which we keep around
    # because we need it to read tod consistently later. It will *not*
    # take part in any sample slicing operations, as that might make the
    # delayed tod read inconsistent with the rest. It could take part in
    # detector slicing as long as calibrate_tod operates on each detector
    # independently. This is true now, but would not be so if we did stuff
    # like common mode subtraction there. On the other hand, not doing this
    # would prevent slicing before reading from giving any speedup or memory
    # savings. I don't think allowing this should be a serious problem.
    self.d     = d
    self.entry = entry
    def fmt_id(entry):
        if isinstance(entry, list):
            return "+".join([fmt_id(e) for e in entry])
        else:
            if entry.tag: return entry.id + ":" + entry.tag
            else:         return entry.id
    self.id = fmt_id(entry)
    self.sampslices = []
    self.mapping = None
    # FIXME: debug test
    if config.get("dummy_cut") > 0:
        nmax = int(config.get("dummy_cut_len"))
        # Power law between 1 and nmax, with slope -1.
        # C(w) = log(w)/log(nmax)
        # P(w) = w**-1/log(nmax)
        # w(C) = nmax**C
        # Mean: (nmax-1)/log(nmax)
        nmean = (nmax - 1) / np.log(nmax)
        ncut  = int(self.nsamp * config.get("dummy_cut") / nmean)
        cut_ranges = np.zeros([self.ndet, ncut, 2], int)
        w = (nmax**np.random.uniform(0, 1, size=[self.ndet, ncut])).astype(int)
        # np.clip does not modify its input, so keep the returned array
        w = np.clip(w, 1, nmax)
        cut_ranges[:, :, 0] = np.random.uniform(0, self.nsamp, size=[self.ndet, ncut]).astype(int)
        cut_ranges[:, :, 0] = np.sort(cut_ranges[:, :, 0], 1)
        cut_ranges[:, :, 1] = cut_ranges[:, :, 0] + w
        cut_ranges[:, :, 1] = np.clip(cut_ranges[:, :, 1], 0, self.nsamp)
        cut_dummy = sampcut.from_list(cut_ranges, self.nsamp)
        print(np.mean(w), nmean, nmax, ncut)
        print("cut fraction before", float(self.cut.sum()) / self.cut.size)
        self.cut *= cut_dummy
        print("cut fraction after", float(self.cut.sum()) / self.cut.size)
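# The nmean used above follows from drawing w = nmax**C with C uniform on [0,1):
# E[w] = integral_0^1 nmax**c dc = (nmax - 1)/log(nmax). A small standalone check
# (illustrative only; the integer truncation and clipping in the code bias the
# empirical mean slightly low):
def _check_dummy_cut_mean(nmax=1000, n=100000):
    w = nmax**np.random.uniform(0, 1, size=n)
    # The two numbers should agree to within a few percent for large n
    return np.mean(w), (nmax - 1) / np.log(nmax)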