def merge(tagdatas):
    """Merge two or more tagdbs into a total one, which will have the
    union of the ids."""
    # First get rid of empty inputs
    tagdatas = [data for data in tagdatas if len(data["id"]) > 0]
    # Generate the union of ids, and the index of each tagset into it.
    tot_ids  = utils.union([data["id"] for data in tagdatas])
    inds     = [utils.find(tot_ids, data["id"]) for data in tagdatas]
    nid      = len(tot_ids)
    data_tot = {}
    for di, data in enumerate(tagdatas):
        for key, val in data.items():
            if key not in data_tot:
                # Hard to find an appropriate default value for all types.
                # We use False for bool to let tags accumulate, -1 as
                # probably the most common placeholder value for ints, and
                # NaN for strings and floats.
                oval = np.zeros(val.shape[:-1] + (nid,), val.dtype)
                if oval.dtype == bool:
                    oval[:] = False
                elif np.issubdtype(oval.dtype, np.integer):
                    oval[:] = -1
                else:
                    oval[:] = np.nan
                data_tot[key] = oval
            # Boolean flags combine OR-wise, to let us mention the same
            # id in multiple files
            if val.dtype == bool:
                data_tot[key][..., inds[di]] |= val
            else:
                data_tot[key][..., inds[di]] = val
    return data_tot
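
# Example (hedged sketch): merging two toy tag databases. This assumes
# utils.union returns the sorted union of the id arrays and utils.find
# returns the index of each id within that union, which is what merge
# above relies on.
a = {"id": np.array(["tod1", "tod2"]), "deep": np.array([True, False])}
b = {"id": np.array(["tod2", "tod3"]), "deep": np.array([True, True])}
tot = merge([a, b])
print(tot["deep"])  # -> [ True  True  True]: tod2 comes out True because
                    # b flags it even though a does not (OR-accumulation)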
def match_existing(aset, ctimes):
    # Given an aset as returned by read_existing, convert TOD ids to ctimes
    # and match them against our existing ctime list. Returns an [nctime]
    # array containing the index of the input split each ctime belongs to.
    # If any id is present in aset but not in ctimes, an IndexError is
    # raised. Unrestricted ctimes will be set to -1.
    ind_ownership = np.full(len(ctimes), -1, int)
    for array in aset:
        for split, ids in enumerate(aset[array]):
            my_ctimes = ids2ctimes(ids)
            my_inds   = utils.find(ctimes, my_ctimes)
            ind_ownership[my_inds] = split
    return ind_ownership
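
# Example (hedged): assuming ids2ctimes strips the leading ctime field from
# ACT-style "ctime.ctime.array" ids, this marks which split owns each ctime.
ctimes = np.array([1400000000, 1400000100, 1400000200])
aset   = {"pa2": [np.array(["1400000000.1400000000.ar2"]),   # split 0
                  np.array(["1400000200.1400000200.ar2"])]}  # split 1
print(match_existing(aset, ctimes))  # -> [ 0 -1  1]: the middle ctime
                                     # appears in no split, so it stays -1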
def select(self, ids):
    """Return a tagdb which only contains the selected ids."""
    # Extract the subids
    ids, subids = split_ids(ids)
    # Restrict to the subset of these ids
    inds  = utils.find(self.ids, ids)
    odata = {key: val[..., inds] for key, val in self.data.items()}
    # Update subids
    odata["subids"] = np.array([merge_subid(a, b) for a, b in zip(odata["subids"], subids)])
    res = self.copy()
    res.data = odata
    return res
def get_tod_groups(ids, samelen=True):
    """Group TOD ids whose leading ctimes agree to within 10 s. If samelen
    is True, only groups with the full (largest) number of members are kept."""
    times  = np.array([float(id[:id.index(".")]) for id in ids])
    labels = utils.label_unique(times, rtol=0, atol=10)
    nlabel = np.max(labels) + 1
    groups = [ids[labels == label] for label in range(nlabel)]
    # Try to preserve original ordering
    first     = [group[0] for group in groups]
    orig_inds = utils.find(ids, first)
    order     = np.argsort(orig_inds)
    groups    = [groups[i] for i in order]
    if samelen:
        nsub   = np.max(np.bincount(labels))
        groups = [g for g in groups if len(g) == nsub]
    return groups
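
# Example (hedged): ids whose leading ctimes agree to within 10 s are
# grouped together; with samelen=False incomplete groups are kept too.
ids = np.array(["1400000000.1400000000.ar2", "1400000003.1400000003.ar3",
                "1400000100.1400000100.ar2"])
for group in get_tod_groups(ids, samelen=False):
    print(list(group))
# -> ['1400000000.1400000000.ar2', '1400000003.1400000003.ar3']
#    ['1400000100.1400000100.ar2']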
def select(self, ids):
    """Return a tagdb which only contains the selected ids."""
    if isinstance(ids, str):
        ids = self.query(ids)
    # Extract the subids
    ids, subids = split_ids(ids)
    # Restrict to the subset of these ids
    inds  = utils.find(self.ids, ids)
    odata = dslice(self.data, inds)
    # Update subids
    odata["subids"] = np.array([merge_subid(a, b) for a, b in zip(odata["subids"], subids)])
    res = self.copy()
    res.data = odata
    return res
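
# dslice is not defined in this excerpt; judging from the dict comprehension
# this call replaced in the earlier version of select above, a minimal
# stand-in would slice every field of the data dict along its last (id) axis:
def dslice(data, inds):
    return {key: val[..., inds] for key, val in data.items()}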
def select(self, ids):
    """Return a tagdb which only contains the selected ids."""
    if isinstance(ids, str):
        ids = self.query(ids)
    ids = np.asarray(ids)
    if issubclass(ids.dtype.type, np.integer):
        # Fast integer slicing
        return self.__class__(dslice(self.data, ids))
    else:
        # Slice by id. Extract the subids
        ids, subids = split_ids(ids)
        # Restrict to the subset of these ids
        inds  = utils.find(self.ids, ids)
        odata = dslice(self.data, inds)
        # Update subids
        odata["subids"] = np.array([merge_subid(a, b) for a, b in zip(odata["subids"], subids)])
        res = self.copy()
        res.data = odata
        return res
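
# Usage sketch (hedged): with db an instance of the class this method
# belongs to, all of these are assumed to return a restricted database:
# sub = db.select(np.arange(100))   # fast integer slicing path
# sub = db.select(db.ids[:100])     # slice by explicit ids
# sub = db.select("deep56")         # a string is first run through query()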
elif param["name"] in ["addphase", "deprojectphase"]:
    # (enclosing branch reconstructed from the inner addphase/deprojectphase dispatch below)
    # Read the info file to see which scanning patterns were used in the phase dir
    phasemap = mapmaking.PhaseMap.read(fname)
    npat     = len(phasemap.patterns)
    # Find which phase map part each scan corresponds to. We get all the scan
    # boxes, and then add our existing scanning pattern boxes as references.
    # We can then just see which scans get grouped with which patterns.
    my_boxes = scanutils.get_scan_bounds(myscans)
    boxes    = utils.allgatherv(my_boxes, comm)
    rank     = utils.allgatherv(np.full(len(my_boxes), comm.rank), comm)
    boxes    = np.concatenate([phasemap.patterns, boxes], 0)
    labels   = utils.label_unique(boxes, axes=(1, 2), atol=tol)
    if comm.rank == 0:
        print("labels")
        for b, l in zip(boxes, labels):
            print("%8.3f %8.3f %8.3f %5d" % (b[0,0]/utils.degree, b[0,1]/utils.degree, b[1,1]/utils.degree, l))
    pids   = utils.find(labels[:npat], labels[npat:])
    mypids = pids[rank == comm.rank]
    if param["name"] == "addphase":
        filter = mapmaking.FilterAddPhase(myscans, phasemap, mypids, mmul=mul, tmul=tmul)
    else:
        filter = mapmaking.FilterDeprojectPhase(myscans, phasemap, mypids, int(param["perdet"]) > 0, mmul=mul, tmul=tmul)
elif param["name"] == "scale":
    value = float(param["value"])
    if value == 1: continue
    filter = mapmaking.FilterScale(value)
elif param["name"] == "null":
    filter = mapmaking.FilterNull()
elif param["name"] == "buddy":
    if "map" not in param:
        raise ValueError("-F buddy needs a map file to subtract. e.g. -F buddy:map=foo.fits")
    mode = int(param["value"])
    sys  = param["sys"]
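
# Hedged 1D sketch of the matching trick above: the reference patterns are
# prepended to the scan values, near-duplicates are grouped with a tolerance
# (utils.label_unique does this for whole boxes), and each scan's label is
# then looked up among the pattern labels to recover its pattern id.
import numpy as np

def label_unique_1d(vals, atol):
    # Chain values that agree to within atol into the same label
    order = np.argsort(vals)
    labels = np.zeros(len(vals), int)
    labels[order] = np.concatenate([[0], np.cumsum(np.diff(vals[order]) > atol)])
    return labels

patterns = np.array([50.0, 60.0])            # reference values (npat = 2)
scans    = np.array([60.001, 49.999, 60.0])  # one value per scan
labels   = label_unique_1d(np.concatenate([patterns, scans]), atol=0.01)
# utils.find(labels[:npat], labels[npat:]) in the code above does this lookup:
pids = np.array([np.nonzero(labels[:2] == l)[0][0] for l in labels[2:]])
print(pids)  # -> [1 0 1]: which pattern each scan belongs to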
def merge(franges):
    """Given a list of flagranges franges covering the same time period,
    merge them into a single flagrange containing the union of their cut
    causes."""
    # We don't currently support changing sample ranges or detectors present;
    # they could be added if necessary.
    for i, fr in enumerate(franges):
        assert fr.nsamp == franges[0].nsamp, "Inconsistent nsamp in Flagrange #%d: %d != %d" % (i, fr.nsamp, franges[0].nsamp)
        assert fr.sample_offset == franges[0].sample_offset, "Inconsistent sample_offset in Flagrange #%d: %d != %d" % (i, fr.sample_offset, franges[0].sample_offset)
        assert np.array_equal(fr.dets, franges[0].dets), "Inconsistent detectors in Flagrange #%d" % i
    F = franges[0]
    # Find the flag names across all inputs
    name_list  = [fr.flag_names for fr in franges]
    name_union = utils.union(name_list)
    # And find the index of each local name into the name union
    name_rel   = [np.searchsorted(name_union, names) for names in name_list]
    # Translate these indices into an index and bit into the output flag array
    nbyte_out    = (len(name_union) + 7) // 8
    flag_ind_map = [nrel // 8 for nrel in name_rel]
    flag_bit_map = [1 << (nrel % 8) for nrel in name_rel]
    # How should we handle the derived flags? These should just be the union
    # of their local definitions. To do this we must translate each of their
    # definitions into names
    derived_flags_dict = {}
    for fr in franges:
        for dname, fmask in zip(fr.derived_names, fr.derived_masks):
            if dname not in derived_flags_dict:
                derived_flags_dict[dname] = [set(), set()]
            for fi in range(fr.nflag):
                byte = fi // 8
                bit  = 1 << (fi % 8)
                for op in range(2):
                    if fmask[op][byte] & bit:
                        derived_flags_dict[dname][op].add(fr.flag_names[fi])
    # Turn this set of names back into indices and bits
    derived_names = sorted(derived_flags_dict.keys())
    derived_masks = np.zeros([len(derived_names), 2, nbyte_out], np.uint8)
    for fi, dname in enumerate(derived_names):
        for op in range(2):
            for fname in derived_flags_dict[dname][op]:
                find = utils.find(name_union, fname)
                derived_masks[fi][op][find // 8] |= 1 << (find % 8)
    # We can avoid a slow loop over detectors by expanding indices to a global indexing
    stack_inds_list = []
    for fr in franges:
        nper = fr.stack_bounds[1:] - fr.stack_bounds[:-1]
        stack_inds_list.append(fr.index_stack + np.repeat(np.arange(fr.ndet) * fr.nsamp, nper))
    # We will have an index anywhere any one of the input ranges has an index
    stack_inds_union = utils.union(stack_inds_list)
    # and we need to know how each input range maps to it
    stack_inds_rel = [np.searchsorted(stack_inds_union, sinds) for sinds in stack_inds_list]
    # Populate the output flag stack
    flag_stack = np.zeros([len(stack_inds_union), nbyte_out], np.uint8)
    for i, fr in enumerate(franges):
        for fi in range(fr.nflag):
            fo = name_rel[i][fi]
            ibyte, ibit = fi // 8, 1 << (fi % 8)
            obyte, obit = fo // 8, 1 << (fo % 8)
            # We use np.repeat to handle the flags staying on until they
            # are changed again.
            vals = np.full(len(fr.flag_stack), obit, np.uint8)
            vals[(fr.flag_stack[:, ibyte] & ibit) == 0] = 0
            flag_stack[:, obyte] |= fill_right(stack_inds_rel[i], vals, len(flag_stack))
    # Undo expansion and recover stack bounds
    index_stack  = stack_inds_union % F.nsamp
    stack_dets   = stack_inds_union // F.nsamp
    stack_bounds = np.concatenate([[0], np.searchsorted(stack_dets, np.arange(F.ndet), side="right")])
    # Phew! Finally done. Return the resulting Flagrange
    res = Flagrange(F.nsamp, index_stack, flag_stack, stack_bounds,
                    dets=F.dets, flag_names=name_union, derived_masks=derived_masks,
                    derived_names=derived_names, sample_offset=F.sample_offset)
    return res
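
# The byte/bit packing convention used throughout merge: flag number fi is
# stored in byte fi//8, bit fi%8, of each uint8 row of the flag stack.
import numpy as np
nflag = 11
nbyte = (nflag + 7) // 8          # -> 2 output bytes, as in nbyte_out above
row   = np.zeros(nbyte, np.uint8)
fi    = 9                         # setting flag 9 ...
row[fi // 8] |= 1 << (fi % 8)     # ... lands in byte 1, bit 1
assert (row[fi // 8] & (1 << (fi % 8))) != 0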
elif param["name"] in ["addphase", "deprojectphase"]:
    # (enclosing branch reconstructed from the inner addphase/deprojectphase dispatch below)
    # Read the info file to see which scanning patterns were used in the phase dir
    phasemap = mapmaking.PhaseMap.read(fname, rewind=True)
    npat     = len(phasemap.patterns)
    # Find which phase map part each scan corresponds to. We get all the scan
    # boxes, and then add our existing scanning pattern boxes as references.
    # We can then just see which scans get grouped with which patterns.
    my_boxes = scanutils.get_scan_bounds(myscans)
    boxes    = utils.allgatherv(my_boxes, comm)
    rank     = utils.allgatherv(np.full(len(my_boxes), comm.rank), comm)
    boxes    = np.concatenate([phasemap.patterns, boxes], 0)
    labels   = utils.label_unique(boxes, axes=(1, 2), atol=tol)
    if comm.rank == 0:
        print("labels")
        for b, l in zip(boxes, labels):
            print("%8.3f %8.3f %8.3f %5d" % (b[0,0]/utils.degree, b[0,1]/utils.degree, b[1,1]/utils.degree, l))
    pids   = utils.find(labels[:npat], labels[npat:], default=-1)
    mypids = pids[rank == comm.rank]
    if np.any(mypids < 0):
        bad = np.where(mypids < 0)[0]
        for bi in bad:
            print("Warning: No matching scanning pattern found for %s. Using pattern 0" % (myscans[bi].id))
            mypids[bi] = 0
    if param["name"] == "addphase":
        filter = mapmaking.FilterAddPhase(myscans, phasemap, mypids, mmul=mul, tmul=tmul)
    else:
        filter = mapmaking.FilterDeprojectPhase(myscans, phasemap, mypids, int(param["perdet"]) > 0, mmul=mul, tmul=tmul)
elif param["name"] == "scale":
    value = float(param["value"])
    if value == 1: continue
    filter = mapmaking.FilterScale(value)
elif param["name"] == "null":
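
# Hedged note on the default=-1 argument relative to the older version of
# this block above: utils.find is assumed to fail on unmatched elements
# unless given a default, so scans whose box matches no known pattern now
# come back as -1 and are remapped to pattern 0 with a warning instead of
# aborting the run, e.g. (assumed semantics):
# utils.find([0, 1], [1, 5], default=-1)  ->  [1, -1]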