def prec_div_helper(signal, signal_cut, scans, weights, iwork, owork, ijunk, ojunk, noise=True):
	# The argument list of this one is so long that it almost doesn't save any
	# code.
	for scan in scans:
		with bench.mark("div_Pr_" + signal.name):
			signal.precompute(scan)
		with bench.mark("div_P_" + signal.name):
			tod = np.zeros((scan.ndet, scan.nsamp), signal.dtype)
			signal.forward(scan, tod, iwork)
			signal_cut.forward(scan, tod, ijunk)
		with bench.mark("div_white"):
			for weight in weights:
				weight(scan, tod)
			if noise:
				scan.noise.white(tod)
			for weight in weights[::-1]:
				weight(scan, tod)
		with bench.mark("div_PT_" + signal.name):
			signal_cut.backward(scan, tod, ojunk)
			signal.backward(scan, tod, owork)
		with bench.mark("div_Fr_" + signal.name):
			signal.free()
		times = [bench.stats[s]["time"].last for s in
				["div_P_" + signal.name, "div_white", "div_PT_" + signal.name]]
		L.debug("div %s %6.3f %6.3f %6.3f %s" % ((signal.name,) + tuple(times) + (scan.id,)))
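# This helper computes the same white-noise div as the simpler single-signal
# version further below, but supports extra sample weights and a separate
# cut signal, at the cost of the long argument list noted above.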
def A(self, x):
	"""Apply the A-matrix P'N"P to the zipped vector x, returning the result."""
	with bench.mark("A_init"):
		imaps = self.dof.unzip(x)
		omaps = [signal.zeros() for signal in self.signals]
		# Set up our input and output work arrays. The output work array will
		# accumulate the results, so it must start at zero.
		iwork = [signal.prepare(map) for signal, map in zip(self.signals, imaps)]
		owork = [signal.work() for signal in self.signals]
		#owork = [signal.prepare(map) for signal, map in zip(self.signals, omaps)]
	for scan in self.scans:
		# Set up a TOD for this scan
		tod = np.zeros([scan.ndet, scan.nsamp], self.dtype)
		# Project each signal onto the TOD (P) in reverse order. This is done
		# so that the cuts can override the other signals.
		with bench.mark("A_P"):
			for signal, work in list(zip(self.signals, iwork))[::-1]:
				with bench.mark("A_Pr_" + signal.name):
					signal.precompute(scan)
				with bench.mark("A_P_" + signal.name):
					signal.forward(scan, tod, work)
		# Apply the noise matrix (N")
		with bench.mark("A_N"):
			for weight in self.weights:
				weight(scan, tod)
			scan.noise.apply(tod)
			for weight in self.weights[::-1]:
				weight(scan, tod)
		# Project the TOD onto each signal (P') in normal order. This is done
		# to allow the cuts to zero out the relevant TOD samples first.
		with bench.mark("A_PT"):
			for signal, work in zip(self.signals, owork):
				with bench.mark("A_PT_" + signal.name):
					signal.backward(scan, tod, work)
				with bench.mark("A_Fr_" + signal.name):
					signal.free()
		times = [bench.stats[s]["time"].last for s in ["A_P", "A_N", "A_PT"]]
		L.debug("A P %5.3f N %5.3f P' %5.3f %s %4d" % (tuple(times) + (scan.id, scan.ndet)))
	# Collect all the results, and flatten them
	with bench.mark("A_reduce"):
		for signal, map, work in zip(self.signals, omaps, owork):
			signal.finish(map, work)
	# Priors
	with bench.mark("A_prior"):
		for signal, imap, omap in zip(self.signals, imaps, omaps):
			signal.prior(self.scans, imap, omap)
	return self.dof.zip(omaps)
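# A minimal, self-contained sketch (not part of this module) of the operator
# that A() implements: with pointing matrix P and inverse noise matrix N",
# the map-space normal-equation operator is A x = P' N" P x. The dense P and
# diagonal ninv below are illustrative stand-ins for the Signal and noise
# objects used in the real code.
import numpy as np

def toy_A(x, P, ninv):
	tod = P @ x        # forward projection, map -> TOD (P)
	tod = ninv * tod   # diagonal inverse-noise weighting (N")
	return P.T @ tod   # transpose projection, TOD -> map (P')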
def M(self, x):
	"""Apply the preconditioner to the zipped vector x."""
	with bench.mark("M"):
		maps = self.dof.unzip(x)
		for signal, map in zip(self.signals, maps):
			signal.precon(map)
		return self.dof.zip(maps)
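# Companion toy preconditioner under the same assumptions as toy_A above: a
# Jacobi-style M that approximates A^-1 by inverting diag(P' N" P) and
# zeroing unhit pixels. The real precon objects are attached to the signals
# elsewhere in this module.
def toy_M(x, P, ninv):
	div = np.einsum("ij,i,ij->j", P, ninv, P)  # diagonal of P' N" P
	return np.where(div != 0, x / div, 0)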
def calc_hits_map(hits, signal, signal_cut, scans):
	work = signal.prepare(hits)
	ojunk = signal_cut.prepare(signal_cut.zeros())
	for scan in scans:
		with bench.mark("hits_Pr_" + signal.name):
			signal.precompute(scan)
		with bench.mark("hits_PT"):
			tod = np.full((scan.ndet, scan.nsamp), 1, hits.dtype)
			signal_cut.backward(scan, tod, ojunk)
			signal.backward(scan, tod, work)
		with bench.mark("hits_Fr_" + signal.name):
			signal.free()
		times = [bench.stats[s]["time"].last for s in ["hits_PT"]]
		L.debug("hits %s %6.3f %s" % ((signal.name,) + tuple(times) + (scan.id,)))
	with bench.mark("hits_reduce"):
		signal.finish(hits, work)
	return hits[0].astype(np.int32)
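# Note on the unit-TOD trick above: back-projecting a TOD of ones yields
# hits[p] = number of samples falling in pixel p, i.e. P' 1, with cut samples
# first absorbed into the junk degrees of freedom so they do not count.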
def solve_cg(eq, nmax=1000, ofmt=None, dump_interval=10):
	cg = CG(eq.A, eq.b, M=eq.M, dot=eq.dof.dot)
	while cg.i < nmax:
		with bench.mark("cg_step"):
			cg.step()
		dt = bench.stats["cg_step"]["time"].last
		L.info("CG step %5d %15.7e %6.1f %6.3f" % (cg.i, cg.err, dt, dt / len(eq.scans)))
		xmap, xjunk = eq.dof.unzip(cg.x)
		if ofmt and cg.i % dump_interval == 0 and myid == 0:
			enmap.write_map(ofmt % cg.i, xmap)
	# Output benchmarking information
	bench.stats.write(benchfile)
	return cg.x
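# Hypothetical usage sketch ("myid" and "benchfile" are module-level globals
# in the original script, and eq is an Eqsys-like object built elsewhere in
# this file). Assuming eq.calc_b() has already been called:
#   x = solve_cg(eq, nmax=500, ofmt="step%04d.fits", dump_interval=50)
#   maps = eq.dof.unzip(x)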
def __init__(self, signal, scans):
	junk = signal.zeros()
	iwork = signal.prepare(junk)
	owork = signal.prepare(junk)
	iwork[:] = 1
	for scan in scans:
		with bench.mark("div_" + signal.name):
			tod = np.zeros((scan.ndet, scan.nsamp), iwork.dtype)
			signal.forward(scan, tod, iwork)
			scan.noise.white(tod)
			signal.backward(scan, tod, owork)
		times = [bench.stats[s]["time"].last for s in ["div_" + signal.name]]
		L.debug("div %s %6.3f %s" % ((signal.name,) + tuple(times) + (scan.id,)))
	signal.finish(junk, owork)
	self.idiv = junk * 0
	self.idiv[junk != 0] = 1 / junk[junk != 0]
	self.signal = signal
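# The unit-map trick above computes div = diag(P' N_white" P) for this
# signal: forward-project a map of ones, apply the white-noise weighting,
# and project back. Its elementwise inverse, idiv, then acts as a cheap
# binned preconditioner. A plausible apply step (the actual class interface
# may differ) would simply be:
#   def __call__(self, m): return m * self.idiv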
def dot(self, a, b):
	with bench.mark("dot"):
		return self.dof.dot(a, b)
def calc_b(self, itod=None):
	"""Compute b = P'N"d, and store it as the .b member. This involves
	reading in the TOD data and potentially estimating a noise model, so
	it is a heavy operation."""
	maps = [signal.zeros() for signal in self.signals]
	owork = [signal.work() for signal in self.signals]
	#owork = [signal.prepare(map) for signal, map in zip(self.signals, maps)]
	for scan in self.scans:
		# Get the actual TOD samples (d)
		if itod is None:
			with bench.mark("b_read"):
				tod = scan.get_samples()
				tod -= np.mean(tod, 1)[:, None]
				tod = tod.astype(self.dtype)
		else:
			tod = itod
		# Apply all filters (pickup filter, src subtraction, etc)
		with bench.mark("b_filter"):
			for filter in self.filters:
				filter(scan, tod)
		# Apply the noise model (N")
		with bench.mark("b_weight"):
			for weight in self.weights:
				weight(scan, tod)
		with bench.mark("b_N_build"):
			scan.noise = scan.noise.update(tod, scan.srate)
		with bench.mark("b_filter2"):
			for filter in self.filters2:
				filter(scan, tod)
		with bench.mark("b_N"):
			scan.noise.apply(tod)
		with bench.mark("b_weight"):
			for weight in self.weights[::-1]:
				weight(scan, tod)
		# Project onto signals
		with bench.mark("b_PT"):
			for signal, work in zip(self.signals, owork):
				with bench.mark("b_PT_" + signal.name):
					signal.precompute(scan)
					signal.backward(scan, tod, work)
					signal.free()
		del tod
		times = [bench.stats[s]["time"].last for s in
				["b_read", "b_filter", "b_N_build", "b_N", "b_PT"]]
		L.debug("b get %5.1f f %5.1f NB %5.3f N %5.3f P' %5.3f %s" % (tuple(times) + (scan.id,)))
	# Collect results
	with bench.mark("b_reduce"):
		for signal, map, work in zip(self.signals, maps, owork):
			signal.finish(map, work)
	with bench.mark("b_zip"):
		self.b = self.dof.zip(maps)
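# Toy counterpart of calc_b under the toy_A assumptions introduced earlier:
# the right-hand side of the normal equations is b = P' N" d for TOD data d,
# so that solving A x = b yields the maximum-likelihood map.
def toy_b(d, P, ninv):
	return P.T @ (ninv * d)  # weight the data, then project to map space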
	filemode='w')
# Initialize geometry
shape, wcs = enmap.fullsky_geometry(args.pix_size * utils.arcmin)
# Load theory file and save for later reference
ps = powspec.read_camb_full_lens(args.input_spec + "_lenspotentialCls.dat")
shutil.copyfile(args.input_spec + "_lenspotentialCls.dat", f'{cmb_dir}/lenspotentialCls.dat')
# Make phi totally uncorrelated with both T and E. This is necessary due to
# the way that separate phi and CMB seeds were introduced in an update to the
# pixell library around mid-Nov 2018.
ps[0, 1:, :] = 0.
ps[1:, 0, :] = 0.
# Initialize aberrator
if not args.skip_aberration:
	with bench.mark("init ab"):
		ab = aberration.Aberrator(shape, wcs, modulation=None)
	if rank == 0:
		logging.info(f'BENCH:\n{bench.stats}')
# Log package info
if rank == 0:
	logging.info("Saving package info...")
	logging.info(autil.pretty_info(autil.get_info(path=os.path.realpath(__file__))))
	logging.info(autil.pretty_info(autil.get_info(package='pixell')))
# Loop over tasks
for j, task in enumerate(my_tasks):
	# Get CMB and Phi seeds
hdu_divs = astropy.io.fits.ImageHDU(divs, map_to_header(divs), name="div")
hdu_ids = astropy.io.fits.TableHDU(src_ids, name="ids")
hdus = astropy.io.fits.HDUList([hdu_maps, hdu_divs, hdu_ids])
with utils.nowarn():
	hdus.writeto(fname, overwrite=True)

for ind in range(comm.rank, len(ids), comm.size):
	id = ids[ind]
	print("A", id, comm.rank)
	bid = id.replace(":", "_")
	entry = filedb.data[id]
	# Read the tod as usual
	try:
		with bench.mark("read"):
			d = actdata.read(entry)
		with bench.mark("calibrate"):
			d = actdata.calibrate(d, exclude=["autocut"])
		# Replace the beam with our dummy beam
		d.beam = beam
		if d.ndet < 2 or d.nsamp < 2:
			raise errors.DataMissing("no data in tod")
	except errors.DataMissing as e:
		print("%3d Skipping %s (%s)" % (comm.rank, id, str(e)))
		# Make a dummy output file so we can skip this tod in the future
		with open("%s%s_empty.txt" % (prefix, bid), "w"):
			pass
		continue
	print("%3d Processing %s [ndet:%d, nsamp:%d, nsrc:%d]" % (comm.rank, id, d.ndet, d.nsamp, len(tod_srcs[id])))
	# to the same side of the sky.
	poly = bounds[:, :, ind] * utils.degree
	poly[0] = utils.rewind(poly[0], poly[0, 0])
	srcpos = srcpos.copy()
	srcpos[0] = utils.rewind(srcpos[0], poly[0, 0])
	sids = np.where(utils.point_in_polygon(srcpos.T, poly.T))[0]
	sids = sorted(list(set(sids) & allowed))
	if len(sids) == 0:
		print("%s has 0 srcs: skipping" % id)
		continue
	nsrc = len(sids)
	print("%s has %d srcs: %s" % (id, nsrc, ", ".join(
		["%d (%.1f)" % (i, a) for i, a in zip(sids, amps[sids])])))
	entry = filedb.data[id]
	try:
		with bench.mark("read"):
			d = actdata.read(entry)
		with bench.mark("calib"):
			d = actdata.calibrate(d, exclude=["autocut"])
		if d.ndet < 2 or d.nsamp < 1:
			raise errors.DataMissing("no data in tod")
	except errors.DataMissing as e:
		print("%s skipped: %s" % (id, str(e)))
		continue
	tod = d.tod.astype(dtype)
	del d.tod
	# Apply high-pass filter. Will assume white tod after this
	with bench.mark("filter"):
		freqs = fft.rfftfreq(d.nsamp) * d.srate
		ft = fft.rfft(tod)
		ps = np.abs(ft)**2
		rpows = [measure_power(ps, rfreq, drfreq, d.srate)
				for rfreq, drfreq in zip(rfreqs, drfreqs)]
	pos[0] -= pos[0, refy, refx]
	pos[1] -= pos[1, refy, refx]
	r2 = np.sum(pos**2, 0)
	kernel = (r2 < rad**2).astype(dtype) / (np.pi * rad**2) / map.size**0.5 * map.area()
	kernel = np.roll(kernel, -refy, 0)
	kernel = np.roll(kernel, -refx, 1)
	res = enmap.ifft(enmap.fft(map) * np.conj(enmap.fft(kernel))).real
	return res

nphi = np.abs(utils.nint(360 / wcs.wcs.cdelt[0]))
for chunk in range(comm.rank, nchunk, comm.size):
	i1 = chunk * csize
	i2 = min((chunk + 1) * csize, ntod)
	# Split the hits into horizontal pixel ranges
	pix_ranges, weights = [], []
	with bench.mark("get"):
		for i in range(i1, i2):
			pr, w = get_pix_ranges(shape, wcs, box[:, :, i], daz, nt, azdown=args.azdown, ndet=ndets[i])
			pix_ranges.append(pr)
			weights.append(w)
		pix_ranges = np.concatenate(pix_ranges, 0)
		weights = np.concatenate(weights, 0)
	with bench.mark("add"):
		add_weight(omap, pix_ranges, weights, nphi)
	print("%4d %4d %7.4f %7.4f" % (chunk, comm.rank, bench.stats.get("get"), bench.stats.get("add")))
# Combine weights
omap = utils.allreduce(omap, comm)
# Change unit from seconds per pixel to seconds per square arcmin
if comm.rank == 0:
signal_cut = mapmaking.SignalCut(scans, dtype, comm_sub)
signal_phase = mapmaking.SignalPhase(scans, pids=[0] * len(scans),
		patterns=pboxes[pid:pid + 1], array_shape=(args.nrow, args.ncol),
		res=daz, dtype=dtype, comm=comm_sub, cuts=signal_cut, ofmt="phase")
signal_cut.precon = mapmaking.PreconCut(signal_cut, scans)
signal_phase.precon = mapmaking.PreconPhaseBinned(signal_phase, signal_cut, scans, weights)
filters = []
if args.dedark:
	filters.append(mapmaking.FilterDedark())
if args.demode:
	filters.append(mapmaking.FilterPhaseBlockwise(daz=4 * utils.arcmin, niter=10))
if args.decommon:
	filters.append(mapmaking.FilterCommonBlockwise())
eq = mapmaking.Eqsys(scans, [signal_cut, signal_phase], weights=weights,
		filters=filters, dtype=dtype, comm=comm_sub)
# Write precon
signal_phase.precon.write(proot)
# Solve for the given number of steps
eq.calc_b()
cg = CG(eq.A, eq.b, M=eq.M, dot=eq.dof.dot)
while cg.i < args.nstep:
	with bench.mark("cg_step"):
		cg.step()
	dt = bench.stats["cg_step"]["time"].last
	if comm_sub.rank == 0:
		L.debug("CG step %5d %15.7e %6.1f %6.3f" % (cg.i, cg.err, dt, dt / max(1, len(eq.scans))))
	eq.write(proot, "map%04d" % cg.i, cg.x)
L.debug("Done")
hits = area[0] * 0
nscan = 0
srate = 0
speed = 0
site = {}
inspec = np.zeros(nbin)
offsets = np.zeros([ndet_array, 2])
det_hit = np.zeros([ndet_array], dtype=int)
for ind, d in scanutils.scan_iterator(pids, myinds, actscan.ACTScan, filedb.data,
		dets=args.dets, downsample=config.get("downsample")):
	id = pids[ind]
	with bench.mark("pbuild"):
		# Build pointing matrices
		pmap = pmat.PmatMap(d, area)
		pcut = pmat.PmatCut(d)
	with bench.mark("tod"):
		# Get tod
		tod = d.get_samples()
		tod -= np.mean(tod, 1)[:, None]
		tod = tod.astype(dtype)
		junk = np.zeros(pcut.njunk, dtype=dtype)
	with bench.mark("nmat"):
		# Build noise model
		ft = fft.rfft(tod) * tod.shape[1]**-0.5
		nmat = nmat_measure.detvecs_simple(ft, d.srate)
		del ft
	with bench.mark("rhs"):
def fastweight(shape, wcs, db, weight="det", array_rad=0.7 * utils.degree, comm=None,
		dtype=np.float64, daz=0.5 * utils.degree, nt=4, chunk_size=100, site=None,
		verbose=False, normalize=True):
	# Get the boresight bounds for each TOD
	ntod = len(db)
	mids = np.array([db.data["t"], db.data["az"], db.data["el"]])
	widths = np.array([db.data["dur"], db.data["waz"], db.data["wel"]])
	box = np.array([mids - widths / 2, mids + widths / 2])
	box[:, 1:] *= utils.degree
	ndets = db.data["ndet"]
	# Set up our output map
	omap = enmap.zeros(shape, wcs, dtype)
	# Sky horizontal period in pixels
	nphi = np.abs(utils.nint(360 / wcs.wcs.cdelt[0]))
	# Loop through chunks
	nchunk = (ntod + chunk_size - 1) // chunk_size
	if comm:
		rank, size = comm.rank, comm.size
	else:
		rank, size = 0, 1
	for chunk in range(rank, nchunk, size):
		i1 = chunk * chunk_size
		i2 = min((chunk + 1) * chunk_size, ntod)
		# Split the hits into horizontal pixel ranges
		pix_ranges, weights = [], []
		with bench.mark("get"):
			for i in range(i1, i2):
				ndet_eff = ndets[i] if weight == "det" else 1000.0
				pr, w = get_pix_ranges(shape, wcs, box[:, :, i], daz, nt, ndet=ndet_eff, site=site)
				if pr is None: continue
				pix_ranges.append(pr)
				weights.append(w)
			if len(pix_ranges) == 0: continue
			pix_ranges = np.concatenate(pix_ranges, 0)
			weights = np.concatenate(weights, 0)
		with bench.mark("add"):
			add_weight(omap, pix_ranges, weights, nphi)
		if verbose:
			print("%4d %4d %7.4f %7.4f" % (chunk, rank, bench.stats.get("get"), bench.stats.get("add")))
	if comm:
		omap = utils.allreduce(omap, comm)
	# Change unit from seconds per pixel to seconds per square arcmin
	if normalize:
		pixarea = omap.pixsizemap() / utils.arcmin**2
		omap /= pixarea
		omap[~np.isfinite(omap)] = 0
	if array_rad:
		omap = smooth_tophat(omap, array_rad)
	omap[omap < 1e-6] = 0
	return omap
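# Hypothetical invocation, assuming a todinfo-style database "db" exposing
# the fields read above ("t", "az", "el", "dur", "waz", "wel", "ndet") and
# the fullsky geometry helper used elsewhere in this repo:
#   shape, wcs = enmap.fullsky_geometry(0.5 * utils.arcmin)
#   wmap = fastweight(shape, wcs, db, comm=comm, verbose=True)
#   enmap.write_map("weight.fits", wmap)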