def reduce(self, comm):
    res = self.copy()
    res.rhs[:]     = utils.allreduce(self.rhs,     comm)
    res.hdiv[:]    = utils.allreduce(self.hdiv,    comm)
    res.wfilter[:] = utils.allreduce(self.wfilter, comm)
    res.ids        = comm.allreduce(list(res.ids))
    return res
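# utils.allreduce is used throughout these snippets but its definition is not
# shown. A minimal sketch of such a helper, assuming mpi4py-style Allreduce
# semantics (element-wise sum across ranks, result returned on every rank):
import numpy as np

def allreduce_sketch(arr, comm, op=None):
    """Reduce a numpy array across all ranks in comm, returning the result
    on every rank. Sums by default."""
    res = np.zeros_like(arr)
    if op is None: comm.Allreduce(arr, res)
    else:          comm.Allreduce(arr, res, op)
    return res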
def calc_b(self):
    res = self.dof.unzip(np.zeros(self.dof.n))
    for work in self.workspaces:
        wmap = np.ascontiguousarray(work.rhs.copy())
        work.pmat.backward(wmap, res)
    res = utils.allreduce(res, self.comm)
    return res
def __call__(self, x):
    xmap = self.dof.unzip(x)
    res  = xmap*0
    for info in self.infos:
        t = [time.time()]
        work = xmap*info.H
        t.append(time.time())
        umap = info.U.apply(work)
        t.append(time.time())
        fmap = fft.fft(umap+0j, axes=[-2,-1])
        t.append(time.time())
        fmap = info.N.apply(fmap, exp=0.5)
        t.append(time.time())
        if info.W is not None:
            fmap = info.W.apply(fmap)
        t.append(time.time())
        fmap = info.N.apply(fmap, exp=0.5)
        t.append(time.time())
        umap = fft.ifft(fmap, umap+0j, axes=[-2,-1], normalize=True).real
        t.append(time.time())
        work = enmap.samewcs(info.U.trans(umap, work), work)
        t.append(time.time())
        work *= info.H
        t.append(time.time())
        t = np.array(t)
        print " %4.2f"*(len(t)-1) % tuple(t[1:]-t[:-1])
        res += work
    res = utils.allreduce(res, comm)
    return self.dof.zip(res)
def A(x):
    map   = x[:area.size].reshape(area.shape)
    junk  = x[area.size:]
    omap  = map*0
    ojunk = junk*0
    for scan in scans:
        tod = np.zeros([scan.ndet, scan.nsamp], dtype)
        scan.pmap.forward(tod, map)
        scan.pcut.forward(tod, junk[scan.cut_range[0]:scan.cut_range[1]])
        scan.noise.apply(tod)
        scan.pcut.backward(tod, ojunk[scan.cut_range[0]:scan.cut_range[1]])
        scan.pmap.backward(tod, omap)
        del tod
    omap = utils.allreduce(omap, comm)
    return np.concatenate([omap.reshape(-1), ojunk], 0)
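# This A implements the normal-equation operator P'N"P acting on one flat
# vector packing the map followed by the cut ("junk") degrees of freedom.
# A hypothetical driver loop, assuming enlib.cg.CG exposes step(), i, err and
# x as elsewhere in these snippets; rhs and rhs_junk are illustrative names
# for a right-hand side built the same way, not from the original:
b = np.concatenate([rhs.reshape(-1), rhs_junk], 0)
solver = enlib.cg.CG(A, b)
while solver.i < 200 and solver.err > 1e-6:
    solver.step()
    if comm.rank == 0:
        print "iter %4d err %15.7e" % (solver.i, solver.err)
omap = solver.x[:area.size].reshape(area.shape)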
def A(self, x):
    map = self.dof.unzip(x)
    res = map*0
    for work in self.workspaces:
        # This is normally P'N"P. In our case the N" weighting is
        # approximated by the hdiv_norm_sqrt factors and the fourier-space
        # filter wfilter applied below.
        wmap = enmap.zeros(work.geometry.shape, work.geometry.lwcs, work.geometry.dtype)
        work.pmat.forward(wmap, map)
        #wmap[:] = array_ops.matmul(work.hdiv_norm_sqrt, wmap, [0,1])
        wmap *= work.hdiv_norm_sqrt
        ft  = fft.rfft(wmap)
        ft *= work.wfilter
        fft.ifft(ft, wmap, normalize=True)
        wmap *= work.hdiv_norm_sqrt
        # Noise weighting would go here. No weighting for now
        #wmap[:] = array_ops.matmul(np.rollaxis(work.hdiv_norm_sqrt,1), wmap, [0,1])
        work.pmat.backward(wmap, res)
    res = utils.allreduce(res, self.comm)
    return self.dof.zip(res)
def sum(dmap, axis=None):
    """Sum a dmap along the specified axis, or the flattened version if axis
    is None. The result is a Dmap if the pixel axes are not involved in the
    sum."""
    # Full sum
    if axis is None:
        return dmap.geometry.comm.allreduce(np.sum([np.sum(t) for t in dmap.tiles]))
    # Non-pixel sums
    if axis < 0: axis = dmap.ndim + axis
    if axis < dmap.ndim - 2:
        pre = dmap.pre[:axis] + dmap.pre[axis+1:]
        res = zeros(dmap.geometry.aspre(pre))
        for itile, otile in zip(dmap.tiles, res.tiles):
            otile[:] = np.sum(itile, axis)
        return res
    # Pixel sums: Sum each tile along the specified direction. Then sum tiles
    # that are on the same row/column. Then stack along the remaining row/column
    res = np.zeros(dmap.shape[:axis] + dmap.shape[axis+1:], dmap.dtype)
    paxis = axis - (dmap.ndim - 2)
    for tile, ind in zip(dmap.tiles, dmap.loc_inds):
        pbox = dmap.geometry.tile_boxes[ind]
        res[..., pbox[0, 1-paxis]:pbox[1, 1-paxis]] += np.sum(tile, axis)
    return utils.allreduce(res, dmap.comm)
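# Hypothetical usage of the dmap sum above, assuming a distributed map `dmap`
# with shape (ncomp, ny, nx):
total = sum(dmap)           # scalar, identical on every rank
comp  = sum(dmap, axis=0)   # pixel axes untouched, so still a Dmap
yprof = sum(dmap, axis=-1)  # summed over x: a plain array, reduced over ranks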
    if col_major: array_dets = array_dets.reshape(nrow, ncol).T.reshape(-1)
    det_unit = nrow if col_major else ncol
    areas  = mapmaking.PhaseMap.zeros(patterns, array_dets, res=res,
            det_unit=det_unit, dtype=dtype)
    signal = mapmaking.SignalPhase(active_scans, areas, mypids, comm,
            name=effname, ofmt=param["ofmt"], output=param["output"]=="yes")
elif param["type"] == "noiserect":
    ashape, awcs = enmap.read_map_geometry(get_map_path(param["value"]))
    leftright = int(param["leftright"]) > 0
    # Drift is in degrees per hour, but we want it per second
    drift = float(param["drift"])/3600
    area  = enmap.zeros((args.ncomp*(1+leftright),)+ashape[-2:], awcs, dtype)
    # Find the duration of each tod. We need this for the y offsets
    nactive = utils.allgather(np.array(len(active_scans)), comm)
    offs    = utils.cumsum(nactive, endpoint=True)
    durs    = np.zeros(np.sum(nactive))
    for i, scan in enumerate(active_scans):
        durs[offs[comm.rank]+i] = scan.nsamp/scan.srate
    durs  = utils.allreduce(durs, comm)
    ys    = utils.cumsum(durs)*drift
    my_ys = ys[offs[comm.rank]:offs[comm.rank+1]]
    # That was surprisingly cumbersome
    signal = mapmaking.SignalNoiseRect(active_scans, area, drift, my_ys, comm,
            name=effname, mode=param["mode"], ofmt=param["ofmt"],
            output=param["output"]=="yes")
elif param["type"] == "srcsamp":
    if param["srcs"] == "none": srcs = None
    else: srcs = pointsrcs.read(param["srcs"])
    minamp = float(param["minamp"])
    if "mask" in param: m = enmap.read_map(param["mask"]).astype(dtype)
    else: m = None
    signal = mapmaking.SignalSrcSamp(active_scans, dtype=dtype, comm=comm,
            srcs=srcs, amplim=minamp, mask=m)
    signal_srcsamp = signal
else:
    raise ValueError("Unrecognized signal type '%s'" % param["type"])
        scan.Nbd = scan.noise_bar.apply(tod.copy())
        if args.method == "cg":
            scan.noise.apply(tod)
            tmp = np.zeros(scan.pcut.njunk, dtype)
            scan.pcut.backward(tod, tmp)
            scan.pmap.backward(tod, cg_rhs)
            cg_rjunk.append(tmp)
    except errors.DataMissing as e:
        print "Skipping %s (%s)" % (id, str(e))
        continue
    print "Read %s" % id
    scans.append(scan)
if args.precompute:
    for lam in prec_NNjunk:
        prec_NNmap[lam]  = utils.allreduce(prec_NNmap[lam], comm)
        prec_NNjunk[lam] = np.concatenate(prec_NNjunk[lam])
if args.method == "cg":
    cg_rhs   = utils.allreduce(cg_rhs, comm)
    cg_rjunk = np.concatenate(cg_rjunk)
    if comm.rank == 0:
        enmap.write_map(args.odir + "/map_rhs.fits", cg_rhs)
    with h5py.File(args.odir + "/cut_rhs_%02d.hdf" % comm.rank, "w") as hfile:
        hfile["data"] = cg_rjunk
# Build div, which we need in both cases
div = enmap.zeros((ncomp,)+area.shape, area.wcs, dtype)
for i in range(ncomp):
    work = div[0]*0
    work[i] = 1
    print fbin
    f1, f2 = [min(nfreq-1, int(i*fmax/dfreq/nbin)) for i in [fbin, fbin+1]]
    fsub  = ft[:, f1:f2]
    cov   = array_ops.measure_cov(fsub)
    std   = np.diag(cov)**0.5
    corr  = cov / std[:,None] / std[None,:]
    myrhs = project_mat(pix, template, corr)
    mydiv = project_mat(pix, template)
    return fbin, myrhs, mydiv
def collect(args):
    fbin, myrhs, mydiv = args
    rhs[fbin] += myrhs
    div[fbin] += mydiv
p = multiprocessing.Pool(args.nmulti)
for fbin in range(nbin):
    p.apply_async(handle_bin, [fbin], callback=collect)
p.close()
p.join()
del ft
# Collect the results
if comm.rank == 0: print "Reducing"
rhs = enmap.samewcs(utils.allreduce(rhs, comm), rhs)
div = enmap.samewcs(utils.allreduce(div, comm), div)
with utils.nowarn():
    map = rhs/div
if comm.rank == 0:
    print "Writing"
    enmap.write_map(args.ofile, map)
        (ci+1, len(chunks), len(chunk), ids[chunk[0]]))
myinds = np.arange(len(chunk))[comm.rank::comm.size]
myinds, myscans = scanutils.read_scans(chunk_ids, myinds, actscan.ACTScan,
        filedb.data, downsample=config.get("downsample"))
myinds = np.array(myinds, int)
# Find the cost and bbox of each successful tod
costs = np.zeros(len(chunk), int)
boxes = np.zeros([len(chunk), 2, 2], np.float)
for ind, scan in zip(myinds, myscans):
    costs[ind] = scan.ndet * scan.nsamp
    boxes[ind] = scanutils.calc_sky_bbox_scan(scan, sys)
costs = utils.allreduce(costs, comm)
boxes = utils.allreduce(boxes, comm)
# Disqualify empty scans
bad = costs == 0
L.info("Rejected %d bad tods" % (np.sum(bad)))
inds = np.where(~bad)[0]
costs, boxes = costs[~bad], boxes[~bad]
ntod = len(inds)
if ntod == 0:
    L.info("Chunk %d has no tods. Skipping" % (ci+1))
    continue
# Redistribute
if not use_dmap:
    myinds = scanutils.distribute_scans2(inds, costs, comm)
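# scanutils.distribute_scans2 is not shown here. A common approach, and a
# plausible sketch of it (an assumption, not the original implementation), is
# greedy cost balancing: hand the next-heaviest scan to the least loaded
# rank, then return the indices this rank ended up owning.
def distribute_greedy_sketch(inds, costs, comm):
    order = np.argsort(costs)[::-1]      # heaviest scans first
    loads = np.zeros(comm.size)
    owner = np.zeros(len(inds), int)
    for i in order:
        r = np.argmin(loads)             # least loaded rank so far
        owner[i]  = r
        loads[r] += costs[i]
    return inds[owner == comm.rank]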
bad = itheta >= ntheta
itheta[bad] = 2*ntheta - itheta[bad] - 1 - atpole
iphi[bad]   = iphi[bad] + nphi/2
bad = itheta < 0
itheta[bad] = -itheta[bad] - 1 + atpole
iphi[bad]   = iphi[bad] + nphi/2
iphi %= nphi
omap[..., itheta, iphi] += m[..., 0]
hits[itheta, iphi] += 1
# Copy over poles
if atpole:
    for t in [0, ntheta-1]:
        i = np.where(itheta == t)[0]
        omap[..., itheta[i], (iphi[i]+nphi/2)%nphi] += m[..., i, 0]
        hits[itheta[i], (iphi[i]+nphi/2)%nphi] += 1
omap = utils.allreduce(omap, comm)
hits = utils.allreduce(hits, comm)
omap /= np.maximum(1, hits)
omap.wcs = owcs
if not args.individual:
    omap = np.mean(omap, 0)
if comm.rank == 0:
    enmap.write_map(args.ofile, omap)
        ft_b /= norm
        nsamp = bin[1] - bin[0] - delay
        if ft_b.size == 0: continue
        cov_b  = array_ops.measure_cov(ft_b, delay)
        var_b  = np.diag(cov_b)
        corr_b = cov_b / var_b[:,None]**0.5 / var_b[None,:]**0.5
        mask   = np.isfinite(corr_b)
        corr_b[~mask] = 0
        for di, det in enumerate(d.dets):
            corr[bi, det, d.dets] += corr_b[di]*nsamp
            hits[bi, det, d.dets] += mask[di]*nsamp
        var[bi, d.dets] += var_b*nsamp*norm
    del ft
if comm.rank == 0: print "Reducing"
corr = utils.allreduce(corr, comm)
print "B", np.sum(corr**2)
hits = utils.allreduce(hits, comm)
var  = utils.allreduce(var, comm)
if comm.rank == 0:
    # Reduce to hit subset
    mask = np.diag(np.sum(hits, 0)) > 0
    print np.sum(mask)
    corr = corr[:, mask][:, :, mask]
    print "C", np.sum(corr**2)
    hits = hits[:, mask][:, :, mask]
    var  = var[:, mask]
    pos  = pos[mask]
    dets = np.where(mask)
    # Normalize
    junk = np.zeros(pcut.njunk, dtype=dtype)
    pcut.backward(tod, junk)
    pmap.backward(tod, osig)
    # Also do the fiducial noise model
    tod[:] = 1
    nmat.apply_window(tod, winsize)
    d.noise.white(tod)
    nmat.apply_window(tod, winsize)
    pcut.backward(tod, junk)
    pmap.backward(tod, odiv)
    # Collect some statistics
    sig_all[ind] = np.sum(vars)*d.nsamp
    sig_med[ind] = np.median(vars)*d.ndet*d.nsamp
    div_all[ind] = np.sum(tod)
    div_med[ind] = np.median(np.sum(tod, 1))*d.ndet
# Collect result
osig[:] = utils.allreduce(osig, comm)
odiv[:] = utils.allreduce(odiv, comm)
sig_all = utils.allreduce(sig_all, comm)
sig_med = utils.allreduce(sig_med, comm)
div_all = utils.allreduce(div_all, comm)
div_med = utils.allreduce(div_med, comm)
if comm.rank == 0:
    enmap.write_map(root + "sig.fits", osig[0])
    enmap.write_map(root + "div.fits", odiv[0])
    with open(root + "stats.txt", "w") as f:
        for ind, id in enumerate(ids):
            f.write("%s %15.7e %15.7e %15.7e %15.7e\n" % (
                id, sig_all[ind], sig_med[ind], div_all[ind], div_med[ind]))
    if nseg < 1:
        print "Skipped %s: Too short tod" % id
        continue
    tod  = d.tod[:, :nseg*seg_size]
    stat = np.zeros([2, nstat, ndet, nseg], dtype=dtype)
    for si in range(nstat):
        sub  = tod.reshape(ndet, nseg, -1, nmed, nrms[si])
        rmss = np.median(np.std(sub, -1), -1)
        stat[0, si] = np.mean(rmss, -1)
        stat[1, si] = np.std(rmss, -1)
    lens[i] = nseg
    mystats.append(stat)
    myinds.append(i)
    del d, tod, sub
# Collect everybody's lengths
lens = utils.allreduce(lens, comm)
offs = utils.cumsum(lens, endpoint=True)
# Allocate the output stat buffer. This is a bit inefficient, since only the
# root really needs to do this. But the stat arrays aren't that big.
stats = np.zeros([2, nstat, ndet, offs[-1]], dtype=dtype)
for li, gi in enumerate(myinds):
    stats[:, :, :, offs[gi]:offs[gi+1]] = mystats[li]
del mystats
stats = utils.allreduce(stats, comm)
# And output
if comm.rank == 0:
    print "Writing %s" % ofile
    with h5py.File(ofile, "w") as hfile:
        hfile["stats"] = stats
        hfile["lens"]  = lens
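# The pattern above (allreduce lengths -> cumulative offsets -> fill local
# slices -> allreduce the full buffer) is a generic way to concatenate
# variable-length per-task results without point-to-point messages. A
# minimal, self-contained sketch of the same idiom (illustrative names):
def gather_concat_sketch(mydata, myinds, lens, comm):
    lens = utils.allreduce(lens, comm)        # every rank learns all lengths
    offs = utils.cumsum(lens, endpoint=True)  # global start offsets
    buf  = np.zeros(offs[-1])                 # full-size buffer on every rank
    for li, gi in enumerate(myinds):
        buf[offs[gi]:offs[gi+1]] = mydata[li] # fill only the slots I own
    return utils.allreduce(buf, comm)         # zeros elsewhere sum correctly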
    try:
        scan = actscan.ACTScan(entry)
        if scan.ndet == 0 or scan.nsamp == 0:
            raise errors.DataMissing("Tod contains no valid data")
    except errors.DataMissing as e:
        L.debug("Skipped %s (%s)" % (str(id), e.message))
        continue
    scan = scan[:, ::config.get("downsample")]
    L.debug("Processing %s" % str(id))
    pmap = pmat.PmatMap(scan, hits)
    pcut = pmat.PmatCut(scan)
    tod  = np.full([scan.ndet, scan.nsamp], 1, dtype=dtype)
    junk = np.zeros(pcut.njunk, dtype=dtype)
    pcut.backward(tod, junk)
    pmap.backward(tod, hits)
hits = hits[0]
# Collect result
L.info("Reducing")
hits[:] = utils.allreduce(hits, comm)
# Undo effect of downsampling
hits *= config.get("downsample")
# And write it
L.info("Writing")
if comm.rank == 0:
    enmap.write_map(root + "hits.fits", hits)
L.info("Done")
L.info("Detecting scanning patterns") boxes = np.zeros([len(ids),2,2]) for ind in range(comm_world.rank, len(ids), comm_world.size): id = ids[ind] entry = filedb.data[id] try: d = actdata.read(entry, ["boresight","tconst","cut","cut_noiseest"]) d = actdata.calibrate(d, exclude=["autocut"]) if d.ndet == 0 or d.nsamp == 0: raise errors.DataMissing("no data") except errors.DataMissing as e: L.debug("Skipped %s (%s)" % (ids[ind], e.message)) continue # Reorder from az,el to el,az boxes[ind] = [np.min(d.boresight[2:0:-1],1),np.max(d.boresight[2:0:-1],1)] L.info("%5d: %s" % (ind, id)) boxes = utils.allreduce(boxes, comm_world) # Prune null boxes usable = np.all(boxes!=0,(1,2)) moo = ids[usable] cow = boxes[usable] ids, boxes = ids[usable], boxes[usable] pattern_ids = utils.label_unique(boxes, axes=(1,2), atol=tol) npattern = np.max(pattern_ids)+1 pboxes = np.array([utils.bounding_box(boxes[pattern_ids==pid]) for pid in xrange(npattern)]) pscans = [np.where(pattern_ids==pid)[0] for pid in xrange(npattern)] L.info("Found %d scanning patterns" % npattern)
    ft = fft.rfft(d.tod)
    del d.tod
    ps = np.abs(ft)**2/(nsamp*srate)
    del ft
    # Choose a random set of detectors to be examples.
    iex = np.random.permutation(len(ps))[:nex]
    examples[ind] = ps[iex]
    # Want mean and dev between detectors. These can be built from ps and ps**2
    stats[:, ind] = calc_bin_stats(ps, bin_freqs, nsamp, srate)
    n, a, a2 = calc_bin_stats(ps, bin_freqs_status, nsamp, srate)
    del ps
    amean = a/n
    adev  = (a2/n - amean**2)**0.5
    print(id + " " + "".join([get_token(me, de) for me, de in zip(amean, adev)]) + " %5.2f" % d.apex.pwv)
tot_stats    = utils.allreduce(stats, comm)
tot_examples = utils.allreduce(examples, comm)
if comm.rank == 0:
    mask = np.all(tot_stats[0] != 0, 1)
    n, a, a2 = tot_stats[:, mask]
    # Individual statistics
    amean = a/n
    adev  = (a2/n - amean**2)**0.5
    # Overall
    amean_tot = np.sum(a, 0)/np.sum(n, 0)
    adev_tot  = np.std(amean, 0)
    with h5py.File(args.ofile, "w") as hfile:
        hfile["bins"] = bin_freqs
        hfile["ps"]   = amean
        hfile["dps"]  = adev
        hfile["hits"] = n
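# calc_bin_stats is not shown in these snippets. From how its output is used
# (n, a, a2 -> mean a/n and dev sqrt(a2/n - mean**2)), it plausibly returns
# the hit count, sum and sum of squares of the power spectra per frequency
# bin. A sketch under that assumption (illustrative only, treating bin_freqs
# as bin edges):
def calc_bin_stats_sketch(ps, bin_freqs, nsamp, srate):
    freqs = np.arange(ps.shape[-1])*srate/nsamp   # frequency of each ps bin
    binds = np.searchsorted(bin_freqs, freqs)     # output bin per frequency
    res   = np.zeros([3, len(bin_freqs)])
    for b in range(len(bin_freqs)):
        vals = ps[:, binds == b].reshape(-1)
        if vals.size == 0: continue
        res[:, b] = [vals.size, np.sum(vals), np.sum(vals**2)]
    return res  # [n, a, a2] per bin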
# Apply weight to rhs
if W is not None:
    iH   = 1/np.maximum(H, np.max(H)*1e-2)
    urhs = U.apply(rhs*iH)
    ft   = fft.fft(urhs+0j, axes=[-2,-1])
    ft   = W.apply(ft)
    urhs = fft.ifft(ft, urhs+0j, axes=[-2,-1], normalize=True).real
    rhs  = U.trans(urhs, rhs)*H
if rhs_tot is None: rhs_tot = rhs
else: rhs_tot += rhs
infos.append(bunch.Bunch(U=U, N=N, H=H, W=W, pattern=pattern, site=site,
        srate=srate, scale=scale, speed=speed))
rhs = utils.allreduce(rhs_tot, comm)
dof = zipper.ArrayZipper(rhs.copy())
A   = Amat(dof, infos, comm)
cg  = enlib.cg.CG(A, dof.zip(rhs))
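# The CG object is then iterated to solve A x = rhs. A hypothetical solve
# loop, assuming enlib.cg.CG exposes step(), i, err and x; the iteration
# count args.niter, the stopping rule and the output name are illustrative:
while cg.i < args.niter:
    cg.step()
    if comm.rank == 0:
        print "%5d %15.7e" % (cg.i, cg.err)
map = dof.unzip(cg.x)
if comm.rank == 0:
    enmap.write_map("map.fits", map)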
def fastweight(shape, wcs, db, weight="det", array_rad=0.7*utils.degree,
        comm=None, dtype=np.float64, daz=0.5*utils.degree, nt=4,
        chunk_size=100, site=None, verbose=False, normalize=True):
    # Get the boresight bounds for each TOD
    ntod   = len(db)
    mids   = np.array([db.data["t"], db.data["az"], db.data["el"]])
    widths = np.array([db.data["dur"], db.data["waz"], db.data["wel"]])
    box    = np.array([mids - widths/2, mids + widths/2])
    box[:, 1:] *= utils.degree
    ndets  = db.data["ndet"]
    # Set up our output map
    omap = enmap.zeros(shape, wcs, dtype)
    # Sky horizontal period in pixels
    nphi = np.abs(utils.nint(360/wcs.wcs.cdelt[0]))
    # Loop through chunks
    nchunk = (ntod + chunk_size - 1)/chunk_size
    if comm: rank, size = comm.rank, comm.size
    else:    rank, size = 0, 1
    for chunk in range(rank, nchunk, size):
        i1 = chunk*chunk_size
        i2 = min((chunk+1)*chunk_size, ntod)
        # Split the hits into horizontal pixel ranges
        pix_ranges, weights = [], []
        with bench.mark("get"):
            for i in range(i1, i2):
                ndet_eff = ndets[i] if weight == "det" else 1000.0
                pr, w = get_pix_ranges(shape, wcs, box[:, :, i], daz, nt,
                        ndet=ndet_eff, site=site)
                if pr is None: continue
                pix_ranges.append(pr)
                weights.append(w)
        if len(pix_ranges) == 0: continue
        pix_ranges = np.concatenate(pix_ranges, 0)
        weights    = np.concatenate(weights, 0)
        with bench.mark("add"):
            add_weight(omap, pix_ranges, weights, nphi)
        if verbose:
            print "%4d %4d %7.4f %7.4f" % (chunk, rank,
                    bench.stats.get("get"), bench.stats.get("add"))
    if comm:
        omap = utils.allreduce(omap, comm)
    # Change unit from seconds per pixel to seconds per square arcmin
    if normalize:
        pixarea = omap.pixsizemap()/utils.arcmin**2
        omap   /= pixarea
        omap[~np.isfinite(omap)] = 0
    if array_rad:
        omap = smooth_tophat(omap, array_rad)
    omap[omap < 1e-6] = 0
    return omap
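# A hypothetical call, assuming a todinfo-style database `db` with the fields
# used above (t, az, el, dur, waz, wel, ndet), the project's mpi wrapper, and
# placeholder file names:
shape, wcs = enmap.read_map_geometry("area.fits")
comm = mpi.COMM_WORLD
wmap = fastweight(shape, wcs, db, weight="det", comm=comm)
if comm.rank == 0:
    enmap.write_map("weight.fits", wmap)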
    myspec  = np.mean(nmat.iD, 1)
    inspec += myspec*np.sum(myhits)  # weight in total avg
    bins  = nmat.bins
    srate = d.srate
    site  = dict(d.site)
    speed = np.median(np.abs(d.boresight[1:, 1] - d.boresight[:-1, 1])[::10])/utils.degree*d.srate
    offsets[d.dets] += d.offsets[:, 2:0:-1]
    det_hit[d.dets] += 1
    nscan += 1
    del myhits, d
# Ok, we're done with all tods for this pattern. Collect our result.
rhs    = utils.allreduce(rhs, comm)
hits   = utils.allreduce(hits, comm)
inspec = utils.allreduce(inspec, comm)/np.sum(hits)
srate  = comm.allreduce(srate, op=mpi.MAX)
speed  = comm.allreduce(speed, op=mpi.MAX)
site   = [w for w in comm.allgather(site) if len(w) > 0][0]
nscan  = comm.allreduce(nscan)
det_hit = utils.allreduce(det_hit, comm)
offsets = utils.allreduce(offsets, comm)
offsets[det_hit > 0] /= det_hit[det_hit > 0][:, None]
# Reduce to our actual set of detectors
dets    = np.where(det_hit > 0)[0]
offsets = offsets[dets]
det_hit = det_hit[dets]
ndet    = len(dets)
    res = enmap.ifft(enmap.fft(map)*np.conj(enmap.fft(kernel))).real
    return res
nphi = np.abs(utils.nint(360/wcs.wcs.cdelt[0]))
for chunk in range(comm.rank, nchunk, comm.size):
    i1 = chunk*csize
    i2 = min((chunk+1)*csize, ntod)
    # Split the hits into horizontal pixel ranges
    pix_ranges, weights = [], []
    with bench.mark("get"):
        for i in range(i1, i2):
            pr, w = get_pix_ranges(shape, wcs, box[:, :, i], daz, nt,
                    azdown=args.azdown, ndet=ndets[i])
            pix_ranges.append(pr)
            weights.append(w)
    pix_ranges = np.concatenate(pix_ranges, 0)
    weights    = np.concatenate(weights, 0)
    with bench.mark("add"):
        add_weight(omap, pix_ranges, weights, nphi)
    print "%4d %4d %7.4f %7.4f" % (chunk, comm.rank,
            bench.stats.get("get"), bench.stats.get("add"))
# Combine weights
omap = utils.allreduce(omap, comm)
# Change unit from seconds per pixel to seconds per square arcmin
if comm.rank == 0:
    pixarea = omap.pixsizemap()/utils.arcmin**2
    omap   /= pixarea
    omap    = smooth_tophat(omap, rad)
    omap[omap < 1e-3] = 0
    enmap.write_map(args.omap, omap)
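# The two indented lines at the top are the tail of the tophat smoothing
# helper. A plausible completion, assuming a unit-sum disk kernel built from
# the map's own pixel distances (a sketch, not necessarily the original):
def smooth_tophat_sketch(map, rad):
    r       = map.modrmap()                 # distance from map ref point
    kernel  = (r <= rad).astype(map.dtype)  # disk of radius rad
    kernel /= np.sum(kernel)                # unit sum: preserves the mean
    res = enmap.ifft(enmap.fft(map)*np.conj(enmap.fft(kernel))).real
    return res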
    # 15.86553 and 84.13447 are the percentiles corresponding to +-1 sigma
    tspecs[0, i] = np.median(dhigh, 0)
    tspecs[1, i] = np.percentile(dhigh, 15.86553, 0)
    tspecs[2, i] = np.percentile(dhigh, 84.13447, 0)
    tspecs[3, i] = np.min(dhigh, 0)
    tspecs[4, i] = np.max(dhigh, 0)
    del ps
    # Normalize ft in bins, since we want correlations
    for di in range(d.ndet):
        ft[di] /= (dhigh[di]**0.5)[binds]
    # Average correlation in bin
    sps = np.abs(np.sum(ft, 0))**2
    tcorrs[i] = (bin(sps, args.nbin) - d.ndet)/(d.ndet**2 - d.ndet)
    del sps, ft, d
# Ok, we've gone through all the data in our chunk
with bench.show("Reduce"):
    dspecs = utils.allreduce(dspecs, comm)
    dzooms = utils.allreduce(dzooms, comm)
    tspecs = utils.allreduce(tspecs, comm)
    tcorrs = utils.allreduce(tcorrs, comm)
    srates = utils.allreduce(srates, comm)
    nhits  = utils.allreduce(nhits, comm)
    mce_fsamps = utils.allreduce(mce_fsamps, comm)
    mce_params = utils.allreduce(mce_params, comm)
ofile = prefix + "specs%03d.hdf" % chunk
if comm.rank == 0:
    # Get rid of empty tods
    good = np.where(np.any(dspecs > 0, (1, 2)))[0]
    if len(good) == 0:
        print "No usable tods in chunk!"
        continue
    dspecs = dspecs[good]
    dzooms = dzooms[good]
    tspecs = tspecs[:, good]
    tcorrs = tcorrs[good]
    chunk_ids = ids[good + ind1]
        rms_dec = np.array([np.mean(ps[:, b[0]:b[1]], 1) for b in inds]).T**0.5
        if args.full_stats:
            for i in range(2):
                stats[si, d.dets, i+0] = rms_raw[:, i]
                stats[si, d.dets, i+2] = rms_dec[:, i]
        ratio    = rms_dec[:, 1]/rms_dec[:, 0]
        sens     = rms_dec**-2
        med_sens = np.median(sens, 0)
        cuts[si, d.dets] = ((ratio > rate[0]) & (ratio < rate[1]) &
                (np.all(sens < med_sens[None, :]*args.max_sens, 1))) + 1
    except Exception as e:
        print "Unexpected error " + id + " " + str(e) + ", skipping"
print "Reducing"
# Reduce everything
cuts = utils.allreduce(cuts, comm)
if args.full_stats:
    stats = utils.allreduce(stats, comm)
print "Reduced"
if comm.rank == 0:
    if args.full_stats:
        # Output full stats
        with h5py.File(args.odir + "/stats.hdf", "w") as ofile:
            ofile["stats"] = stats
            ofile["ids"]   = ids
    # Output cuts as accept file
    with open(args.odir + "/accept.txt", "w") as ofile:
        for id, icut in zip(ids, cuts):
            ofile.write("%s %3d:" % (id, np.sum(icut == 2)))
            for det, dcut in enumerate(icut):