def calc_model_constrained(tod, cut, srate=400, mask_scale=0.3, lim=3e-4, maxiter=50, verbose=False):
	"""Solve for a model of tod where the cut samples are treated as unknowns,
	constrained by a noise model measured from the (gapfilled) data itself.
	The linear system lhs(x) = rhs is solved by conjugate gradients, where
	lhs applies the measured noise model plus a down-weighted (mask_scale)
	white-noise term restricted by the cuts. The solution is smoothed before
	being returned; shape matches tod."""
	# Gapfill linearly first so the cut samples don't contaminate the
	# noise model measurement below.
	filled = sampcut.gapfill_linear(cut, tod, inplace=False)
	fdata  = fft.rfft(filled) * filled.shape[1]**-0.5
	noise  = nmat_measure.detvecs_jon(fdata, srate)
	del fdata
	# Per-detector inverse variance, downweighted by mask_scale
	weight = noise.ivar * mask_scale
	def lhs(xvec):
		xmap    = xvec.reshape(filled.shape)
		result  = noise.apply(xmap.copy())
		result += sampcut.gapfill_const(cut, xmap * weight[:, None], 0, inplace=True)
		return result.reshape(-1)
	rhs   = sampcut.gapfill_const(cut, filled * weight[:, None], 0, inplace=True).reshape(-1)
	guess = sampcut.gapfill_linear(cut, filled).reshape(-1)
	solver = cg.CG(lhs, rhs, guess)
	while solver.i < maxiter and solver.err > lim:
		solver.step()
		if verbose:
			print("%5d %15.7e" % (solver.i, solver.err))
	return smooth(solver.x.reshape(filled.shape), srate)
def calc_model_constrained(tod, cut, srate=400, mask_scale=0.3, lim=3e-4, maxiter=50, verbose=False):
	"""Solve for a model of tod where the cut samples are treated as unknowns,
	constrained by a noise model measured from the data itself.

	tod:        [ndet,nsamp] sample array. A gapfilled copy is used internally
	            (inplace=False), so the caller's array is not modified here.
	cut:        sample cuts describing which samples to solve for.
	srate:      sample rate passed to the noise-model measurement.
	mask_scale: scale factor applied to the per-detector inverse variance
	            that weights the uncut samples.
	lim:        CG convergence tolerance on solver.err.
	maxiter:    maximum number of CG iterations.
	verbose:    if True, print the CG iteration number and error each step.

	Returns the CG solution reshaped to tod's shape.
	"""
	# First do some simple gapfilling to avoid messing up the noise model
	tod = sampcut.gapfill_linear(cut, tod, inplace=False)
	ft  = fft.rfft(tod) * tod.shape[1]**-0.5
	iN  = nmat_measure.detvecs_jon(ft, srate)
	del ft
	iV  = iN.ivar*mask_scale
	def A(x):
		# Left-hand side of the CG system: noise model applied to x, plus
		# the masked white-noise term (gapfill_const zeroes the cut samples
		# of the x*iV product in place).
		x   = x.reshape(tod.shape)
		Ax  = iN.apply(x.copy())
		Ax += sampcut.gapfill_const(cut, x*iV[:,None], 0, inplace=True)
		return Ax.reshape(-1)
	b  = sampcut.gapfill_const(cut, tod*iV[:,None], 0, inplace=True).reshape(-1)
	x0 = sampcut.gapfill_linear(cut, tod).reshape(-1)
	solver = cg.CG(A, b, x0)
	while solver.i < maxiter and solver.err > lim:
		solver.step()
		if verbose:
			# Fix: the original used a Python-2-only print statement. The
			# parenthesized single-argument form below is valid in both
			# Python 2 and 3, and matches the sibling implementation.
			print("%5d %15.7e" % (solver.i, solver.err))
	return solver.x.reshape(tod.shape)
# Fragment: subtract an input map from the tod, measure its noise model, and
# write the model to disk. NOTE(review): this chunk starts mid-scope
# (presumably inside an `if args.imap:` block and a per-tod loop — compare
# the fuller variant elsewhere in this file) and is truncated at the end;
# indentation has been reconstructed — confirm against the full file.
d.noise = None
scan = data.ACTScan(entry, d=d)
imap.map = imap.map.astype(d.tod.dtype, copy=False)
pmap = pmat.PmatMap(scan, imap.map, sys=imap.sys)
# Subtract input map from tod inplace
pmap.forward(d.tod, imap.map, tmul=1, mmul=-1)
utils.deslope(d.tod, w=8, inplace=True)
# Fourier transform of the tod, scaled by nsamp**-0.5
ft = fft.rfft(d.tod) * d.tod.shape[1]**-0.5
t.append(time.time())  # timing checkpoint
# Optional spike cut bins; d.spikes[:2].T gives [nbin,2] start/stop pairs
# (presumably — confirm against d.spikes layout)
spikes = d.spikes[:2].T if args.spikecut else None
# Measure the noise model with the requested method
if model == "old":
	noise = nmat_measure.detvecs_old(ft, d.srate, d.dets)
elif model == "jon":
	noise = nmat_measure.detvecs_jon(ft, d.srate, d.dets, shared, cut_bins=spikes)
elif model == "simple":
	noise = nmat_measure.detvecs_simple(ft, d.srate, d.dets)
elif model == "joint":
	noise = nmat_measure.detvecs_joint(ft, d.srate, d.dets, cut_bins=spikes)
t.append(time.time())  # timing checkpoint
# Write the measured noise model to a per-id hdf file
with h5py.File("%s/%s.hdf" % (args.odir, id), "w") as hfile:
	nmat.write_nmat(hfile, noise)
t.append(time.time())  # timing checkpoint
if args.covtest:
	# Measure full cov per bin
	# NOTE(review): chunk is truncated here; the covtest body continues
	# beyond this view.
# Fragment: per-tod read/calibrate/measure/write step with timing. NOTE(review):
# the enclosing loop header supplying i, n, id, entry and ofile is outside this
# view; indentation has been reconstructed — confirm against the full file.
if os.path.isfile(ofile) and args.resume: continue
t = []  # timing checkpoints; deltas are reported at the end
t.append(time.time())
try:
	d = data.read(entry, ["gain", "tconst", "cut", "tod", "boresight"])
	t.append(time.time())
	d = data.calibrate(d)
	t.append(time.time())
except errors.DataMissing as e:
	# Skip tods with missing data rather than aborting the whole run.
	# NOTE(review): e.message is Python 2 only; use str(e) when porting to 3.
	print "%3d/%d %25s skip (%s)" % (i + 1, n, id, e.message)
	continue
except zipfile.BadZipfile:
	print "%d/%d %25s bad zip" % (i + 1, n, id)
	continue
# Fourier transform of the tod, scaled by nsamp**-0.5
ft = fft.rfft(d.tod) * d.tod.shape[1]**-0.5
t.append(time.time())
# Measure the noise model with the requested method
if model == "old":
	noise = nmat_measure.detvecs_old(ft, d.srate, d.dets)
elif model == "jon":
	# NOTE(review): di is computed here but never used in this fragment —
	# probably leftover debugging code.
	di = np.where(d.dets == 20)[0]
	noise = nmat_measure.detvecs_jon(ft, d.srate, d.dets, shared)
elif model == "simple":
	noise = nmat_measure.detvecs_simple(ft, d.srate, d.dets)
t.append(time.time())
# Write the measured noise model to a per-id hdf file
nmat.write_nmat("%s/%s.hdf" % (args.odir, id), noise)
t.append(time.time())
# Report the time spent in each stage (deltas between checkpoints)
t = np.array(t)
dt = t[1:] - t[:-1]
print("%3d/%d %25s" + " %6.3f" * len(dt)) % tuple([i + 1, n, id] + list(dt))
# Fragment: optionally subtract an input map from the tod, measure its noise
# model, write it to disk, and optionally start a per-bin covariance test.
# NOTE(review): this chunk starts mid-scope and is truncated inside the final
# for loop; indentation has been reconstructed — confirm which lines fall
# under `if args.imap:` against the full file.
if args.imap:
	# Make a full scan object, so we can perform pointing projection
	# operations
	d.noise = None
	scan = data.ACTScan(entry, d=d)
	imap.map = imap.map.astype(d.tod.dtype, copy=False)
	pmap = pmat.PmatMap(scan, imap.map, sys=imap.sys)
	# Subtract input map from tod inplace
	pmap.forward(d.tod, imap.map, tmul=1, mmul=-1)
	utils.deslope(d.tod, w=8, inplace=True)
# Fourier transform of the tod, scaled by nsamp**-0.5
ft = fft.rfft(d.tod) * d.tod.shape[1]**-0.5 ; t.append(time.time())
# Optional spike cut bins; d.spikes[:2].T gives [nbin,2] start/stop pairs
# (presumably — confirm against d.spikes layout)
spikes = d.spikes[:2].T if args.spikecut else None
# Measure the noise model with the requested method
if model == "old":
	noise = nmat_measure.detvecs_old(ft, d.srate, d.dets)
elif model == "jon":
	noise = nmat_measure.detvecs_jon(ft, d.srate, d.dets, shared, cut_bins=spikes)
elif model == "simple":
	noise = nmat_measure.detvecs_simple(ft, d.srate, d.dets)
elif model == "joint":
	noise = nmat_measure.detvecs_joint(ft, d.srate, d.dets, cut_bins=spikes)
t.append(time.time())
# Write the measured noise model to a per-id hdf file
with h5py.File("%s/%s.hdf" % (args.odir, id),"w") as hfile:
	nmat.write_nmat(hfile, noise) ; t.append(time.time())
if args.covtest:
	# Measure full cov per bin
	ndet = ft.shape[0]
	# Rescale the noise model's bins to indices into ft, clamped to the
	# last valid frequency index
	bins = np.minimum((noise.bins*ft.shape[1]/noise.bins[-1,1]).astype(int),ft.shape[1]-1)
	nbin = len(bins)
	cov_full = np.zeros([nbin,ndet,ndet])
	for bi, b in enumerate(bins):
		# Debug output: rms of detector 0 in this bin (arbitrary /20 scaling)
		print "A", bi, b, np.mean(np.abs(ft[0,b[0]:b[1]])**2)**0.5/20
		# NOTE(review): chunk is truncated here; the loop body continues
		# beyond this view.
def get_model(s, area):
	"""Evaluate scan s's model on area's pixel grid, returning it as an
	enmap with area's geometry, truncated to area's leading dimension."""
	# Flatten the position map to [2,npix], reverse the component order,
	# and transpose to the [npix,2] layout s.get_model expects
	# (presumably — confirm against get_model's interface).
	pos = area.posmap().reshape(2,-1)[::-1].T
	# Move the per-pixel axis first and restore the 2d pixel layout
	model = np.rollaxis(s.get_model(pos),-1).reshape(-1,area.shape[1],area.shape[2])
	return enmap.ndmap(model, area.wcs)[:area.shape[0]]
# Build the simulated scans. When args.measure is set, the noise model is
# measured from a noisier version of the data first, then attached to scans
# regenerated with the real noise level.
if args.measure is None:
	scans = get_scans(area, args.signal, args.bore, args.dets, args.noise, seed=args.seed, real=args.real)
else:
	# Build noise model the same way we do for the real data, i.e. based on
	# measuring data itself. But do that based on a version with more noise
	# than the real one, to simulate realistic S/N ratios without needing
	# too many samples
	scans = get_scans(area, args.signal, args.bore, args.dets, args.noise, seed=args.seed, real=args.real, noise_override=args.measure)
	nmats = []
	for scan in scans:
		# Fourier transform of the samples, scaled by nsamp**-0.5
		ft = fft.rfft(scan.get_samples()) * scan.nsamp**-0.5
		nmats.append(nmat_measure.detvecs_jon(ft, 400.0, shared=True))
	# Regenerate the scans at the real noise level (same seed), and attach
	# the noise models measured above
	scans = get_scans(area, args.signal, args.bore, args.dets, args.noise, seed=args.seed, real=args.real)
	for scan,nmat in zip(scans,nmats): scan.noise = nmat
# Write the area geometry and the model evaluated on it
enmap.write_map(args.odir + "/area.fits", area)
model = get_model(scans[0], area)
enmap.write_map(args.odir + "/model.fits", model)
# Write each scan to its own hdf file, and record the file list in tods.txt
with open(args.odir + "/tods.txt", "w") as ofile:
	for i, scan in enumerate(scans):
		L.info("scan %2d/%d" % (i+1,len(scans)))
		enlib.scan.write_scan(args.odir + "/scan%03d.hdf" % i, scan)
		ofile.write("%s/scan%03d.hdf\n" % (args.odir, i))