def apply(self, tod, inplace=False):
    """Apply this inverse noise model to tod in the frequency domain.

    Multiplies each Fourier bin of tod by the corresponding binned inverse
    power spectrum. Returns the filtered array. If inplace is False (the
    default) the input array is left untouched and a filtered copy is
    returned; if True, tod itself is overwritten and returned.

    Bug fix: the inplace test was inverted — it copied when inplace was
    True and clobbered the caller's array when inplace was False.
    """
    # Work on a copy unless the caller explicitly asked for in-place operation
    if not inplace: tod = np.array(tod)
    ftod = fft.rfft(tod)  # Candidate for speedup in C
    norm = tod.shape[1]
    for bi, b in enumerate(self.bins):
        ftod[:, b[0]:b[1]] *= self.ips_binned[:, None, bi] / norm
    # I divided by the normalization above instead of passing normalize=True
    # here to reduce the number of operations needed
    fft.irfft(ftod, tod)
    return tod
def apply(self, tod, inplace=False, slow=False):
    """Apply this detvecs inverse noise model to tod.

    Windows the data, applies iN = iD + s*iV*iV' per frequency bin in
    Fourier space, transforms back and windows again. Returns the filtered
    array. If inplace is False (the default) the input is left untouched
    and a filtered copy is returned; if True, tod itself is overwritten.
    slow=True selects the pure-python reference path instead of the
    so3g C implementation.

    Bug fix: the inplace test was inverted — it copied when inplace was
    True and clobbered the caller's array when inplace was False.
    """
    # Work on a copy unless the caller explicitly asked for in-place operation
    if not inplace: tod = np.array(tod)
    apply_window(tod, self.nwin)
    ftod = fft.rfft(tod)
    norm = tod.shape[1]
    if slow:
        # Reference implementation. Want to multiply by iD + s*iV*iV'
        for bi, b in enumerate(self.bins):
            ft = ftod[:, b[0]:b[1]]
            iD = self.iD[bi] / norm
            iV = self.iV[bi] / norm**0.5
            ft[:] = iD[:, None] * ft + self.s * iV.dot(iV.T.dot(ft))
    else:
        # Fast C implementation of the same per-bin operation
        so3g.nmat_detvecs_apply(ftod.view(tod.dtype), self.bins, self.iD, self.iV, self.s, norm)
    # I divided by the normalization above instead of passing normalize=True
    # here to reduce the number of operations needed
    fft.irfft(ftod, tod)
    apply_window(tod, self.nwin)
    return tod
def build(self, tod, **kwargs):
    """Build an uncorrelated (diagonal) noise model from tod.

    Measures the mean power spectrum per detector, bins it either
    exponentially or linearly in frequency, and returns an NmatUncorr
    holding the binned inverse spectra and a representative per-detector
    inverse variance.

    Bug fix: the "lin" spacing branch called utils.expbin, making it
    identical to "exp"; it now calls utils.linbin as intended.
    """
    ps = np.abs(fft.rfft(tod))**2
    if self.spacing == "exp":
        bins = utils.expbin(ps.shape[-1], nbin=self.nbin, nmin=self.nmin)
    elif self.spacing == "lin":
        bins = utils.linbin(ps.shape[-1], nbin=self.nbin, nmin=self.nmin)
    else:
        raise ValueError("Unrecognized spacing '%s'" % str(self.spacing))
    ps_binned = utils.bin_data(bins, ps)
    ips_binned = 1 / ps_binned
    # Compute the representative inverse variance per sample
    ivar = np.zeros(len(tod))
    for bi, b in enumerate(bins):
        ivar += ips_binned[:, bi] * (b[1] - b[0])
    ivar /= bins[-1, 1] - bins[0, 0]
    ivar *= tod.shape[1]
    return NmatUncorr(spacing=self.spacing, nbin=len(bins), nmin=self.nmin, bins=bins, ips_binned=ips_binned, ivar=ivar)
def __call__(self, scan, tod):
    """Filter tod in place in the frequency domain.

    Each Fourier mode is scaled by 1/(1 + max(freq/fknee, tol)**alpha),
    suppressing modes above fknee; tol floors the frequency ratio so the
    DC/lowest bins get a well-defined gain.
    """
    fdata = fft.rfft(tod)
    freqs = fft.rfftfreq(scan.nsamp, 1 / scan.srate)
    profile = np.maximum(freqs / self.fknee, self.tol) ** self.alpha
    fdata *= (1 + profile) ** -1
    fft.irfft(fdata, tod, normalize=True)
def highpass(tod, fknee=1e-2, alpha=3):
    """High-pass filter tod along its last axis, in place, and return it.

    The gain is 1/(1 + (freq/fknee)**-alpha): ~1 well above fknee, falling
    towards 0 below it. fknee is in units of the sampling rate, since
    rfftfreq is called without a sample spacing.

    Bug fix: the original expression evaluated (0)**-alpha at the DC bin,
    emitting a divide-by-zero RuntimeWarning and relying on inf arithmetic.
    The gain is now built explicitly with the DC mode set to 0, which is
    the limit of the original expression.
    """
    ft = fft.rfft(tod)
    freq = fft.rfftfreq(tod.shape[1])
    gain = np.zeros(freq.size)
    pos = freq > 0
    gain[pos] = 1 / (1 + (freq[pos] / fknee) ** -alpha)
    ft *= gain
    return fft.irfft(ft, tod, normalize=True)
def build(self, tod, srate, **kwargs):
    """Build a detector-eigenvector (detvecs) noise model from tod.

    Measures correlated noise modes from the windowed Fourier transform of
    tod, models the noise in each frequency bin as N = D + VV' (diagonal
    plus low-rank), inverts it with the Woodbury identity, and returns an
    NmatDetvecs carrying both the forward (D, V) and inverse (iD, iV, s)
    representations plus a representative white-noise ivar.

    Bug fix: the dtype-fixing block cross-assigned the arrays
    (D <- iD, V <- iV, then iD <- D, iV <- V), destroying the forward
    model and making both pairs identical. Each array is now cast from
    itself.
    """
    # Apply window before measuring noise model
    nwin = utils.nint(self.window / srate)
    apply_window(tod, nwin)
    ft = fft.rfft(tod)
    # Unapply window again
    apply_window(tod, nwin, -1)
    ndet, nfreq = ft.shape
    nsamp = tod.shape[1]
    # First build our set of eigenvectors in two bins. The first goes from
    # 0.25 to 4 Hz the second from 4 Hz and up
    mode_bins = makebins(self.mode_bins, srate, nfreq, 1000, rfun=np.round)[1:]
    # Then use these to get our set of basis vectors
    vecs = find_modes_jon(ft, mode_bins, eig_lim=self.eig_lim, single_lim=self.single_lim, verbose=self.verbose)
    nmode = vecs.shape[1]
    if vecs.size == 0: raise errors.ModelError("Could not find any noise modes")
    # Cut bins that extend beyond our max frequency
    bin_edges = self.bin_edges[self.bin_edges < srate / 2 * 0.99]
    bins = makebins(bin_edges, srate, nfreq, nmin=2 * nmode, rfun=np.round)
    nbin = len(bins)
    # Now measure the power of each basis vector in each bin. The residual
    # noise will be modeled as uncorrelated
    E = np.zeros([nbin, nmode])
    D = np.zeros([nbin, ndet])
    Nd = np.zeros([nbin, ndet])
    for bi, b in enumerate(bins):
        # Skip the DC mode, since it's unmeasurable and filtered away
        b = np.maximum(1, b)
        E[bi], D[bi], Nd[bi] = measure_detvecs(ft[:, b[0]:b[1]], vecs)
    # Optionally downweight the lowest frequency bins
    if self.downweight is not None and len(self.downweight) > 0:
        D[:len(self.downweight)] /= np.array(self.downweight)[:, None]
    # Instead of VEV' we can have just VV' if we bake sqrt(E) into V
    V = vecs[None] * E[:, None]**0.5
    # At this point we have a model for the total noise covariance as
    # N = D + VV'. But since we're doing inverse covariance weighting
    # we need a similar representation for the inverse iN. The function
    # woodbury_invert computes iD, iV, s such that iN = iD + s iV iV'
    # where s usually is -1, but will become +1 if one inverts again
    iD, iV, s = woodbury_invert(D, V)
    # Also compute a representative white noise level
    bsize = bins[:, 1] - bins[:, 0]
    ivar = np.sum(iD * bsize[:, None], 0) / np.sum(bsize)
    # What about units? I haven't applied any fourier unit factors so far,
    # so we're in plain power units. From the uncorrelated model I found
    # that a factor of tod.shape[1] is needed
    iD *= nsamp
    iV *= nsamp**0.5
    ivar *= nsamp
    # Fix dtype. Cast each array from itself (the previous cross-assignment
    # D<->iD, V<->iV was a bug that lost the forward model)
    bins = np.ascontiguousarray(bins.astype(np.int32))
    D = np.ascontiguousarray(D.astype(tod.dtype))
    V = np.ascontiguousarray(V.astype(tod.dtype))
    iD = np.ascontiguousarray(iD.astype(tod.dtype))
    iV = np.ascontiguousarray(iV.astype(tod.dtype))
    return NmatDetvecs(bin_edges=self.bin_edges, eig_lim=self.eig_lim, single_lim=self.single_lim,
        window=self.window, nwin=nwin, downweight=self.downweight, verbose=self.verbose,
        bins=bins, D=D, V=V, iD=iD, iV=iV, s=s, ivar=ivar)