def multi_channel_wiener_filter(x, d, n, g=None, beta=0):
    """Compute multichannel optimal wiener filter.

    From Elliot, Signal Processing for Optimal Control, Eq. 5.3.31

    Parameters
    ----------
    x : array_like, shape (N1[, K])
        K reference signals.
    d : array_like, shape (N2[, L])
        L disturbance signals.
    n : int
        Output filter length.
    g : None or array_like, shape (N3[, L[, M]]), optional
        Secondary path impulse response.
    beta : float
        Regularize through reference noise.

    Returns
    -------
    numpy.ndarray, shape (n[, M])
        Optimal wiener filter in frequency domain.

    """
    # No secondary path given: use a unit impulse (identity path).
    g = [1] if g is None else g

    x = atleast_2d(x)
    d = atleast_2d(d)
    g = atleast_3d(g)

    n_ref = x.shape[1]
    _, n_mic, _ = g.shape

    G = np.fft.fft(g, n=n, axis=0)  # TODO: align in time during correlation

    # Cross-power spectral density matrix of the reference signals.
    Sxx = np.zeros((n, n_ref, n_ref), dtype=complex)
    for row in range(n_ref):
        for col in range(n_ref):
            Sxx[:, row, col] = csd(
                x[:, row], x[:, col], nperseg=n, return_onesided=False
            )[1]

    # Cross-power spectral densities between references and disturbances.
    Sxd = np.zeros((n, n_mic, n_ref), dtype=complex)
    for mic in range(n_mic):
        for ref in range(n_ref):
            Sxd[:, mic, ref] = csd(
                x[:, ref], d[:, mic], nperseg=n, return_onesided=False
            )[1]

    # Eq. 5.3.31, with beta * I regularizing the reference spectra.
    regularized = Sxx + beta * np.identity(n_ref)
    return -np.linalg.pinv(G) @ Sxd @ np.linalg.pinv(regularized)
def filt_time_fast(self, x):
    """Filter reference signal in time domain.

    This is slightly different to `MultiChannelBlockLMS.filt` and
    `MultiChannelBlockLMS.filt_time`: the convolution of the last block is
    computed with the old filter. Might be faster for some filter
    dimensions.

    Parameters
    ----------
    x : (blocklength, Nin) array_like
        Reference signal.

    Returns
    -------
    y : (blocklength, Nout) numpy.ndarray
        Filter output.

    """
    x = atleast_2d(x)
    assert (x.shape[0], x.shape[1]) == (self.blocklength, self.Nin)
    # NOTE: filtering could also be done in FD. When is each one better?
    # NOTE: give olafilt the FFT of w?
    out, self._zifilt = olafilt(self.w, x, "nmk,nk->nm", zi=self._zifilt)
    return out
def filt(self, x):
    """Filter signal.

    Parameters
    ----------
    x : array_like, shape (N,) or (N, M)
        Signal with samples along first dimension

    Returns
    -------
    numpy.ndarray
        The filtered signal of shape (N, ) or (N, M)

    """
    original_shape = x.shape
    x = atleast_2d(x)
    nsamp, nsig = x.shape

    if self.zi is None:
        # First call: fill the delay line with zeros.
        self.zi = np.zeros((self.nsamples, nsig))

    # Push the new samples through the delay line: emit the oldest nsamp
    # samples, keep the remainder as the new state.
    buffer = np.concatenate((self.zi, x), axis=0)
    out = buffer[:nsamp]
    self.zi = buffer[nsamp:]

    return out.reshape(original_shape)
def adapt(self, x, e):
    """Adaptation step.

    If `self.locked == True` perform no adaptation, but fill buffers and
    estimate power.

    Parameters
    ----------
    x : (blocklength, Nsens, Nout, Nin) array_like
        Reference signal.
    e : (blocklength, Nsens) array_like
        Error signal.

    """
    x = atleast_4d(x)
    e = atleast_2d(e)
    assert x.shape == (self.blocklength, self.Nsens, self.Nout, self.Nin)
    assert e.shape == (self.blocklength, self.Nsens)

    fifo_extend(self._xbuff, x)
    fifo_extend(self._ebuff, e)

    X = np.fft.rfft(self._xbuff, axis=0)
    # Zero-pad the error buffer by the filter length for the linear
    # (not circular) correlation.
    E = np.fft.rfft(
        np.concatenate((np.zeros((self.length, self.Nsens)), self._ebuff)),
        axis=0,
    )

    if self.normalized:
        if self.normalized == 'elementwise':
            power = np.abs(X)**2
        elif self.normalized == 'sum_errors':
            power = np.sum(np.abs(X)**2, axis=1, keepdims=True)
        else:
            raise ValueError(f'Unknown normalization "{self.normalized}".')
        # Recursive (exponential) power averaging.
        self._P = (self.power_averaging * self._P
                   + (1 - self.power_averaging) * power)
        D = 1 / (self._P + self.epsilon_power)  # normalization factor
    else:
        D = 1

    # BUGFIX: honor `self.locked` as promised in the docstring. Buffers were
    # filled and power was estimated above; skip only the filter update.
    if self.locked:
        return

    update = np.einsum("nlmk,nl->nmk", D * X.conj(), E)

    if self.constrained:
        # Make the update causal: zero the non-causal part in time domain.
        ut = np.fft.irfft(update, axis=0)  # FIXME: pass n to all irffts
        ut[self.length:] = 0
        update = np.fft.rfft(ut, axis=0)

    self.W = self.leakage * self.W + self.stepsize * update  # update filter
def adapt(self, x, e):
    """Adaptation step.

    If `self.locked == True` perform no adaptation, but fill buffers and
    estimate power.

    Parameters
    ----------
    x : (blocklength, Nsens, Nout, Nin) array_like
        Reference signal.
    e : (blocklength, Nsens) array_like
        Error signal.

    """
    x = atleast_4d(x)
    e = atleast_2d(e)
    assert x.shape == (self.blocklength, self.Nsens, self.Nout, self.Nin)
    assert e.shape == (self.blocklength, self.Nsens)

    self.xbuff.append(x)
    self.ebuff.append(e)

    # When locked only the buffers are filled; return before the (costly)
    # FFTs of the full buffers instead of after, as nothing below has side
    # effects besides the filter update.
    if self.locked:
        return

    X = np.fft.fft(np.concatenate(self.xbuff, axis=0), axis=0)
    # Zero-pad the error buffer by the filter length for the linear
    # (not circular) correlation.
    E = np.fft.fft(
        np.concatenate(
            (np.zeros((self.length, self.Nsens)), np.concatenate(self.ebuff))
        ),
        axis=0,
    )

    if self.normalized:
        # TODO: power normalization is not implemented yet; currently
        # behaves exactly as if `normalized` were False.
        D = 1
    else:
        D = 1

    update = D * np.einsum("nlmk,nl->nmk", X.conj(), E)

    if self.constrained:
        # Make the update causal: zero the non-causal part in time domain.
        ut = np.real(np.fft.ifft(update, axis=0))
        ut[self.length:] = 0
        update = np.fft.fft(ut, axis=0)

    # update filter
    self.W = self.leakage * self.W - self.stepsize * update
def static_filter(p, g, n=None, squeeze=True):
    """Compute the optimal cancellation filter from primary and secondary paths.

    Note that this filter can be non-causal.

    Parameters
    ----------
    p : array_like, shape (N[, L])
        Primary path impulse response.
    g : array_like, shape (N[, L[, M]])
        Secondary path impulse response.
    n : int
        Output filter length.
    squeeze : bool, optional
        Squeeze output dimensions.

    Returns
    -------
    numpy.ndarray, shape (n[, M])
        Optimal filter in frequency domain.

    """
    assert p.shape[0] == g.shape[0]

    if n is None:
        n = p.shape[0]

    P = np.fft.fft(atleast_2d(p), n=n, axis=0)
    G = np.fft.fft(atleast_3d(g), n=n, axis=0)
    n_out = G.shape[2]

    # At every frequency bin solve G[k] @ w = -P[k] in the least-squares
    # sense for the M output channels.
    W = np.zeros((n, n_out), dtype=complex)
    for k in range(n):
        W[k] = -np.linalg.lstsq(G[k], P[k], rcond=None)[0]

    return W.squeeze() if squeeze else W
def filt(self, x):
    """Filter reference signal in frequency domain.

    Parameters
    ----------
    x : (blocklength, Nin) array_like
        Reference signal.

    Returns
    -------
    y : (blocklength, Nout) numpy.ndarray
        Filter output.

    """
    x = atleast_2d(x)
    assert (x.shape[0], x.shape[1]) == (self.blocklength, self.Nin)

    fifo_extend(self._xfiltbuff, x)

    # Overlap-save: filter the whole buffer in the frequency domain and
    # keep only the newest block of output samples.
    X = np.fft.rfft(self._xfiltbuff, axis=0)
    Y = np.einsum("nmk,nk->nm", self.W, X)
    return np.fft.irfft(Y, axis=0)[-self.blocklength:]
def filt(self, x):
    """Filter reference signal.

    Parameters
    ----------
    x : (blocklength, Nin) array_like
        Reference signal.

    Returns
    -------
    y : (blocklength, Nout) numpy.ndarray
        Filter output.

    """
    x = atleast_2d(x)
    assert (x.shape[0], x.shape[1]) == (self.blocklength, self.Nin)
    # NOTE: filtering could also be done in FD. When is each one better?
    # NOTE: give olafilt the FFT of w?
    out, self.zifilt = olafilt(self.w, x, "nmk,nk->nm", zi=self.zifilt)
    return out
def filt_time(self, x):
    """Filter reference signal in time domain.

    Parameters
    ----------
    x : (blocklength, Nin) array_like
        Reference signal.

    Returns
    -------
    y : (blocklength, Nout) numpy.ndarray
        Filter output.

    """
    x = atleast_2d(x)
    assert (x.shape[0], x.shape[1]) == (self.blocklength, self.Nin)

    fifo_extend(self._xfiltbuff, x)

    # NOTE: filtering could also be done in FD. When is each one better?
    # NOTE: give olafilt the FFT of w?
    # NOTE(review): `self._zifilt` is passed as initial conditions but the
    # returned overlap state is discarded here — confirm the interplay with
    # `filt_time_fast`, which does update `self._zifilt`.
    out, _ = olafilt(self.w, self._xfiltbuff, "nmk,nk->nm", zi=self._zifilt)
    return out[-self.blocklength:]