def feed_forward(self, x):
    """Pass the signal x through the learned filter weights"""
    if self.W1 is None or (self.scheme >= 2 and self.W2 is None):
        return None
    # Create a delayed version of the input signal by filtering through z^(-delta)
    num_coeffs = np.zeros((self.delta + 1, ))
    num_coeffs[self.delta] = 1
    x_delayed = scipy.signal.lfilter(num_coeffs, 1, x)
    x_ale = pa.input_from_history(x_delayed, self.l1)
    # Create desired signal
    d_ale = x[self.l1 - 1:]
    # ale
    e = []
    y = []
    for i in range(self.W1.shape[0]):
        val = np.dot(self.W1[i], x_ale[i])
        y.append(val)
        e.append(d_ale[i] - val)
    if self.scheme == 1:
        return np.array(y)
    # Feed the error signal into the second filter and set the desired signal as the
    # delayed version of the output of the previous AF
    x_anc = pa.input_from_history(e, self.l2)
    if self.scheme == 2:
        d_anc = y
    else:
        d_anc = d_ale
    # Delay d_anc by L/2
    num_coeffs = np.zeros((self.l2 // 2 + 1, ))
    num_coeffs[self.l2 // 2] = 1
    d_anc = scipy.signal.lfilter(num_coeffs, 1, d_anc)
    d_anc = d_anc[self.l2 - 1:]
    e = []
    for i in range(self.W2.shape[0]):
        val = np.dot(self.W2[i], x_anc[i])
        e.append(d_anc[i] - val)
    return np.array(e)
def ale_anc(self, x):
    """
    Scheme 1 (ALE only), Scheme 2 (ALE + ANC using only the speech estimate),
    Scheme 3 (ALE + ANC using speech + noise as the primary input).

    :param fs: sample rate of the audio file
    :param x: input noisy speech signal
    :param l: length of the adaptive predictor
    :param delta_ms: the number of samples into the future the linear predictor is predicting.
        Set it higher than the correlation range of the noise signal but lower than the
        correlation range of the speech signal.
    :param scheme: {1, 2 or 3} See description above.
    :return: s_hat, n_hat
    """
    # Create a delayed version of the input signal by filtering through z^(-delta)
    num_coeffs = np.zeros((self.delta + 1, ))
    num_coeffs[self.delta] = 1
    x_delayed = scipy.signal.lfilter(num_coeffs, 1, x)
    # Prepare the input into the ALE
    x_ale = pa.input_from_history(x_delayed, self.l1)
    # Create desired signal
    d_ale = x[self.l1 - 1:]
    # ale
    f = pa.filters.FilterNLMS(n=self.l1, mu=self.mu1, w="random")
    s_hat, n_hat, self.W1 = f.run(d_ale, x_ale)
    # anc; delay the primary input by L/2 samples so the prediction filter can have a two-sided impulse response
    if self.scheme == 2 or self.scheme == 3:
        f2 = pa.filters.FilterNLMS(n=self.l2, mu=self.mu2, w="random")
        x_anc = pa.input_from_history(n_hat, self.l2)
        if self.scheme == 2:
            d_anc = s_hat
        else:
            d_anc = d_ale
        # Delay d_anc by L/2
        num_coeffs = np.zeros((self.l2 // 2 + 1, ))
        num_coeffs[self.l2 // 2] = 1
        d_anc = scipy.signal.lfilter(num_coeffs, 1, d_anc)
        d_anc = d_anc[self.l2 - 1:]
        n_hat, s_hat, self.W2 = f2.run(d_anc, x_anc)
    return s_hat, n_hat
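For readers who want to try the ALE step in isolation, here is a minimal, self-contained sketch of the idea the two methods above implement: delay the noisy input by delta samples, build the regression matrix with pa.input_from_history, and let an NLMS filter predict the current sample from the delayed history. The test signal, delay, filter length and step size below are illustrative values chosen for this sketch, not taken from the project above.

import numpy as np
import scipy.signal
import padasip as pa

np.random.seed(0)
fs = 8000
t = np.arange(0, 1.0, 1.0 / fs)
# narrowband "speech-like" component plus wideband noise (illustrative signal)
x = np.sin(2 * np.pi * 200 * t) + np.random.normal(0, 0.5, len(t))

delta, l1, mu1 = 32, 64, 0.5  # illustrative delay, ALE length and step size

# z^(-delta): delay the input by delta samples
num_coeffs = np.zeros(delta + 1)
num_coeffs[delta] = 1
x_delayed = scipy.signal.lfilter(num_coeffs, 1, x)

# regression matrix from the delayed signal, desired signal from the original
x_ale = pa.input_from_history(x_delayed, l1)
d_ale = x[l1 - 1:]

f = pa.filters.FilterNLMS(n=l1, mu=mu1, w="random")
s_hat, n_hat, w_hist = f.run(d_ale, x_ale)
# s_hat follows the correlated (narrowband) part, n_hat the uncorrelated noise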
def process_padasip_e(data, n, mu):
    out = []
    for column in range(0, 4):
        u = data[:, column]
        x = pa.input_from_history(u, n)[:-1]
        d = u[n:]
        f = pa.filters.FilterNLMS(n=n, mu=mu, w="zeros")
        y, e, w = f.run(d, x)
        out.append(e.std())
        # out.append(scipy.stats.kurtosis(e))
    return out
def update(self):
    # read raw data from the PyAudio stream
    self.wf_data = self.stream.read(self.CHUNK + 8)
    # unpack the raw bytes into integers
    self.wf_data = struct.unpack(str(2 * (self.CHUNK + 8)) + 'B', self.wf_data)
    # turn them into an integer array (one channel, shifted to 0..255)
    self.wf_data = np.array(self.wf_data, dtype='b')[::2] + 128
    # get data ready for filtering
    self.wf_data = pa.input_from_history(self.wf_data, 9)
    # create target array (zero-noise plot)
    self.d = np.ones(2048)
    self.d[:] = [x * 128 for x in self.d]
    # create adaptive filter object with parameters
    self.filter = pa.filters.AdaptiveFilter(model="NLMS", n=9, mu=0.9, w="random")
    # run filter on collected data
    self.wf_data, e, w = self.filter.run(self.d, self.wf_data)
    # plot waveform data
    self.set_plotdata(name='waveform', data_x=self.x, data_y=self.wf_data)
    self.sp_data = fft(np.array(self.wf_data, dtype='int8') - 128)
    # keep only the first half of the FFT output
    self.sp_data = np.abs(self.sp_data[0:int(self.CHUNK / 2)]) * 2 / (128 * self.CHUNK)
    # zero the first element (the DC bin)
    self.sp_data[0] = 0
    # plot spectrum data
    self.set_plotdata(name='spectrum', data_x=self.f, data_y=self.sp_data)
    # get indices of the peaks of the spectrum data
    self.indexes = peakutils.indexes(self.sp_data, thres=0.2 / max(self.sp_data))
    # convert array indices to frequency values (multiply by 21.533):
    # 22050 / 1024 = 21.533, where 1024 is the length of self.sp_data
    self.indexes[:] = [index * 21.533 for index in self.indexes]
    if len(self.indexes) != 0:
        # print(type(self.indexes))
        self.noteSelect(self.indexes[0])
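A side note on the index-to-frequency conversion above: the hard-coded 21.533 Hz/bin is fs / N_fft. The values fs = 44100 and N_fft = CHUNK = 2048 below are inferred from the snippet (22050 / 1024 on the half-spectrum), not stated in it; the general conversion can also be read off np.fft.rfftfreq, as sketched here.

import numpy as np

# Bin index k of an N-point FFT sampled at fs corresponds to frequency k * fs / N.
fs, n_fft = 44100, 2048                       # assumed values matching 22050 / 1024 = 21.533 Hz/bin
freqs = np.fft.rfftfreq(n_fft, d=1.0 / fs)    # frequency of every half-spectrum bin

print(freqs[1])                  # ~21.53 Hz per bin
peak_bins = np.array([14, 33])   # illustrative peak indices
print(peak_bins * fs / n_fft)    # ~[301.5, 710.6] Hz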
def filter_signal(source, samplerate):
    order = 5
    # filtering
    x = pa.input_from_history(source, order)[:-1]
    source = source[order:]
    filter = pa.filters.FilterRLS(mu=0.99, n=order)
    y, e, w = filter.run(source, x)
    pl.subplot(3, 1, 1)
    pl.plot(source)
    pl.subplot(3, 1, 2)
    pl.plot(y)
    pl.subplot(3, 1, 3)
    pl.plot(e)
    return y
def ANC_filter(U, V):
    # filtering
    # n = 20  # length of filter
    n = 40
    Udelay = pa.input_from_history(U, n)[:-1]
    Vdelay = V[n - 1:-1]
    f = pa.filters.FilterRLS(mu=0.99, n=n)
    y, e, w = f.run(Vdelay, Udelay)
    music = e.astype(np.int16)
    # mmax = np.max(music)
    # print(mmax)
    # music = music * 32768 // mmax
    # music = music.astype(np.int16)
    print("the length of filter result is ", len(music))
    return music
def filter_signal(source, samplerate):
    ORDER = 5
    # filtering
    x = pa.input_from_history(source, ORDER)[:-1]
    source = source[ORDER:]
    f = pa.filters.FilterRLS(mu=0.99, n=ORDER)
    y, e, w = f.run(source, x)
    pl.subplot(3, 1, 1)
    pl.plot(source)
    pl.subplot(3, 1, 2)
    pl.plot(y)
    pl.subplot(3, 1, 3)
    pl.plot(e)
    return y
def least_mean_squares(self, y, sr):
    # noise = self.noise[:, 0].astype(float)/np.iinfo(np.int16).max
    # y = y.astype(float)/np.iinfo(np.int16).max
    # print "NOISE", noise
    # print "INPUT", y
    # w, _, _ = af.lms(noise, y, 20, 0.03)
    # w *= np.iinfo(np.int16).max
    # print "CLEAN", w
    # return w
    n = 20
    x = pa.input_from_history(y, n)[:-1]
    print(x)
    print(y.shape)
    # align the desired signal with the delayed input matrix
    y = y[n:]
    f = pa.filters.FilterRLS(mu=0.9, n=n)
    y, e, w = f.run(y, x)
    return y
# %matplotlib inline
plt.style.use('ggplot')  # nicer plots
np.random.seed(52102)  # always use the same random seed to make results comparable
# %config InlineBackend.print_figure_kwargs = {}

# signals creation: u, v, d
N = 5000
n = 10
u = np.sin(np.arange(0, N / 10., N / 50000.))
v = np.random.normal(0, 1, N)
d = u + v

# filtering
x = pa.input_from_history(d, n)[:-1]
d = d[n:]
u = u[n:]
f = pa.filters.FilterRLS(mu=0.9, n=n)
y, e, w = f.run(d, x)

# error estimation
MSE_d = np.dot(u - d, u - d) / float(len(u))
MSE_y = np.dot(u - y, u - y) / float(len(u))

# results
plt.figure(figsize=(12.5, 6))
plt.plot(u, "r:", linewidth=4, label="original")
plt.plot(d, "b", label="noisy, MSE: {}".format(MSE_d))
plt.plot(y, "g", label="filtered, MSE: {}".format(MSE_y))
plt.xlim(N - 100, N)
plt.xlim(0, N)
plt.legend()
plt.subplot(224)
plt.title("MMSE Filter error")
plt.xlabel("Number of iteration [-]")
plt.plot(pa.misc.logSE(signal, zy), "r", label="Squared error [dB]")
plt.legend()
plt.xlim(0, N)
plt.tight_layout()
plt.show()

# identification
window = 500
x = pa.input_from_history(x, n=window)
f = pa.filters.FilterNLMS(mu=0.9, n=window)
y = signal[:-(window - 1)]
y1, e, w = f.run(y, x)

# show results
plt.figure(figsize=(12.5, 9))
plt.subplot(221)
plt.title("Target Signal")
plt.xlabel("Number of iteration [-]")
plt.plot(y, "b", label="d - target")
plt.xlim(0, N)
plt.legend()
plt.subplot(222)
plt.title("LMS Adaptation")
x = 5 * np.sin(2 * np.pi * 300 * t)
for i in range(len(x)):
    if x[i] > 0:
        x[i] = 5
    else:
        x[i] = -5
noise = np.random.normal(0, 1, inputSize) * 0.1
x = x + noise
x = x / max(x)

# generate output
d = signal.lfilter(hCoeff, [1.0], x)

# input matrix
inputMatrix = pa.input_from_history(x, n2)
d = d[n2 - 1:]  # align the desired signal with the rows of inputMatrix
# inputMatrix = np.zeros((inputSize, n1 + 20))
# for i in range(n1):
#     inputMatrix[:, i] = x

# identification
f = pa.filters.FilterNLMS(mu=0.8, n=n2)
y, e, w = f.run(d, inputMatrix)
omega, mag2 = signal.freqz(w[-1], worN=omega)  # evaluate the identified filter at the same frequencies

# plot
fig, ax = plt.subplots(4)
mp.myPlotter(ax[0], freq, abs(mag))
mp.myPlotter(ax[0], freq, abs(mag2), param_dict={'color': 'red'})
import numpy as np
import matplotlib.pylab as plt
import padasip as pa

# creation of u, x and d
N = 100
u = np.random.random(N)
d = np.zeros(N)
for k in range(3, N):
    d[k] = 2 * u[k] + 0.1 * u[k - 1] - 4 * u[k - 2] + 0.5 * u[k - 3]
d = d[3:]

# identification
x = pa.input_from_history(u, 4)
y, e, w = pa.rls_filter(d, x, mu=0.1)

# show results
plt.figure(figsize=(13, 9))
plt.subplot(211)
plt.title("Adaptation")
plt.xlabel("samples - k")
plt.plot(d, "b", label="d - target")
plt.plot(y, "g", label="y - output")
plt.legend()
plt.subplot(212)
plt.title("Filter error")
plt.xlabel("samples - k")
plt.plot(abs(e), "r", label="abs(e) - prediction error")
plt.legend()
plt.tight_layout()
plt.show()
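All of the examples above use pa.input_from_history to turn a 1-D signal into a matrix of sliding history windows. As a quick sanity check of its output shape and row alignment (a minimal sketch, independent of the projects above):

import numpy as np
import padasip as pa

u = np.arange(10.0)              # 0, 1, ..., 9
X = pa.input_from_history(u, 4)  # each row holds 4 consecutive samples

print(X.shape)    # (7, 4): len(u) - n + 1 rows
print(X[0])       # [0. 1. 2. 3.]
print(X[-1])      # [6. 7. 8. 9.]
# dropping the last row (X[:-1]) pairs row k with target u[k + 4],
# which is why several examples above combine it with d = u[4:]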