Example #1
def filtering2(data, sf, kind=1):
    data = data - np.mean(data)
    b = [0.2,0.2,0.2,0.2,0.2]
    a=[1]
    if kind == 1:
        data = lfilter(b,a,data)
    elif kind == 2:
        data = filtfilt(b,a,data)
    T = 1.0/sf
    Fc = 1
    c1 = 1.0/(1+np.tan(Fc*np.pi*T))
    c2 = (1-np.tan(Fc*np.pi*T))/(1+np.tan(Fc*np.pi*T))
    b= [c1, -c1]
    a = [1,-c2]
    if kind == 1:
        data = lfilter(b,a,data)
    elif kind == 2:
        data = filtfilt(b,a,data)
    fh = sf/2.0
    mb = 2
    b,a = butter(mb,30/fh)
    if kind == 1:
        data = lfilter(b,a,data)
    elif kind == 2:
        data = filtfilt(b,a,data)
    return  data
def iir_noise_filter(name, df, current_col):

    #extracting sampling rate from parameter file
    parafile = name + '-parameters.txt'
    try:
        params = read_csv(parafile, delim_whitespace=True, engine='python', names=['0','1','2','3'], index_col=0)
    except IOError:
        print('Cannot find parameter file')
        raise
    samplerate = params['2']['Samplerate']
    Samplingrates = {"2.5Hz":2.5, "5Hz":5.0, "10Hz":10.0, "15Hz":15.0,
                         "25Hz":25.0, "30Hz":30.0, "50Hz":50.0, "60Hz":60.0,
                         "100Hz":100.0, "500Hz":500.0, "1KHz":1000.0, "2KHz":2000.0,
                         "3.75KHz":3750.0, "7.5KHz":7500.0, "15KHz":1500.0, "30KHz":30000.0}
    samplerate = Samplingrates[samplerate]

    #constructing IIR notch filters
    fs = samplerate   # Sample frequency (Hz)
    f0 = 60.0  # Frequency to be removed from signal (Hz)
    Q = 2.0  # Quality factor
    # Design notch filter
    b, a = signal.iirnotch(f0, Q, fs)
    c, d = signal.iirnotch(f0*2, Q, fs)
    e, f = signal.iirnotch(f0*4, Q, fs)
    g, h = signal.iirnotch(f0*5, Q, fs)
    i, j = signal.iirnotch(f0*6, Q, fs)
    
    #apply filters
    yf1 = signal.filtfilt(b,a,df[current_col]) #60Hz filter
    yf2 = signal.filtfilt(c,d,yf1) #120Hz filter
    yf3 = signal.filtfilt(e,f,yf2) #240Hz filter
    yf4 = signal.filtfilt(g,h,yf3) #300Hz filter
    yf5 = signal.filtfilt(i,j,yf4) #360Hz filter

    return yf5
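The five hard-coded notch sections above can also be expressed as a loop over the 60 Hz harmonics. The sketch below is an illustrative rewrite, not the original function; the harmonic list, Q, sampling rate, and test signal are assumptions.

import numpy as np
from scipy import signal

def notch_harmonics(x, fs, f0=60.0, harmonics=(1, 2, 4, 5, 6), Q=2.0):
    # Cascade zero-phase IIR notches at f0 and the selected harmonics.
    y = np.asarray(x, dtype=float)
    for k in harmonics:
        b, a = signal.iirnotch(k * f0, Q, fs)
        y = signal.filtfilt(b, a, y)
    return y

# Usage on a synthetic trace contaminated with 60 Hz hum:
fs = 1000.0
t = np.arange(0, 2.0, 1.0 / fs)
x = np.sin(2 * np.pi * 5 * t) + 0.5 * np.sin(2 * np.pi * 60 * t)
clean = notch_harmonics(x, fs)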
Example #3
def reconstruct(pitch,fs,coffs,syllable):
	gain = coffs[0]
	coffs[0] = 1;
	x = np.double(fs)/np.double(pitch)
	num = np.ceil((pitch*3)/10.0)
	#300ms thus
	if syllable != "s":
		
		ex_input = np.zeros(int(num*x))
		for i in range (0,int(num)):
			ex_input[int(i*x)-1] = 1

	    # Filtering the signal
		
		out = signal.filtfilt([gain],coffs,ex_input)
		d_num = [1]
		d_den = [1,-0.9]
		out = signal.filtfilt(d_num,d_den,out)  # De-emphasis
		out = np.int16(out/np.max(np.abs(out)) * 32767)
	else:
		ex_input = np.random.normal(0,1,int(num))
		out = signal.filtfilt([gain],coffs,ex_input)
		out = np.int16(out/np.max(np.abs(out)) * 32767)

	return out	
def filter_data(eeg_data, fs):
  #FILTER CONSTANTS
  fn = fs/2
  filter_order = 2   #2nd order filter
  f_high = 50
  f_low = 5
  wn = [59,61]       #60 Hz notch (band-stop) window

  [b,a] = signal.butter(filter_order,f_high/fn, 'low')
  [b1,a1] = signal.butter(filter_order,f_low/fn, 'high')
  [bn,an] = signal.butter(4,[x/fn for x in wn], 'stop')

  filtered_eeg = []
  spectogram = []
  notched = []
  high_passed = []
  low_passed = []


  channel =  eeg_data
  high_passed = signal.filtfilt(b1,a1,channel);        # high pass filter
  low_passed = signal.filtfilt(b,a,high_passed);                # low pass filter
  y = signal.filtfilt(bn,an,low_passed);        # notch filter

  return y
Example #5
 def _forward_data(self, data):
     params = self.params
     try:
         mapped = filtfilt(self.__iir_num,
                           self.__iir_denom,
                           data,
                           axis=params.axis,
                           padtype=params.padtype,
                           padlen=params.padlen)
     except TypeError:
         # we have an ancient scipy, do manually
         # but it will only support 2d arrays
         if params.axis == 0:
             data = data.T
         if params.axis > 1:
             raise ValueError("this version of scipy does not "
                              "support nd-arrays for filtfilt()")
         if not (params['padlen'].is_default and params['padtype'].is_default):
             warning("this version of scipy.signal.filtfilt() does not "
                     "support `padlen` and `padtype` arguments -- ignoring "
                     "them")
         mapped = [filtfilt(self.__iir_num,
                            self.__iir_denom,
                            x)
                 for x in data]
         mapped = np.array(mapped)
         if params.axis == 0:
             mapped = mapped.T
     return mapped
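The except TypeError branch above exists only for very old SciPy builds whose filtfilt lacks the axis/padtype/padlen keywords. A minimal standalone sketch of the same fallback pattern, assuming a 2-D array and modern imports:

import numpy as np
from scipy.signal import butter, filtfilt

def filtfilt_2d(b, a, data, axis=1):
    # Zero-phase filter a 2-D array along `axis`; fall back to a
    # row-by-row loop if this filtfilt rejects the axis keyword.
    try:
        return filtfilt(b, a, data, axis=axis)
    except TypeError:
        if axis == 0:
            data = data.T
        out = np.array([filtfilt(b, a, row) for row in data])
        return out.T if axis == 0 else out

b, a = butter(4, 0.2)
y = filtfilt_2d(b, a, np.random.randn(3, 500), axis=1)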
Example #6
def corr_func(drive, response, fsamp, fdrive, good_pts = [], filt = False, band_width = 1):
    #gives the correlation over a cycle of drive between drive and response.

    #First subtract off the median of each signal to avoid correlating the dc offset
    drive = drive-np.median(drive)
    response  = response - np.median(response)

    #bandpass filter around drive frequency if desired.
    if filt:
        b, a = sp.butter(3, [2.*(fdrive-band_width/2.)/fsamp, 2.*(fdrive+band_width/2.)/fsamp ], btype = 'bandpass')
        drive = sp.filtfilt(b, a, drive)
        response = sp.filtfilt(b, a, response)
    
    #Compute the number of points and drive amplitude to normalize correlation
    lentrace = len(drive)
    drive_amp = np.sqrt(2)*np.std(drive)

      
    #Throw out bad points if desired
    if len(good_pts):
        response[np.logical_not(good_pts)] = 0.
        lentrace = np.sum(good_pts)    


    corr_full = good_corr(drive, response, fsamp, fdrive)/(lentrace*drive_amp)
    return corr_full
    def ApplyFilter(self, data_in):
    
        data_out = sp.filtfilt(self.coefb_low, self.coefa_low, data_in)
        
        data_out = sp.filtfilt(self.coefb_high, self.coefa_high, data_out)

        return data_out
Example #8
def test_train_segments():
    results = np.zeros((6, 12))
    for i in range(6):
        dat = io.loadmat('testset%02d.mat' % (i + 1))
        eog_v = dat['eog_v_seg'][0]
        eog_h = dat['eog_h_seg'][0]
        fs = float(dat['fs'][0])

        fir_len = 150
        stop_limit = 500 / fs
        wn = stop_limit / (fs/2)
        b = sig.firwin(fir_len, wn)

        eog_v_f = sig.filtfilt(b, 1, eog_v)
        eog_h_f = sig.filtfilt(b, 1, eog_h)
        p = PyGERT()
        p.train(eog_h_f, eog_v_f)

        results[i,  0] = p.mu_fix
        results[i,  1] = p.sigma_fix
        results[i,  2] = p.prior_fix
        results[i,  3] = p.mu_sac
        results[i,  4] = p.sigma_sac
        results[i,  5] = p.prior_sac
        results[i,  6] = p.mu_bli
        results[i,  7] = p.sigma_bli
        results[i,  8] = p.prior_bli
        results[i,  9] = p.mu_bs
        results[i, 10] = p.sigma_bs
        results[i, 11] = p.prior_bs

    print(results)
    np.savetxt('segment_test_python.csv', results, delimiter=',')
def generate_noise(D,N):
  """Generate data for the changepoint detection. Data can either be of type 0
  or type 1, but when it's a combination fo both, we define a target label
  Input
  - D,N Dimenstionality arguments D dimensions over N samples
  Output
  - Data in format
  X is a matrix in R^{N x D}
  y is a matrix in R^{N,} not to donfuse with {N,1}"""
  #Check if we have even D, so we can split the array in future
  assert D%2 == 0, 'We need even number of dimensions'
  ratioP = 0.5   #balance of targets
  X = np.random.randn(N,D)
  y = np.zeros(N)
  mark = np.zeros(N)
  #Generate two sets of filter coefficients (cutoff0, cutoff1 and fs are assumed module-level constants)
  filters = {}
  filters['b1'],filters['a1'] = signal.butter(4,2.0*cutoff1/fs,btype='lowpass')
  filters['b0'],filters['a0'] = signal.butter(4,2.0*cutoff0/fs,btype='lowpass')
  for i in xrange(N):
    if np.random.rand() > 0.5:	#Half of the samples will have changepoint, other half wont
      Dcut = np.random.randint(pattern_len,D-pattern_len)
      signalA = signal.filtfilt(filters['b1'],filters['a1'],X[i])
      signalB = signal.filtfilt(filters['b0'],filters['a0'],X[i])
      X[i] = np.concatenate((signalA[:Dcut],signalB[Dcut:]),axis=0)    #Concatenate the two signals
      if True:  #Boolean: do you want to introduce a pattern at the changepoint?
        Dstart = int(Dcut - pattern_len/2)
        X[i,Dstart:Dstart+pattern_len] = pattern
      y[i] = 1		#The target label
      mark[i] = Dcut
    else:
      mode = int(np.random.rand()>ratioP)
      X[i] = signal.filtfilt(filters['b'+str(mode)],filters['a'+str(mode)],X[i])
      y[i] = 0		#The target label
  return X,y,mark   
Example #10
def lowhighpass_filter(data, cutperiod, pass_periods='low'):
    """Butterworth low- or high pass filter.

    This function applies a linear filter twice, once forward and once
    backwards. The combined filter has zero phase.

    Args:
        data (array): Data array of shape (time, variables).
        cutperiod (int): Period of cutoff.
        pass_periods (str, optional): Either 'low' or 'high' to act as a low-
            or high-pass filter

    Returns:
        Filtered data array.
    """
    try:
        from scipy.signal import butter, filtfilt
    except ImportError:
        print('Could not import scipy.signal for butterworth filtering!')

    fs = 1.
    order = 3
    ws = 1. / cutperiod / (0.5 * fs)
    b, a = butter(order, ws, pass_periods)
    if numpy.ndim(data) == 1:
        data = filtfilt(b, a, data)
    else:
        for i in range(data.shape[1]):
            data[:, i] = filtfilt(b, a, data[:, i])

    return data
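A short usage sketch for lowhighpass_filter, assuming the function and its module-level `import numpy` are in scope; the cutoff period and test signal are illustrative.

import numpy
numpy.random.seed(0)
t = numpy.arange(500)
x = numpy.sin(2 * numpy.pi * t / 50.) + 0.3 * numpy.random.randn(500)
# Keep oscillations slower than 10 samples per period (fs is fixed at 1 inside the function).
smoothed = lowhighpass_filter(x.copy(), cutperiod=10, pass_periods='low')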
Example #11
def plot_data(dataWindow):

  print("Init plotting")
  b, a = butter(2, 0.0001, 'high')
  #b, a = butter(2, 0.5, 'high')

  fig = plt.figure()
  ax1 = fig.add_subplot(211)
  ax2 = fig.add_subplot(212)
  line1, = ax1.plot(dataWindow.filt_data)
  line2, = ax2.plot(eog2)
  fig.show()

  print("Start plotting")

  #while True:
  for t in range(40*12):

    #print("Filtering")
    eog1_filt = filtfilt(b, a, eog1)
    eog2_filt = filtfilt(b, a, eog2)

    #print("Plotting")
    line1.set_ydata(dataWindow.filt_data)
    #ax1.set_ylim((min(eog1_filt), max(eog1_filt)))
    ax1.set_ylim(-50000,50000)
    line2.set_ydata(eog2_filt)
    #ax2.set_ylim((min(eog2_filt), max(eog2_filt)))
    ax2.set_ylim(-50000,50000)
    fig.canvas.draw()
    fig.show()

    sleep(.5)

  print("Plotting ended")
  def filter_data(self,eeg_data):
    #FILTER CONSTANTS
    fs = self.fs
    fn = self.fn
    filter_order = 2   #2nd order filter
    f_high = 50
    f_low = 5
    wn = [59,61]       #60 Hz notch (band-stop) window

    [b,a] = signal.butter(filter_order,f_high/fn, 'low')
    [b1,a1] = signal.butter(filter_order,f_low/fn, 'high')
    [bn,an] = signal.butter(4,[x/fn for x in wn], 'stop')

    filtered_eeg = []
    spectogram = []
    notched = []
    high_passed = []
    low_passed = []
    print(eeg_data)
    for i in range(len(eeg_data[0])):
      channel =  eeg_data[:,i]
      high_passed = signal.filtfilt(b1,a1,channel);        # high pass filter
      low_passed = signal.filtfilt(b,a,high_passed);                # low pass filter
      y = signal.filtfilt(bn,an,low_passed);        # notch filter
      filtered_eeg.append(y);
    self.filtered_eeg = filtered_eeg
    return filtered_eeg


# if __name__ == '__main__':
#   main()
Example #13
    def test_sine(self):
        rate = 2000
        t = np.linspace(0, 1.0, rate + 1)
        # A signal with low frequency and a high frequency.
        xlow = np.sin(5 * 2 * np.pi * t)
        xhigh = np.sin(250 * 2 * np.pi * t)
        x = xlow + xhigh

        b, a = butter(8, 0.125)
        z, p, k = tf2zpk(b, a)
        # r is the magnitude of the largest pole.
        r = np.abs(p).max()
        eps = 1e-5
        # n estimates the number of steps for the
        # transient to decay by a factor of eps.
        n = int(np.ceil(np.log(eps) / np.log(r)))

        # High order lowpass filter...
        y = filtfilt(b, a, x, padlen=n)
        # Result should be just xlow.
        err = np.abs(y - xlow).max()
        assert_(err < 1e-4)

        # A 2D case.
        x2d = np.vstack([xlow, xlow + xhigh])
        y2d = filtfilt(b, a, x2d, padlen=n, axis=1)
        assert_equal(y2d.shape, x2d.shape)
        err = np.abs(y2d - xlow).max()
        assert_(err < 1e-4)

        # Use the previous result to check the use of the axis keyword.
        # (Regression test for ticket #1620)
        y2dt = filtfilt(b, a, x2d.T, padlen=n, axis=0)
        assert_equal(y2d, y2dt.T)
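The padlen estimate above (enough padding for the transient to decay by a factor of eps, based on the largest pole magnitude) generalizes to a small helper. This is a hedged sketch of that idea, not SciPy API:

import numpy as np
from scipy.signal import butter, tf2zpk, filtfilt

def estimate_padlen(b, a, eps=1e-5):
    # Samples needed for the filter transient to decay by a factor of eps,
    # taken from the slowest-decaying (largest-magnitude) pole.
    _, p, _ = tf2zpk(b, a)
    r = np.abs(p).max()
    return int(np.ceil(np.log(eps) / np.log(r)))

b, a = butter(8, 0.125)
n = estimate_padlen(b, a)
x = np.random.randn(4 * n)
y = filtfilt(b, a, x, padlen=n)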
Example #14
def plot_comparison(f1, f2, data, xs=[-0.5, 7], ys=[-400, 1000], s=0):
    '''Compare file 1 (red) with file 2 (blue), taking the mean and std
    of the signal'''
    d = handler.extract_data(f1)
    d2 = handler.extract_data(f2)
    # due to the 60Hz noise of the water heater, I have to filter it out with a notch filter
    temp =  np.array([56.0,64.0])/(d['fs']/2.0)
    # 3 is the highest order I can go without it going crazy
    b,a = butter(3, temp[0], btype='bandstop')

    y = filtfilt(b,a,lowpass_filter((d['stim']*1e6, d['fs']))[0].T).T
    y2 = filtfilt(b,a,lowpass_filter((d2['stim']*1e6, d2['fs']))[0].T).T

    ystd = np.std(y, axis=1)
    ymean = average((y, d['fs']))[0][:,0]
    t = d['t']*1e3
    y2std = np.std(y2, axis=1)
    y2mean = average((y2, d2['fs']))[0][:,0]
    t2 = d2['t']*1e3
    plt.figure(figsize=(8,6))
    plt.fill_between(t, (ymean-ystd), (ymean+ystd), alpha=0.5, facecolor=color_palette[0])
    plt.plot(t, ymean, color=color_palette[0])
    plt.fill_between(t2, y2mean - y2std, y2mean + y2std, alpha=0.5, facecolor=color_palette[5])
    plt.plot(t2, y2mean, color=color_palette[5])
    plt.xlim(xs)
    plt.ylim(ys)
    plt.ylabel('Voltage (µV)')
    plt.xlabel('Time (ms)')
    plt.title(descriptive_title(f1).replace('<br>', '\n'))
    plt.legend([str(handler.get_current(data,f1))+'µA', str(handler.get_current(data,f2))+'µA'])
    if s and type(s) == str:
        plt.savefig(s + '.pdf')
    elif s:
        plt.savefig(f1[:-4]+ f2[:-4]+'-comparison.pdf')
Example #15
 def makeFiltered(self):
     filtered = np.zeros(len(self.data))
     b1, a1 = butter(1, 0.0003, 'lowpass')
     filtered = filtfilt(b1, a1, self.data)
     filtered = self.data - filtered
     b2, a2 = butter(1, 0.025, 'lowpass')
     self.filt_data = filtfilt(b2, a2, filtered)
Example #16
 def test_axis(self):
     # Test the 'axis' keyword on a 3D array.
     x = np.arange(10.0 * 11.0 * 12.0).reshape(10, 11, 12)
     b, a = butter(3, 0.125)
     y0 = filtfilt(b, a, x, padlen=0, axis=0)
     y1 = filtfilt(b, a, np.swapaxes(x, 0, 1), padlen=0, axis=1)
     assert_array_equal(y0, np.swapaxes(y1, 0, 1))
     y2 = filtfilt(b, a, np.swapaxes(x, 0, 2), padlen=0, axis=2)
     assert_array_equal(y0, np.swapaxes(y2, 0, 2))
Example #17
def apply_filter(x, filter=None):
    b, a = filter
    try:
        out_arr = signal.filtfilt(b, a, x, axis=0)
    except TypeError:   
        out_arr = np.zeros_like(x)
        for i_ch in range(x.shape[1]):
            out_arr[:, i_ch] = signal.filtfilt(b, a, x[:, i_ch]) 
    return out_arr
Example #18
def filtracja(kan,fs, czest):
	# filtracja = filtering; kan: input channel, fs: sampling rate (Hz), czest: high-pass cutoff (Hz)
	n=1
	[bx,ax]=ss.butter(n, czest/(fs/2.),btype='highpass')
	y1a = ss.filtfilt(bx,ax,kan)
	
	[b,a]=ss.butter(n, [48./(fs/2.), 52./(fs/2.)],btype='bandstop')
	y1 = ss.filtfilt(b,a,y1a)
 
	return y1
Example #19
    def generate_peoples_results_files(self):

        self.np_result = np.c_[self.results[0]['blue'], self.results[0]['green'], self.results[0]['red']]
        list_number = len(self.results[0]['blue'])

        #  ICA
        ica = FastICA(n_components=3, fun='logcosh', max_iter=2000)
        ica_transformed = ica.fit_transform(self.np_result)
        component_all = ica_transformed.ravel([1])
        component_1 = component_all[:list_number]
        component_2 = component_all[list_number:(2 * list_number)]
        component_3 = component_all[(2 * list_number):(3 * list_number)]

        #  butter_smooth
        N = 8
        Wn = [1.6 / 30, 4.0 / 30]
        t = np.linspace(1 / 30, list_number / 30, list_number)
        b, a = signal.butter(N, Wn, 'bandpass', analog=False)
        filter_1 = signal.filtfilt(b, a, component_1)
        filter_2 = signal.filtfilt(b, a, component_2)
        filter_3 = signal.filtfilt(b, a, component_3)
        lowess_1 = sm.nonparametric.lowess(filter_1, t, frac=10.0 / list_number)
        lowess_2 = sm.nonparametric.lowess(filter_2, t, frac=10.0 / list_number)
        lowess_3 = sm.nonparametric.lowess(filter_3, t, frac=10.0 / list_number)

        smooths = []
        smooth_1 = lowess_1[:, 1]
        smooth_2 = lowess_2[:, 1]
        smooth_3 = lowess_3[:, 1]
        smooths.append(smooth_1)
        smooths.append(smooth_2)
        smooths.append(smooth_3)

        # FFT and spectrum
        fft_1 = np.fft.fft(smooth_1, 256)
        fft_2 = np.fft.fft(smooth_2, 256)
        fft_3 = np.fft.fft(smooth_3, 256)
        spectrum_1 = list(np.abs(fft_1) ** 2)
        spectrum_2 = list(np.abs(fft_2) ** 2)
        spectrum_3 = list(np.abs(fft_3) ** 2)
        max1 = max(spectrum_1)
        max2 = max(spectrum_2)
        max3 = max(spectrum_3)
        num_spec1 = spectrum_1.index(max(spectrum_1))
        if num_spec1 > (list_number / 2):
            num_spec1 = 256 - num_spec1
        num_spec2 = spectrum_2.index(max(spectrum_2))
        if num_spec2 > (list_number / 2):
            num_spec2 = 256 - num_spec2
        num_spec3 = spectrum_3.index(max(spectrum_3))
        if num_spec3 > (list_number / 2):
            num_spec3 = 256 - num_spec3
        num_spec = [num_spec1, num_spec2, num_spec3]
        max_all = [max1, max2, max3]
        max_num = max_all.index(max(max_all))
        self.heartRate = int(num_spec[max_num] * 1800 / 256) + 1
        return smooths[max_num]
def sf_anal(infile, chunk_rate=80.0, n_steps=12, base_freq=440.0, min_level=0.001, cutoff=None):
    min_sq_level = min_level**2
    if cutoff is None: cutoff = chunk_rate

    sr, wav = load_non_wav(infile)
    chunk_size = int(round(float(sr) / chunk_rate))
    wav = high_passed(sr, wav)
    wav2 = wav * wav
    freqs = 2**(np.linspace(0.0, n_steps, num=n_steps, endpoint=False)/n_steps) * base_freq

    amp2 = wav2
    rel_cutoff = 2.0/(float(sr)/cutoff)  # relative to nyquist freq, not samplerate
    b, a = RC(Wn=rel_cutoff)
    for i in xrange(4):
        amp2 = filtfilt(b, a, amp2)

    mask = amp2>min_sq_level
    little_amp2 = decimate(
        amp2, chunk_size, ftype='fir'
    )
    little_mask = mask[np.arange(0,little_amp2.size,1)*chunk_size]

    little_corrs = []
    for freq in freqs:
        # For now, offset is rounded to the nearest sample
        offset = int(round(float(sr)/freq))
        cov = np.zeros_like(wav)
        cov[:-offset] = wav[offset:]*wav[:-offset]
        # repeatedly filter; this is effectively an 8th-order lowpass now
        smooth_cov = cov
        for i in xrange(4):
            smooth_cov = filtfilt(b, a, smooth_cov)
        
        # technically the correlation should be taken wrt the harmonic mean of the variances at
        # the two times, but we assume autocorrelation lag << smooth time
        little_corrs.append(
            decimate(
                mask * smooth_cov/np.maximum(amp2, min_sq_level),
                chunk_size,
                ftype='fir' #FIR is needed to be stable at haptic rates
            )
        )
    
    
    all_corrs = np.vstack(little_corrs)
    sample_times = (np.arange(0,little_amp2.size,1)*chunk_size).astype(float)/sr

    #trim "too quiet" stuff
    all_corrs = all_corrs[:,np.where(little_mask)[0]]
    sample_times = sample_times[np.where(little_mask)[0]]
    little_amp2 = little_amp2[np.where(little_mask)[0]]
    
    return dict(
        all_corrs=all_corrs,
        sample_times=sample_times,
        amp=np.sqrt(little_amp2), #RMS amp is more usual
    )
Example #21
File: ABR.py  Project: r-b-g-b/Lab
	def filter_abr(self, x, duration = 0.00999424, nsamples = 244, Wn = 0.02, btype = 'high'):

		b, a = butter(10, Wn = Wn, btype = btype)
		if len(x.shape)==1:
			x_filt = filtfilt(b, a, x)
		elif len(x.shape)>1:
			x_filt = np.empty_like(x)
			for i in range(x.shape[1]):
				x_filt[:, i] = filtfilt(b, a, x[:, i])
		return x_filt
def scatter(filename, thrs, nchannels=2, chnl=5):
    b, a = iirfilter(1, (0.002, 0.05))
    with trace_gen(filename, nchannels, chnl) as gen:
        data = [(sum(event[thrs:thrs + 180]),
                 min(filtfilt(b, a, event)[thrs:thrs + 180]) - 200)
                for event in gen if max(filtfilt(b, a, event)[:20]) < 400]

    int_data, min_data = zip(*data)
    return int_data, min_data
Example #23
	def noise_filters(self,data):
		[b1, a1] = [self.high_pass_coefficients[0],self.high_pass_coefficients[1]]
		[b, a] = [self.low_pass_coefficients[0],self.low_pass_coefficients[1]]
		[bn, an] = [self.notch_coefficients[0],self.notch_coefficients[1]]

		for i,channel in enumerate(data):
		  high_passed = signal.filtfilt(b1,a1,channel);        # high pass filter
		  low_passed = signal.filtfilt(b,a,high_passed);       # low pass filter
		  y = signal.filtfilt(bn,an,low_passed);   	     # notch filter
		  self.filtered_eeg[i] = y;
		return self.filtered_eeg
Example #24
 def filter(self):
     """
     Filters this time sequence.
     """
     eog_filt1 = np.zeros(len(self.vector))
     b1, a1 = butter(1, 0.0003, 'lowpass')
     eog_filt1 = filtfilt(b1, a1, self.vector)
     matrix2 = (self.getVector() - eog_filt1)
     b2, a2 = butter(1, 0.05, 'lowpass')
     eogfilt2 = filtfilt(b2, a2, matrix2)
     self.setVector(eogfilt2)
Example #25
def profile(fname, ends = 100, stage_cal = 8.):
    dat, attribs, f = bu.getdata(fname)
    dat = dat[ends:-ends, :]
    dat[:, stage_column]*=stage_cal
    f.close()
    b, a = sig.butter(3, 0.25)
    int_filt = sig.filtfilt(b, a, dat[:, data_column])    
    proft = np.gradient(int_filt)
    stage_filt = sig.filtfilt(b, a, dat[:, stage_column])
    dir_sign = np.sign(np.gradient(stage_filt))
    b, y, e = spatial_bin(dat[dir_sign<0, stage_column], proft[dir_sign<0])
    return b, y, e
Example #26
def Filter(data_in, f1, f2, sample_rate, filter_order):

    nyq_rate = sample_rate / 2
    w1, w2 = f1 / nyq_rate, f2 / nyq_rate

    b, a = sp.butter(filter_order, [w2], btype='low')
    data_out = sp.filtfilt(b, a, data_in)

    b, a = sp.butter(filter_order, [w1], btype='high')
    data_out = sp.filtfilt(b, a, data_out)

    return data_out
Example #27
def bandpass_filter(x, s_f, hi=400, lo=7000):
    """
    :param x: one d numpy arrays
    :param s_f: sampling frequency (in hz)
    :param hi: hi-pass cutoff
    :param lo: lo-pass cutoff
    :return:
    """
    hp_b, hp_a = sg.butter(4, hi / (s_f / 2.), btype='high')
    lp_b, lp_a = sg.butter(4, lo / (s_f / 2.), btype='low')
    x_hi = sg.filtfilt(hp_b, hp_a, x, axis=0)
    x_lo = sg.filtfilt(lp_b, lp_a, x_hi, axis=0)
    return x_lo
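A brief usage sketch for the cascade above, assuming the module imports scipy.signal as sg; the sampling rate and the two test tones are illustrative. Note that `hi` names the high-pass corner and `lo` the low-pass corner.

import numpy as np
from scipy import signal as sg

s_f = 30000.0                      # assumed sampling rate (Hz)
t = np.arange(0, 1.0, 1.0 / s_f)
x = np.sin(2 * np.pi * 50 * t) + np.sin(2 * np.pi * 2000 * t)
y = bandpass_filter(x, s_f)        # passes ~400 Hz - 7 kHz, so mostly the 2 kHz tone remains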
Example #28
    def motion_detected(self, data):
        filtCutOff = 0.001;
        [b, a] = butter(1, (2*filtCutOff*1.0)/(self.sr), btype='highpass');
        acc_magFilt = filtfilt(b, a, data);
        #acc_magFilt = lfilter(b, a, lfilter(b,a,data));
        acc_magFilt = [abs(x) for x in acc_magFilt] 
        filtCutOff = 0.3;
        [b, a] = butter(1, (2*filtCutOff)*1.0/(self.sr), btype='lowpass');
        filtered = filtfilt(b, a, acc_magFilt);
        #filtered = lfilter(b, a, lfilter(b,a,acc_magFilt));

        stan = filtered < 0.05
        return stan, filtered
Example #29
    def __amp_detect(self, x):
        
        ref = np.floor(self.min_ref_per*self.sr/1000.0)
        
        # HIGH-PASS FILTER OF THE DATA
        (b,a) = signal.ellip(2, 0.1, 40, [self.fmin_detect*2.0/self.sr,self.fmax_detect*2.0/self.sr], btype='bandpass', analog=0, output='ba')
        xf_detect = signal.filtfilt(b, a, x)
        (b,a) = signal.ellip(2, 0.1, 40, [self.fmin_sort*2.0/self.sr,self.fmax_sort*2.0/self.sr], btype='bandpass', analog=0, output='ba')
        xf = signal.filtfilt(b, a, x)
        
        
        noise_std_detect = scipy.median(np.abs(xf_detect))/0.6745;
        noise_std_sorted = scipy.median(np.abs(xf))/0.6745;
       
        thr = self.stdmin * noise_std_detect        #thr for detection is based on detected settings.
        thrmax = self.stdmax * noise_std_sorted     #thrmax for artifact removal is based on sorted settings.
        
        # LOCATE SPIKE TIMES
        nspk = 0;
        xaux = np.argwhere(xf_detect[self.w_pre+1:len(xf_detect)-self.w_post-1-1] > thr) + self.w_pre + 1
        xaux = np.resize(xaux,len(xaux))
        xaux0 = 0;
        index = []
        for i in range(len(xaux)):
            if xaux[i] >= (xaux0 + ref):
                # after finding a peak, resume the search ref samples past the last detection
                iaux = xf[xaux[i]:xaux[i]+int(np.floor(ref/2.0))].argmax(0)    # introduces alignment
                nspk = nspk + 1
                index.append(iaux + xaux[i])
                xaux0 = index[nspk-1]
        
        # SPIKE STORING (with or without interpolation)
        ls = self.w_pre + self.w_post
        spikes = np.zeros([nspk,ls+4])
        xf = np.concatenate((xf,np.zeros(self.w_post)),axis=0)
        
        for i in range(nspk):                          # Eliminates artifacts
            if np.max( np.abs( xf[index[i]-self.w_pre:index[i]+self.w_post] )) < thrmax :
                spikes[i,:] = xf[index[i]-self.w_pre-1:index[i]+self.w_post+3]
     
        aux = np.argwhere(spikes[:,self.w_pre] == 0)       #erases indexes that were artifacts
        if len(aux) != 0:
            aux = aux.reshape((1,len(aux)))[0]
            spikes = np.delete(spikes, aux, axis = 0)
            index = np.delete(index,aux)
 
        if self.interpolation == 'y':
            # Does interpolation
            spikes = self.__int_spikes(spikes)

        return spikes, thr, index
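The detection threshold above uses the common robust noise estimate sigma ~ median(|x|)/0.6745 (exact for zero-mean Gaussian noise). A standalone, hedged sketch of just that step, with an illustrative band and multiplier:

import numpy as np
from scipy import signal

def robust_threshold(x, fs, band=(300., 3000.), stdmin=5.0):
    # Band-pass the trace, estimate the noise sigma from the median absolute
    # value, and return a detection threshold of stdmin sigmas.
    b, a = signal.ellip(2, 0.1, 40, [2.0 * band[0] / fs, 2.0 * band[1] / fs],
                        btype='bandpass')
    xf = signal.filtfilt(b, a, x)
    noise_std = np.median(np.abs(xf)) / 0.6745
    return stdmin * noise_std, xf

fs = 24000.0
thr, xf = robust_threshold(np.random.randn(int(fs)), fs)   # one second of synthetic noise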
Example #30
def decimate(x, q=10, n=4, k=0.8, filterfun=ss.cheby1):
    """
    scipy.signal.decimate like downsampling using filtfilt instead of lfilter,
    and filter coeffs from butterworth or chebyshev type 1.


    Parameters
    ----------
    x : numpy.ndarray
        Array to be downsampled along last axis.
    q : int 
        Downsampling factor.
    n : int
        Filter order.
    k : float
        Aliasing filter critical frequency Wn will be set as Wn=k/q.
    filterfun : function
        `scipy.signal.filter_design.cheby1` or
        `scipy.signal.filter_design.butter` function

    Returns
    -------
    numpy.ndarray
        Array of downsampled signal.
              
    """
    if not isinstance(q, int):
        raise TypeError("q must be an integer")

    if n is None:
        n = 1

    if filterfun == ss.butter:
        b, a = filterfun(n, k / q)
    elif filterfun == ss.cheby1:
        b, a = filterfun(n, 0.05, k / q)
    else:
        raise Exception('only ss.butter or ss.cheby1 supported')

    try:
        y = ss.filtfilt(b, a, x)
    except: # Multidim array can only be processed at once for scipy >= 0.9.0
        y = []
        for data in x:
            y.append(ss.filtfilt(b, a, data))
        y = np.array(y)

    try:
        return y[:, ::q]
    except:
        return y[::q]
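A usage sketch for this filtfilt-based decimate, assuming the module-level `import numpy as np` and `import scipy.signal as ss` that the function relies on; the signal and factor are illustrative.

import numpy as np

fs = 1000.0
t = np.arange(0, 2.0, 1.0 / fs)
x = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(t.size)
x_ds = decimate(x, q=10)           # effective rate drops to 100 Hz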
Example #31
 def bandpass_filter(self, data, cutoffs, fs, order):
     low, high = cutoffs[0] / (fs / 2), cutoffs[1] / (fs / 2)
     b, a = butter(order, [low, high], btype='band', output='ba')
     y = filtfilt(b, a, data)
     return np.asarray(y, dtype=np.int16)
Example #32
def bandpass_filter(sample, band, sample_rate, filter_order):
    nyq = sample_rate * 0.5
    _band = [(f / nyq) for f in band]
    b, a = signal.butter(filter_order, _band, btype='band')
    return signal.filtfilt(b, a, sample)
Example #33
def lowpass_filter(sample, cutoff, sample_rate, filter_order):
    nyq = sample_rate * 0.5
    _cutoff = cutoff / nyq
    b, a = signal.butter(filter_order, _cutoff, btype='lowpass')
    y = signal.filtfilt(b, a, sample)
    return y
Example #34
    def filter_file(data_file_in, data_file_out, do_filtering,
                    do_remove_median):

        try:
            cut_off = params.getfloat('filtering', 'cut_off')
            cut_off = [cut_off, 0.95 * (params.rate / 2.)]
        except Exception:
            cut_off = params.get('filtering', 'cut_off')
            cut_off = cut_off.split(',')
            try:
                cut_off[0] = float(cut_off[0])
            except Exception:
                if comm.rank == 0:
                    print_and_log(
                        ['First value of cut off must be a valid number'],
                        'error', logger)
                sys.exit(1)

            cut_off[1] = cut_off[1].replace(' ', '')
            if cut_off[1] == 'auto':
                cut_off[1] = 0.95 * (params.rate / 2.)
            else:
                try:
                    cut_off[1] = float(cut_off[1])
                except Exception:
                    if comm.rank == 0:
                        print_and_log([
                            'Second value of cut off must either auto, or a valid a number'
                        ], 'error', logger)
                    sys.exit(1)

        chunk_size = params.getint('data', 'chunk_size')
        nb_chunks, _ = data_file_in.analyze(chunk_size)

        b, a = signal.butter(3, np.array(cut_off) / (params.rate / 2.), 'pass')
        all_chunks = numpy.arange(nb_chunks, dtype=numpy.int64)
        to_process = all_chunks[comm.rank::comm.size]
        loc_nb_chunks = len(to_process)
        N_total = params.nb_channels

        if comm.rank == 0:
            to_write = []
            if do_filtering:
                to_write += [
                    "Filtering the signal with a Butterworth filter in (%g, %g) Hz"
                    % (cut_off[0], cut_off[1])
                ]
            if do_remove_median:
                to_write += [
                    "Median over all channels is subtracted from each channel"
                ]

            print_and_log(to_write, 'default', logger)

            pbar = get_progressbar(loc_nb_chunks)

        for count, gidx in enumerate(to_process):

            local_chunk, t_offset = data_file_in.get_data(gidx, chunk_size)

            if do_filtering:
                for i in nodes:
                    local_chunk[:, i] = signal.filtfilt(b, a, local_chunk[:, i])
                    local_chunk[:, i] -= numpy.median(local_chunk[:, i])

            if do_remove_median:
                if not numpy.all(nodes == numpy.arange(N_total)):
                    global_median = numpy.median(
                        numpy.take(local_chunk, nodes, axis=1), 1)
                else:
                    global_median = numpy.median(local_chunk, 1)
                for i in nodes:
                    local_chunk[:, i] -= global_median

            if data_file_in != data_file_out and data_file_in:
                if data_file_in.is_stream:
                    t_offset -= data_file_in._times[
                        data_file_in._get_streams_index_by_time(t_offset)]
                else:
                    t_offset -= data_file_in.t_start

            data_file_out.set_data(t_offset, local_chunk)

            if comm.rank == 0:
                pbar.update(count)

        if comm.rank == 0:
            pbar.finish()

        comm.Barrier()

    ### Read wav data to samples ###
    fs, wav_data_BCM = wavfile.read(wavFullPathBCM)
    wav_data_BCM = wav_data_BCM/32767
    #wav_data_BCM = wav_data_BCM[0:20000]
    fs, wav_data_REF = wavfile.read(wavFullPathREF)
    wav_data_REF = wav_data_REF/32767
    #wav_data_REF = wav_data_REF[0:20000]

     ### Filters audio data ###
    low_cutoff = 60
    high_cutoff = 6000
    wn = [low_cutoff/(fs/2), high_cutoff/(fs/2)]
    b, a = dsp.butter(4, wn, 'band')
    wav_data_BCM = dsp.filtfilt(b,a,wav_data_BCM)
    ### Resamples audio to 12 kHz ###
    wav_data_BCM = dsp.resample(wav_data_BCM,int(wav_data_BCM.shape[0]/4))
    #plt.plot(wav_data_12kHz)
    wav_data_BCM = utils.adjustSNR(wav_data_BCM,60)

    low_cutoff = 20
    high_cutoff = 6000
    wn = [low_cutoff/(fs/2), high_cutoff/(fs/2)]
    b, a = dsp.butter(4, wn, 'band')
    wav_data_REF = dsp.filtfilt(b,a,wav_data_REF)
    ### Resamples audio to 12 kHz ###
    wav_data_REF = dsp.resample(wav_data_REF,int(wav_data_REF.shape[0]/4))
    #plt.plot(wav_data_12kHz)
    wav_data_REF = utils.adjustSNR(wav_data_REF,60)
Example #36
sig = np.zeros(nsamps)
n = (map(float, range(0, (nsamps))))

text_file = open("comb1.txt", "a")

# Synthesize bandlimited impulse train

limits = [24, 48]
for limit in limits:
    for j in xrange(1, limit + 1):
        for i in xrange(1, int(j + 1)):
            harm = np.cos(map(lambda x: x * w0T * float(i), n))
            sig = sig + harm
        print(i)
        sig = sig / np.max(sig)
    speech = signal.filtfilt([1], A, sig)
    #		plt.plot(n[0:256], sig[0:256])
    out = speech[4864:5121]
    #plt.plot(out[0:257])
    out = np.add(out, abs(np.min(out)))
    out = np.multiply(out, 1 / (np.max(out)))
    out = np.multiply(out, -1)
    out = out + 1
    out = np.multiply(out, 32767)
    plt.plot(out[0:257])
    out = np.int0(out)
    # symmetric
    text_file.write('static const uint16_t ')
    text_file.write(name)
    text_file.write(str(limit))
    text_file.write('Atk')
                # - f['SUMMED_OUTPUT'].value['isyn_e']
                # - f['SUMMED_OUTPUT'].value['isyn_i']
            ] + [
                f['SUMMED_OUTPUT'].value[name]
                for name in f['SUMMED_OUTPUT'].dtype.names
            ],
            [  # 'sum',
                # r'$i_\mathrm{pas}$', r'$i_\mathrm{cap}$',
                # r'$i_\mathrm{syn, E}$', r'$i_\mathrm{syn, I}$',
                # r'$i_\mathrm{syn, E}+i_\mathrm{syn, I}$', 'residual'
            ] + [name for name in f['SUMMED_OUTPUT'].dtype.names],
            [  # 'k',
                # 'r', 'b', 'c', 'm', 'g', 'y'
            ] + ['k'] +
            [colors[i] for i in range(PSET.populationParameters.size)]):
            ax.semilogx(ss.filtfilt(b, a, data, axis=-1)[:, tind:].var(axis=1),
                        y,
                        lw=2,
                        label=label,
                        color=color)
        f.close()
        ax.set_yticks(y)
        ax.set_yticklabels(yticklabels)
        ax.axis(ax.axis('tight'))
        ax.set_xlabel(r'variance (mV$^2$)')

        fig.savefig(os.path.join(PSET.OUTPUTPATH,
                                 'example_parallel_network_variance.pdf'),
                    bbox_inches='tight')
        plt.close(fig)
Example #38
def butter_highpass_filter(data, cutoff, fs, order=5):
    b, a = butter_highpass(cutoff, fs, order=order)
    y = signal.filtfilt(b, a, data)
    return y
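butter_highpass is not shown in this excerpt. A minimal sketch of what such a helper conventionally looks like (cutoff normalized to the Nyquist frequency) so the snippet runs; this is an assumption, not the original code.

from scipy.signal import butter

def butter_highpass(cutoff, fs, order=5):
    # Butterworth high-pass design; cutoff is given in Hz and normalized to Nyquist.
    nyq = 0.5 * fs
    b, a = butter(order, cutoff / nyq, btype='high')
    return b, a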
Example #39
def process_file(audio_file):
    #Read in the file upload
    #FOR EXAMPLE: Folder: Bone Transducer, fileName = BoneTransducer_T02_2
    #don't include .wav for obvious reasons
    fileName = 'brp_h_t06'
    sampFreq, snd = wav.read(fileName + '.wav')

    # PREPROCESSING DATA

    # ZERO-MEAN
    snd = (snd - snd.mean()) / snd.std()
    #snd = snd[50000:1450000] #used for dropouts

    # 60 HZ INTERFERENCE NOTCH FILTER
    f0 = 60.0  # Frequency to be removed from signal (Hz)
    Q = 40.0  # Quality factor
    w0 = f0 / (sampFreq / 2)  # Normalized Frequency
    # Design notch filter
    b, a = signal.iirnotch(w0, Q)
    snd = signal.filtfilt(b, a, snd)

    # INITIALIZE
    fig = plt.figure()

    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2,
                                                 ncols=2,
                                                 figsize=(15, 10))

    file_len = len(snd)

    # PLOT AUDIO WAVEFORM
    timeArray = np.arange(0, len(snd), 1)
    timeArray = timeArray / sampFreq
    print("Length of file: " + str(len(snd) / sampFreq) + "s")
    #snd_norm = np.amax(snd)
    #ax1.plot(timeArray, snd/snd_norm, color='k')
    ax1.plot(timeArray, snd, color='k')
    ax1.set_title('Audio Waveform')
    ax1.set_xlabel('Time (s)')
    ax1.set_ylabel('Amplitude')

    # CALCULATE FFT

    dft = fft(snd[:490000])  # calculate fourier transform
    N = len(dft)  # length of discrete fourier transform
    freqs = [i * sampFreq / N
             for i in range(N)]  # convert from dft frequencies to Hz
    #dft_norm = np.amax(np.abs(dft))
    #ax2.plot(freqs, np.abs(dft)/dft_norm, color='k') # change the indices to zoom in/out in frequency
    ax2.plot(freqs, np.abs(dft),
             color='k')  # change the indices to zoom in/out in frequency
    ax2.set_title('Freq Analysis')
    ax2.set_xlabel('Frequencies (Hz)')
    ax2.set_ylabel('DFT Coefficients')
    ax2.set_xlim([0, 1000])

    # CALCULATE SPECTROGRAM

    Pxx, freqs, bins, im = ax3.specgram(snd, NFFT=1024, Fs=sampFreq)
    ax3.set_title('Spectrogram')
    ax3.set_xlabel('Time')
    ax3.set_ylabel('Frequency')
    ax3.set_ylim([0, 1000])

    # CALCULATE PSD (based on Welch)
    f, Pxx_den = signal.welch(snd[:file_len], sampFreq,
                              nperseg=1024)  #PLACE INDEX HERE
    #Pxx_den_norm = np.amax(Pxx_den)
    #ax4.plot(f, Pxx_den/Pxx_den_norm, color='k')
    ax4.plot(f, Pxx_den, color='k')
    ax4.set_title('PSD (Welch)')
    ax4.set_xlabel('Frequency [Hz]')
    ax4.set_ylabel('PSD [V^2/Hz]')
    ax4.set_xlim([0, 1000])

    # POST-CALCULATION AESTHETIC

    plt.suptitle('Results for ' + fileName, fontsize=20)
    plt.tight_layout(rect=[0, 0.03, 1, 0.92])

    figure_name = fileName + '.png'
    fig.savefig(figure_name, bbox_inches='tight')
    print("Saved!")
    return figure_name
Example #40
    def filter_signal(self,
                      device_type="accelerometer",
                      filter_type="bandpass",
                      low_f=1,
                      high_f=10,
                      filter_order=1):
        """Filtering details: https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.filtfilt.html
        Arguments:
            -device_type: "accelerometer" or "ECG"
            -filter_type: filter type - "bandpass", "lowpass", or "highpass"
            -low_f: low-end cutoff frequency, required for lowpass and bandpass filters
            -high_f: high-end cutoff frequency, required for highpass and bandpass filters
            -filter_order: integer for filter order
        Adds columns to dataframe corresponding to each device. Filters all devices that are available.
        """

        if device_type == "accelerometer":

            self.accel_filter_low_f = low_f
            self.accel_filter_low_h = high_f

            for data_type in ["hip"]:

                # DATA SET UP

                if data_type == "hip" and self.hip_fname is not None:
                    data = np.array(
                        [self.df_hip["X"], self.df_hip["Y"], self.df_hip["Z"]])
                    original_df = self.df_hip
                    fs = self.hip_samplerate * .5  # Nyquist frequency

                # FILTERING TYPES
                if filter_type == "lowpass":
                    print("\nFiltering {} accelerometer data with {}Hz, "
                          "order {} lowpass filter.".format(
                              data_type, low_f, filter_order))
                    low = low_f / fs
                    b, a = butter(N=filter_order, Wn=low, btype="lowpass")
                    filtered_data = filtfilt(b, a, x=data)

                    self.filter_details = {
                        "Order": filter_order,
                        "Type": filter_type,
                        "F crit": [low_f]
                    }

                if filter_type == "highpass":
                    print("\nFiltering {} accelerometer data with {}Hz, "
                          "order {} highpass filter.".format(
                              data_type, high_f, filter_order))
                    high = high_f / fs
                    b, a = butter(N=filter_order, Wn=high, btype="highpass")
                    filtered_data = filtfilt(b, a, x=data)

                    self.filter_details = {
                        "Order": filter_order,
                        "Type": filter_type,
                        "F crit": [high_f]
                    }

                if filter_type == "bandpass":
                    print("\nFiltering {} accelerometer data with {}-{}Hz, "
                          "order {} bandpass filter.".format(
                              data_type, low_f, high_f, filter_order))

                    low = low_f / fs
                    high = high_f / fs
                    b, a = butter(N=filter_order,
                                  Wn=[low, high],
                                  btype="bandpass")
                    filtered_data = filtfilt(b, a, x=data)

                    self.filter_details = {
                        "Order": filter_order,
                        "Type": filter_type,
                        "F crit": [low_f, high_f]
                    }

                original_df["X_filt"] = filtered_data[0]
                original_df["Y_filt"] = filtered_data[1]
                original_df["Z_filt"] = filtered_data[2]

        print("\nFiltering complete.")
Example #41
    def __init__(
        self,
        inputfreq=40,
        system="NTSC",
        tape_format="VHS",
        dod_threshold_p=vhs_formats.DEFAULT_THRESHOLD_P_DDD,
        dod_threshold_a=None,
        dod_hysteresis=vhs_formats.DEFAULT_HYSTERESIS,
        track_phase=None,
    ):

        # First init the rf decoder normally.
        super(VHSRFDecode, self).__init__(
            inputfreq, system, decode_analog_audio=False, has_analog_audio=False
        )

        self.dod_threshold_p = dod_threshold_p
        self.dod_threshold_a = dod_threshold_a
        self.dod_hysteresis = dod_hysteresis

        if track_phase is None:
            self.track_phase = 0
            self.detect_track = True
            self.needs_detect = True
        elif track_phase == 0 or track_phase == 1:
            self.track_phase = track_phase
            self.detect_track = False
            self.needs_detect = False
        else:
            raise Exception("Track phase can only be 0, 1 or None")
        self.hsync_tolerance = 0.8

        self.field_number = 0
        self.last_raw_loc = None

        # Then we override the laserdisc parameters with VHS ones.
        if system == "PAL":
            # Give the decoder its own separate full copy to be on the safe side.
            self.SysParams = copy.deepcopy(vhs_formats.SysParams_PAL_VHS)
            self.DecoderParams = copy.deepcopy(vhs_formats.RFParams_PAL_VHS)
        elif system == "NTSC":
            if tape_format == "UMATIC":
                self.SysParams = copy.deepcopy(vhs_formats.SysParams_NTSC_UMATIC)
                self.DecoderParams = copy.deepcopy(vhs_formats.RFParams_NTSC_UMATIC)
            else:
                self.SysParams = copy.deepcopy(vhs_formats.SysParams_NTSC_VHS)
                self.DecoderParams = copy.deepcopy(vhs_formats.RFParams_NTSC_VHS)
        else:
            raise Exception("Unknown video system! ", system)

        # Lastly we re-create the filters with the new parameters.
        self.computevideofilters()

        cc = self.DecoderParams["color_under_carrier"] / 1000000

        DP = self.DecoderParams

        self.Filters["RFVideoRaw"] = lddu.filtfft(
            sps.butter(
                DP["video_bpf_order"],
                [
                    DP["video_bpf_low"] / self.freq_hz_half,
                    DP["video_bpf_high"] / self.freq_hz_half,
                ],
                btype="bandpass",
            ),
            self.blocklen,
        )

        self.Filters["EnvLowPass"] = sps.butter(
            8, [1.0 / self.freq_half], btype="lowpass"
        )

        # More advanced rf filter - only used for NTSC for now.
        if system == "NTSC":
            y_fm = sps.butter(
                DP["video_bpf_order"],
                [
                    DP["video_bpf_low"] / self.freq_hz_half,
                    DP["video_bpf_high"] / self.freq_hz_half,
                ],
                btype="bandpass",
            )
            y_fm = lddu.filtfft(y_fm, self.blocklen)

            y_fm_lowpass = lddu.filtfft(
                sps.butter(
                    DP['video_lpf_extra_order'], [DP["video_lpf_extra"] / self.freq_hz_half], btype="lowpass"
                ),
                self.blocklen,
            )

            y_fm_chroma_trap = lddu.filtfft(
                sps.butter(
                    1,
                    [(cc * 0.9) / self.freq_half, (cc * 1.1) / self.freq_half],
                    btype="bandstop",
                ),
                self.blocklen,
            )

            y_fm_filter = (
                y_fm * y_fm_lowpass * y_fm_chroma_trap * self.Filters["hilbert"]
            )

            self.Filters["RFVideo"] = y_fm_filter
        else:
            y_fm_lowpass = lddu.filtfft(
                sps.butter(
                    DP['video_lpf_extra_order'], [DP["video_lpf_extra"] / self.freq_hz_half], btype="lowpass"
                ),
                self.blocklen,
            )
            y_fm_highpass = lddu.filtfft(
                sps.butter(
                    DP['video_hpf_extra_order'], [DP["video_hpf_extra"] / self.freq_hz_half], btype="highpass"
                ),
                self.blocklen,
            )
            self.Filters["RFVideo"] = self.Filters["RFVideo"] * y_fm_lowpass * y_fm_highpass

        # Video (luma) de-emphasis
        # Not sure about the math of this but, by using a high-shelf filter and then
        # swapping b and a we get a low-shelf filter that goes from 0 to -14 dB rather
        # than from 14 to 0 which the high shelf function gives.
        da, db = gen_high_shelf(
            DP["deemph_corner"] / 1.0e6, DP["deemph_gain"], 1 / 2, inputfreq
        )
        w, h = sps.freqz(db, da)

        self.Filters["Fdeemp"] = lddu.filtfft((db, da), self.blocklen)
        self.Filters["FVideo"] = self.Filters["Fvideo_lpf"] * self.Filters["Fdeemp"]
        SF = self.Filters
        SF["FVideo05"] = SF["Fvideo_lpf"] * SF["Fdeemp"] * SF["F05"]

        # Filter to pick out color-under chroma component.
        # filter at about twice the carrier. (This seems to be similar to what VCRs do)
        chroma_lowpass = sps.butter(
            4, [0.05 / self.freq_half, 1.4 / self.freq_half], btype="bandpass"
        )  # sps.butter(4, [1.2/self.freq_half], btype='lowpass')
        self.Filters["FVideoBurst"] = chroma_lowpass

        # The following filters are for post-TBC:
        # The output sample rate is at approx 4fsc
        fsc_mhz = self.SysParams["fsc_mhz"]
        out_sample_rate_mhz = fsc_mhz * 4
        out_frequency_half = out_sample_rate_mhz / 2
        het_freq = fsc_mhz + cc
        fieldlen = self.SysParams["outlinelen"] * max(self.SysParams["field_lines"])

        # Final band-pass filter for chroma output.
        # Mostly to filter out the higher-frequency wave that results from signal mixing.
        # Needs tweaking.
        chroma_bandpass_final = sps.butter(
            2,
            [
                (fsc_mhz - 0.64) / out_frequency_half,
                (fsc_mhz + 0.24) / out_frequency_half,
            ],
            btype="bandpass",
        )
        self.Filters["FChromaFinal"] = chroma_bandpass_final

        chroma_burst_check = sps.butter(
            2,
            [
                (fsc_mhz - 0.14) / out_frequency_half,
                (fsc_mhz + 0.04) / out_frequency_half,
            ],
            btype="bandpass",
        )
        self.Filters["FChromaBurstCheck"] = chroma_burst_check

        # Bandpass filter to select heterodyne frequency from the mixed fsc and color carrier signal
        het_filter = sps.butter(
            2,
            [
                (het_freq - 0.001) / out_frequency_half,
                (het_freq + 0.001) / out_frequency_half,
            ],
            btype="bandpass",
        )
        samples = np.arange(fieldlen)

        # As this is done on the tbced signal, we need the sampling frequency of that,
        # which is 4fsc for NTSC and approx. 4 fsc for PAL.
        # TODO: Correct frequency for pal?
        cc_wave_scale = cc / out_sample_rate_mhz
        self.cc_ratio = cc_wave_scale
        # 0 phase downconverted color under carrier wave
        self.cc_wave = np.sin(2 * np.pi * cc_wave_scale * samples)
        # +90 deg and so on phase wave for track2 phase rotation
        cc_wave_90 = np.sin((2 * np.pi * cc_wave_scale * samples) + (np.pi / 2))  #
        cc_wave_180 = np.sin((2 * np.pi * cc_wave_scale * samples) + np.pi)
        cc_wave_270 = np.sin(
            (2 * np.pi * cc_wave_scale * samples) + np.pi + (np.pi / 2)
        )

        # Standard frequency color carrier wave.
        self.fsc_wave = utils.gen_wave_at_frequency(
            fsc_mhz, out_sample_rate_mhz, fieldlen
        )
        self.fsc_cos_wave = utils.gen_wave_at_frequency(
            fsc_mhz, out_sample_rate_mhz, fieldlen, np.cos
        )

        # Heterodyne wave
        # We combine the color carrier with a wave with a frequency of the
        # subcarrier + the downconverted chroma carrier to get the original
        # color wave back.
        self.chroma_heterodyne = {}

        self.chroma_heterodyne[0] = sps.filtfilt(
            het_filter[0], het_filter[1], self.cc_wave * self.fsc_wave
        )
        self.chroma_heterodyne[1] = sps.filtfilt(
            het_filter[0], het_filter[1], cc_wave_90 * self.fsc_wave
        )
        self.chroma_heterodyne[2] = sps.filtfilt(
            het_filter[0], het_filter[1], cc_wave_180 * self.fsc_wave
        )
        self.chroma_heterodyne[3] = sps.filtfilt(
            het_filter[0], het_filter[1], cc_wave_270 * self.fsc_wave
        )
Example #42
def filter_simple(data, filter_coeffs):
    fb, fa = filter_coeffs
    return sps.filtfilt(fb, fa, data, padlen=150)
Example #43
def log_envelope(x, fs, filt_len=100):
    log_env = 10 * np.log10(10.**-4.5 + np.power(x.flatten()[:], 2.0))
    w_n = np.hanning(filt_len)
    w_n /= w_n.sum()
    return sig.filtfilt(w_n, np.ones(1), log_env)
def butter_lowpass_filter(data,cutoff,fs,order):
    nyq = 0.5 * fs          # Nyquist frequency (was an undefined name in this excerpt)
    normal_cutoff = cutoff / nyq
    # Get the filter coefficients
    b, a = butter(order, normal_cutoff, btype='low', analog=False)
    y = filtfilt(b, a, data,axis=0)
    return y
Example #45
import soundfile as sf
from scipy import signal
import numpy as np

#read .wav file
input_signal,fs = sf.read('../data/Sound_Noise.wav')

#sampling frequency of input signal
sampl_freq = fs

#order of the filter
order = 4

#cutoff freq 4kHz
cutoff_freq = 4000

#digital frequency
Wn = 2*cutoff_freq/sampl_freq

#b and a are numerator and denominator polynomials respectively
b,a = signal.butter(order,Wn,'low')
print('a:',a)
print('b:',b)

#filter the input signal with butterworth filter 
output_signal = signal.filtfilt(b,a,input_signal)

#write output signal into .wav file
sf.write('../data/Sound_With_ReducedNoise.wav',output_signal,fs)
Example #46
def lowpass(s, f, order=2, fs=100.0):
    b, a = signal.butter(order, f / (fs/2))
    return signal.filtfilt(b, a, s)
def draw_lineplot(ax,
                  data,
                  dt=0.1,
                  T=(0, 200),
                  scaling_factor=1.,
                  vlimround=None,
                  label='local',
                  scalebar=True,
                  scalebarpos=0,
                  scalebarbasis='log2',
                  unit='mV',
                  ylabels=True,
                  color='r',
                  ztransform=True,
                  filter=False,
                  filterargs=dict(N=2, Wn=0.02, btype='lowpass')):
    ''' draw some nice lines'''

    tvec = np.arange(data.shape[1]) * dt
    try:
        tinds = (tvec >= T[0]) & (tvec <= T[1])
    except TypeError:
        print(data.shape, T)
        raise Exception

    # apply temporal filter
    if filter:
        b, a = ss.butter(**filterargs)
        data = ss.filtfilt(b, a, data, axis=-1)

    # subtract mean in each channel
    if ztransform:
        dataT = data.T - data.mean(axis=1)
        data = dataT.T

    zvec = -np.arange(data.shape[0])
    vlim = abs(data[:, tinds]).max()
    if vlimround is None:
        vlimround = 2.**np.round(np.log2(vlim)) / scaling_factor
    else:
        pass

    yticklabels = []
    yticks = []

    for i, z in enumerate(zvec):
        if i == 0:
            ax.plot(tvec[tinds],
                    data[i][tinds] / vlimround + z,
                    lw=1,
                    rasterized=False,
                    label=label,
                    clip_on=False,
                    color=color)
        else:
            ax.plot(tvec[tinds],
                    data[i][tinds] / vlimround + z,
                    lw=1,
                    rasterized=False,
                    clip_on=False,
                    color=color)
        yticklabels.append('ch. %i' % (i + 1))
        yticks.append(z)

    if scalebar:
        if scalebarbasis == 'log2':
            ax.plot([tvec[tinds][-1], tvec[tinds][-1]],
                    [-1 - scalebarpos, -2 - scalebarpos],
                    lw=2,
                    color=color,
                    clip_on=False)
            ax.text(tvec[tinds][-1] + np.diff(T) * 0.03,
                    -1.5 - scalebarpos,
                    '$2^{' + '{}'.format(int(np.log2(vlimround))) + '}$ ' +
                    '{0}'.format(unit),
                    color=color,
                    rotation='vertical',
                    va='center')
        elif scalebarbasis == 'log10':
            # recompute scale bar size to show it on scientific format
            vlimround10 = 10**np.round(np.log10(vlimround))
            if vlimround10 >= 1:
                vlimround10 = int(np.round(vlimround10))
            rescale = vlimround10 / vlimround
            ax.plot([tvec[tinds][-1], tvec[tinds][-1]],
                    np.array([0.5, -0.5]) * rescale - 1.5 - scalebarpos,
                    lw=2,
                    color=color,
                    clip_on=False)
            ax.text(tvec[tinds][-1] + np.diff(T) * 0.03,
                    -1.5 - scalebarpos,
                    '{0} '.format(vlimround10) + '{0}'.format(unit),
                    color=color,
                    rotation='vertical',
                    va='center')

    ax.axis(ax.axis('tight'))
    ax.yaxis.set_ticks(yticks)
    if ylabels:
        ax.yaxis.set_ticklabels(yticklabels)
        ax.set_ylabel('channel', labelpad=0.1)
    else:
        ax.yaxis.set_ticklabels([])
    remove_axis_junk(ax, lines=['right', 'top'])
    ax.set_xlabel(r'time (ms)', labelpad=0.1)

    return vlimround
Example #48
def hp_flt_applied(smp_data, fs=2000000, passfreq=500, flt_order=3):
    b, a = butter_hp_flt(fs, passfreq, flt_order)
    w, h = signal.freqz(b, a, worN=int(fs / 2))
    p_paras = [passfreq, flt_order, w, abs(h)]
    p_flt_data = signal.filtfilt(b, a, smp_data)
    return p_flt_data
Example #49
import pytest

import numpy as np
from numpy.testing import assert_array_almost_equal
from scipy.signal import filtfilt

from pylops.utils import dottest
from pylops.utils.wavelets import ricker
from pylops.avo.prestack import PrestackLinearModelling, PrestackWaveletModelling


# Create medium parameters for multiple contrasts
nt0 = 201
dt0 = 0.004
t0 = np.arange(nt0)*dt0
vp = 1200 + np.arange(nt0) + filtfilt(np.ones(5)/5., 1, np.random.normal(0, 80, nt0))
vs = 600 + vp/2 + filtfilt(np.ones(5)/5., 1, np.random.normal(0, 20, nt0))
rho = 1000 + vp + filtfilt(np.ones(5)/5., 1, np.random.normal(0, 30, nt0))
m = np.stack((np.log(vp), np.log(vs), np.log(rho)), axis=1)

# Angles
ntheta = 7
thetamin, thetamax = 0, 40
theta = np.linspace(thetamin, thetamax, ntheta)

# Wavelet
ntwav = 41
wav, twav, wavc = ricker(t0[:ntwav // 2 + 1], 20)

# Shifted wavelet
wavoff = 10
Example #50
def low_pass_fliter(noisy_signal):
    b, a = signal.butter(3, .9, btype='lowpass', analog=False)
    low_passed = signal.filtfilt(b, a, noisy_signal)
    return low_passed
Example #51
 def highpassfilter(self, data, cutoff, fs, order=5):
     b, a = self.butter_highpass(cutoff, fs, order=order)
     y = signal.filtfilt(b, a, data)
     return y
Example #52
 def glove_filter(self, order=4, highcut=2):
     nyq = 0.5 * self.sRate['glove']
     high = highcut / nyq
     b, a = butter(N=order, Wn=high, btype='lowpass')
     self.glove = filtfilt(b=b, a=a, x=self.glove, axis=0)
Example #53
def butter_lowpass_filtfilt(data, cutoff, fs, order=5):
    b, a = butter_lowpass(cutoff, fs, order=order)
    y = filtfilt(b, a, data)
    return y
Example #54
 def lowpass_filter(self, data, cutoff, fs, order):
     normal_cutoff = cutoff / (fs / 2)
     b, a = butter(order, normal_cutoff, btype='low', output='ba')
     y = filtfilt(b, a, data)
     return np.asarray(y, dtype=np.int16)
Example #55
def get_VTC_from_file(
    subject,
    run,
    files_list,
    cpt_blocs=[2, 3, 4, 5, 6, 7],
    inout_bounds=[25, 75],
    filt_cutoff=0.05,
    filt_type="gaussian",
):
    """Short summary.

    Parameters
    ----------
    subject : type
        Description of parameter `subject`.
    run : type
        Description of parameter `run`.
    files_list : type
        Description of parameter `files_list`.
    cpt_blocs : type
        Description of parameter `cpt_blocs`.
    inout_bounds : type
        Description of parameter `inout_bounds`.

    Returns
    -------
    type
        Description of returned object.

    """
    # Find the logfiles belonging to a subject
    subject_logfiles = []
    for bloc in cpt_blocs:
        subject_logfiles.append(
            op.join(LOGS_DIR, find_logfile(subject, bloc, files_list)))

    # Load and clean RT arrays
    RT_arrays = []
    RT_to_VTC = []
    for idx_file, logfile in enumerate(subject_logfiles):
        data = loadmat(logfile)
        df_response = pd.DataFrame(data["response"])

        # Replace commission errors by 0
        df_clean, perf_dict = clean_comerr(df_response)
        RT_raw = np.asarray(df_clean.loc[:, 4])
        RT_raw = np.array([x if x != 0 else np.nan
                           for x in RT_raw])  # zeros to nans
        # RT_interpolated = interpolate_RT(RT_raw)
        RT_arrays.append(RT_raw)
        if int(cpt_blocs[idx_file]) == int(run):
            RT_to_VTC = RT_raw
            performance_dict = perf_dict.copy()
            df_response_out = df_response

    # Obtain mean and std across runs
    allruns_RT_array = np.concatenate(RT_arrays)
    subj_mean = np.nanmean(allruns_RT_array)
    subj_std = np.nanstd(allruns_RT_array)

    # New VTC
    VTC_raw = compute_VTC(RT_to_VTC, subj_mean, subj_std)
    # VTC_thresholded = threshold_VTC(VTC_raw, thresh=3)  # Compute VTC remove variability values above threshold
    VTC_raw[np.isnan(VTC_raw)] = 0
    VTC_interpolated = interpolate_RT(VTC_raw)
    if filt_type == "gaussian":
        filt = signal.gaussian(len(VTC_interpolated), fwhm2sigma(9))
        VTC_filtered = np.convolve(VTC_interpolated, filt, "same") / sum(filt)
    elif filt_type == "butterworth":
        b, a = signal.butter(3, filt_cutoff)  # (filt_order,filt_cutoff)
        VTC_filtered = signal.filtfilt(b, a, VTC_interpolated)

    IN_mask = np.ma.masked_where(
        VTC_filtered >= np.quantile(VTC_filtered, inout_bounds[0] / 100),
        VTC_filtered)
    OUT_mask = np.ma.masked_where(
        VTC_filtered < np.quantile(VTC_filtered, inout_bounds[1] / 100),
        VTC_filtered)
    IN_idx = np.where(IN_mask.mask == False)[0]
    OUT_idx = np.where(OUT_mask.mask == False)[0]

    return (
        IN_idx,
        OUT_idx,
        VTC_raw,
        VTC_filtered,
        IN_mask,
        OUT_mask,
        performance_dict,
        df_response_out,
        RT_to_VTC,
    )
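Two of the helpers called above, fwhm2sigma and compute_VTC, are not included in the excerpt. Minimal sketches, assuming the standard Gaussian FWHM-to-sigma conversion and the usual gradCPT definition of the VTC as the absolute z-scored reaction time:

import numpy as np

def fwhm2sigma(fwhm):
    # convert a Gaussian full width at half maximum to a standard deviation
    return fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))

def compute_VTC(RT, mean, std):
    # variance time course: absolute z-score of each reaction time
    return np.abs((np.asarray(RT) - mean) / std)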
Example #56
0
def performPrep(eeg, refChan, srate, linenoise, referenceType='robust'):
    dim = np.shape(eeg)
    if refChan != 0:
        eeg_chans = np.setdiff1d(
            range(0, dim[0]),
            refChan - 1)  #remove the reference channel from the eeg channels
        eeg = eeg[eeg_chans, :]

    #finding bad channels

    #finding channels with NaNs or constant values for long periods of time
    org_dim = np.shape(eeg)

    originalChannels = np.arange(org_dim[0])
    channelsInterpolate = originalChannels
    nanChannelMask = [False] * org_dim[0]
    noSignalChannelMask = [False] * org_dim[0]

    for i in range(0, org_dim[0]):
        nanChannelMask[i] = np.sum(np.isnan(eeg[i, :])) > 0
    for i in range(0, org_dim[0]):
        noSignalChannelMask[i] = robust.mad(eeg[i, :]) < 10**(-10) or np.std(
            eeg[i, :]) < 10**(-10)
    badChannelsfromNans = channelsInterpolate[nanChannelMask]
    badChannelsfromNoData = channelsInterpolate[noSignalChannelMask]
    # drop the NaN/flat channels in a single step so row indices do not
    # shift while deleting
    badChannelMask = np.logical_or(nanChannelMask, noSignalChannelMask)
    eeg = eeg[~badChannelMask, :]

    channelsInterpolate = np.setdiff1d(
        channelsInterpolate,
        np.union1d(
            badChannelsfromNans,
            badChannelsfromNoData))  #channels to be used for interpolation
    evaluationChannels = channelsInterpolate
    new_dim = np.shape(eeg)

    # find channels that have abnormally high or low amplitude
    robustchanneldeviation = np.zeros(org_dim[0])
    badChannelFromDeviationMask = [False] * (new_dim[0])
    channeldeviation = np.zeros(new_dim[0])
    for i in range(0, new_dim[0]):
        channeldeviation[i] = 0.7413 * iqr(eeg[i, :])

    channeldeviationSD = 0.7413 * iqr(channeldeviation)
    channeldeviationMedian = np.nanmedian(channeldeviation)
    robustchanneldeviation[evaluationChannels] = np.divide(
        np.subtract(channeldeviation, channeldeviationMedian),
        channeldeviationSD)
    for i in range(0, new_dim[0]):
        badChannelFromDeviationMask[i] = abs(
            robustchanneldeviation[i]) > 5 or np.isnan(
                robustchanneldeviation[i])

    badChannelsfromDeviation = evaluationChannels[badChannelFromDeviationMask]

    #finding channels with high frequency noise
    if srate > 100:
        eeg = np.transpose(eeg)
        dim = np.shape(eeg)
        X = np.zeros((dim[0], dim[1]))
        B = filter_design(100,
                          A=np.array([1, 1, 0, 0]),
                          F=np.array([0, .36, 0.4, 1]),
                          srate=250)
        for i in range(0, dim[1]):
            X[:, i] = signal.filtfilt(B, 1, eeg[:, i])

        noisiness = np.divide(robust.mad(np.subtract(eeg, X)), robust.mad(X))
        noisinessmedian = np.nanmedian(noisiness)
        noiseSD = np.median(
            np.absolute(np.subtract(noisiness, np.median(noisiness)))) * 1.4826
        zscoreHFNoise = np.divide(np.subtract(noisiness, noisinessmedian),
                                  noiseSD)
        HFnoisemask = [False] * new_dim[0]
        for i in range(0, new_dim[0]):
            HFnoisemask[i] = zscoreHFNoise[i] > 5 or np.isnan(zscoreHFNoise[i])
    else:
        X = eeg
        noisinessmedian = 0
        noisinessSD = 1
        zscoreHFNoise = np.zeros(new_dim[0])
        HFnoisemask = [False] * new_dim[0]
    badChannelsfromHFnoise = evaluationChannels[HFnoisemask]
    #finding channels by correlation
    correlationSeconds = 1  # default value
    correlationFrames = correlationSeconds * srate
    correlationWindow = np.arange(correlationFrames)
    correlationOffsets = np.arange(1, dim[0] - correlationFrames,
                                   correlationFrames)
    Wcorrelation = len(correlationOffsets)
    maximumCorrelations = np.ones((org_dim[0], Wcorrelation))
    drop_out = np.zeros((dim[1], Wcorrelation))
    channelCorrelation = np.ones((Wcorrelation, dim[1]))
    noiselevels = np.zeros((Wcorrelation, dim[1]))
    channelDeviations = np.zeros((Wcorrelation, dim[1]))
    drop = np.zeros((Wcorrelation, dim[1]))
    n = len(correlationWindow)
    XWin = np.reshape(np.transpose(X[0:n * Wcorrelation, :]),
                      (dim[1], n, Wcorrelation),
                      order='F')
    dataWin = np.reshape(np.transpose(eeg[0:n * Wcorrelation, :]),
                         (dim[1], n, Wcorrelation),
                         order='F')
    for k in range(0, Wcorrelation):
        eegportion = np.transpose(np.squeeze(XWin[:, :, k]))
        dataportion = np.transpose(np.squeeze(dataWin[:, :, k]))
        windowCorrelation = np.corrcoef(np.transpose(eegportion))
        abs_corr = np.abs(
            np.subtract(windowCorrelation,
                        np.diag(np.diag(windowCorrelation))))
        channelCorrelation[k, :] = np.quantile(
            abs_corr, 0.98, axis=0)  # 98th percentile of absolute off-diagonal correlations
        noiselevels[k, :] = np.divide(
            robust.mad(np.subtract(dataportion, eegportion)),
            robust.mad(eegportion))
        channelDeviations[k, :] = 0.7413 * iqr(dataportion, axis=0)

    for i in range(0, Wcorrelation):
        for j in range(0, dim[1]):
            drop[i, j] = int(
                np.isnan(channelCorrelation[i, j])
                or np.isnan(noiselevels[i, j]))
            if drop[i, j] == 1:
                channelDeviations[i, j] = 0
                noiselevels[i, j] = 0

    maximumCorrelations[evaluationChannels, :] = np.transpose(
        channelCorrelation)
    drop_out[:] = np.transpose(drop)
    noiselevels_out = np.transpose(noiselevels)
    channelDeviations_out = np.transpose(channelDeviations)
    thresholdedCorrelations = maximumCorrelations < 0.4
    thresholdedCorrelations = thresholdedCorrelations.astype(int)
    fractionBadCorrelationWindows = np.mean(thresholdedCorrelations, axis=1)
    fractionBadDropOutWindows = np.mean(drop_out, axis=1)

    badChannelsFromCorrelation = np.where(fractionBadCorrelationWindows > 0.01)
    badChannelsFromCorrelation_out = badChannelsFromCorrelation[:]
    badChannelsFromDropOuts = np.where(fractionBadDropOutWindows > 0.01)
    badChannelsFromDropOuts_out = badChannelsFromDropOuts[:]
    #medianMaxCorrelation = np.median(maximumCorrelations, 2);

    badChannelsfromSNR = np.union1d(badChannelsFromCorrelation_out,
                                    badChannelsfromHFnoise)
    noisyChannels = np.union1d(
        np.union1d(
            np.union1d(
                badChannelsfromDeviation,
                np.union1d(badChannelsFromCorrelation_out,
                           badChannelsFromDropOuts_out)), badChannelsfromSNR),
        np.union1d(badChannelsfromNans, badChannelsfromNoData))
    print(noisyChannels)
    return noisyChannels
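The filter_design helper used for the high-frequency-noise check is not shown in the excerpt. A minimal sketch, assuming it builds an FIR filter from the amplitude/frequency grid via window-based frequency sampling (the original PREP implementation may differ):

from scipy import signal

def filter_design(N, A, F, srate=None):
    # F is already normalized to the Nyquist frequency (0..1), so `srate` is
    # accepted only to mirror the call above; returns N + 1 FIR coefficients
    return signal.firwin2(N + 1, F, A)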
Example #57
0
def butter_lowpass_filter(_data, cutoff, fs, order=5):
    _b, _a = butter_lowpass(cutoff, fs, order=order)
    _y = filtfilt(_b, _a, _data)
    return _y
Example #58
0
def main():
    xIMUdata = xIMU.xIMUdataClass(filePath, 'InertialMagneticSampleRate',
                                  1 / samplePeriod)
    time = xIMUdata.CalInertialAndMagneticData.Time
    gyrX = xIMUdata.CalInertialAndMagneticData.gyroscope[:, 0]
    gyrY = xIMUdata.CalInertialAndMagneticData.gyroscope[:, 1]
    gyrZ = xIMUdata.CalInertialAndMagneticData.gyroscope[:, 2]
    accX = xIMUdata.CalInertialAndMagneticData.accelerometer[:, 0]
    accY = xIMUdata.CalInertialAndMagneticData.accelerometer[:, 1]
    accZ = xIMUdata.CalInertialAndMagneticData.accelerometer[:, 2]

    indexSel = np.all([time >= startTime, time <= stopTime], axis=0)
    time = time[indexSel]
    gyrX = gyrX[indexSel]
    gyrY = gyrY[indexSel]
    gyrZ = gyrZ[indexSel]
    accX = accX[indexSel]
    accY = accY[indexSel]
    accZ = accZ[indexSel]

    # Compute accelerometer magnitude
    acc_mag = np.sqrt(accX * accX + accY * accY + accZ * accZ)

    # HP filter accelerometer data
    filtCutOff = 0.001
    b, a = signal.butter(1, (2 * filtCutOff) / (1 / samplePeriod), 'highpass')
    acc_magFilt = signal.filtfilt(b,
                                  a,
                                  acc_mag,
                                  padtype='odd',
                                  padlen=3 * (max(len(b), len(a)) - 1))

    # Compute absolute value
    acc_magFilt = np.abs(acc_magFilt)

    # LP filter accelerometer data
    filtCutOff = 5
    b, a = signal.butter(1, (2 * filtCutOff) / (1 / samplePeriod), 'lowpass')
    acc_magFilt = signal.filtfilt(b,
                                  a,
                                  acc_magFilt,
                                  padtype='odd',
                                  padlen=3 * (max(len(b), len(a)) - 1))

    # Threshold detection
    stationary = acc_magFilt < 0.05

    fig = plt.figure(figsize=(10, 5))
    ax1 = fig.add_subplot(2, 1, 1)
    ax2 = fig.add_subplot(2, 1, 2)
    ax1.plot(time, gyrX, c='r', linewidth=0.5)
    ax1.plot(time, gyrY, c='g', linewidth=0.5)
    ax1.plot(time, gyrZ, c='b', linewidth=0.5)
    ax1.set_title("gyroscope")
    ax1.set_xlabel("time (s)")
    ax1.set_ylabel("angular velocity (degrees/s)")
    ax1.legend(["x", "y", "z"])
    ax2.plot(time, accX, c='r', linewidth=0.5)
    ax2.plot(time, accY, c='g', linewidth=0.5)
    ax2.plot(time, accZ, c='b', linewidth=0.5)
    ax2.plot(time, acc_magFilt, c='k', linestyle=":", linewidth=1)
    ax2.plot(time, stationary, c='k')
    ax2.set_title("accelerometer")
    ax2.set_xlabel("time (s)")
    ax2.set_ylabel("acceleration (g)")
    ax2.legend(["x", "y", "z"])
    plt.show(block=False)

    # Compute orientation
    quat = np.zeros((time.size, 4), dtype=np.float64)

    # initial convergence
    initPeriod = 2
    indexSel = time <= time[0] + initPeriod
    gyr = np.zeros(3, dtype=np.float64)
    acc = np.array([
        np.mean(accX[indexSel]),
        np.mean(accY[indexSel]),
        np.mean(accZ[indexSel])
    ])
    mahony = ahrs.filters.Mahony(Kp=1,
                                 Ki=0,
                                 KpInit=1,
                                 frequency=1 / samplePeriod)
    q = np.array([1.0, 0.0, 0.0, 0.0], dtype=np.float64)
    for i in range(0, 2000):
        q = mahony.updateIMU(q, gyr=gyr, acc=acc)

    # For all data
    for t in range(0, time.size):
        if (stationary[t]):
            mahony.Kp = 0.5
        else:
            mahony.Kp = 0
        gyr = np.array([gyrX[t], gyrY[t], gyrZ[t]]) * np.pi / 180
        acc = np.array([accX[t], accY[t], accZ[t]])
        q = mahony.updateIMU(q, gyr=gyr, acc=acc)  # propagate the orientation estimate
        quat[t, :] = q

    # -------------------------------------------------------------------------
    # Compute translational accelerations

    # Rotate body accelerations to Earth frame
    acc = []
    for x, y, z, q in zip(accX, accY, accZ, quat):
        acc.append(q_rot(np.array([x, y, z]), q_conj(q)))
    acc = np.array(acc)
    acc = acc - np.array([0, 0, 1])
    acc = acc * 9.81

    # Compute translational velocities
    # acc[:,2] = acc[:,2] - 9.81

    # acc_offset = np.zeros(3)
    vel = np.zeros(acc.shape)
    for t in range(1, vel.shape[0]):
        vel[t, :] = vel[t - 1, :] + acc[t, :] * samplePeriod
        if stationary[t] == True:
            vel[t, :] = np.zeros(3)

    # Compute integral drift during non-stationary periods
    velDrift = np.zeros(vel.shape)
    stationaryStart = np.where(np.diff(stationary.astype(int)) == -1)[0] + 1
    stationaryEnd = np.where(np.diff(stationary.astype(int)) == 1)[0] + 1
    for i in range(0, stationaryEnd.shape[0]):
        driftRate = vel[stationaryEnd[i] - 1, :] / (stationaryEnd[i] -
                                                    stationaryStart[i])
        enum = np.arange(0, stationaryEnd[i] - stationaryStart[i])
        drift = np.array(
            [enum * driftRate[0], enum * driftRate[1], enum * driftRate[2]]).T
        velDrift[stationaryStart[i]:stationaryEnd[i], :] = drift

    # Remove integral drift
    vel = vel - velDrift
    fig = plt.figure(figsize=(10, 5))
    plt.plot(time, vel[:, 0], c='r', linewidth=0.5)
    plt.plot(time, vel[:, 1], c='g', linewidth=0.5)
    plt.plot(time, vel[:, 2], c='b', linewidth=0.5)
    plt.legend(["x", "y", "z"])
    plt.title("velocity")
    plt.xlabel("time (s)")
    plt.ylabel("velocity (m/s)")
    plt.show(block=False)

    # -------------------------------------------------------------------------
    # Compute translational position
    pos = np.zeros(vel.shape)
    for t in range(1, pos.shape[0]):
        pos[t, :] = pos[t - 1, :] + vel[t, :] * samplePeriod

    fig = plt.figure(figsize=(10, 5))
    plt.plot(time, pos[:, 0], c='r', linewidth=0.5)
    plt.plot(time, pos[:, 1], c='g', linewidth=0.5)
    plt.plot(time, pos[:, 2], c='b', linewidth=0.5)
    plt.legend(["x", "y", "z"])
    plt.title("position")
    plt.xlabel("time (s)")
    plt.ylabel("position (m)")
    plt.show(block=False)

    # -------------------------------------------------------------------------
    # Plot 3D foot trajectory

    posPlot = pos
    quatPlot = quat

    extraTime = 20
    onesVector = np.ones(int(extraTime * (1 / samplePeriod)))

    # Create 6 DOF animation
    fig = plt.figure(figsize=(7, 7))
    ax = fig.add_subplot(111, projection='3d')  # Axes3D object
    ax.plot(posPlot[:, 0], posPlot[:, 1], posPlot[:, 2])
    min_, max_ = np.min(np.min(posPlot,
                               axis=0)), np.max(np.max(posPlot, axis=0))
    ax.set_xlim(min_, max_)
    ax.set_ylim(min_, max_)
    ax.set_zlim(min_, max_)
    ax.set_title("trajectory")
    ax.set_xlabel("x position (m)")
    ax.set_ylabel("y position (m)")
    ax.set_zlabel("z position (m)")
    plt.show(block=False)

    plt.show()
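The quaternion helpers q_conj and q_rot used when rotating the body-frame accelerations are not shown in the excerpt. Minimal sketches, assuming [w, x, y, z] ordering and a Hamilton product:

import numpy as np

def q_conj(q):
    # conjugate of a quaternion [w, x, y, z]
    return np.array([q[0], -q[1], -q[2], -q[3]])

def q_mult(p, q):
    # Hamilton product of two quaternions
    return np.array([
        p[0]*q[0] - p[1]*q[1] - p[2]*q[2] - p[3]*q[3],
        p[0]*q[1] + p[1]*q[0] + p[2]*q[3] - p[3]*q[2],
        p[0]*q[2] - p[1]*q[3] + p[2]*q[0] + p[3]*q[1],
        p[0]*q[3] + p[1]*q[2] - p[2]*q[1] + p[3]*q[0],
    ])

def q_rot(v, q):
    # rotate vector v by unit quaternion q: q * [0, v] * conj(q)
    p = np.concatenate(([0.0], v))
    return q_mult(q_mult(q, p), q_conj(q))[1:]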
Example #59
0
 def _butter_bandpass_filter(self, signal, rate, low, hi, order=6):
     """butterwoth bandpass filter"""
     b, a = self._butter_bandpass(low, hi, rate, order=order)
     y = filtfilt(b, a, signal)
     return y
Example #60
0
def filter(dat, fc, del_t):
    # zero-phase 5th-order Butterworth low pass; fc is the cutoff in Hz and
    # del_t is the sample spacing in seconds (so fs = 1 / del_t)
    b, a = signal.butter(5, fc, 'low', analog=False, fs=1/del_t)
    xd = signal.filtfilt(b, a, dat)

    return xd
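A small usage sketch (not part of the original example), assuming a 1 kHz sample spacing and a 10 Hz cutoff:

import numpy as np

del_t = 0.001                      # 1 ms sample spacing (fs = 1 kHz)
t = np.arange(0, 1, del_t)
noisy = np.sin(2 * np.pi * 2 * t) + 0.3 * np.random.normal(size=t.size)
smooth = filter(noisy, 10, del_t)  # zero-phase low pass at 10 Hz, using the function above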