Example #1
def initialize_online_notchfilter(fsample, fnotch, quality, x, axis=-1):
    nyquist = fsample / 2.
    ndim = len(x.shape)
    axis = axis % ndim

    if fnotch is not None:
        fnotch = fnotch / nyquist
        if fnotch < 0.001:
            fnotch = None
        elif fnotch > 0.999:
            fnotch = None

    if fnotch is not None and quality > 0:
        print('using NOTCH filter', [fnotch, quality])
        b, a = iirnotch(fnotch, quality)
    else:
        # no filtering at all
        print('using IDENTITY filter', [fnotch, quality])
        b = np.ones(1)
        a = np.ones(1)

    # initialize the state for the filtering based on the previous data
    if ndim == 1:
        zi = lfiltic(b, a, x, x)
    elif ndim == 2:
        f = lambda x: lfiltic(b, a, x, x)
        zi = np.apply_along_axis(f, axis, x)

    return b, a, zi
Example #2
def initialize_online_filter(fsample, highpass, lowpass, order, x, axis=-1):
    # boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann
    filtwin = 'nuttall'
    nyquist = fsample / 2.
    ndim = len(x.shape)
    axis = axis % ndim

    if highpass is not None:
        highpass = highpass / nyquist
        if highpass < 0.001:
            print('Warning: highpass is too low, disabling')
            highpass = None
        elif highpass > 0.999:
            print('Warning: highpass is too high, disabling')
            highpass = None

    if lowpass is not None:
        lowpass = lowpass / nyquist
        if lowpass < 0.001:
            print('Warning: lowpass is too low, disabling')
            lowpass = None
        elif lowpass > 0.999:
            print('Warning: lowpass is too high, disabling')
            lowpass = None

    if not (highpass is None) and not (lowpass is
                                       None) and highpass >= lowpass:
        # totally blocking all signal
        print('using NULL filter', [highpass, lowpass, order])
        b = np.zeros(order)
        a = np.ones(1)
    elif not (lowpass is None) and (highpass is None):
        print('using lowpass filter', [highpass, lowpass, order])
        b = firwin(order, cutoff=lowpass, window=filtwin, pass_zero=True)
        a = np.ones(1)
    elif not (highpass is None) and (lowpass is None):
        print('using highpass filter', [highpass, lowpass, order])
        b = firwin(order, cutoff=highpass, window=filtwin, pass_zero=False)
        a = np.ones(1)
    elif not (highpass is None) and not (lowpass is None):
        print('using bandpass filter', [highpass, lowpass, order])
        b = firwin(order,
                   cutoff=[highpass, lowpass],
                   window=filtwin,
                   pass_zero=False)
        a = np.ones(1)
    else:
        # no filtering at all
        print('using IDENTITY filter', [highpass, lowpass, order])
        b = np.ones(1)
        a = np.ones(1)

    # initialize the state for the filtering based on the previous data
    if ndim == 1:
        zi = lfiltic(b, a, x, x)
    elif ndim == 2:
        f = lambda x: lfiltic(b, a, x, x)
        zi = np.apply_along_axis(f, axis, x)

    return b, a, zi
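A minimal streaming sketch (not from the original project; the sampling rate, cutoffs and data below are illustrative, and initialize_online_filter is assumed to be the function above): the returned zi is carried from block to block so that chunked filtering matches filtering the whole stream at once.

import numpy as np
from scipy.signal import lfilter

fsample, order = 250.0, 9                      # hypothetical: 250 Hz data, 9-tap FIR
history = np.random.randn(2, 250)              # previous data, channels x samples
b, a, zi = initialize_online_filter(fsample, 1.0, 30.0, order, history, axis=-1)

stream = np.random.randn(2, 1000)              # new data arriving in 10 blocks
out = []
for block in np.split(stream, 10, axis=-1):
    filtered, zi = lfilter(b, a, block, axis=-1, zi=zi)   # carry the state forward
    out.append(filtered)
out = np.hstack(out)   # identical to one lfilter call over `stream` with the same initial zi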
Example #3
 def __init__(self, order=2, freq=0.7, y=[], x=[]):
      self.b, self.a = iirfilter(order, freq, btype="lowpass")
      if len(y) > 0:
          print("here")
          self.z = lfiltic(self.b, self.a, y, x=x)
      else:
          self.z = array([0.] * order)
Example #4
def smooth(time, vals, dt=None, gapFactor=20, T=T_M2):
    from scipy import signal
    if dt is None:
        dt = np.diff(time).mean()
    ta = timeArray.timeArray(time, 'epoch')
    # try to calculate exact dt by omitting large gaps
    gaps, ranges, t = ta.detectGaps(dt=dt, gapFactor=gapFactor)
    diff = []
    for i in range(ranges.shape[0]):
        twin = time[ranges[i, 0]:ranges[i, 1]]
        diff.append(np.diff(twin))
    diff = np.concatenate(tuple(diff), axis=0)
    dt = diff.mean()
    # filter design, low-pass butterworth
    T0 = (2 * dt)  # period of Nyquist frequency
    Tpass = 8 * T  # period of pass frequency
    Gpass = 3.0       # max dB loss in pass band
    Tstop = 1 * T  # period of stop frequency
    Gstop = 30.0     # min dB attenuation in stop band
    o, Wn = signal.buttord(T0 / Tpass, T0 / Tstop, Gpass, Gstop)
    if o < 0:
        raise Exception(
            'Cannot create tidal filter. Data sampling frequency may be too low, dt=' +
            str(dt))
    b, a = signal.butter(o, Wn, 'low')
    newvals = []
    newtime = []
    # filter each contiguous data range separately
    for i in range(ranges.shape[0]):
        twin = time[ranges[i, 0]:ranges[i, 1]]
        vwin = vals[ranges[i, 0]:ranges[i, 1]]
        if len(vwin) > 3 * len(a):
            try:
                # default forward-backward filter
                # filtered = signal.filtfilt(b, a, vwin, padtype='constant')
                # forward-backward filter with custom boundary conditions
                # pad with mean of 1/2 pass window length
                N_init = int(np.ceil(Tpass / dt / 2 / 4))
                # forward filter
                x_init = vwin[:N_init]
                y_init = x_init.mean() * np.ones_like(x_init)
                z_init = signal.lfiltic(b, a, y_init, x_init)
                filtered, _ = signal.lfilter(b, a, vwin, zi=z_init)
                # backward filter
                x_init = vwin[-N_init:][::-1]
                y_init = x_init.mean() * np.ones_like(x_init)
                z_init = signal.lfiltic(b, a, y_init, x_init)
                filtered, _ = signal.lfilter(b, a, filtered[::-1], zi=z_init)
                filtered = filtered[::-1]
                newvals.append(filtered)
                newtime.append(twin)
            except Exception as e:
                print(a.shape, vwin.shape)
                raise e
    newvals = np.concatenate(tuple(newvals), axis=0)
    newtime = np.concatenate(tuple(newtime), axis=0)
    return newtime, newvals
Example #5
 def test_lfiltic_bad_zi(self):
     # Regression test for #3699: bad initial conditions
     a = np.ones(1).astype(self.dt)
     b = np.ones(1).astype(self.dt)
     # "y" sets the datatype of zi, so it truncates if int
     zi = lfiltic(b, a, [1., 0])
     zi_1 = lfiltic(b, a, [1, 0])
     zi_2 = lfiltic(b, a, [True, False])
     assert_array_equal(zi, zi_1)
     assert_array_equal(zi, zi_2)
Example #6
 def test_lfiltic_bad_zi(self):
     # Regression test for #3699: bad initial conditions
     a = np.ones(1).astype(self.dt)
     b = np.ones(1).astype(self.dt)
     # "y" sets the datatype of zi, so it truncates if int
     zi = lfiltic(b, a, [1., 0])
     zi_1 = lfiltic(b, a, [1, 0])
     zi_2 = lfiltic(b, a, [True, False])
     assert_array_equal(zi, zi_1)
     assert_array_equal(zi, zi_2)
Example #7
def initialize_online_filter(fsample, highpass, lowpass, order, x, axis=-1):
    # boxcar, triang, blackman, hamming, hann, bartlett, flattop, parzen, bohman, blackmanharris, nuttall, barthann
    filtwin = 'nuttall'
    nyquist = fsample / 2.
    ndim = len(x.shape)
    axis = axis % ndim

    if highpass is not None:
        highpass = highpass/nyquist
        if highpass < 0.01:
            highpass = None
        elif highpass > 0.99:
            highpass = None

    if lowpass is not None:
        lowpass = lowpass/nyquist
        if lowpass < 0.01:
            lowpass = None
        elif lowpass > 0.99:
            lowpass = None

    if not(highpass is None) and not(lowpass is None) and highpass>=lowpass:
        # totally blocking all signal
        print('using NULL filter', [highpass, lowpass])
        b = np.zeros(order)
        a = np.ones(1)
    elif not(lowpass is None) and (highpass is None):
        print('using lowpass filter', [highpass, lowpass])
        b = firwin(order, cutoff = lowpass, window = filtwin, pass_zero = True)
        a = np.ones(1)
    elif not(highpass is None) and (lowpass is None):
        print('using highpass filter', [highpass, lowpass])
        b = firwin(order, cutoff = highpass, window = filtwin, pass_zero = False)
        a = np.ones(1)
    elif not(highpass is None) and not(lowpass is None):
        print('using bandpass filter', [highpass, lowpass])
        b = firwin(order, cutoff = [highpass, lowpass], window = filtwin, pass_zero = False)
        a = np.ones(1)
    else:
        # no filtering at all
        print('using IDENTITY filter', [highpass, lowpass])
        b = np.ones(1)
        a = np.ones(1)

    # initialize the state for the filtering based on the previous data
    if ndim == 1:
        zi = lfiltic(b, a, x, x)
    elif ndim == 2:
        f = lambda x : lfiltic(b, a, x, x)
        zi = np.apply_along_axis(f, axis, x)

    return b, a, zi
Example #8
def test_lfiltic():
    # this would return f32 when given a mix of f32 / f64 args
    b_f32 = np.array([1, 2, 3], dtype=np.float32)
    a_f32 = np.array([4, 5, 6], dtype=np.float32)
    x_f32 = np.ones(32, dtype=np.float32)

    b_f64 = b_f32.astype(np.float64)
    a_f64 = a_f32.astype(np.float64)
    x_f64 = x_f32.astype(np.float64)

    assert_(lfiltic(b_f64, a_f32, x_f32).dtype == np.float64)
    assert_(lfiltic(b_f32, a_f64, x_f32).dtype == np.float64)
    assert_(lfiltic(b_f32, a_f32, x_f64).dtype == np.float64)
    assert_(lfiltic(b_f32, a_f32, x_f32, x_f64).dtype == np.float64)
Example #9
    def runController(self, error, error_dot, ref_dot_feedfwd):
        if self.z_state is None:
            self.z_state = sps.lfiltic(self.tf_state['num'], self.tf_state['den'], y=np.zeros_like(self.tf_state['den'])) # initial filter delays
            self.z_state = np.tile(self.z_state, error.shape)
            self.z_state_dot = sps.lfiltic(self.tf_state_dot['num'], self.tf_state_dot['den'], y=np.zeros_like(self.tf_state_dot['den']))
            self.z_state_dot = np.tile(self.z_state_dot, error_dot.shape)

        u_state, self.z_state = sps.lfilter(self.tf_state['num'], self.tf_state['den'], error, axis=1, zi=self.z_state)
        u_state_dot, self.z_state_dot = sps.lfilter(self.tf_state_dot['num'], self.tf_state_dot['den'], error_dot, axis=1, zi=self.z_state_dot)
        u = u_state + u_state_dot + ref_dot_feedfwd

        v_cmd = u[0:2, 0]
        omega_cmd = u[2, 0]

        return v_cmd, omega_cmd
Example #10
    def test_2d_active(self):
        shape = self.shape2D
        known_data = np.random.normal(size=shape).astype(np.float32).view(
            np.complex64)
        idata = bf.ndarray(known_data, space='cuda_managed')
        odata = bf.empty_like(idata)
        coeffs = self.coeffs * 1.0
        coeffs.shape += (1, )
        coeffs = np.repeat(coeffs, idata.shape[1], axis=1)
        coeffs.shape = (coeffs.shape[0], idata.shape[1])
        coeffs = bf.ndarray(coeffs, space='cuda_managed')

        fir = Fir()
        fir.init(coeffs, 1)
        fir.execute(idata, odata)
        fir.execute(idata, odata)
        stream_synchronize()

        for i in range(known_data.shape[1]):
            zf = lfiltic(self.coeffs, 1.0, 0.0)
            known_result, zf = lfilter(self.coeffs,
                                       1.0,
                                       known_data[:, i],
                                       zi=zf)
            known_result, zf = lfilter(self.coeffs,
                                       1.0,
                                       known_data[:, i],
                                       zi=zf)
            compare(odata[:, i], known_result)
Example #11
    def process(self, data):
        """Applies the filter to the input.

        Parameters
        ----------
        data : ndarray, shape (n_channels, n_samples)
            Input signals.
        """
        if data.ndim != 2:
            raise ValueError("data must be 2-dimensional.")

        if self._x_prev is None:
            # first pass has no initial conditions
            out = signal.lfilter(self.b, self.a, data, axis=-1)
        else:
            # subsequent passes get ICs from previous input/output
            num_ch = data.shape[0]
            K = max(len(self.a)-1, len(self.b)-1)
            self._zi = np.zeros((num_ch, K))

            # unfortunately we have to get zi channel by channel
            for c in range(data.shape[0]):
                self._zi[c, :] = signal.lfiltic(
                    self.b,
                    self.a,
                    self._y_prev[c, -(self.overlap+1)::-1],
                    self._x_prev[c, -(self.overlap+1)::-1])

            out, zf = signal.lfilter(self.b, self.a, data, axis=-1,
                                     zi=self._zi)

        self._x_prev = data
        self._y_prev = out

        return out
Example #12
    def __init__( self, band_start, band_stop ):
        nyquist_frequency = float(SAMPLES_PER_SECOND) / 2.0
        
        band_start /= nyquist_frequency
        band_stop /= nyquist_frequency

        assert( band_start >= 0 and band_start <= 1 )
        assert( band_stop >= 0 and band_stop <= 1 )
        assert( band_stop >= band_start )

        passband_edges = []
        stopband_edges = []

        if band_start >= 0.05: # if not, make LPF only
            passband_edges.append( band_start * 1.025 )
            stopband_edges.append( band_start * 0.975 )

        if band_stop <= 0.95: # if not, make HPF only
            passband_edges.append( band_stop * 0.975 )
            stopband_edges.append( band_stop * 1.025 )

        (self.feedforward_taps,
         self.feedback_taps) = iirdesign( passband_edges,
                                          stopband_edges,
                                          0.1,               # max attenuation (dB) in passband
                                          30 )               # min attenuation (dB) in stopband

        self.filter_state = lfiltic( self.feedforward_taps, self.feedback_taps, [] )
Example #13
    def geth(self, params):
        '''

        Parameters
        ----------
        params : tuple, (ar, ma)
            try to keep the params conversion in loglike

        copied from generate_gjrgarch
        needs to be extracted to separate function
        '''
        #mu, ar, ma = params
        ar, ma, mu = params

        #etax = self.endog  #this would be enough for basic garch version
        etax = self._etax + mu
        icetax = self._icetax  #read ic-eta-x, initial condition

        #TODO: where does my go with lfilter ?????????????
        #      shouldn't matter except for interpretation

        nobs = etax.shape[0]

        #check arguments of lfilter
        zi = signal.lfiltic(ma,ar, icetax)
        #h = signal.lfilter(ar, ma, etax, zi=zi) #np.atleast_1d(etax[:,1].mean()))
        #just guessing: b/c ValueError: BUG: filter coefficient a[0] == 0 not supported yet
        h = signal.lfilter(ma, ar, etax, zi=zi)[0]
        return h
Example #14
    def __init__(self, band_start, band_stop):
        nyquist_frequency = float(SAMPLES_PER_SECOND) / 2.0

        band_start /= nyquist_frequency
        band_stop /= nyquist_frequency

        assert (band_start >= 0 and band_start <= 1)
        assert (band_stop >= 0 and band_stop <= 1)
        assert (band_stop >= band_start)

        passband_edges = []
        stopband_edges = []

        if band_start >= 0.05:  # if not, make LPF only
            passband_edges.append(band_start * 1.025)
            stopband_edges.append(band_start * 0.975)

        if band_stop <= 0.95:  # if not, make HPF only
            passband_edges.append(band_stop * 0.975)
            stopband_edges.append(band_stop * 1.025)

        (self.feedforward_taps, self.feedback_taps) = iirdesign(
            passband_edges,
            stopband_edges,
            0.1,  # max attenuation (dB) in passband
            30)  # min attenuation (dB) in stopband

        self.filter_state = lfiltic(self.feedforward_taps, self.feedback_taps,
                                    [])
Example #15
    def _design_filter(self, frequency, type, slice):
        ''' Create filter settings for channel groups with equal filter parameters
        @param frequency: filter frequeny in Hz
        @param type: filter type, "low", "high" or "bandstop"
        @param slice: channel group indices
        @return: filter parameters and state vector
        '''
        self.shm.eegchs = slice.stop

        if (frequency == 0.0) or (frequency > self.samplefreq / 2.0):
            return None
        if type == "bandstop":
            cut1 = (frequency - 1.0) / self.samplefreq * 2.0
            cut2 = (frequency + 1.0) / self.samplefreq * 2.0
            b, a = signal.filter_design.iirfilter(2, [cut1, cut2],
                                                  btype=type,
                                                  ftype='butter')
            #b,a = signal.filter_design.iirfilter(2, [cut1, cut2], rs=40.0, rp=0.5, btype=type, ftype='elliptic')
        else:
            cut = frequency / self.samplefreq * 2.0
            b, a = signal.filter_design.butter(self.filterorder,
                                               cut,
                                               btype=type)
        zi = signal.lfiltic(b, a, (0.0, ))
        czi = np.resize(zi, (slice.stop - slice.start, len(zi)))
        return {
            'slice': slice,
            'a': a,
            'b': b,
            'zi': czi,
            'frequency': frequency
        }
Example #16
    def update_plots(self, fs, data):
        self.current_update += 1
        data = signal.detrend(data.ravel())

        # Plot RMS
        if self._coefs is None:
            self._coefs = signal.iirfilter(2, (400.0/(fs/2), 40e3/(fs/2)))
            b, a = self._coefs
            self._zf = signal.lfiltic(b, a, data[:len(a)-1], data[:len(b)-1])
        b, a = self._coefs

        data, self._zf = signal.lfilter(b, a, data, zi=self._zf)
        rms = np.mean(data**2)**0.5
        db_rms = db(rms)-self.paradigm.mic_sens_dbv-db(20e-6)
        self.append_data(time=self.current_time, rms=db_rms)
        self.current_time += len(data)/fs

        self.current_spl = db_rms
        self.current_spl_average = self.rms_data.get_data('rms')[-60:].mean()
        self.overall_spl_average = self.rms_data.get_data('rms').mean()

        w_frequency = psd_freq(data, fs)
        w_psd = psd(data, fs, 'hamming')
        w_psd_db = db(w_psd)-self.paradigm.mic_sens_dbv-db(20e-6)
        self.rms_data.update_data(frequency=w_frequency, psd=w_psd_db)
Example #17
def exponential_moving_average_v3(series: pd.Series,
                                  alpha: float = 0.0,
                                  min_periods: int = 5):
    """
    The exponential moving average (EMA) is a technical indicator
    that tracks the price of an investment (like a stock or commodity)
    over time. The EMA is a type of weighted moving average (WMA) that gives
    more weighting or importance to more recent price data.

    Args:
        series (pd.Series): closing price
        alpha (float, optional): smoothing factor, usually between .1 - .3 Defaults to 2/(N+1).
        min_periods (int, optional): periods for window function. Defaults to 5.

    Returns:
        EMA (pd.Series): series named EMA 
    """
    if not alpha:
        alpha = 2 / (min_periods + 1)

    values = series.to_numpy()
    b = [alpha]
    a = [1, alpha - 1]
    zi = lfiltic(b, a, values[0:1], [0])
    output = pd.Series(lfilter(b, a, values, zi=zi)[0])
    output.index = series.index
    output.rename('EMA', inplace=True)
    return output
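As a usage sketch (hypothetical prices; the function above is assumed to be in scope), the lfiltic-seeded recursion y[n] = alpha*x[n] + (1 - alpha)*y[n-1], with y[0] equal to the first value, should agree with pandas' adjust=False exponential mean:

import numpy as np
import pandas as pd

close = pd.Series([10.0, 10.5, 11.0, 10.8, 11.2, 11.5],
                  index=pd.date_range('2021-01-04', periods=6))
ema = exponential_moving_average_v3(close, min_periods=5)   # alpha defaults to 2/(5+1)
ref = close.ewm(alpha=2 / 6, adjust=False).mean()           # same recursion, for comparison
print(np.allclose(ema.to_numpy(), ref.to_numpy()))          # expected: True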
Example #18
def N2OEmssionstoConcs(emissions):
    '''
    This function converts nitrous oxide (|N2O|) emissions into concentrations.
    
    :param emissions: |N2O| emissions [TgN2O/year]
    :returns: numpy.array -- containing the |N2O| concentrations for each year [ppb]
    '''
    tauN2O = 114.0  # Lifetime of N2O
    lamN2O = 1.0 / tauN2O  # inverse lifetime in years-1
    scaleN2O = 4.8  # TgN2O per ppb (IPCC TAR report value, chapter 4)

    Result = np.zeros(len(emissions['N2O']))
    decay = np.exp(-lamN2O)
    accum = (1.0 - decay) / (lamN2O * scaleN2O)
    #v = np.arange(1,len(emissions['N2O']))
    #for i in range(1,len(emissions['N2O'])):
    #    Result[i] = Result[i-1] * decay + emissions['N2O'][i-1] * accum
    #Result[v] = Result[v-1] * decay + emissions['N2O'][v-1] * accum

    # 'Result' is the output of a linear filter - see:
    # https://stackoverflow.com/questions/21336794/python-recursive-vectorization-with-timeseries/21338665#21338665
    # filter soln
    b = np.array([0., accum])
    a = np.array([1., -1. * decay])
    zi = lfiltic(b,
                 a, [0, emissions['N2O'][0] * accum],
                 x=emissions['N2O'][1::-1])
    y = np.empty_like(emissions['N2O'])
    y[:2] = [0, emissions['N2O'][0] * accum]
    y[2:], zo = lfilter(b, a, emissions['N2O'][2:], zi=zi)
    return y
Example #19
def CH4EmssionstoConcs(emissions):
    '''
    This function converts methane (|CH4|) emissions into concentrations.
    
    :param emissions: |CH4| emissions [TgCH4/year]
    :returns: numpy.array -- containing the |CH4| concentrations for each year [ppb]
    '''
    TauCH4 = 10.0  # Lifetime of CH4
    LamCH4 = 1.0 / TauCH4  # inverse lifetime in years-1
    scaleCH4 = 2.78  # TgCH4 per ppb (IPCC TAR report value, chapter 4)

    Result = np.zeros(len(emissions['CH4']))
    decay = np.exp(-LamCH4)
    accum = (1.0 - decay) / (LamCH4 * scaleCH4)
    #for i in range(1,len(emissions['CH4'])):
    #    Result[i] = Result[i-1] * decay + emissions['CH4'][i-1] * accum

    # filter soln
    b = np.array([0., accum])
    a = np.array([1., -1. * decay])
    zi = lfiltic(b,
                 a, [0, emissions['CH4'][0] * accum],
                 x=emissions['CH4'][1::-1])
    y = np.empty_like(emissions['CH4'])
    y[:2] = [0, emissions['CH4'][0] * accum]
    y[2:], zo = lfilter(b, a, emissions['CH4'][2:], zi=zi)

    return y
Example #20
    def geth(self, params):
        '''

        Parameters
        ----------
        params : tuple, (ar, ma)
            try to keep the params conversion in loglike

        copied from generate_gjrgarch
        needs to be extracted to separate function
        '''
        #mu, ar, ma = params
        ar, ma, mu = params

        #etax = self.endog  #this would be enough for basic garch version
        etax = self._etax + mu
        icetax = self._icetax  #read ic-eta-x, initial condition

        #TODO: where does my go with lfilter ?????????????
        #      should not matter except for interpretation

        nobs = etax.shape[0]

        #check arguments of lfilter
        zi = signal.lfiltic(ma,ar, icetax)
        #h = signal.lfilter(ar, ma, etax, zi=zi) #np.atleast_1d(etax[:,1].mean()))
        #just guessing: b/c ValueError: BUG: filter coefficient a[0] == 0 not supported yet
        h = signal.lfilter(ma, ar, etax, zi=zi)[0]
        return h
Example #21
    def applyOn(self, x):
        '''Apply the filter to a given array of signal

        Args:
            x (:obj:`numpy array`): The signal array on which the filter needs to be applied

        Returns:
            :obj:`numpy array`: Filtered signal array
        '''

        if self.__storeState:

            if self.__zi is None:
                self.__zi = signal.lfiltic(self.__b, self.__a, x,
                                           self.__initOut)

            retDat, self.__zi = signal.lfilter(self.__b,
                                               self.__a,
                                               x,
                                               zi=self.__zi)
            return retDat
        else:
            if self.__zeroPhase:
                return signal.filtfilt(self.__b, self.__a, x)
            else:
                return signal.lfilter(self.__b, self.__a, x)
Example #22
    def process(self, data):
        if self.xPrev is None:
            # first pass has no initial conditions
            out = signal.lfilter(
                self.b, self.a, data, axis=0)
        else:
            # subsequent passes get ICs from previous input/output
            nCh = data.shape[1]
            K = max(len(self.a)-1, len(self.b)-1)
            self.zi = np.zeros((K, nCh))
            # unfortunately we have to get zi channel by channel
            for c in range(data.shape[1]):
                self.zi[:, c] = signal.lfiltic(
                    self.b,
                    self.a,
                    self.yPrev[-(self.overlap+1)::-1, c],
                    self.xPrev[-(self.overlap+1)::-1, c])

            out, zf = signal.lfilter(
                self.b, self.a, data, axis=0, zi=self.zi)

        self.xPrev = data
        self.yPrev = out

        return out
Example #23
def filtered_deriv(y, x, tau=0):
    """Numerical derivative with optional lowpass filter.

    tau is the filter time constant expressed in same units as x (eg seconds if x is
    time).
    """
    dy = np.empty(y.shape)
    dy[0] = (y[1] - y[0]) / (x[1] - x[0])
    for i in range(1, (dy.shape[0] - 1)):
        dy[i] = (y[i + 1] - y[i - 1]) / (x[i + 1] - x[i - 1])
    dy[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])

    # No filter
    if tau <= 0:
        return dy

    # sample freq
    fs = x.shape[0] / (x[-1] - x[0])
    nyqfreq = fs / 2
    w0 = 1 / (tau * 2 * np.pi * nyqfreq)

    lowpass = signal.iirfilter(1, w0, btype='lowpass', analog=False)

    # Create initial conditions so y=0 at t=0
    zi = signal.lfiltic(*lowpass, y=[0], x=dy[:1])

    filtered, _ = signal.lfilter(*lowpass, dy, zi=-zi)
    return filtered
Example #24
    def test_3d_initial(self):
        shape = self.shape3D
        known_data = np.random.normal(size=shape).astype(np.float32).view(
            np.complex64)
        idata = bf.ndarray(known_data, space='cuda')
        odata = bf.empty_like(idata)
        coeffs = self.coeffs * 1.0
        coeffs.shape += (1, )
        coeffs = np.repeat(coeffs, idata.shape[1] * idata.shape[2], axis=1)
        coeffs.shape = (coeffs.shape[0], idata.shape[1], idata.shape[2])
        coeffs = bf.ndarray(coeffs, space='cuda')

        fir = Fir()
        fir.init(coeffs, 1)
        fir.execute(idata, odata)
        odata = odata.copy('system')

        for i in range(known_data.shape[1]):
            for j in range(known_data.shape[2]):
                zf = lfiltic(self.coeffs, 1.0, 0.0)
                known_result, zf = lfilter(self.coeffs,
                                           1.0,
                                           known_data[:, i, j],
                                           zi=zf)
                compare(odata[:, i, j], known_result)
Example #25
def plot_result(x, n_p, n_d):
    b = x
    n = np.arange(n_d + 1)
    y = np.exp(b[0] * n) - np.exp(b[1] * n)
    n_max = np.log(b[0] / b[1]) / (b[1] - b[0])
    y_max = np.exp(b[0] * n_max) - np.exp(b[1] * n_max)
    print("y_max: " + str(y_max))
    #y_max=np.max(y)
    y_arg_max = np.argmax(y)
    y /= y_max
    print("Value at n_p: %f" % (20 * np.log10(y[n_p]), ))
    print("Value at n_d: %f" % (20 * np.log10(y[n_d]), ))
    print("Location of peak: %d" % (y_arg_max, ))
    print("Desired peak: %d" % (n_p, ))
    print("n_d: %d" % (n_d, ))
    s = np.zeros_like(y)
    s[0] = 1
    b_, a_ = get_filter_coeffs(b, 1 / y_max)
    print("b_: " + str(b_))
    print("a_: " + str(a_))
    y_f, v_n = signal.lfilter(b_, a_, s, zi=[0, 0])
    print("v_n_: " + str(signal.lfiltic(b_, a_, y_f[::-1], [0, 0])))
    print("v_n: " + str(v_n))
    plt.plot(n, 20 * np.log10(y), label='computed')
    plt.plot(n, 20 * np.log10(y_f), label='filter')
    y_f_2, _ = signal.lfilter(b_, a_, y_f, zi=[0, 0])
    plt.plot(n, 20 * np.log10(y_f_2 / np.max(y_f_2)), label='filter2')
    plt.legend()
Example #26
    def test_2d_decimate_active(self):
        shape = self.shape2D
        known_data = np.random.normal(size=shape).astype(np.float32).view(
            np.complex64)
        idata = bf.ndarray(known_data, space='cuda')
        odata = bf.empty((idata.shape[0] // 2, idata.shape[1]),
                         dtype=idata.dtype,
                         space='cuda')
        coeffs = self.coeffs * 1.0
        coeffs.shape += (1, )
        coeffs = np.repeat(coeffs, idata.shape[1], axis=1)
        coeffs.shape = (coeffs.shape[0], idata.shape[1])
        coeffs = bf.ndarray(coeffs, space='cuda')

        fir = Fir()
        fir.init(coeffs, 2)
        fir.execute(idata, odata)
        fir.execute(idata, odata)
        odata = odata.copy('system')

        for i in range(known_data.shape[1]):
            zf = lfiltic(self.coeffs, 1.0, 0.0)
            known_result, zf = lfilter(self.coeffs,
                                       1.0,
                                       known_data[:, i],
                                       zi=zf)
            known_result, zf = lfilter(self.coeffs,
                                       1.0,
                                       known_data[:, i],
                                       zi=zf)
            known_result = known_result[0::2]
            compare(odata[:, i], known_result)
Example #27
    def initializeFilter(self):
        # Read impulse response.
        self.rawImpulseResponse = []
        with open(self.filename,'rb') as csvfile:
            reader = csv.reader(csvfile,delimiter=' ', quotechar='|')
            for row in reader:
                self.rawImpulseResponse.append(float(row[1]))
        # Window it.
        self.windowedImpulseResponse = self.rawImpulseResponse * self.window
        # Now normalize it.
        # This is what *we* call an impulse response.
        self.impulseResponse = self.windowedImpulseResponse/np.linalg.norm(self.windowedImpulseResponse)

        # Store the impulse peak normalization
        # This is what you normalize an impulse by.
        self.impulseNorm = (np.max(self.impulseResponse) - np.min(self.impulseResponse))/2
        
        # Generate an IIR from it.
        self.filter = prony.prony(self.impulseResponse,
                                  self.order,
                                  self.order)
        # And construct the initial conditions.
        self.filterState = lfiltic(self.filter[0],
                                   self.filter[1],
                                   np.zeros(len(self.filter[1]) - 1),
                                   np.zeros(len(self.filter[0]) - 1))
Example #28
def butter_bandpass_filter(data, lowcut=180000000.0, highcut=1200000000.0, fs=2600000000.0, order=8):
    from noise import generate_noise
    b, a = butter_bandpass(lowcut, highcut, fs, order=order)
    y = generate_noise(len(a),noise_sigma=32.0,filter_flag=0)
    x = np.linspace(0.0,(1.0/fs)*len(data),len(b))
    zi = lfiltic(b,a,y,x)
    y,zf = lfilter(b,a,data,zi=zi)
    return y
Example #29
def make_filter_func(b, a):
    zi = lfiltic(b, a, [])

    def filter_func(x):
        y, zi[:] = lfilter(b, a, x, zi=zi)
        return y

    return filter_func
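A quick check of the closure above (coefficients are illustrative): because zi is updated in place on each call, pushing a signal through filter_func chunk by chunk gives the same output as a single lfilter call over the whole signal.

import numpy as np
from scipy.signal import butter, lfilter

b, a = butter(4, 0.2)          # hypothetical 4th-order low-pass
filter_func = make_filter_func(b, a)

x = np.random.randn(1000)
chunked = np.concatenate([filter_func(chunk) for chunk in np.split(x, 10)])
print(np.allclose(chunked, lfilter(b, a, x)))   # expected: True (both start from zero state)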
Example #30
def datfilt(dat, channels, out, order, highpass, lowpass, filttype):

    params = read_metadata(dat)
    dtype = params["dtype"]
    nchannels = params["n_channels"]
    rate = params["sampling_rate"]
    if highpass:
        params["highpass"] = highpass
    if lowpass:
        params["lowpass"] = lowpass
    params["filter_order"] = order
    if not channels:
        channels = np.arange(nchannels)  # select all channels
    params["filter_channels"] = channels
    if not out:
        out = dat + "_filt.dat"
    # load and reshape dat file
    data = np.memmap(dat, dtype=dtype, mode="r").reshape(-1, nchannels)
    if filttype == "butter":
        fil = butter
    elif filttype == "bessel":
        fil = bessel
    else:
        raise Exception("filter must be 'butter' or 'bessel'")
    if highpass and not lowpass:
        coefs = [fil(order, highpass /
                     (rate / 2.), btype="highpass")] * nchannels
    elif lowpass and not highpass:
        coefs = [fil(order, lowpass /
                     (rate / 2.), btype="lowpass")] * nchannels
    elif lowpass and highpass:
        coefs = [
            fil(order,
                np.array((highpass, lowpass)) / (rate / 2.),
                btype="bandpass")
        ] * nchannels
    else:
        raise Exception("must set either '--lowpass' or '--highpass'")
    states = [lfiltic(c[0], c[1], [0]) for c in coefs]
    copyfile(dat, out)  # make a copy of the data to write over
    outdat = np.memmap(out, dtype=dtype, mode="r+", shape=data.shape)
    for i in range(0, len(data), BUFFER_SIZE):
        for c in channels:
            buffer = data[i:i + BUFFER_SIZE, c]
            outdat[i:i + BUFFER_SIZE, c], states[c] = lfilter(coefs[c][0],
                                                              coefs[c][1],
                                                              buffer,
                                                              zi=states[c])
    # run filter backwards (zero phase)
    for i in list(range(0, len(data), BUFFER_SIZE))[::-1]:
        for c in channels:
            buffer = data[i:i + BUFFER_SIZE, c][::-1]
            newbuffer, states[c] = lfilter(coefs[c][0],
                                           coefs[c][1],
                                           buffer,
                                           zi=states[c])
            outdat[i:i + BUFFER_SIZE, c] = newbuffer[::-1]
    write_metadata(out, **params)
Example #31
def miso_lfilter(ar, ma, x, useic=False):
    """
    Filter multiple time series into a single time series.

    Uses a convolution to merge inputs, and then lfilter to produce output.

    Parameters
    ----------
    ar : array_like
        The coefficients of autoregressive lag polynomial including lag zero,
        ar(L) in the expression ar(L)y_t.
    ma : array_like, same ndim as x, currently 2d
        The coefficient of the moving average lag polynomial, ma(L) in
        ma(L)x_t.
    x : array_like
        The 2-d input data series, time in rows, variables in columns.
    useic : bool
        Flag indicating whether to use initial conditions.

    Returns
    -------
    y : ndarray
        The filtered output series.
    inp : ndarray, 1d
        The combined input series.

    Notes
    -----
    currently for 2d inputs only, no choice of axis
    Use of signal.lfilter requires that ar lag polynomial contains
    floating point numbers
    does not cut off invalid starting and final values

    miso_lfilter finds the array y such that:

            ar(L)y_t = ma(L)x_t

    with shapes y (nobs,), x (nobs, nvars), ar (narlags,), and
    ma (narlags, nvars).
    """
    ma = array_like(ma, 'ma')
    ar = array_like(ar, 'ar')
    inp = signal.correlate(x, ma[::-1, :])[:, (x.shape[1] + 1) // 2]
    # for testing 2d equivalence between convolve and correlate
    #  inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
    #  np.testing.assert_almost_equal(inp2, inp)
    nobs = x.shape[0]
    # cut off extra values at the end

    # TODO: initialize also x for correlate
    if useic:
        return signal.lfilter([1],
                              ar,
                              inp,
                              zi=signal.lfiltic(np.array([1., 0.]), ar,
                                                useic))[0][:nobs], inp[:nobs]
    else:
        return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
Example #32
def recursive_filter(x, ar_coeff, init=None):
    '''
    Autoregressive, or recursive, filtering.

    Parameters
    ----------
    x : array-like
        Time-series data. Should be 1d or n x 1.
    ar_coeff : array-like
        AR coefficients in reverse time order. See Notes
    init : array-like
        Initial values of the time-series prior to the first value of y.
        The default is zero.

    Returns
    -------
    y : array
        Filtered array, number of columns determined by x and ar_coeff. If a
        pandas object is given, a pandas object is returned.

    Notes
    -----

    Computes the recursive filter ::

        y[n] = ar_coeff[0] * y[n-1] + ...
                + ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]

    where n_coeff = len(ar_coeff).
    '''
    _pandas_wrapper = _maybe_get_pandas_wrapper(x)
    x = np.asarray(x).squeeze()
    ar_coeff = np.asarray(ar_coeff).squeeze()

    if x.ndim > 1 or ar_coeff.ndim > 1:
        raise ValueError('x and ar_coeff have to be 1d')

    if init is not None:  # integer init are treated differently in lfiltic
        if len(init) != len(ar_coeff):
            raise ValueError("ar_coeff must be the same length as init")
        init = np.asarray(init, dtype=float)

    if init is not None:
        zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
    else:
        zi = None

    y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)

    if init is not None:
        result = y[0]
    else:
        result = y

    if _pandas_wrapper:
        return _pandas_wrapper(result)
    return result
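A small sanity sketch (made-up numbers, assuming the recursive_filter above is importable): with zero initial values the lfiltic-seeded filter should reproduce the documented recursion written out explicitly.

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
ar_coeff = np.array([0.5, 0.2])            # y[n] = 0.5*y[n-1] + 0.2*y[n-2] + x[n]
y = recursive_filter(x, ar_coeff, init=np.zeros(2))

y_ref = np.zeros_like(x)
y1 = y2 = 0.0                              # y[n-1], y[n-2]
for n, xn in enumerate(x):
    y_ref[n] = 0.5 * y1 + 0.2 * y2 + xn
    y2, y1 = y1, y_ref[n]
print(np.allclose(y, y_ref))               # expected: True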
Example #33
def LPC(previous_sig, next_sig, gap_start, gap_end, lpc_order):

    target_length = gap_end - gap_start

    ab, _, _ = _arburg2(previous_sig, lpc_order)
    Zb = lfiltic(b=[1], a=ab, y=previous_sig[:-lpc_order - 1:-1])
    forw_pred, _ = lfilter(b=[1], a=ab, x=np.zeros((target_length)), zi=Zb)

    next_sig = np.flipud(next_sig)
    af, _, _ = _arburg2(next_sig, lpc_order)
    Zf = lfiltic([1], af, next_sig[:-lpc_order - 1:-1])
    backw_pred, _ = lfilter([1], af, np.zeros((target_length)), zi=Zf)
    backw_pred = np.flipud(backw_pred)

    t = np.linspace(0, np.pi / 2, target_length)
    sqCos = np.cos(t)**2
    sigout = sqCos * forw_pred + np.flipud(sqCos) * backw_pred
    return sigout
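The key step above is that lfilter(b=[1], a=ar, x=zeros, zi=lfiltic([1], ar, y=last_samples_most_recent_first)) free-runs the AR recursion beyond the observed samples. A self-contained sketch with a known AR(2) model instead of a Burg fit (coefficients are illustrative):

import numpy as np
from scipy.signal import lfilter, lfiltic

ar = np.array([1.0, -1.6, 0.81])             # y[n] = 1.6*y[n-1] - 0.81*y[n-2]
y_hist = np.array([0.5, 0.9])                # last observed samples: y[-2], y[-1]

zi = lfiltic(b=[1], a=ar, y=y_hist[::-1])    # lfiltic expects the most recent sample first
pred, _ = lfilter(b=[1], a=ar, x=np.zeros(5), zi=zi)

y2, y1, ref = y_hist[0], y_hist[1], []       # the same recursion written out
for _ in range(5):
    y0 = 1.6 * y1 - 0.81 * y2
    ref.append(y0)
    y2, y1 = y1, y0
print(np.allclose(pred, ref))                # expected: True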
Example #34
def recursive_filter(x, ar_coeff, init=None):
    '''
    Autoregressive, or recursive, filtering.

    Parameters
    ----------
    x : array-like
        Time-series data. Should be 1d or n x 1.
    ar_coeff : array-like
        AR coefficients in reverse time order. See Notes
    init : array-like
        Initial values of the time-series prior to the first value of y.
        The default is zero.

    Returns
    -------
    y : array
        Filtered array, number of columns determined by x and ar_coeff. If a
        pandas object is given, a pandas object is returned.

    Notes
    -----

    Computes the recursive filter ::

        y[n] = ar_coeff[0] * y[n-1] + ...
                + ar_coeff[n_coeff - 1] * y[n - n_coeff] + x[n]

    where n_coeff = len(ar_coeff).
    '''
    _pandas_wrapper = _maybe_get_pandas_wrapper(x)
    x = np.asarray(x).squeeze()
    ar_coeff = np.asarray(ar_coeff).squeeze()

    if x.ndim > 1 or ar_coeff.ndim > 1:
        raise ValueError('x and ar_coeff have to be 1d')

    if init is not None:  # integer init are treated differently in lfiltic
        if len(init) != len(ar_coeff):
            raise ValueError("ar_coeff must be the same length as init")
        init = np.asarray(init, dtype=float)

    if init is not None:
        zi = signal.lfiltic([1], np.r_[1, -ar_coeff], init, x)
    else:
        zi = None

    y = signal.lfilter([1.], np.r_[1, -ar_coeff], x, zi=zi)

    if init is not None:
        result = y[0]
    else:
        result = y

    if _pandas_wrapper:
        return _pandas_wrapper(result)
    return result
Example #35
 def __init__(self, hp_3dBHz=4, fs=250, filter_order=4):
     self.fs = fs
     self.filter_order = filter_order
     self.hp_3dBHz = hp_3dBHz
     if (self.hp_3dBHz >= 0) and (self.filter_order >= 0) and (
             self.hp_3dBHz is not None) and (self.filter_order is not None):
         b, a = butter_highpass(self.hp_3dBHz, self.fs, self.filter_order)
         self.a = a
         self.b = b
         self.zi = signal.lfiltic(self.b, self.a, [0])
Example #36
    def initializeFilterFlatHistory(self, yout, yin=None):
        '''Initialize filter assuming the history has been a constant signal with output yout
        and input yin (if None, assumed to be equal to yout)'''
        yout = np.ones(self.order) * yout
        if yin is None:
            yin = yout
        else:
            yin = np.ones(self.order) * yin

        self.zi = scs.lfiltic(self.b, self.a, y=yout, x=yin)
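An illustrative check of the idea (the filter below is a stand-in; self.b, self.a and self.order are assumed to come from a unity-DC-gain design such as a Butterworth low-pass): seeding the state from a constant history means a signal that continues at that constant produces no start-up transient.

import numpy as np
import scipy.signal as scs

b, a = scs.butter(3, 0.1)                    # hypothetical low-pass with DC gain 1
order = max(len(a), len(b)) - 1
yout = 2.0                                   # history: output and input both constant at 2.0
zi = scs.lfiltic(b, a, y=np.ones(order) * yout, x=np.ones(order) * yout)

y, _ = scs.lfilter(b, a, np.full(50, yout), zi=zi)
print(np.allclose(y, yout))                  # expected: True (already at steady state)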
Example #37
def miso_lfilter(ar, ma, x, useic=False):
    '''
    use nd convolution to merge inputs,
    then use lfilter to produce output

    arguments for column variables
    return currently 1d

    Parameters
    ----------
    ar : array_like, 1d, float
        autoregressive lag polynomial including lag zero, ar(L)y_t
    ma : array_like, same ndim as x, currently 2d
        moving average lag polynomial ma(L)x_t
    x : array_like, 2d
        input data series, time in rows, variables in columns

    Returns
    -------
    y : array, 1d
        filtered output series
    inp : array, 1d
        combined input series

    Notes
    -----
    currently for 2d inputs only, no choice of axis
    Use of signal.lfilter requires that ar lag polynomial contains
    floating point numbers
    does not cut off invalid starting and final values

    miso_lfilter finds the array y such that::

            ar(L)y_t = ma(L)x_t

    with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)

    '''
    ma = array_like(ma, 'ma')
    ar = array_like(ar, 'ar')
    inp = signal.correlate(x, ma[::-1, :])[:, (x.shape[1] + 1) // 2]
    # for testing 2d equivalence between convolve and correlate
    #  inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
    #  np.testing.assert_almost_equal(inp2, inp)
    nobs = x.shape[0]
    # cut off extra values at the end

    # TODO: initialize also x for correlate
    if useic:
        return signal.lfilter([1], ar, inp,
                              zi=signal.lfiltic(np.array([1., 0.]), ar,
                                                useic))[0][:nobs], inp[:nobs]
    else:
        return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
Example #38
    def __init__(self, hp_3dBHz=8, lp_3dBHz=12, fs=250, filter_order=4):
        self.fs = fs
        self.filter_order = filter_order
        self.hp_3dBHz = hp_3dBHz
        self.lp_3dBHz = lp_3dBHz
        b, a = butter_bandpass(self.hp_3dBHz, self.lp_3dBHz, self.fs,
                               self.filter_order)
        self.a = a
        self.b = b

        self.zi = signal.lfiltic(self.b, self.a,
                                 [0] * (max(len(self.a), len(self.b)) - 1))
Example #39
    def __init__(self, lp_3dBHz=4, fs=250, filter_order=4):
        self.fs = fs
        self.filter_order = filter_order
        self.lp_3dBHz = lp_3dBHz
        if (self.lp_3dBHz >= 0) and (self.filter_order >= 0) and (
                self.lp_3dBHz is not None) and (self.filter_order is not None):
            b, a = butter_lowpass(self.lp_3dBHz, self.fs, self.filter_order)
            self.a = a
            self.b = b

            self.zi = signal.lfiltic(self.b, self.a,
                                     [0] * (max(len(self.a), len(self.b)) - 1))
Example #40
 def step(self, node):
     b, a, x = node.getIn('B'), node.getIn('A'), node.getIn('X')
     if node.state is None:
         node.state = signal.lfilter_zi(b, a)
     result = signal.lfilter(b, a, x, zi=node.state)
     if node.state is None:
         data = result
         node.state = signal.lfiltic(b, a, data, x)
     else:
         data, node.state = result
     print(x.shape, x.dtype, data.shape, data.dtype)
     node.setOut('Y', data)
Example #41
def PLL(xr, tipo, xi, omega, f0, Ts):
    """ Retorna las salidas del PLL dada la condición inicial y sus parámetros. 
    
    Parámetros
    ----------
    xr : list
        Señal de entrada representada con una lista con N elementos de la forma [x,y].
    Tipo : str
        Filtro que se utiliza dentro del PLL.
    xi : float
        Coeficiente de amortiguamiento del filtro.
    omega : float
        Pulsación propia del filtro.
    f0 : float
        Frecuencia de oscilación natural del VCO.
    Ts : float
        Período de muestreo de la señal de entrada.

    Retorna
    -------   
    xr : list
        Señal de entrada representada con una lista con N elementos de la forma [x,y].
    xd : list
        Señal de salida del detector de fase.
    xc : list
        Señal de salida del filtro.
    xv : list
        Señal de salida del VCO.
    """

    R = traductor(tipo, omega, xi)  # First build the initial condition
    coeficientes = coef_filtro(tipo, R, R, R, Ts)

    if tipo == 'rc':  # Compute the VCO loop gain
        K = omega / (2 * xi)
    elif tipo == 'lead-lag activo':
        K = 2 * xi * omega

    xd = [[0, 0]]
    xc = [[0, 0]]  # Before the signal comes in, the intermediate steps are zero
    xv = [[0, 0]]  # For the phase detector
    ph = 0  # phase of the initial VCO output, xv

    zi = xd[0][1] * signal.lfiltic(coeficientes[0], coeficientes[1], [0])

    for i in range(len(xr)):  # Iterate over each element
        xd.append(detector([xr[i]], [xv[-1]])[-1])  # 1st: the phase detector
        xc_, zi = filtro([xd[-1]], zi, tipo, R, R, R, Ts)  # 2nd: the filter
        xc.append(xc_[-1])
        xv_, ph = vco([xc[-1]], Ts, K, f0, ph)  # 3rd: the VCO
        xv.append(xv_[-1])

    return xr, xd, xc, xv, K
Example #42
def ewma(x, alpha, v0=0):
  '''
  Causal exponential moving average implemented using scipy.signal.lfilter.
  With alpha as the forgetting factor close to one, x the signal to filter.
  Optionally, an initial estimate can be provided with the float v0.
  '''
  b, a = ewma_filter(alpha)
  x = np.atleast_1d(x).flatten()
  v0 = float(v0)

  zi = signal.lfiltic(b, a, [v0])
  return signal.lfilter(b, a, x, zi=zi)[0]
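The helper relies on ewma_filter(alpha), which is not shown; assuming it returns the usual pair b = [1 - alpha], a = [1, -alpha] for y[n] = alpha*y[n-1] + (1 - alpha)*x[n], the lfiltic seed makes the call equivalent to this explicit recursion started at v0:

import numpy as np

def ewma_ref(x, alpha, v0=0.0):
  # the same recursion written out, for comparison
  y, prev = np.empty(len(x)), v0
  for n, xn in enumerate(x):
    prev = alpha * prev + (1 - alpha) * xn
    y[n] = prev
  return y

x = np.random.randn(100)
print(np.allclose(ewma(x, 0.9, v0=1.0), ewma_ref(x, 0.9, v0=1.0)))   # expected: True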
Example #43
    def __init__(self, reference, fir_coefficients=None, fir_fs=None,
                 check_mode=None):

        # Convert to a Numpy record array (with named fields) to facilitate data
        # indexing and sorting
        #self.reference = np.array(reference).T.astype(calibration_dtype)
        reference = np.asanyarray(reference)
        self.reference = np.rec.fromarrays(reference.T, dtype=calibration_dtype)
        self.reference.sort(axis=0)

        self.ref_gains = np.unique(self.reference['gain'])
        self.ref_voltages = np.unique(self.reference['voltage'])
        self.ref_frequencies = np.unique(self.reference['frequency'])
        self.ref_spl = self.reference['spl']
        self.ref_phi = self.reference['phase']

        # Ensure that valid error-checking modes were passed for check_mode
        valid_modes = (None, 'exact', 'bounds')
        mode_error = 'Invalid mode for check_bounds'
        if isinstance(check_mode, dict):
            if check_mode['gain'] not in valid_modes:
                raise ValueError(mode_error)
            if check_mode['voltage'] not in valid_modes:
                raise ValueError(mode_error)
            if check_mode['frequency'] not in valid_modes:
                raise ValueError(mode_error)
            self.check_mode = check_mode
        elif check_mode not in valid_modes:
            raise ValueError('Invalid mode %s for check_bounds' % check_mode)
        else:
            self.check_mode = {}
            self.check_mode['gain'] = check_mode
            self.check_mode['voltage'] = check_mode
            self.check_mode['frequency'] = check_mode
        
        # Reformat reference into a 3D arrays that we can use for trilinear
        # interpolation to estimate max SPL and average phase shift.
        gains = len(self.ref_gains)
        voltages = len(self.ref_voltages)
        frequencies = len(self.ref_frequencies)
        new_shape = gains, voltages, frequencies
        self.ref_spl.shape = new_shape
        self.ref_phi.shape = new_shape

        # Prepare the FIR coefficients for use
        self.fir_coefficients = fir_coefficients
        if fir_coefficients is not None:
            self.fir_zi = signal.lfiltic(fir_coefficients, 1, 0)
            if fir_fs is None:
                mesg = 'Must provide sampling frequency for fir_coefficients' 
                raise ValueError(mesg)
            self.fir_fs = fir_fs
Example #44
    def apply_(self, d):
        b, a = self.filter
        if self.zi == []:
            self.zi = [signal.lfiltic(b, a, np.zeros(b.size)) for fi in range(d.nfeatures)]

        new_zi = []
        xs = []
        for i in range(d.nfeatures):
            xi, zii = signal.lfilter(b, a, d.xs[:, i], zi=self.zi[i])
            xs.append(xi.reshape(-1, 1))
            new_zi.append(zii)
        self.zi = new_zi

        return DataSet(xs=np.hstack(xs), default=d)
Example #45
  def apply_(self, d):
    b, a = self.filter
    if self.zi == []:
      self.zi = [signal.lfiltic(b, a, np.zeros(b.size)) for fi in 
        range(d.nfeatures)]

    data, new_zi = signal.lfilter(b, a, d.data, zi=self.zi, axis=self.axis)
    #new_zi = []
    #data = []
    #for i in range(d.nfeatures):
    #  xi, zii = signal.lfilter(b, a, d.data[i, :], zi=self.zi[i])
    #  data.append(xi[np.newaxis, :])
    #  new_zi.append(zii)
    self.zi = new_zi

    return DataSet(data=data, default=d)
Example #46
    def postprocess(self,fbin,data,field_name=None):
        """ Given a freebird_bin object and the data array,
        returns a list of tuples [(field_name,field_values,units),...]
        for new fields generated from the postprocessing

        For squid, assumes that there is a 'counts' field.

        field_name is either the name of a field in data, or some prefix
        for several fields, useful when multiple instruments of the same
        type are logged together, and are differentiated by some prefix.
        """
        field_name=field_name or 'counts'

        raw=data['counts']

        # Convert to voltage:
        Vx=raw*4.096 / 32768.0
        Y=(Vx-self.squid_cal.a)/self.squid_cal.b

        C_dC=10*Y/self.sbe7_cal.K

        f_s=fbin.sample_rate_hz()
        # 1st order,
        # Wn=1.0/((f_s/2)*2*np.pi*self.squid_cal.Gd)
        # f_s=512Hz nyq=256Hz  256*2pi rad
        # Gd=0.02 s, so this should be a 7.98Hz cutoff
        [b,a]=butter(1,1.0/((f_s/2)*2*np.pi*self.squid_cal.Gd))
        zi=lfiltic(b,a,[C_dC[0],C_dC[0]])
        C,zf=lfilter(b,a,C_dC,zi=zi)

        # conductance * cell constant * S/m to mS/cm
        fields=[('cond',C,'mS/cm'),('cond_emph',C_dC,'mS/cm')]

        if self.mag_cal is not None:
            cal_mag = self.mag_cal.adjust(data['imu_m'])
            fields.append( ('cal_imu_m',cal_mag,'counts') )

        # Remove offset from gyro, and scale based on the MPU6050 reference Arduino
        # code setting the gyro sensitivity to 250deg/s
        # Full scale is [-32768,32767]
        fields.append( ('cal_imu_g',(data['imu_g'] - data['imu_g'].mean(axis=0))*250./32768,'deg/s') )
                       
        return fields
Example #47
    def setup(self, channels=None, samplerate=None,
              blocksize=None, totalframes=None):

        super(IRITStartSeg, self).setup(channels,
                                        samplerate,
                                        blocksize,
                                        totalframes)

        self.input_blocksize = int(0.02 * samplerate)
        self.input_stepsize = int(0.008 * samplerate)


        sr = float(samplerate)
        lowFreq = 100.0
        highFreq = sr / 5
        f1 = lowFreq / sr
        f2 = highFreq / sr
        numtaps = 10
        self.filtre = firwin(numtaps=numtaps, cutoff=[f1, f2], pass_zero=False)
        self.filtre_z = lfiltic(b=self.filtre, a=1, y=0)  # Initial conditions
Example #48
 def _rebuildFilter(self):
     """rebuild the filter initial condition based upon current state
     """
     if self.aCmplx:
         self._a = self._convertCmplx(self.a)
     else:
         self._a = self.a
     if self.bCmplx:
         self._b = self._convertCmplx(self.b)
     else:
         self._b = self.b
     #set up the initial conditions based up on our filters and our history
     self.zi = lfiltic(self._b,self._a,self.lastY, self.lastX)
     
     oldOutputCmplx = self.outputCmplx
     #if the taps are complex or the initial condition is complex we will be complex
     if self.aCmplx or self.bCmplx:
         self.outputCmplx = True  
     #if we are changing between real and complex modes force an sri update to reflect this 
     if (oldOutputCmplx != self.outputCmplx):
         self.forceSriUpdate = True
     self.updateFilter=False
Example #49
    def process(self, data):
        if self.zi is None:
            # initial pass, get ICs from filter coefficients
            zi = signal.lfilter_zi(self.b, self.a)
            self.zi = np.tile(zi, (data.shape[1], 1)).T
        else:
            # subsequent passes get ICs from previous input/output
            num_ch = data.shape[1]
            K = max(len(self.a)-1, len(self.b)-1)
            self.zi = np.zeros((K, num_ch))
            # unfortunately we have to get zi channel by channel
            for c in range(data.shape[1]):
                self.zi[:, c] = signal.lfiltic(
                    self.b,
                    self.a,
                    self.y_prev[-(self.overlap+1)::-1, c],
                    self.x_prev[-(self.overlap+1)::-1, c])

        out, zf = signal.lfilter(
            self.b, self.a, data, axis=0, zi=self.zi)

        self.x_prev = data
        self.y_prev = out
        return out
Example #50
File: lwdf.py Project: jimurai/lwdf
	lwdfilter = Filter(order,wn,coeff_scale,rs=70,ftype=ftype,dtype=dtype)
	# lwdfilter = Filter(order,wn,coeff_scale,'butter',dtype=np.int32)
	# lwdfilter = Filter(order,wn,coeff_scale,'butter',dtype=dtype)
	
	# Perform sample-by-sample filtering
	ir_out = []
	for x in ir:
		temp = lwdfilter.push(x)
		if dtype==np.float:
			ir_out.append((temp[0]+temp[1])/(2.0*(1<<coeff_scale)))
		else:
			ir_out.append((temp[0]+temp[1]+(1<<coeff_scale))>>(coeff_scale+1))
		
	# Compare with SciPy equivalent
	if ftype=='cheby1':
		(B,A) = signal.iirfilter(order, wn, rp=1,btype='lowpass', analog=0, ftype='cheby1', output='ba')
	elif ftype=='cheby2':
		(B,A) = signal.iirfilter(order, wn, rs=rs,btype='lowpass', analog=0, ftype='cheby2', output='ba')
	else:
		(B,A) = signal.iirfilter(order, wn, btype='lowpass', analog=0, ftype=ftype, output='ba')
	zf = signal.lfiltic(B,A,np.zeros(len(A)-1), np.zeros(len(B)-1))
	ir_comp, zf = signal.lfilter(B,A,ir,zi=zf)
	
	# Plot the data
	plt.plot(ir, 'k-', linewidth=2.0)
	plt.plot(ir_comp, 'r-', linewidth=2.0)
	plt.plot(ir_out, 'b-', linewidth=2.0)
	plt.grid()
	plt.legend(('Raw data', 'SciPy filtered - float32', 'LWDF - int32'), loc='upper left')
	plt.show()
Example #51
def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):
    """
    Theoretical autocovariance function of ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficient for autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficient for moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acovf
    sigma2 : float
        Variance of the innovation term.

    Returns
    -------
    acovf : array
        autocovariance of ARMA process given by ar, ma

    See Also
    --------
    arma_acf
    acovf

    References
    ----------
    Brockwell, Peter J., and Richard A. Davis. 2009.
    Time Series: Theory and Methods. 2nd ed. 1991.
    New York, NY: Springer.

    """
    if dtype is None:
        dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))

    p = len(ar) - 1
    q = len(ma) - 1
    m = max(p, q) + 1

    if sigma2.real < 0:
        raise ValueError('Must have positive innovation variance.')

    # Short-circuit for trivial corner-case
    if p == q == 0:
        out = np.zeros(nobs, dtype=dtype)
        out[0] = sigma2
        return out

    # Get the moving average representation coefficients that we need
    ma_coeffs = arma2ma(ar, ma, lags=m)

    # Solve for the first m autocovariances via the linear system
    # described by (BD, eq. 3.3.8)
    A = np.zeros((m, m), dtype=dtype)
    b = np.zeros((m, 1), dtype=dtype)
    # We need a zero-right-padded version of ar params
    tmp_ar = np.zeros(m, dtype=dtype)
    tmp_ar[:p + 1] = ar
    for k in range(m):
        A[k, :(k + 1)] = tmp_ar[:(k + 1)][::-1]
        A[k, 1:m - k] += tmp_ar[(k + 1):m]
        b[k] = sigma2 * np.dot(ma[k:q + 1], ma_coeffs[:max((q + 1 - k), 0)])
    acovf = np.zeros(max(nobs, m), dtype=dtype)
    acovf[:m] = np.linalg.solve(A, b)[:, 0]

    # Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances
    if nobs > m:
        zi = signal.lfiltic([1], ar, acovf[:m:][::-1])
        acovf[m:] = signal.lfilter([1], ar, np.zeros(nobs - m, dtype=dtype),
                                   zi=zi)[0]

    return acovf[:nobs]
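
A minimal usage sketch (hypothetical coefficients; assumes the arma_acovf shown above is importable, e.g. from statsmodels.tsa.arima_process) that also sanity-checks the pure AR(1) case against its closed-form variance sigma2 / (1 - phi**2):

import numpy as np
from statsmodels.tsa.arima_process import arma_acovf

# ARMA(1,1): y_t = 0.6 y_{t-1} + e_t + 0.3 e_{t-1}, Var(e_t) = 2
ar = np.array([1.0, -0.6])   # lag polynomial including lag zero
ma = np.array([1.0, 0.3])
print(arma_acovf(ar, ma, nobs=5, sigma2=2.0))

# Pure AR(1) check: gamma(0) should equal sigma2 / (1 - phi**2)
phi, sigma2 = 0.6, 2.0
acovf_ar1 = arma_acovf([1.0, -phi], [1.0], nobs=5, sigma2=sigma2)
assert np.allclose(acovf_ar1[0], sigma2 / (1 - phi ** 2))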
Example No. 52
0
def removeTides(dc, dt=None, gapFactor=20, T=T_M2):
    """A low-pass filter to remove tidal signal from the data"""
    from scipy import signal
    time = dc.time.array
    vals = dc.data
    if dt is None:
        dt = np.diff(time).mean()
    # try to calculate exact dt by omitting large gaps
    gaps, ranges, t = dc.detectGaps(dt=dt, gapFactor=gapFactor)
    diff = []
    for i in range(ranges.shape[0]):
        twin = time[ranges[i, 0]:ranges[i, 1]]
        diff.append(np.diff(twin))
    diff = np.concatenate(tuple(diff), axis=0)
    dt = diff.mean()
    # filter design, low-pass butterworth
    T0 = (2 * dt)  # period of Nyquist frequency
    Tpass = 8 * T  # period of pass frequency
    Gpass = 3.0       # max dB loss in pass band
    Tstop = 1 * T  # period of stop frequency
    Gstop = 30.0     # min dB attenuation in stop band
    o, Wn = signal.buttord(T0 / Tpass, T0 / Tstop, Gpass, Gstop)
    if o < 0:
        raise Exception(
            'Cannot create tidal filter. Data sampling frequency may be too low, dt=' +
            str(dt))
    b, a = signal.butter(o, Wn, 'low')
    # filter each contiguous data range separately
    npoints, nfields, ntime = vals.shape
    for k in range(nfields):
        for j in range(npoints):
            newvals = []
            newtime = []
            for i in range(ranges.shape[0]):
                twin = time[ranges[i, 0]:ranges[i, 1]]
                vwin = vals[j, k, ranges[i, 0]:ranges[i, 1]]
                if len(vwin) > 3*len(a):
                    try:
                        # default forward-backward filter
                        # filtered = signal.filtfilt(b, a, vwin, padtype='constant')
                        # forward-backward filter with custom boundary conditions
                        # seed the filter state with the mean over half a pass-band period
                        N_init = int(np.ceil(Tpass/dt/2))
                        # forward filter
                        x_init = vwin[:N_init]
                        y_init = x_init.mean()*np.ones_like(x_init)
                        z_init = signal.lfiltic(b, a, y_init, x_init)
                        filtered = signal.lfilter(b, a, vwin, zi=z_init)[0]
                        # backward filter
                        x_init = vwin[-N_init:][::-1]
                        y_init = x_init.mean()*np.ones_like(x_init)
                        z_init = signal.lfiltic(b, a, y_init, x_init)
                        filtered = signal.lfilter(b, a, filtered[::-1], zi=z_init)[0]
                        filtered = filtered[::-1]
                        newvals.append(filtered)
                        newtime.append(twin)
                    except Exception as e:
                        print(a.shape, vwin.shape)
                        raise e

            newvals = np.concatenate(tuple(newvals), axis=0)
            if j == 0 and k == 0:
                data_out = np.zeros((npoints, nfields, len(newvals)))
                time_out = np.concatenate(tuple(newtime), axis=0)
            data_out[j, k, :] = newvals
    ta = timeArray.timeArray(time_out, 'epoch')
    dc2 = dc.interpolateInTime(ta)
    dc2.data = data_out
    return dc2
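
The boundary handling inside removeTides — a forward pass and a reversed backward pass of lfilter, each seeded with lfiltic so the filter starts from the local mean rather than from zero — can be sketched on synthetic data (made-up signal and cutoff, not part of the module):

import numpy as np
from scipy import signal

t = np.arange(0.0, 20.0, 0.01)
x = np.sin(2 * np.pi * t) + 0.1 * t           # fast "tide" on top of a slow trend
b, a = signal.butter(4, 0.01, 'low')          # low-pass keeps only the trend

def filtfilt_mean_ic(b, a, x, n_init):
    # forward pass, initial state built from the mean of the first n_init samples
    x0 = x[:n_init]
    zi = signal.lfiltic(b, a, np.full_like(x0, x0.mean()), x0)
    y = signal.lfilter(b, a, x, zi=zi)[0]
    # backward pass on the reversed output, seeded the same way from the tail
    x1 = x[-n_init:][::-1]
    zi = signal.lfiltic(b, a, np.full_like(x1, x1.mean()), x1)
    y = signal.lfilter(b, a, y[::-1], zi=zi)[0]
    return y[::-1]                            # zero-phase, trend-only estimate

trend = filtfilt_mean_ic(b, a, x, n_init=50)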
Example No. 53
0
    def step(self,responses):
        retinal_image = nx.asarray( responses )
        retinal_image = retinal_image.astype( self.compute_typecode )
        assert retinal_image.shape == (self.n_receptors,)

        self._retinal_image = retinal_image[:,nx.newaxis] # we operate on rank-2 arrays

        if self.do_luminance_adaptation:
            if self.zi_luminance_adaptation is None:

                # This is the first step, so find filter initial conditions
                # that produce zero output for the current input, i.e. a
                # perfectly adapted filter state.
                y = nx.zeros_like(self._retinal_image)
                x = self._retinal_image

                n_elements_state_vec = max(len(self.b_lum_adapt), len(self.a_lum_adapt)) - 1
                zi_shape = (self.n_receptors,n_elements_state_vec)

                if 0:
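                    # disabled branch: scipy.signal.lfiltic takes no axis argument,
                    # hence the per-receptor loop in the else-branch below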
                    self.zi_luminance_adaptation = signal.lfiltic(
                        self.b_lum_adapt, self.a_lum_adapt, y, x, axis=1)
                else:
                    self.zi_luminance_adaptation = nx.zeros( zi_shape,
                                                             self.compute_typecode )
                    for i in range(self.n_receptors):
                        this_zi = signal.lfiltic(
                            self.b_lum_adapt, self.a_lum_adapt, y[i,:], x[i,:])
                        self.zi_luminance_adaptation[i,:] = this_zi.astype(
                            self.compute_typecode)
                del y
                del x

                if zi_shape != self.zi_luminance_adaptation.shape:
                    print('wanted shape %s, got shape %s' % (
                        str(zi_shape), str(self.zi_luminance_adaptation.shape)))
                    raise ValueError('shape wrong')

                test_zero, tmpzi = signal.lfilter(self.b_lum_adapt,
                                                  self.a_lum_adapt,
                                                  self._retinal_image,
                                                  axis=1,
                                                  zi=self.zi_luminance_adaptation)
                epsilon = 1e-5
                if test_zero.max() > epsilon:
                    raise ValueError("maximum value shouldn't be greater than epsilon")

            (self._luminance_adapted,
             self.zi_luminance_adaptation) = signal.lfilter(self.b_lum_adapt,
                                                            self.a_lum_adapt,
                                                            self._retinal_image,
                                                            axis=1,
                                                            zi=self.zi_luminance_adaptation)
            #print 'set self._luminance_adapted'
        else:
            self._luminance_adapted = self._retinal_image

        # early vision (photoreceptor/LMC) filtering
        if not self.skip_earlyvis:
            self._earlyvis, self.zi_earlyvis = signal.lfilter(self.b_earlyvis,
                                                              self.a_earlyvis,
                                                              self._luminance_adapted,
                                                              axis=1,
                                                              zi=self.zi_earlyvis)
        else:
            self._earlyvis = self._retinal_image

        if self.early_contrast_saturation_params is not None:
            tmp = self.early_contrast_saturation_params
            csat_type = tmp[0]
            if csat_type == 'tanh+lin':
                a, b = self.early_contrast_saturation_params[1:]
                self._early_contrast_saturated = numpy.tanh( self._earlyvis * a) + self._earlyvis*b
            elif csat_type == 'tanh':
                a = self.early_contrast_saturation_params[1]
                self._early_contrast_saturated = numpy.tanh( self._earlyvis * a)
            else:
                raise ValueError('unknown contrast saturation type: %s'%csat_type)
        else:
            self._early_contrast_saturated = self._earlyvis

        # high pass filter if necessary
        if self.do_highpass:
            self._U, self.zi_hp = signal.lfilter(self.b_hp,
                                                 self.a_hp,
                                                 self._early_contrast_saturated,
                                                 axis=1,
                                                 zi=self.zi_hp)
        else:
            self._U = self._early_contrast_saturated # undelayed is just early vision filtering

        # emd lowpass filter
        self._D, self.zi_emd = signal.lfilter(self.b_emd,
                                              self.a_emd,
                                              self._U,axis=1,
                                              zi=self.zi_emd)

        self._U_pre_saturation = self._U
        self._D_pre_saturation = self._D
        if self.preEMD_saturation_s is not None:
            # compression/saturation, a la Dror. 2001, eqn. 5
##            sU = self.preEMD_saturation_s*self._U
##            self._U = nx.tanh(sU)
##            print sU[:5],'->',self._U[:5]
            self._U = nx.tanh(self.preEMD_saturation_s*self._U)
            self._D = nx.tanh(self.preEMD_saturation_s*self._D)

        # half correlators
        # A * Bdelayed
        self._subunit_A_Bd = self._U[self.emd_sideA_idxs] * self._D[self.emd_sideB_idxs]
        # Adelayed * B
        self._subunit_Ad_B = self._D[self.emd_sideA_idxs] * self._U[self.emd_sideB_idxs]

        # flicker insensitive
        if self.sign_convention:
            self.emd_outputs = (self.weights_A*self._subunit_A_Bd -
                                self.S*self.weights_B*self._subunit_Ad_B)
        else:
            self.emd_outputs = (self.S*self.weights_B*self._subunit_Ad_B -
                                self.weights_A*self._subunit_A_Bd)

        return self.emd_outputs[:,0] # make rank-1
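
The "perfectly adapted" start-up used in step() boils down to one lfiltic call: seed the filter with a history of zero outputs and constant inputs, so a constant stimulus produces no transient. A toy sketch with made-up first-order high-pass coefficients (not the model's own):

import numpy as np
from scipy import signal

b = np.array([1.0, -1.0])      # crude DC-blocking high-pass
a = np.array([1.0, -0.95])
x0 = 7.0                       # constant "luminance" the filter is adapted to

# history: past outputs all zero, past inputs all equal to x0
zi = signal.lfiltic(b, a, y=np.zeros(4), x=np.full(4, x0))

y, zf = signal.lfilter(b, a, np.full(100, x0), zi=zi)
print(np.abs(y).max())         # ~0: no start-up transient for the adapted input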
Example No. 54
0
def miso_lfilter(ar, ma, x, useic=False):
    '''
    use nd convolution to merge inputs,
    then use lfilter to produce output

    Arguments are given per column variable; the return is currently 1d.

    Parameters
    ----------
    ar : array_like, 1d, float
        autoregressive lag polynomial including lag zero, ar(L)y_t
    ma : array_like, same ndim as x, currently 2d
        moving average lag polynomial ma(L)x_t
    x : array_like, 2d
        input data series, time in rows, variables in columns
    useic : array_like, optional
        initial conditions (past values of y) passed to ``signal.lfiltic``;
        the default ``False`` starts the filter with zero state

    Returns
    -------
    y : array, 1d
        filtered output series
    inp : array, 1d
        combined input series

    Notes
    -----
    Currently for 2d inputs only, no choice of axis.
    Use of signal.lfilter requires that the ar lag polynomial contains
    floating point numbers.
    Does not cut off invalid starting and final values.

    miso_lfilter finds an array y such that::

            ar(L)y_t = ma(L)x_t

    with shapes y (nobs,), x (nobs,nvars), ar (narlags,), ma (narlags,nvars)

    '''
    ma = np.asarray(ma)
    ar = np.asarray(ar)
    #inp = signal.convolve(x, ma, mode='valid')
    #inp = signal.convolve(x, ma)[:, (x.shape[1]+1)//2]
    #Note: convolve mixes up the variable left-right flip
    #I only want the flip in time direction
    #this might also be a mistake or problem in other code where I
    #switched from correlate to convolve
    # correct convolve version, for use with fftconvolve in other cases
    #inp2 = signal.convolve(x, ma[:,::-1])[:, (x.shape[1]+1)//2]
    inp = signal.correlate(x, ma[::-1,:])[:, (x.shape[1]+1)//2]
    #for testing 2d equivalence between convolve and correlate
    #np.testing.assert_almost_equal(inp2, inp)
    nobs = x.shape[0]
    # cut off extra values at the end

    #todo initialize also x for correlate
    if useic:
        return signal.lfilter([1], ar, inp,
                #zi=signal.lfilter_ic(np.array([1.,0.]),ar, ic))[0][:nobs], inp[:nobs]
                zi=signal.lfiltic(np.array([1.,0.]),ar, useic))[0][:nobs], inp[:nobs]
    else:
        return signal.lfilter([1], ar, inp)[:nobs], inp[:nobs]
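
A small usage sketch of the shape conventions documented above, with made-up coefficients and assuming the miso_lfilter defined here is in scope: two input columns are merged through a two-column MA polynomial and then filtered by the AR polynomial:

import numpy as np

nobs = 200
rng = np.random.default_rng(0)
x = rng.standard_normal((nobs, 2))    # (nobs, nvars)

ar = np.array([1.0, -0.8])            # ar(L), lag zero included
ma = np.array([[1.0, 0.5],            # ma(L), one column per input variable
               [0.3, 0.0]])

y, inp = miso_lfilter(ar, ma, x)
print(y.shape, inp.shape)             # (200,) (200,)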
Example No. 55
0
        e[t] = x[t] - y[t]

    for t in range(maxlag, nobs):
        #wrong broadcasting, 1d only
        y[t] = (x[t-arlag:t] * arcoefs_r).sum(0) + (e[t-malag:t] * macoefs_r).sum(0)
        e[t] = x[t] - y[t]

    return y, e

arcoefs, macoefs = -np.array([1, -0.8, 0.2])[1:], np.array([1., 0.5, 0.1])[1:]
print(armaloop(arcoefs, macoefs, np.ones(10)))
print(armaloop([0.8], [], np.ones(10)))
print(armaloop([0.8], [], np.arange(2,10)))
y, e = armaloop([0.1], [0.8], np.arange(2,10))
print(e)
print(signal.lfilter(np.array([1, -0.1]), np.array([1., 0.8]), np.arange(2,10)))

y, e = armaloop([], [0.8], np.ones(10))
print(e)
print(signal.lfilter(np.array([1, -0.]), np.array([1., 0.8]), np.ones(10)))

ic=signal.lfiltic(np.array([1, -0.1]), np.array([1., 0.8]), np.ones([0]), np.array([1]))
print(signal.lfilter(np.array([1, -0.1]), np.array([1., 0.8]), np.ones(10), zi=ic))

zi = signal.lfilter_zi(np.array([1, -0.1]), np.array([1., 0.8]))  # steady-state zi matching the (b, a) used below
print(signal.lfilter(np.array([1, -0.1]), np.array([1., 0.8]), np.ones(10), zi=zi))
print(signal.filtfilt(np.array([1, -0.8]), np.array([1.]), np.ones(10)))

#todo write examples/test across different versions
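
One relationship worth noting for the snippet above: for a constant input, the state returned by signal.lfiltic from steady-state histories coincides with signal.lfilter_zi scaled by that constant. A quick check (illustrative values only):

import numpy as np
from scipy import signal

b, a = np.array([1.0, -0.1]), np.array([1.0, 0.8])
y_ss = b.sum() / a.sum()                       # DC gain: steady output for x == 1
zi_ic = signal.lfiltic(b, a, y_ss * np.ones(3), np.ones(3))
zi_ss = signal.lfilter_zi(b, a)
assert np.allclose(zi_ic, zi_ss)
# with this state, filtering a constant input yields the steady value from sample 0
print(signal.lfilter(b, a, np.ones(10), zi=zi_ss)[0])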

Example No. 56
0
  elif control=='VC':
    Pin = 0.5*sum((pdata[indx:h-2,1]-pdata[indx:h-2,2])*
                    (pdata[indx+1:h-1,0]-pdata[indx:h-2,0]))/ \
              sum(pdata[indx+1:h-1,0]-pdata[indx:h-2,0]);  print('  \twdwdy\t:\t', Pin)
    PinN = Pin; PinP = Pin
  gamma = Pin/P0;                                          print('  \tgamma\t:\t', gamma)

# Computing uncertainties
sbar_dudy=0.0; sbar_Ub=0.0; T0_dudy=0.0; T0_Ub=0.0
if elaborate:
  t = linspace(data[indx,0],data[m-2,0],m-indx-1); dt=t[1]-t[0]
  dudyI0 = zeros((1,m-indx-1)); dudyIn = zeros((1,m-indx-1));
  dudyI0f = interp1d(data[indx-1:m-1,0], data[indx-1:m-1,1]); dudyI0[0,:] = dudyI0f(t)
  dudyInf = interp1d(data[indx-1:m-1,0], data[indx-1:m-1,2]); dudyIn[0,:] = dudyInf(t)
  ar_dudyI0 = arsel(dudyI0); ar_dudyIn = arsel(dudyIn)
  zi0  = lfiltic([1], ar_dudyI0.AR[0], ar_dudyI0.autocor[0])
  zin  = lfiltic([1], ar_dudyIn.AR[0], ar_dudyIn.autocor[0])
  rho0 = ones(m-indx-1); rho0[1:] = lfilter([1], ar_dudyI0.AR[0], \
              zeros(m-indx-2), zi=zi0)[0]
  rhon = ones(m-indx-1); rhon[1:] = lfilter([1], ar_dudyIn.AR[0], \
              zeros(m-indx-2), zi=zin)[0]
  sbar_dudy = std(concatenate((data[indx:m-1,1], data[indx:m-1,2])))/ \
              sqrt(ar_dudyI0.eff_N[0]+ar_dudyIn.eff_N[0])
  T0_dudy =  0.5*dt*(ar_dudyI0.T0[0]+ar_dudyIn.T0[0])
  if CPI:
    UbI = zeros((1,m-indx-1))
    UbIf = interp1d(data[indx-1:m-1,0], data[indx-1:m-1,5]); UbI[0,:] = 0.5*UbIf(t)
    ar_UbI  = arsel(UbI); ziUb  = lfiltic([1], ar_UbI.AR[0], ar_UbI.autocor[0])
    rhoU = ones(m-indx-1); rhoU[1:] = lfilter([1], ar_UbI.AR[0],  \
                zeros(m-indx-2), zi=ziUb)[0]
    sbar_Ub = std(data[indx:m-1,5])/sqrt(ar_UbI.eff_N[0])