Example no. 1
    def apply_filter_fir(self, data, truncate=False,
                         stages=None):
        """
        Apply filter to data by applying the discrete-time filter.

        truncate        If true, intermediate calculations are
                        truncated as they would be in the MCE's fixed
                        point implementation.  This allows for complete
                        simulation of digital artifacts.
        """
        import scipy.signal as scs
        # Special hack
        n = data.shape[-1]
        b = [1., 2., 1.]
        # First filter
        if stages is None or 0 in stages:
            a = [1., -self.params[0]/2.**14, self.params[1]/2.**14]
            data = scs.lfilter(b, a, data) / 2**self.params[5]
            if truncate:
                data = numpy.floor(data)
        # Second filter
        if stages is None or 1 in stages:
            a = [1., -self.params[2]/2.**14, self.params[3]/2.**14]
            data = scs.lfilter(b, a, data) / 2**self.params[4]
            if truncate:
                data = numpy.floor(data)
        return data
Example no. 2
    def test_rank1(self):
        x = np.linspace(0, 5, 6).astype(self.dt)
        b = np.array([1, -1]).astype(self.dt)
        a = np.array([0.5, -0.5]).astype(self.dt)

        # Test simple IIR
        y_r = np.array([0, 2, 4, 6, 8, 10.]).astype(self.dt)
        assert_array_almost_equal(lfilter(b, a, x), y_r)

        # Test simple FIR
        b = np.array([1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        y_r = np.array([0, 1, 3, 5, 7, 9.]).astype(self.dt)
        assert_array_almost_equal(lfilter(b, a, x), y_r)

        # Test FIR with initial conditions
        b = np.array([1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        zi = np.array([1]).astype(self.dt)
        y_r = np.array([1, 1, 3, 5, 7, 9.]).astype(self.dt)
        zf_r = np.array([5]).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, y_r)
        assert_array_almost_equal(zf, zf_r)

        b = np.array([1, 1, 1]).astype(self.dt)
        a = np.array([1]).astype(self.dt)
        zi = np.array([1, 1]).astype(self.dt)
        y_r = np.array([1, 2, 3, 6, 9, 12.]).astype(self.dt)
        zf_r = np.array([9, 5]).astype(self.dt)
        y, zf = lfilter(b, a, x, zi=zi)
        assert_array_almost_equal(y, y_r)
        assert_array_almost_equal(zf, zf_r)
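The zi/zf pair exercised in these tests is the filter's internal state: passing zi seeds the delay line and zf returns it after the run, which is what makes block-wise filtering exact. A small sketch of the usual companion helper, scipy.signal.lfilter_zi, which builds steady-state initial conditions (my addition, not part of the original test):

import numpy as np
from scipy.signal import butter, lfilter, lfilter_zi

b, a = butter(2, 0.25)
x = np.ones(50)                       # a step input
zi = lfilter_zi(b, a) * x[0]          # steady-state state, scaled to the input
y, zf = lfilter(b, a, x, zi=zi)
assert np.allclose(y, 1.0)            # no start-up transient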
Example no. 3
def filtfilt(b, a, x):
    """
    Zero-phase filtering: apply the filter forwards, then backwards,
    so the phase distortion of the two passes cancels.
    """
    
    #For now only accepting 1d arrays
    ntaps=max(len(a),len(b))
    edge=ntaps*3
        
    if x.ndim != 1:
        raise ValueError("filtfilt only accepts 1-dimensional arrays.")

    #x must be bigger than edge
    if x.size < edge:
        raise ValueError("Input vector needs to be bigger than 3 * max(len(a), len(b)).")
        
    if len(a) < ntaps:
        a = np.r_[a, zeros(len(b) - len(a))]

    if len(b) < ntaps:
        b = np.r_[b, zeros(len(a) - len(b))]
    
    zi=lfilter_zi(b,a)
    
    #Grow the signal to have edges for stabilizing 
    #the filter with inverted replicas of the signal
    s=np.r_[2*x[0]-x[edge:1:-1],x,2*x[-1]-x[-1:-edge:-1]]
    # in the case of a single pass we would only need one of the extremes;
    # both are needed for filtfilt
    
    (y,zf)=lfilter(b,a,s,-1,zi*s[0])

    (y,zf)=lfilter(b,a,np.flipud(y),-1,zi*y[-1])
    
    return np.flipud(y[edge-1:-edge+1])
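Modern SciPy ships scipy.signal.filtfilt, which implements the same forward-backward, odd-reflection padding idea, so in new code the hand-rolled version above can usually be replaced by a one-liner (a usage sketch, assuming SciPy's current API):

import numpy as np
from scipy.signal import butter, filtfilt

b, a = butter(4, 0.1)
x = np.random.randn(500)
y = filtfilt(b, a, x, padtype='odd')   # zero-phase, forward-backward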
Example no. 4
def inv_convolve(h1, h3, length):
    x = np.random.standard_normal(10000)
    u = signal.lfilter(h1, 1, x)
    d = signal.lfilter(h3, 1, x)
    h = np.zeros(length, np.float64)
    nlms(u, d, h, 0.1)
    return h
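The nlms helper above comes from the surrounding project and is not shown here. For reference, a minimal sketch of what a normalized-LMS update matching the call signature nlms(u, d, h, mu) presumably looks like (my assumption, not the source's implementation):

import numpy as np

def nlms(u, d, h, mu, eps=1e-8):
    """Adapt taps h in place so that h filtered over u approximates d (hypothetical)."""
    m = len(h)
    for n in range(m, len(u)):
        x = u[n - m:n][::-1]                     # newest sample first
        e = d[n] - np.dot(h, x)                  # a-priori error
        h += mu * e * x / (np.dot(x, x) + eps)   # normalized step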
Example no. 5
def instantaneous_frequency(data, fs, fk):
    """
    Instantaneous frequency of a signal.

    Computes the instantaneous frequency of the given data which can be
    windowed or not. The instantaneous frequency is determined by the time
    derivative of the analytic signal of the input data.

    :type data: :class:`~numpy.ndarray`
    :param data: Data to determine instantaneous frequency of.
    :param fs: Sampling frequency.
    :param fk: Coefficients for calculating time derivatives
        (calculated via central difference).
    :return: **omega[, domega]** - Instantaneous frequency of input data, Time
        derivative of instantaneous frequency (windowed only).
    """
    x = envelope(data)
    if len(x[0].shape) > 1:
        omega = np.zeros(x[0].shape[0], dtype=np.float64)
        i = 0
        for row in x[0]:
            f = np.real(row)
            h = np.imag(row)
            # faster alternative to calculate f_add
            f_add = np.hstack(([f[0]] * (np.size(fk) // 2), f, [f[np.size(f) - 1]] * (np.size(fk) // 2)))
            fd = signal.lfilter(fk, 1, f_add)
            # correct start and end values of time derivative
            fd = fd[np.size(fk) - 1 : np.size(fd)]
            # faster alternative to calculate h_add
            h_add = np.hstack(([h[0]] * (np.size(fk) // 2), h, [h[np.size(h) - 1]] * (np.size(fk) // 2)))
            hd = signal.lfilter(fk, 1, h_add)
            # correct start and end values of time derivative
            hd = hd[np.size(fk) - 1 : np.size(hd)]
            omega_win = abs(((f * hd - fd * h) / (f * f + h * h)) * fs / 2 / np.pi)
            omega[i] = np.median(omega_win)
            i = i + 1
        # faster alternative to calculate omega_add
        omega_add = np.hstack(
            ([omega[0]] * (np.size(fk) // 2), omega, [omega[np.size(omega) - 1]] * (np.size(fk) // 2))
        )
        domega = signal.lfilter(fk, 1, omega_add)
        # correct start and end values of time derivative
        domega = domega[np.size(fk) - 1 : np.size(domega)]
        return omega, domega
    else:
        omega = np.zeros(np.size(x[0]), dtype=np.float64)
        f = np.real(x[0])
        h = np.imag(x[0])
        # faster alternative to calculate f_add
        f_add = np.hstack(([f[0]] * (np.size(fk) // 2), f, [f[np.size(f) - 1]] * (np.size(fk) // 2)))
        fd = signal.lfilter(fk, 1, f_add)
        # correct start and end values of time derivative
        fd = fd[np.size(fk) - 1 : np.size(fd)]
        # faster alternative to calculate h_add
        h_add = np.hstack(([h[0]] * (np.size(fk) // 2), h, [h[np.size(h) - 1]] * (np.size(fk) // 2)))
        hd = signal.lfilter(fk, 1, h_add)
        # correct start and end values of time derivative
        hd = hd[np.size(fk) - 1 : np.size(hd)]
        omega = abs(((f * hd - fd * h) / (f * f + h * h)) * fs / 2 / np.pi)
        return omega
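The same quantity can be sketched more directly from the analytic signal: instantaneous frequency is the time derivative of the unwrapped phase. A compact alternative (my addition, using scipy.signal.hilbert rather than the envelope/central-difference machinery above):

import numpy as np
from scipy.signal import hilbert

def inst_freq(data, fs):
    analytic = hilbert(data)
    phase = np.unwrap(np.angle(analytic))
    return np.diff(phase) * fs / (2.0 * np.pi)   # one sample shorter than data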
Example no. 6
def highpass(data, freq, df, corners=4, zerophase=False):
    """
    Butterworth-Highpass Filter.

    Filter data removing data below certain frequency ``freq`` using
    ``corners`` corners.

    :param data: Data to filter, type numpy.ndarray.
    :param freq: Filter corner frequency.
    :param df: Sampling rate in Hz.
    :param corners: Filter corners. Note: This is twice the value of PITSA's
        filter sections
    :param zerophase: If True, apply filter once forwards and once backwards.
        This results in twice the number of corners but zero phase shift in
        the resulting filtered trace.
    :return: Filtered data.
    """
    fe = 0.5 * df
    f = freq / fe
    # raise for some bad scenarios
    if f > 1:
        msg = "Selected corner frequency is above Nyquist."
        raise ValueError(msg)
    [b, a] = iirfilter(corners, f, btype='highpass', ftype='butter',
                       output='ba')
    if zerophase:
        firstpass = lfilter(b, a, data)
        return lfilter(b, a, firstpass[::-1])[::-1]
    else:
        return lfilter(b, a, data)
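A usage sketch (assumed 100 Hz data, not from the original source; the function relies on iirfilter and lfilter being imported from scipy.signal at module level):

import numpy as np

t = np.arange(0, 10, 0.01)                       # df = 100 Hz
x = np.sin(2 * np.pi * 0.2 * t) + np.sin(2 * np.pi * 10 * t)
y = highpass(x, freq=1.0, df=100.0, corners=4)   # suppresses the 0.2 Hz drift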
Example no. 7
def decode_efm():
    """ Decode EFM from STDIN, assuming it's a 28Mhz 8bit raw stream  """
    datao = np.frombuffer(sys.stdin.read(SAMPLES), dtype=np.uint8).astype(np.int16)
    datao = sps.detrend(datao, type='constant')  # Remove DC

    datao = auto_gain(datao, 10000, 'pre-filter')  # Expand before filtering, since we'll lose much of signal otherwise

    low_pass = sps.butter(4, 1.75 / FREQ_MHZ, btype='lowpass')  # Low pass at 1.75 MHz
    datao = sps.lfilter(low_pass[0], low_pass[1], datao)

    high_pass = sps.butter(4, 0.01333 / FREQ_MHZ, btype='highpass')  # High pass at 13.333 kHz
    datao = sps.lfilter(high_pass[0], high_pass[1], datao)

    # This is too slow, need to work out a way to do it in scipy
    de_emphasis_filter = biquad_filter(-1.8617006585639506, 0.8706642683920058, 0.947680874725466, -1.8659578411373265, 0.9187262110931641)
    datao = np.fromiter(run_filter(de_emphasis_filter, datao), np.int16)  # De-emph - 26db below 500khz

    # Could tie edge_pll and run_filter together as generators, but we want to see the filter output

    bit_gen = edge_pll(datao, EFM_PIXEL_RATE)  # This is an ultra-naive PLL that returns a bit-stream of 1 = edge, 0 = no-edge
    try:
        while 1:
            run_until_start_code(bit_gen)
            eat_three_bits(bit_gen)
            process_efm_frame(bit_gen, 31)  # 31 14 bit EFM codes in a frame
    except StopIteration:
        printerr('Hit the end of the bitstream')

    datao = np.clip(datao, 0, 255).astype(np.uint8)
    sys.stdout.write(datao.tobytes())
Example no. 8
def test_arma_lfilter():
    # Tests of an ARMA model simulation against scipy.signal.lfilter
    # Note: the first elements of the generated SARIMAX datasets are based on
    # the initial state, so we don't include them in the comparisons
    np.random.seed(10239)
    nobs = 100
    eps = np.random.normal(size=nobs)

    # AR(1)
    mod = sarimax.SARIMAX([0], order=(1, 0, 0))
    actual = mod.simulate([0.5, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
                          initial_state=np.zeros(mod.k_states))
    desired = lfilter([1], [1, -0.5], eps)
    assert_allclose(actual[1:], desired)

    # MA(1)
    mod = sarimax.SARIMAX([0], order=(0, 0, 1))
    actual = mod.simulate([0.5, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
                          initial_state=np.zeros(mod.k_states))
    desired = lfilter([1, 0.5], [1], eps)
    assert_allclose(actual[1:], desired)

    # ARMA(1, 1)
    mod = sarimax.SARIMAX([0], order=(1, 0, 1))
    actual = mod.simulate([0.5, 0.2, 1.], nobs + 1, state_shocks=np.r_[eps, 0],
                          initial_state=np.zeros(mod.k_states))
    desired = lfilter([1, 0.2], [1, -0.5], eps)
    assert_allclose(actual[1:], desired)
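The correspondence being tested: an ARMA(p, q) process with AR polynomial phi(L) and MA polynomial theta(L) is exactly lfilter(b, a, eps) with b the MA coefficients [1, theta_1, ...] and a the AR coefficients [1, -phi_1, ...]. A standalone check for the ARMA(1, 1) case (my addition):

import numpy as np
from scipy.signal import lfilter

eps = np.random.randn(10)
y = np.zeros_like(eps)
for t in range(len(eps)):
    y[t] = eps[t]
    if t > 0:
        y[t] += 0.5 * y[t - 1] + 0.2 * eps[t - 1]
np.testing.assert_allclose(y, lfilter([1, 0.2], [1, -0.5], eps))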
Example no. 9
    def equalLoudness(self,data):
        # TODO: Assumes 16000 sampling rate, fix!
        # Basically, save a few more sets of filter coefficients...

        # Basic equal loudness curve. 
        # This is for humans, NOT birds (there is a paper that claims to have some, but I can't access it:
        # https://doi.org/10.1121/1.428951)

        # The filter weights were obtained from Matlab (using yulewalk) for the standard 80 dB ISO curve
        # for a sampling rate of 16000

        # 10 coefficient Yule-Walker fit for [0,120;20,113;30,103;40,97;50,93;60,91;70,89;80,87;90,86;100,85;200,78;300,76;400,76;500,76;600,76;700,77;800,78;900,79.5;1000,80;1500,79;2000,77;2500,74;3000,71.5;3700,70;4000,70.5;5000,74;6000,79;7000,84;8000,86]
        # Or at least, EL80(:,1)./(fs/2) and m=10.^((70-EL80(:,2))/20);

        ay = np.array([1.0000,-0.6282, 0.2966,-0.3726,0.0021,-0.4203,0.2220,0.0061, 0.0675, 0.0578,0.0322])
        by = np.array([0.4492,-0.1435,-0.2278,-0.0142,0.0408,-0.1240,0.0410,0.1048,-0.0186,-0.0319,0.0054])

        # Butterworth highpass
        ab = np.array([1.0000,-1.9167,0.9201])
        bb = np.array([0.9592,-1.9184,0.9592])

        data = signal.lfilter(by,ay,data)
        data = signal.lfilter(bb,ab,data)

        return data
Example no. 10
    def setup(self):

        #########
        # PART 1: Make model calcium data
        #########

        # Data parameters
        RATE = 1  # mean firing rate of poisson spike train (Hz)
        STEPS = 100  # number of time steps in data
        STEPS_LONG = 5000  # number of time steps in data
        TAU = 0.6  # time constant of calcium indicator (seconds)
        DELTAT = 1 / 30  # time step duration (seconds)
        self.sigma = 0.1  # standard deviation of gaussian noise
        SEED = 2222  # random number generator seed

        # Make a Poisson spike train
        self.spikes = sima.spikes.get_poisson_spikes(deltat=DELTAT, rate=RATE, steps=STEPS, seed=SEED)

        # longer time-series for parameter estimation
        self.spikes_long = sima.spikes.get_poisson_spikes(deltat=DELTAT, rate=RATE, steps=STEPS_LONG, seed=SEED)

        # Convolve with kernel to make calcium signal
        np.random.seed(SEED)
        self.gamma = 1 - (DELTAT / TAU)
        CALCIUM = signal.lfilter([1], [1, -self.gamma], self.spikes)
        CALCIUM_LONG = signal.lfilter([1], [1, -self.gamma], self.spikes_long)

        # Make fluorescence traces with random gaussian noise and baseline
        self.fluors = CALCIUM + norm.rvs(scale=self.sigma, size=STEPS) + uniform.rvs()
        self.fluors_long = CALCIUM_LONG + norm.rvs(scale=self.sigma, size=STEPS_LONG) + uniform.rvs()
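Why lfilter([1], [1, -gamma], spikes) is the calcium convolution: it runs the recursion c[t] = gamma * c[t-1] + s[t], which is convolution with the exponential indicator kernel gamma**k. A standalone check (my addition, not from the source):

import numpy as np
from scipy.signal import lfilter

gamma, n = 0.95, 200
s = np.random.poisson(0.05, n).astype(float)
kernel = gamma ** np.arange(n)
np.testing.assert_allclose(lfilter([1], [1, -gamma], s),
                           np.convolve(s, kernel)[:n])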
Example no. 11
def filtering_time_signal(originalSignalTime, delta, lowFreq, highFreq, filterType, order, MULTIPLE, runReversePass = False):
	NYQ, Wn, B, A, w, h = get_filter_params(delta, lowFreq, highFreq, filterType, order, MULTIPLE)
	filteredSignalTime = signal.lfilter(B, A, originalSignalTime)
	if runReversePass:
		filteredSignalTime = signal.lfilter(B, A, filteredSignalTime[::-1])
		filteredSignalTime = filteredSignalTime[::-1]
	return filteredSignalTime
Example no. 12
def bandpass(data, freqmin, freqmax, df, corners=4, zerophase=False):
    """
    Butterworth-Bandpass Filter.

    Filter data from ``freqmin`` to ``freqmax`` using ``corners`` corners.

    :param data: Data to filter, type numpy.ndarray.
    :param freqmin: Pass band low corner frequency.
    :param freqmax: Pass band high corner frequency.
    :param df: Sampling rate in Hz.
    :param corners: Filter corners / orders.
    :param zerophase: If True, apply filter once forwards and once backwards.
        This results in twice the number of corners but zero phase shift in
        the resulting filtered trace.
    :return: Filtered data.
    """
    fe = 0.5 * df
    low = freqmin / fe
    high = freqmax / fe
    # raise for some bad scenarios
    if high > 1:
        high = 1.0
        msg = "Selected high corner frequency is above Nyquist. " + \
              "Setting Nyquist as high corner."
        warnings.warn(msg)
    if low > 1:
        msg = "Selected low corner frequency is above Nyquist."
        raise ValueError(msg)
    [b, a] = iirfilter(corners, [low, high], btype='band',
                       ftype='butter', output='ba')
    if zerophase:
        firstpass = lfilter(b, a, data)
        return lfilter(b, a, firstpass[::-1])[::-1]
    else:
        return lfilter(b, a, data)
Example no. 13
def lpcar2cc(ar):

    sh = shape(ar)

    if len(sh) == 1:
        p1 = len(ar)
        nf = 1
        b = zeros(len(ar))
    else:
        p1 = len(ar[0])
        nf = len(ar)
        b = zeros(len(ar[0]))
    p = p1 - 1

    cm = arange(1, p + 1) ** (-1.)

    xm = -arange(1, p + 1)

    b[0] = 1
    cc = []

    for k in range(nf):
        if nf > 1:
            cc += [signal.lfilter(b, ar[k], ar[k, 1:p1] * xm) * cm]
        else:
            cc = signal.lfilter(b, ar, ar[1:p1] * xm) * cm

    return cc
Example no. 14
def sosfilt(sos, x, axis=-1, zi=None):
    """Filter data along one dimension using cascaded second-order sections"""
    from scipy.signal import lfilter
    x = np.asarray(x)

    sos = np.atleast_2d(sos)
    if sos.ndim != 2:
        raise ValueError('sos array must be 2D')

    n_sections, m = sos.shape
    if m != 6:
        raise ValueError('sos array must be shape (n_sections, 6)')

    use_zi = zi is not None
    if use_zi:
        zi = np.asarray(zi)
        x_zi_shape = list(x.shape)
        x_zi_shape[axis] = 2
        x_zi_shape = tuple([n_sections] + x_zi_shape)
        if zi.shape != x_zi_shape:
            raise ValueError('Invalid zi shape.  With axis=%r, an input with '
                             'shape %r, and an sos array with %d sections, zi '
                             'must have shape %r.' %
                             (axis, x.shape, n_sections, x_zi_shape))
        zf = np.zeros_like(zi)

    for section in range(n_sections):
        if use_zi:
            x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
                                     x, axis, zi=zi[section])
        else:
            x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
    out = (x, zf) if use_zi else x
    return out
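Usage sketch: SciPy >= 0.16 ships scipy.signal.sosfilt with the same semantics as this backport, and scipy.signal.butter can emit second-order sections directly (my addition):

import numpy as np
from scipy.signal import butter

sos = butter(8, 0.125, output='sos')   # 8th order -> 4 second-order sections
x = np.random.randn(1000)
y = sosfilt(sos, x)                    # the function defined above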
Example no. 15
def lowpass(data, freq, df, corners=4, zerophase=False):
    """
    Butterworth-Lowpass Filter.

    Filter data removing data over certain frequency ``freq`` using ``corners``
    corners.

    :param data: Data to filter, type numpy.ndarray.
    :param freq: Filter corner frequency.
    :param df: Sampling rate in Hz.
    :param corners: Filter corners / orders.
    :param zerophase: If True, apply filter once forwards and once backwards.
        This results in twice the number of corners but zero phase shift in
        the resulting filtered trace.
    :return: Filtered data.
    """
    fe = 0.5 * df
    f = freq / fe
    # raise for some bad scenarios
    if f > 1:
        f = 1.0
        msg = "Selected corner frequency is above Nyquist. " + \
              "Setting Nyquist as high corner."
        warnings.warn(msg)
    [b, a] = iirfilter(corners, f, btype='lowpass', ftype='butter',
                       output='ba')
    if zerophase:
        firstpass = lfilter(b, a, data)
        return lfilter(b, a, firstpass[::-1])[::-1]
    else:
        return lfilter(b, a, data)
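The zerophase branch shared by lowpass, highpass and bandpass above is the classic forward-backward trick: running lfilter once in each direction cancels the phase response, at the cost of squaring the magnitude response. A usage sketch (my addition; assumes iirfilter and lfilter are imported at module level):

import numpy as np

t = np.arange(0, 10, 0.01)                 # df = 100 Hz
x = np.sin(2 * np.pi * 1.0 * t)
y = lowpass(x, freq=5.0, df=100.0, zerophase=True)
# y stays time-aligned with x instead of lagging behind it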
Example no. 16
def _hurst(x):
    """Estimate Hurst exponent on a timeseries.

    The estimation is based on the second order discrete derivative.

    Parameters
    ----------
    x : array, shape(n_channels, n_samples)
        The timeseries to estimate the Hurst exponent for.

    Returns
    -------
    h : float
        The estimation of the Hurst exponent for the given timeseries.
    """
    from scipy.signal import lfilter
    y = np.cumsum(np.diff(x, axis=1), axis=1)

    b1 = [1, -2, 1]
    b2 = [1,  0, -2, 0, 1]

    # second order derivative
    y1 = lfilter(b1, 1, y, axis=1)
    y1 = y1[:, len(b1) - 1:-1]  # first values contain filter artifacts

    # wider second order derivative
    y2 = lfilter(b2, 1, y, axis=1)
    y2 = y2[:, len(b2) - 1:-1]  # first values contain filter artifacts

    s1 = np.mean(y1 ** 2, axis=1)
    s2 = np.mean(y2 ** 2, axis=1)

    return 0.5 * np.log2(s2 / s1)
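A quick sanity check (my addition): for random walks, whose increments are white noise, the discrete second-derivative estimator should return values near H = 0.5.

import numpy as np

np.random.seed(0)
x = np.cumsum(np.random.randn(4, 10000), axis=1)   # 4 random-walk channels
h = _hurst(x)                                      # each entry close to 0.5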
Example no. 17
def benchmarks():
    print("Benchmarking")

    ref = (2.0 * random.rand(131072)) - 1.0
    h = signal.firwin(8191, 0.4)
    expected = signal.lfilter(h, 1, ref)
    savetxt("test_coeffs.txt", h)

    #fir mono benchmark
    writeaudio(ref)
    os.system("../file-qdsp -n 256 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
    compareaudio(expected, readaudio(), 1e-5)

    #fir stereo benchmark
    writeaudio(transpose([ref,-ref]))
    os.system("../file-qdsp -n 256 -i test_in.wav -o test_out.wav -p fir,h=test_coeffs.txt")
    compareaudio(transpose([expected, -expected]), readaudio(), 1e-5)

    os.remove('test_coeffs.txt')

    #iir stereo benchmark
    writeaudio(transpose([ref,-ref]))
    b, a = signal.butter(2, 100.0/24000, 'high')
    expected = signal.lfilter(b,a,ref*10**(-6.0/20))
    os.system("../file-qdsp -n 256 -i test_in.wav -o test_out.wav -p iir,hp2,f=100,q=0.7071,g=-6")
    compareaudio(transpose([expected, -expected]), readaudio(), 1e-5)
Example no. 18
    def do_stft(arr):

        if notch_filter:
            arr = signal.lfilter(b1, a1, arr)
            arr = signal.lfilter(b2, a2, arr)
            if len(arr) < box_width:
                raise ValueError(
                    "The buffer_size used by the connector should "
                    "be higher than box_width. buffer_size = "
                    "%s | box_width = %s" % (len(arr), box_width)
                )

        out = stft(
            arr[:, np.newaxis],
            box_width=box_width,
            step=downsampling_factor,
            pad_width=0,
            kaiser_beta=kaiser_beta,
            include_phase=False,
            log_mag=True,
        )

        fftfreq = np.fft.rfftfreq(box_width, d=1 / float(sfreq))
        good = np.logical_and(fftfreq >= low_f, fftfreq <= high_f)
        out = out[:, good].mean(axis=1)

        return out
Example no. 19
File: filt.py Project: ctw/ptsa_new
def filtfilt(b,a,x):
    #For now only accepting 1d arrays
    ntaps=max(len(a),len(b))
    edge=ntaps*3

    if x.ndim != 1:
        raise ValueError("Filtflit is only accepting 1 dimension arrays.")

    #x must be bigger than edge
    if x.size < edge:
        raise ValueError("Input vector needs to be bigger than 3 * max(len(a),len(b).")


    if len(a) < len(b):
        a = r_[a, zeros(len(b) - len(a))]
    elif len(b) < len(a):
        b = r_[b, zeros(len(a) - len(b))]


    zi=lfilter_zi(b,a)

    #Grow the signal to have edges for stabilizing 
    #the filter with inverted replicas of the signal
    s=r_[2*x[0]-x[edge:1:-1],x,2*x[-1]-x[-1:-edge:-1]]
    #in the case of one go we only need one of the extrems 
    # both are needed for filtfilt

    (y,zf)=lfilter(b,a,s,-1,zi*s[0])

    (y,zf)=lfilter(b,a,flipud(y),-1,zi*y[-1])

    return flipud(y[edge-1:-edge+1])
Example no. 20
    def test_empty_zi(self):
        """Regression test for #880: empty array for zi crashes."""
        a = np.ones(1).astype(self.dt)
        b = np.ones(1).astype(self.dt)
        x = np.arange(5).astype(self.dt)
        zi = np.ones(0).astype(self.dt)
        lfilter(b, a, x, zi=zi)
Example no. 21
def Leq1s(block, CAL_VALUE_A, CAL_VALUE_C):

    sample = numpy.frombuffer(block, dtype=numpy.int16)
    dBA = lfilter(Ba, Aa, sample)
    dBC = lfilter(Bc, Ac, sample)

#iterate over the block.
#sum_squares = 0.0
#for sample in filteredsample:
#sample is a signed short in +/- 32768.
#normalize it to 1.0

    dataA = numpy.array(dBA, dtype=float)*SHORT_NORMALIZE
    sumA = numpy.sum(dataA ** 2.0) / len(dataA)

    dataC = numpy.array(dBC, dtype=float)*SHORT_NORMALIZE
    sumC = numpy.sum(dataC ** 2.0) / len(dataC)

# n = sample * SHORT_NORMALIZE
# sum_squares += n*n
# ms = sum_squares/len(block)
# math.sqrt( sum_squares / count )

    LeqC=(10.0 * math.log(sumC, 10.0)) + trim + CAL_VALUE_C
    LeqA=(10.0 * math.log(sumA, 10.0)) + trim + CAL_VALUE_A

    return (LeqA,LeqC)
Example no. 22
	def ddc(self):
		# Digital downconverter that converts ADC data to IQ data
		if self.iq_flag =='1':
			#Generate quadrature signals
			IFfreq = self.dformat.ifcenterfrequency
			size = len(self.ADC)
			sampleperiod = 1.0/self.dformat.timesamplerate
			xaxis = np.linspace(0,size*sampleperiod,size)
			LO_I = np.sin(IFfreq*(2*np.pi)*xaxis)
			LO_Q = np.cos(IFfreq*(2*np.pi)*xaxis)
			del(xaxis)

			#Run ADC data through digital downconverter
			I = self.ADC*LO_I
			Q = self.ADC*LO_Q
			del(LO_I)
			del(LO_Q)
			nyquist = self.dformat.timesamplerate/2
			cutoff = 40e6/nyquist
			IQfilter = signal.firwin(32, cutoff, window=('kaiser', 2.23))
			I = signal.lfilter(IQfilter, 1.0, I)
			Q = signal.lfilter(IQfilter, 1.0, Q)
			IQ = I + 1j*Q
			IQ = 2*IQ
			self.IQ = IQ
Example no. 23
    def runFilter(self, new_data, padding=True):
        '''
        :param new_data: New data to be filtered
        :param padding: If True, prepend stored history so the filter warms up
        :return: the filtered data, the same length as the input data
        '''
        new_data = new_data.strip('[]')
        new_data = [float(i) for i in new_data.split(',') if i != '']
        try:
            if len(new_data) > len(self.fir_coeff):
                if padding is True:
                    #TODO: The two lines below need to be tested
                    new_data = np.concatenate((self.data, new_data))  # Filter history
                    self.data = new_data[-len(self.fir_coeff):] #Only save the last bit of data to give filter history

                filtered_signal = signal.lfilter(self.fir_coeff, 1.0, new_data)
                return filtered_signal
                #return filtered_signal[len(self.fir_coeff):]
            else:
                self.data = np.roll(self.data, -len(new_data))  #Shift old data out
                self.data[-len(new_data):] = new_data     #Shift in new data
                filtered_signal = signal.lfilter(self.fir_coeff, 1.0, self.data)
                return filtered_signal[-len(new_data):]
        except Exception as e:
            print(e)
            return {'error': '??'}
Example no. 24
    def process(self, data):
        """Applies the filter to the input.

        Parameters
        ----------
        data : ndarray, shape (n_channels, n_samples)
            Input signals.
        """
        if data.ndim != 2:
            raise ValueError("data must be 2-dimensional.")

        if self._x_prev is None:
            # first pass has no initial conditions
            out = signal.lfilter(self.b, self.a, data, axis=-1)
        else:
            # subsequent passes get ICs from previous input/output
            num_ch = data.shape[0]
            K = max(len(self.a)-1, len(self.b)-1)
            self._zi = np.zeros((num_ch, K))

            # unfortunately we have to get zi channel by channel
            for c in range(data.shape[0]):
                self._zi[c, :] = signal.lfiltic(
                    self.b,
                    self.a,
                    self._y_prev[c, -(self.overlap+1)::-1],
                    self._x_prev[c, -(self.overlap+1)::-1])

            out, zf = signal.lfilter(self.b, self.a, data, axis=-1,
                                     zi=self._zi)

        self._x_prev = data
        self._y_prev = out

        return out
Example no. 25
def low_filter(sig_slice):
    from scipy.signal import lfilter, lfilter_zi, filtfilt, butter
    b, a = butter(3, 0.5)
    zi = lfilter_zi(b, a)
    z, _ = lfilter(b, a, sig_slice, zi=zi*sig_slice[0])
    z2, _ = lfilter(b, a, z, zi=zi*z[0])
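    # note: z and z2 above are computed but never used; the function
    # returns SciPy's own filtfilt result instead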
    return filtfilt(b, a, sig_slice)
Example no. 26
def process_audio(indata):
	global test_mode

	if test_mode > 0:
		outputf = np.empty(32768 * 2, dtype = np.float32)
		for i in range(0, 32768):
			outputf[i * 2] = np.cos((i + test_mode) / (freq_hz / 4.0 / 10000)) 
			outputf[(i * 2) + 1] = np.cos((i + test_mode) / (freq_hz / 4.0 / 10000)) 

		outputf *= 50000
	
		test_mode += 32768 
		return outputf, 32768 

#	print(len(indata), len(audiorf_filter_b * 2), len(leftbp_filter_b) * 1)

	in_filt = sps.lfilter(audiorf_filter_b, audiorf_filter_a, indata)[len(audiorf_filter_b) * 2:]

	in_filt4 = np.empty(int(len(in_filt) / 4) + 1)

	for i in range(0, len(in_filt), 4):
		in_filt4[int(i / 4)] = in_filt[i]

	in_left = sps.lfilter(leftbp_filter_b, leftbp_filter_a, in_filt4)[len(leftbp_filter_b) * 1:] 
	in_right = sps.lfilter(rightbp_filter_b, rightbp_filter_a, in_filt4)[len(rightbp_filter_b) * 1:] 

#	if (len(in_left) % 2):
#		in_left = in_left[0:len(in_left - 1)]
#	if (len(in_right) % 2):
#		in_right = in_right[0:len(in_right - 1)]

#	print len(in_left)

	out_left = fm_decode(in_left, freq_hz / 4)
	out_right = fm_decode(in_right, freq_hz / 4)

	out_left = np.clip(out_left - left_audfreqm, -150000, 150000) 
	out_right = np.clip(out_right - right_audfreqm, -150000, 150000) 

	out_left = sps.lfilter(audiolp_filter_b, audiolp_filter_a, out_left)[800:]
	out_right = sps.lfilter(audiolp_filter_b, audiolp_filter_a, out_right)[800:] 

	outputf = np.empty(int(len(out_left) * 2.0 / 20.0) + 2, dtype=np.float32)

	tot = 0
	for i in range(0, len(out_left), 20):
		outputf[tot * 2] = out_left[i]
		outputf[(tot * 2) + 1] = out_right[i]
		tot = tot + 1

#	exit()
	
	return outputf[0:tot * 2], tot * 20 * 4 

	plt.plot(range(0, len(out_left)), out_left)
#	plt.plot(range(0, len(out_leftl)), out_leftl)
	plt.plot(range(0, len(out_right)), out_right + 150000)
#	plt.ylim([2000000,3000000])
	plt.show()
	exit()
Example no. 27
    def preem(data, a10, weight0, a11, weight1):
        a, b = [1, a10], [-a10, a10]
        temp = weight0 * signal.lfilter(b, a, data, axis=-1, zi=None)
        a, b = [1, a11], [-a11, a11]
        temp += weight1 * signal.lfilter(b, a, data, axis=-1, zi=None)

        return temp
Example no. 28
def PolyphaseDecimate(filt,inputData,mixValues,filtState,decRate):
    
  # Decompose the input and the filter
  polyFilt = np.reshape(filt,[decRate, -1],order='F');
  polyFilt = np.flipud(polyFilt);
  polyInput = np.reshape(inputData,[decRate,-1],order='F');
  # Pre-allocate the array
  tmp = np.zeros(shape=(decRate,len(inputData)/decRate), dtype=np.complex64);

  # Perform the mixing (only if necessary)
  if len(mixValues) > 0:
    polyMix = np.reshape(mixValues, [decRate, -1], order='F')
    polyInput = polyInput * polyMix
  
  # Perform the filtering - there are two ways out of the function
  if np.size(filtState) == 0:
    # A filter state was not passed in, ignore tracking states
    for ndx in range(decRate):
      tmp[ndx, :] = lfilter(polyFilt[ndx, :], 1, polyInput[ndx, :])
    return np.sum(tmp, axis=0)
  else:
    # A filter state was passed in. Supply it to the filter routine and pass back the updated state
    for ndx in range(decRate):
      (tmp[ndx, :], filtState[ndx, :]) = lfilter(polyFilt[ndx, :], 1, polyInput[ndx, :], zi=filtState[ndx, :])
    return (np.sum(tmp, axis=0), filtState)
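A standalone check of the polyphase identity (my addition): summing the branch outputs equals filtering at the full rate and keeping every decRate-th sample, offset by decRate - 1 because each output block ends on its newest sample:

import numpy as np
from scipy.signal import lfilter

dec = 4
h = np.random.randn(16)                  # tap count divisible by dec
x = np.random.randn(128)                 # length divisible by dec
direct = lfilter(h, 1, x)[dec - 1::dec]
poly = PolyphaseDecimate(h, x, np.array([]), np.array([]), dec)
np.testing.assert_allclose(direct, poly, rtol=1e-4)   # loose rtol: tmp is complex64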
Example no. 29
    def filter(self):
        path = os.getcwd()+'/trialGraspEventDetection_dataFiles'
        self.Fgr = np.sum(self.values[:, 9:15], axis=1)  # SAI
        self.Fgl = np.sum(self.values[:, 0:7], axis=1)  # SAI

        # can use this to plot in matlab graspeventdetection_plot.m
        np.savetxt(path+'/SAI_Fgr.txt', self.Fgr)
        # can use this to plot in matlab
        np.savetxt(path+'/SAI_Fgl.txt', self.Fgl)

        # 0.55*pi rad/samples
        b1, a1 = signal.butter(1, 0.55, 'high', analog=False)
        self.f_acc_x = signal.lfilter(b1, a1, self.acc_x, axis=-1, zi=None)
        self.f_acc_y = signal.lfilter(b1, a1, self.acc_y, axis=-1, zi=None)
        self.f_acc_z = signal.lfilter(b1, a1, self.acc_z, axis=-1, zi=None)
        # self.f_eff = signal.lfilter(b1, a1, self.eff, axis=-1, zi=None)
        # type(eff)
        self.FAII = np.sqrt(np.square(self.f_acc_x) +
                            np.square(self.f_acc_y) +
                            np.square(self.f_acc_z))
        # can use this to plot in matlab
        np.savetxt(path+'/FAII.txt', self.FAII)

        # subtract base values from the values array
        self.values1 = self.values - self.values.min(axis=0)
        # pass the filter for each sensor
        self.fvalues1 = np.zeros(self.values1.shape)
        # 0.48*pi rad/samples
        b, a = signal.butter(1, 0.48, 'high', analog=False)
        for i in range(16):
            self.fvalues1[:, i] = signal.lfilter(b, a, self.values1[:, i],
                                                 axis=-1, zi=None)
        self.FAI = np.sum(self.fvalues1, axis=1)
        # can use this to plot in matlab
        np.savetxt(path+'/FAI.txt', self.FAI)
Example no. 30
File: inner.py Project: Lx37/pambox
    def filter(self, x):
        """Filters a signal along its last dimension.

        Parameters
        ----------
        x : ndarray
            Signal to filter.

        Returns
        -------
        ndarray
            Filtered signals
        """

        a0, a11, a12, a13, a14, a2 = self.a0, self.a11, self.a12, self.a13, \
            self.a14, self.a2
        b0, b1, b2, gain = self.b0, self.b1, self.b2, self.gain

        output = np.zeros((gain.shape[0], x.shape[-1]))
        for chan in range(gain.shape[0]):
            y1 = ss.lfilter([a0[chan] / gain[chan], a11[chan] / gain[chan],
                             a2[chan] / gain[chan]],
                            [b0[chan], b1[chan], b2[chan]], x)
            y2 = ss.lfilter([a0[chan], a12[chan], a2[chan]],
                            [b0[chan], b1[chan], b2[chan]], y1)
            y3 = ss.lfilter([a0[chan], a13[chan], a2[chan]],
                            [b0[chan], b1[chan], b2[chan]], y2)
            y4 = ss.lfilter([a0[chan], a14[chan], a2[chan]],
                            [b0[chan], b1[chan], b2[chan]], y3)
            output[chan, :] = y4

        return output
Example no. 31
def A_weight(signal, samplerate):
    """Return the given signal after passing through an A-weighting filter
    
    """
    B, A = A_weighting(samplerate)
    return lfilter(B, A, signal)
Example no. 32
def butter_lowpasspass_filter(dat, lowcut, highcut, fs, order=5):
    b, a = butter_lowpass(lowcut, highcut, fs, order=order)
    y = lfilter(b, a, dat)
    return y
Example no. 33
thetao = [-1.2, 0.36, 0.5, 0.4, 0.2, -0.3, 0.8, -0.1]
#Generate the experiment
#The true system is generated by the following relation:
# S: y(t) = Go(q)*u(t) + Ho(q)*e(t),
#with u(t) the input and e(t) white noise.
#Number of samples
N = 200
#Take u as uniform
u = -1 + 2 * rand(N, nu)
#Generate Gaussian white noise with standard deviation 0.01
e = 0.01 * randn(N, ny)
#Calculate y through S (ARX: G(q) = B(q)/A(q) and H(q) = 1/A(q))
y1 = zeros((N, 1))
y2 = zeros((N, 1))
v1 = lfilter(C1o, [1], e[:, 0:1], axis=0)
v2 = lfilter(C2o, [1], e[:, 1:2], axis=0)
#Simulate the true process
for i in range(2, N):
    y1[i] = -dot(A1o[1:3], y1[i - 2:i][::-1]) - dot(
        A12o[1:3], y2[i - 2:i][::-1]) + dot(B11o[1:3],
                                            u[i - 2:i, 0][::-1]) + dot(
                                                B12o[1:3], u[i - 2:i, 1][::-1])
    y2[i] = -dot(A21o[1:3], y1[i - 2:i][::-1]) - dot(
        A2o[1:3], y2[i - 2:i][::-1]) + dot(B21o[1:3],
                                           u[i - 2:i, 0][::-1]) + dot(
                                               B22o[1:3], u[i - 2:i, 1][::-1])
y = concatenate((y1 + v1, y2 + v2), axis=1)
#Estimate the model and get only the parameters
A, B, C = armax(na, nb, nc, nk, u, y)
Example no. 34
def butter_lowpass_filter(data, cutoff, fs, order=5):
    b, a = butter_lowpass(cutoff, fs, order=order)
    y = lfilter(b, a, data)
    return y
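The butter_lowpass helper isn't shown in this example; a conventional definition matching the call signature above (an assumption, not from the source):

from scipy.signal import butter

def butter_lowpass(cutoff, fs, order=5):
    nyq = 0.5 * fs
    return butter(order, cutoff / nyq, btype='low')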
Example no. 35
        #   Arranges the time vector
        t = np.arange(len(dat_file['Motor2 Thrust (Nm)'])) * dt
        # generates the filter coefficients for a 4th order low pass Butterworth filter
        b, a = butter(4, 10 / (fs / 2), 'lp')

        #   Repeats all the previous calculations for the upper rotor
        rpm_1 = (np.diff(
            np.squeeze(np.where(np.diff(dat_file['Motor1 RPM']) == 1))) / fs /
                 60)**-1
        t_rpm_1 = t[np.squeeze(np.where(np.diff(dat_file['Motor1 RPM']) == 1))]
        rpm_avg_1[i] = np.mean(
            rpm_1[bisect.bisect(t_rpm_1, t_min):bisect.bisect(t_rpm_1, t_max)])
        N_rev_1 = len(
            rpm_1[bisect.bisect(t_rpm_1, t_min):bisect.bisect(t_rpm_1, t_max)])

        T_filt_1 = lfilter(b, a, dat_file['Motor1 Thrust (N)'])
        T_avg_1[i] = np.mean(
            dat_file['Motor1 Thrust (N)'][int(t_min * fs):int(t_max * fs)])
        T_err_1[i] = 1.96 * np.std(
            dat_file['Motor1 Thrust (N)'][int(t_min *
                                              fs):int(t_max *
                                                      fs)]) / np.sqrt(N_rev_1)

        Q_filt_1 = lfilter(b, a, dat_file['Motor1 Torque (Nm)'])
        Q_avg_1[i] = np.mean(
            dat_file['Motor1 Torque (Nm)'][int(t_min * fs):int(t_max * fs)])
        Q_err_1[i] = 1.96 * np.std(
            dat_file['Motor1 Torque (Nm)'][int(t_min *
                                               fs):int(t_max *
                                                       fs)]) / np.sqrt(N_rev_1)
Example no. 36
########################################
# Filter
order = 5
sampling_freq = 20
cutoff_freq = 2
sampling_duration = 5

normalized_cutoff_freq = 2*cutoff_freq/sampling_freq

numerator_coeff, denominator_coeff = signal.butter(order, normalized_cutoff_freq)

#sos = signal.ellip(2, .5, 10, 0.5, output='sos')
#filtered_x = signal.sosfilt(sos, x_axis)

filtered_x = signal.lfilter(numerator_coeff, denominator_coeff, x_axis)
filtered_y = signal.lfilter(numerator_coeff, denominator_coeff, y_axis)
filtered_z = signal.lfilter(numerator_coeff, denominator_coeff, z_axis)
filtered_x0 = signal.lfilter(numerator_coeff, denominator_coeff, x0_axis)
filtered_y0 = signal.lfilter(numerator_coeff, denominator_coeff, y0_axis)
filtered_z0 = signal.lfilter(numerator_coeff, denominator_coeff, z0_axis)



#plotting code
plt.figure(1)
plt.subplot(311)
plt.ylim(.0535, .056)
plt.scatter(time_stamp, x_axis, 2)
plt.plot(time_stamp, filtered_x)
plt.tight_layout()
Example no. 37
def butter_bandpass_filter(data, lowcut, highcut, fs, order=5):
    b, a = butter_bandpass(lowcut, highcut, fs, order=order)
    y = lfilter(b, a, data)
    return y
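Likewise, a conventional butter_bandpass matching the call above (assumed, not from the source):

from scipy.signal import butter

def butter_bandpass(lowcut, highcut, fs, order=5):
    nyq = 0.5 * fs
    return butter(order, [lowcut / nyq, highcut / nyq], btype='band')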
Example no. 38
            samps_ndx + 1), 'to', str(samps_ndx + window_samps)

        #4 level loop - #TODO: make N level loop...
        cD_sum = []
        max_decimation = 2**(levels - 1)
        for loop in range(0, levels):
            cD = []
            # 1) DWT
            if loop == 0:
                [cA, cD] = pywt.dwt(data, 'db4')
                cD_minlen = len(cD) // max_decimation + 1
                cD_sum = numpy.zeros(cD_minlen)
            else:
                [cA, cD] = pywt.dwt(cA, 'db4')
            # 2) Filter
            cD = signal.lfilter([0.01], [1 - 0.99], cD)

            # 4) Subtract out the mean.

            # 5) Decimate for reconstruction later.
            cD = abs(cD[::(2**(levels - loop - 1))])

            cD = cD - numpy.mean(cD)
            # 6) Recombine the signal before ACF
            #    essentially, each level I concatenate
            #    the detail coefs (i.e. the HPF values)
            #    to the beginning of the array
            cD_sum = cD[0:cD_minlen] + cD_sum

        cA = signal.lfilter([0.01], [1 - 0.99], cA)
        cA = abs(cA)
Example no. 39
def process(subject):
    """
        Given a subject index, filter each clip, calculate covariance matrices, 
        calculate top-3 filter sets, and save processed data to pickle.
    """
    dn = './data/clips/%s/' % inlabels[subject]
    fns = [fn for fn in os.listdir(dn) if '.mat' in fn]

    allcovs = []
    labels = []

    print(dn)
    # For each clip, resample to 400Hz, apply each filter, calculate and normalize covariance.
    for fn in fns:
        covs = []
        m = loadmat(dn + fn)
        d = m['data']
        d = resample(d, 400, axis=1)
        if 'inter' in fn:
            l = 0
        elif '_ictal' in fn:
            l = 1
        else:
            l = -1

        labels.append(l)

        for b, a in filters:
            f = lfilter(b, a, d)
            c = cov(f)
            c = (c - c.mean()) / c.std()
            covs.append(c)
        allcovs.append(covs)
    allcovs = array(allcovs)
    labels = array(labels)

    # For each filter combination, test prediction quality by CV of logistic regression.
    scores = []
    for w in wis:
        y = labels[labels != -1]
        X = allcovs[labels != -1]
        X = X[:, w, ::2, ::2]
        X = X.reshape((X.shape[0], -1))

        ps = []
        test_size = 0.25
        for tri, tei in sklearn.cross_validation.ShuffleSplit(
                X.shape[0], n_iter=15, test_size=test_size, random_state=42):
            X_train = X[tri]
            X_test = X[tei]
            y_train = y[tri]
            y_test = y[tei]

            clf = sklearn.linear_model.SGDClassifier(loss='log',
                                                     penalty='l1',
                                                     alpha=0.0001)
            clf.fit(X_train, y_train)
            p = clf.predict_proba(X_test)[:, 1]
            cv = sklearn.metrics.roc_auc_score(y_test, p)
            ps.append(cv)
        ps = array(ps)
        scores.append(ps.mean())

    # Select 3 best filter sets and save processed features and labels to pickle.
    best = sorted(zip(scores, wis))[-3:]
    sets = 'ABC'
    i = 0
    for cv, w in best:
        print(outlabels[subject], cv, w)
        y = labels
        X = allcovs
        X = X[:, w, :, :]
        d = {'y': y, 'covs': X, 'w': w, 'cv': cv, 'fns': fns}
        var = open(
            './data/cov_opt_%s_%s.pickle' % (outlabels[subject], sets[i]), 'wb')
        pickle.dump(d, var)
        i += 1
Example no. 40
def smooth1D(x, y, window=10, method='loess', weighting='tri-cubic'):
    """
    Performs fast smoothing of evenly spaced data using moving loess, lowess or average 
    filters.  
    
    References:
        [1] Bowman and Azzalini "Applied Smoothing Techniques for Data Analysis" 
        Oxford Science Publications, 1997.
    
    Args:
        x: Uniformly spaced feature vector (e.g. m/z or drift time).
        y: Array of intensities. Smoothing is computed on the flattened array
            of intensities.
        window: Frame length for the sliding window [10 data points, by default].
        method: Smoothing method {'lowess', 'loess' or 'average'}, by default 'loess'.
        weighting: Weighting scheme for smoothing {'tri-cubic' (default),
            'gaussian' or 'linear'}.

    Returns:
        yhat: Smoothed signal.
    """

    from scipy import signal
    from scipy import linalg

    leny = len(y)
    halfw = np.floor((window / 2.))
    window = int(2. * halfw + 1.)
    x1 = np.arange(1. - halfw, (halfw - 1.) + 1)

    if weighting == 'tri-cubic':
        weight = (1. - np.divide(np.abs(x1), halfw)**3.)**1.5
    elif weighting == 'gaussian':
        weight = np.exp(-(np.divide(x1, halfw) * 2.)**2.)
    elif weighting == 'linear':
        weight = 1. - np.divide(np.abs(x1), halfw)

    if method == 'loess':
        V = (np.vstack((np.hstack(weight), np.hstack(weight * x1),
                        np.hstack(weight * x1 * x1)))).transpose()
        order = 2
    elif method == 'lowess':
        V = (np.vstack((np.hstack((weight)), np.hstack(
            (weight * x1))))).transpose()
        order = 1
    elif method == 'average':
        V = weight.transpose()
        order = 0

    #% Do QR decomposition
    [Q, R] = linalg.qr(V, mode='economic')

    halfw = halfw.astype(int)
    alpha = np.dot(Q[halfw - 1, ], Q.transpose())

    yhat = signal.lfilter(alpha * weight, 1, y)
    yhat[int(halfw + 1) - 1:-halfw] = yhat[int(window - 1) - 1:-1]

    x1 = np.arange(1., (window - 1.) + 1)
    if method == 'loess':
        V = (np.vstack((np.hstack(np.ones([1, window - 1])), np.hstack(x1),
                        np.hstack(x1 * x1)))).transpose()
    elif method == 'lowess':
        V = (np.vstack(
            (np.hstack(np.ones([1, window - 1])), np.hstack(x1)))).transpose()
    elif method == 'average':
        V = np.ones([window - 1, 1])

    for j in np.arange(1, (halfw) + 1):
        #% Compute weights based on deviations from the jth point,
        if weighting == 'tri-cubic':
            weight = (1. - np.divide(np.abs(
                (np.arange(1, window) - j)), window - j)**3.)**1.5
        elif weighting == 'gaussian':
            weight = np.exp(-(np.divide(np.abs(
                (np.arange(1, window) - j)), window - j) * 2.)**2.)
        elif weighting == 'linear':
            weight = 1. - np.divide(np.abs(np.arange(1, window) - j),
                                    window - j)

        W = (np.kron(np.ones((order + 1, 1)), weight)).transpose()
        [Q, R] = linalg.qr(V * W, mode='economic')

        alpha = np.dot(Q[j - 1, ], Q.transpose())
        alpha = alpha * weight
        yhat[int(j) - 1] = np.dot(alpha, y[:int(window) - 1])
        yhat[int(-j)] = np.dot(
            alpha, y[np.arange(leny - 1, leny - window, -1, dtype=int)])

    return yhat
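Usage sketch (my addition): smooth a noisy sine with the default loess/tri-cubic settings.

import numpy as np

x = np.linspace(0, 10, 500)
y = np.sin(x) + 0.3 * np.random.randn(500)
yhat = smooth1D(x, y, window=20)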
Example no. 41
def eigval(datax, datay, dataz, fk, normf=1):
    """
    Polarization attributes of a signal.

    Computes the rectilinearity, the planarity and the eigenvalues of the given
    data which can be windowed or not.
    The time derivatives are calculated by central differences and the
    parameter ``fk`` describes the coefficients of the used polynomial. The
    values of ``fk`` depend on the order of the derivative you want to
    calculate. If you do not want to use derivatives you can simply
    use [1, 1, 1, 1, 1] for ``fk``.

    The algorithm is mainly based on the paper by [Jurkevics1988]_. The rest is
    just the numerical differentiation by central differences (carried out by
    the routine :func:`scipy.signal.lfilter`, applied as ``lfilter(fk, 1, data)``).

    :type datax: :class:`~numpy.ndarray`
    :param datax: Data of x component.
    :type datay: :class:`~numpy.ndarray`
    :param datay: Data of y component.
    :type dataz: :class:`~numpy.ndarray`
    :param dataz: Data of z component.
    :type fk: list
    :param fk: Coefficients of polynomial used for calculating the time
        derivatives.
    :param normf: Factor for normalization.
    :return: **leigenv1, leigenv2, leigenv3, rect, plan, dleigenv, drect,
        dplan** - Smallest eigenvalue, Intermediate eigenvalue, Largest
        eigenvalue, Rectilinearity, Planarity, Time derivative of eigenvalues,
        time derivative of rectilinearity, Time derivative of planarity.
    """
    covmat = np.zeros([3, 3])
    leigenv1 = np.zeros(datax.shape[0], dtype='float64')
    leigenv2 = np.zeros(datax.shape[0], dtype='float64')
    leigenv3 = np.zeros(datax.shape[0], dtype='float64')
    dleigenv = np.zeros([datax.shape[0], 3], dtype='float64')
    rect = np.zeros(datax.shape[0], dtype='float64')
    plan = np.zeros(datax.shape[0], dtype='float64')
    for i in range(datax.shape[0]):
        covmat[0][0] = np.cov(datax[i, :], rowvar=False)
        covmat[0][1] = covmat[1][0] = np.cov(datax[i, :],
                                             datay[i, :],
                                             rowvar=False)[0, 1]
        covmat[0][2] = covmat[2][0] = np.cov(datax[i, :],
                                             dataz[i, :],
                                             rowvar=False)[0, 1]
        covmat[1][1] = np.cov(datay[i, :], rowvar=False)
        covmat[1][2] = covmat[2][1] = np.cov(dataz[i, :],
                                             datay[i, :],
                                             rowvar=False)[0, 1]
        covmat[2][2] = np.cov(dataz[i, :], rowvar=False)
        _eigvec, eigenval, _v = (np.linalg.svd(covmat))
        eigenv = np.sort(eigenval)
        leigenv1[i] = eigenv[0]
        leigenv2[i] = eigenv[1]
        leigenv3[i] = eigenv[2]
        rect[i] = 1 - ((eigenv[1] + eigenv[0]) / (2 * eigenv[2]))
        plan[i] = 1 - ((2 * eigenv[0]) / (eigenv[1] + eigenv[2]))
    leigenv1 = leigenv1 / normf
    leigenv2 = leigenv2 / normf
    leigenv3 = leigenv3 / normf

    leigenv1_add = np.append(
        np.append([leigenv1[0]] * (np.size(fk) // 2), leigenv1),
        [leigenv1[np.size(leigenv1) - 1]] * (np.size(fk) // 2))
    dleigenv1 = signal.lfilter(fk, 1, leigenv1_add)
    dleigenv[:, 0] = dleigenv1[len(fk) - 1:]
    #dleigenv1 = dleigenv1[np.size(fk) // 2:(np.size(dleigenv1) - np.size(fk) /
    #        2)]

    leigenv2_add = np.append(
        np.append([leigenv2[0]] * (np.size(fk) // 2), leigenv2),
        [leigenv2[np.size(leigenv2) - 1]] * (np.size(fk) // 2))
    dleigenv2 = signal.lfilter(fk, 1, leigenv2_add)
    dleigenv[:, 1] = dleigenv2[len(fk) - 1:]
    #dleigenv2 = dleigenv2[np.size(fk) // 2:(np.size(dleigenv2) - np.size(fk) /
    #        2)]

    leigenv3_add = np.append(
        np.append([leigenv3[0]] * (np.size(fk) // 2), leigenv3),
        [leigenv3[np.size(leigenv3) - 1]] * (np.size(fk) // 2))
    dleigenv3 = signal.lfilter(fk, 1, leigenv3_add)
    dleigenv[:, 2] = dleigenv3[len(fk) - 1:]
    #dleigenv3 = dleigenv3[np.size(fk) // 2:(np.size(dleigenv3) - np.size(fk) /
    #        2)]

    rect_add = np.append(np.append([rect[0]] * (np.size(fk) // 2), rect),
                         [rect[np.size(rect) - 1]] * (np.size(fk) // 2))
    drect = signal.lfilter(fk, 1, rect_add)
    drect = drect[len(fk) - 1:]
    #drect = drect[np.size(fk) // 2:(np.size(drect3) - np.size(fk) // 2)]

    plan_add = np.append(np.append([plan[0]] * (np.size(fk) // 2), plan),
                         [plan[np.size(plan) - 1]] * (np.size(fk) // 2))
    dplan = signal.lfilter(fk, 1, plan_add)
    dplan = dplan[len(fk) - 1:]
    #dplan = dplan[np.size(fk) // 2:(np.size(dplan) - np.size(fk) // 2)]

    return leigenv1, leigenv2, leigenv3, rect, plan, dleigenv, drect, dplan
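The padding-and-trim pattern repeated above, in isolation (my addition; the fk shown is an example 5-point central-difference kernel, and the sign convention depends on orientation):

import numpy as np
from scipy import signal

fk = np.array([1, -8, 0, 8, -1]) / 12.0
x = np.sin(np.linspace(0, 2 * np.pi, 100))
pad = np.size(fk) // 2
x_add = np.hstack(([x[0]] * pad, x, [x[-1]] * pad))
dx = signal.lfilter(fk, 1, x_add)[np.size(fk) - 1:]   # same length as x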
Example no. 42
def notch_filter(data, f0, fs, Q=30):
    b, a = notch(f0, fs, Q=Q)
    y = lfilter(b, a, data)
    return y
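The notch helper isn't shown; recent SciPy can produce the coefficients directly (an assumption about what notch() wraps):

from scipy.signal import iirnotch

def notch(f0, fs, Q=30):
    # f0: frequency to reject (Hz); fs: sampling rate (Hz)
    return iirnotch(f0, Q, fs=fs)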
Example no. 43
def baseline(sample_path):
    record_0 = wfdb.rdrecord(sample_path,
                             sampfrom=0,
                             physical=False,
                             channels=[
                                 0,
                             ])
    record_1 = wfdb.rdrecord(sample_path,
                             sampfrom=0,
                             physical=False,
                             channels=[
                                 1,
                             ])
    ecg_0 = record_0.d_signal  # lead I
    ecg_1 = record_1.d_signal  # lead II
    # filter lead I
    ecg_2 = ecg_0.T
    resampled_ecg = signal.medfilt(ecg_2, [1, 41])
    resampled_ecg = signal.medfilt(resampled_ecg, [1, 121])
    ecg_3 = ecg_2 - resampled_ecg
    b, a = butterBandPassFilter(1, 50, 200, order=5)
    ecg_3 = signal.lfilter(b, a, ecg_3)
    ecg_3 = pd.DataFrame(ecg_3.T)
    ecg_3 = ecg_3.iloc[:, 0]
    ecg0 = np.array(ecg_3)
    # filter lead II
    ecg_4 = ecg_1.T
    resampled_ecg = signal.medfilt(ecg_4, [1, 41])
    resampled_ecg = signal.medfilt(resampled_ecg, [1, 121])
    ecg_5 = ecg_4 - resampled_ecg
    b, a = butterBandPassFilter(1, 50, 200, order=5)
    ecg_5 = signal.lfilter(b, a, ecg_5)
    ecg_5 = pd.DataFrame(ecg_5.T)
    ecg_5 = ecg_5.iloc[:, 0]
    ecg1 = np.array(ecg_5)
    # labels
    signal_annotation = wfdb.rdann(sample_path, "atr")
    # read the R-peak positions directly
    peak_time = signal_annotation.sample
    if peak_time[-1] > (len(ecg0) - 1):
        peak_time[-1] = len(ecg0) - 1
    # split into single beats
    test_ecg1, de = danpai(peak_time, ecg0)
    test_ecg2, de = danpai(peak_time, ecg1)
    # process the RR intervals
    rr1 = list([])
    for i in range(len(peak_time) - 1):
        rr = peak_time[i + 1] - peak_time[i]
        rr1.append(rr)
    rr2 = np.array(rr1)
    rr3 = rr2.reshape(len(rr2), 1)
    rr_mean = np.zeros(shape=(1, 2))
    if de == 0:
        RR_mean = np.zeros(shape=(len(rr2) + 1, 1))
        for i in range(len(rr2) - 1):
            rr_mean[0, 0] = rr3[i, 0]
            rr_mean[0, 1] = rr3[i + 1, 0]
            RR_mean[i + 1, 0] = np.mean(rr_mean)
        RR_mean[0, 0] = rr2[0]
        RR_mean[-1, 0] = rr2[-1]
        RR_mean1 = RR_mean
    elif len(de) == 1:
        if de[0] == 0:
            RR_mean = np.zeros(shape=(len(rr2), 1))
            for i in range(len(rr2) - 1):
                rr_mean[0, 0] = rr3[i, 0]
                rr_mean[0, 1] = rr3[i + 1, 0]
                RR_mean[i + 1, 0] = np.mean(rr_mean)
            RR_mean[0, 0] = rr2[0]
            RR_mean1 = RR_mean
        else:
            RR_mean = np.zeros(shape=(len(rr2), 1))
            for i in range(len(rr2) - 1):
                rr_mean[0, 0] = rr3[i, 0]
                rr_mean[0, 1] = rr3[i + 1, 0]
                RR_mean[i, 0] = np.mean(rr_mean)
            RR_mean[-1, 0] = rr2[-1]
            RR_mean1 = RR_mean
    elif len(de) == 2:
        RR_mean = np.zeros(shape=(len(rr2) - 1, 1))
        for i in range(len(rr2) - 1):
            rr_mean[0, 0] = rr3[i, 0]
            rr_mean[0, 1] = rr3[i + 1, 0]
            RR_mean[i, 0] = np.mean(rr_mean)
        RR_mean1 = RR_mean
    elif len(de) == 3:
        RR_mean = np.zeros(shape=(len(rr2) - 1, 1))
        for i in range(len(rr2) - 1):
            rr_mean[0, 0] = rr3[i, 0]
            rr_mean[0, 1] = rr3[i + 1, 0]
            RR_mean[i, 0] = np.mean(rr_mean)
        if de[1] == 1:
            RR_mean1 = np.delete(RR_mean, [0], axis=0)  # drop the first beat
        else:
            RR_mean1 = np.delete(RR_mean, (len(RR_mean) - 1), axis=0)  # drop the last beat
    elif len(de) == 4:
        RR_mean = np.zeros(shape=(len(rr2) - 1, 1))
        for i in range(len(rr2) - 1):
            rr_mean[0, 0] = rr3[i, 0]
            rr_mean[0, 1] = rr3[i + 1, 0]
            RR_mean[i, 0] = np.mean(rr_mean)
        RR_mean1 = np.delete(RR_mean, [0], axis=0)  # drop the first beat
        RR_mean1 = np.delete(RR_mean1, (len(RR_mean1) - 1), axis=0)  # drop the last beat
    test_rr = rr_normal(RR_mean1)
    test_rr1 = np.array(test_rr)
    test_ecg1 = np.array(test_ecg1)
    test_ecg2 = np.array(test_ecg2)
    test_ecg1_1 = ecg_normal(test_ecg1)
    test_ecg2_1 = ecg_normal(test_ecg2)
    test_ecg1_2 = np.array(test_ecg1_1)
    test_ecg2_2 = np.array(test_ecg2_1)
    test_ecg3 = np.expand_dims(test_ecg1_2, axis=2)
    test_ecg4 = np.expand_dims(test_ecg2_2, axis=2)
    """预测"""
    y_pre = model.predict([test_ecg3, test_ecg4, test_rr1])
    y_pre1 = y_pre.tolist()
    y_pre2 = props_to_onehot(y_pre1)
    y_pre3 = [np.argmax(one_hot) for one_hot in y_pre2]
    y_pre4 = np.array(y_pre3)
    # convert the labels into 3 kinds of results
    nor = 0
    af = 0
    for i in range(len(y_pre4)):
        if y_pre4[i] == 0:
            nor = nor + 1
        elif y_pre4[i] == 1:
            af = af + 1
    end_points = []
    end_ind = 0
    start_points = []
    for i in range(len(y_pre4) - 13):
        if nor / len(y_pre4) >= 0.8 and y_pre4[i] != 1 and y_pre4[
                i + 1] != 1 and y_pre4[i + 2] != 1 and y_pre4[
                    i + 3] != 1 and y_pre4[i + 4] != 1 and y_pre4[
                        i + 5] != 1 and y_pre4[i + 6] != 1 and y_pre4[
                            i + 7] != 1:  # normal
            end_points = []
        elif af / len(y_pre4) >= 0.7:  # persistent atrial fibrillation
            end_points.append(peak_time[0])
            end_points.append(peak_time[-1])
            break
        elif y_pre4[i] == 1 and y_pre4[i + 1] == 1 and y_pre4[
                i + 2] == 1 and y_pre4[i + 3] == 1 and y_pre4[i + 4] == 1:
            start_ind = i
            if i > end_ind:
                for j in range(len(y_pre4) - start_ind - 13):
                    if y_pre4[start_ind + j + 5] == 1 and y_pre4[
                            start_ind + j + 6] != 1 and y_pre4[
                                start_ind + j + 7] != 1 and y_pre4[
                                    start_ind + j + 8] != 1 and y_pre4[
                                        start_ind + j + 9] != 1 and y_pre4[
                                            start_ind + j +
                                            10] != 1 and y_pre4[
                                                start_ind + j +
                                                11] != 1 and y_pre4[
                                                    start_ind + j + 12] != 1:
                        end_ind = start_ind + j + 5
                        length = j + 5
                        if length > 15:
                            if len(de) == 0:
                                start_points.append(start_ind)
                                end_points.append(peak_time[start_ind])
                                end_points.append(peak_time[end_ind])
                            elif len(de) == 1:
                                if de[0] == 0:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 1])
                                    end_points.append(peak_time[end_ind + 1])
                                else:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind])
                                    end_points.append(peak_time[end_ind])
                            elif len(de) == 2:
                                if de[1] == 1:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 2])
                                    end_points.append(peak_time[end_ind + 2])
                                else:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 1])
                                    end_points.append(peak_time[end_ind + 1])
                            elif len(de) == 3:
                                if de[1] == 1:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 2])
                                    end_points.append(peak_time[end_ind + 2])
                                else:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 1])
                                    end_points.append(peak_time[end_ind + 1])
                            elif len(de) == 4:
                                start_points.append(start_ind)
                                end_points.append(peak_time[start_ind + 2])
                                end_points.append(peak_time[end_ind + 2])
                            break
                    elif j == len(y_pre4) - start_ind - 8:
                        if y_pre4[j - 1] == 1 or y_pre4[j - 2] == 1:
                            end_ind = len(y_pre4) - 1
                            if len(de) == 0:
                                start_points.append(start_ind)
                                end_points.append(peak_time[start_ind])
                                end_points.append(peak_time[end_ind])
                            elif len(de) == 1:
                                if de[0] == 0:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 1])
                                    end_points.append(peak_time[end_ind + 1])
                                else:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind])
                                    end_points.append(peak_time[end_ind + 1])
                            elif len(de) == 2:
                                if de[1] == 1:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 2])
                                    end_points.append(peak_time[end_ind + 2])
                                else:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 1])
                                    end_points.append(peak_time[end_ind + 2])
                            elif len(de) == 3:
                                if de[1] == 1:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 2])
                                    end_points.append(peak_time[end_ind + 3])
                                else:
                                    start_points.append(start_ind)
                                    end_points.append(peak_time[start_ind + 1])
                                    end_points.append(peak_time[end_ind + 3])
                            elif len(de) == 4:
                                start_points.append(start_ind)
                                end_points.append(peak_time[start_ind + 2])
                                end_points.append(peak_time[end_ind + 4])
                            break
                        else:
                            for k in range(8):
                                if y_pre4[j - 8 + k] == 1 and y_pre4[j - 7 + k] != 1:
                                    end_ind = j - 8 + k
                                    if len(de) == 0:
                                        start_points.append(start_ind)
                                        end_points.append(peak_time[start_ind])
                                        end_points.append(peak_time[end_ind])
                                    elif len(de) == 1:
                                        if de[0] == 0:
                                            start_points.append(start_ind)
                                            end_points.append(peak_time[start_ind + 1])
                                            end_points.append(peak_time[end_ind + 1])
                                        else:
                                            start_points.append(start_ind)
                                            end_points.append(peak_time[start_ind])
                                            end_points.append(peak_time[end_ind])
                                    elif len(de) == 2:
                                        if de[1] == 1:
                                            start_points.append(start_ind)
                                            end_points.append(peak_time[start_ind + 2])
                                            end_points.append(peak_time[end_ind + 2])
                                        else:
                                            start_points.append(start_ind)
                                            end_points.append(peak_time[start_ind + 1])
                                            end_points.append(peak_time[end_ind + 1])
                                    elif len(de) == 3:
                                        if de[1] == 1:
                                            start_points.append(start_ind)
                                            end_points.append(peak_time[start_ind + 2])
                                            end_points.append(peak_time[end_ind + 2])
                                        else:
                                            start_points.append(start_ind)
                                            end_points.append(peak_time[start_ind + 1])
                                            end_points.append(peak_time[end_ind + 1])
                                    elif len(de) == 4:
                                        start_points.append(start_ind)
                                        end_points.append(peak_time[start_ind + 2])
                                        end_points.append(peak_time[end_ind + 2])
                                    break

    if end_points != []:
        if start_points != [] and start_points[0] <= 3:
            end_points[0] = peak_time[0]

    end_points1 = np.array(end_points)
    end_points1 = end_points1.astype(float)
    q = int(len(end_points1) / 2)
    end_points2 = end_points1.reshape(q, 2)
    end_points3 = end_points2.tolist()

    pred_dict = {'predict_endpoints': end_points3}
    return pred_dict
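The nested index arithmetic above is hard to audit; the same run-detection idea can be written far more compactly. A minimal sketch using itertools.groupby, assuming y_pre4 holds 0/1 beat labels and peak_time maps a label index to a time, as in the function above (the de-based index corrections are omitted):

# Hedged sketch: locate runs of consecutive AF labels (label == 1).
from itertools import groupby

def af_runs(y_pre4, peak_time, min_len=5):
    runs, pos = [], 0
    for label, grp in groupby(y_pre4):
        n = len(list(grp))
        if label == 1 and n >= min_len:
            runs.append((peak_time[pos], peak_time[pos + n - 1]))
        pos += n
    return runs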
def add_to_tests(model, name, y, ys, names, models):
    # (signature reconstructed from the add_to_tests call further below)
    ys.append(y)
    names.append(name)
    wavfile.write('audio_files/' + names[-1] + '.wav', fs, y)
    models.append(model)


# Two pole
pole_mag = 0.2
pole_angle = 1.0  # works up to ~1.8

root1 = pole_mag * np.exp(+1j * pole_angle)
root2 = pole_mag * np.exp(-1j * pole_angle)
poly = np.poly((root1, root2))

yTP = np.zeros(np.shape(x))
yTP[:, 0] = signal.lfilter([1], [1, poly[1], poly[2]], x[:, 0])
yTP[:, 1] = signal.lfilter([1], [1, poly[1], poly[2]], x[:, 1])
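# Added sketch (not in the original): for a conjugate pair at
# pole_mag * exp(+/- 1j * pole_angle), the denominator polynomial
# expands to [1, -2 * pole_mag * cos(pole_angle), pole_mag ** 2].
assert np.allclose(np.real(poly),
                   [1.0, -2 * pole_mag * np.cos(pole_angle), pole_mag ** 2])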

modelTP = Model()
fb = FB2()
fb.pole_mag = pole_mag
fb.pole_angle = pole_angle
modelTP.elements.append(fb)
add_to_tests(modelTP, 'Two-pole-test', yTP, ys, names, models)

# Lowpass filter
b, a = adsp.design_LPF2(1000, 0.7071, fs)
yLPF = np.zeros(np.shape(x))
yLPF[:, 0] = signal.lfilter(b, a, x[:, 0])
yLPF[:, 1] = signal.lfilter(b, a, x[:, 1])
Example no. 45
0
def de_emphasis(x):
    return lfilter([1], [1, -hp.preemphasis], x)
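de_emphasis above is the exact IIR inverse of the usual pre-emphasis FIR y[n] = x[n] - coef * x[n - 1], so a round trip recovers the input. A minimal sketch, with 0.97 standing in for the example's hp.preemphasis:

import numpy as np
from scipy.signal import lfilter

coef = 0.97  # assumed value for hp.preemphasis
x = np.random.randn(1000)
pre = lfilter([1, -coef], [1], x)    # pre-emphasis (FIR)
rec = lfilter([1], [1, -coef], pre)  # de-emphasis (IIR inverse)
assert np.allclose(rec, x)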
Example no. 46
0
def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):
    """
    Mimic a hand-drawn line from (x, y) data

    Parameters
    ----------
    x, y : array_like
        arrays to be modified
    xlim, ylim : data range
        the assumed plot range for the modification.  If not specified,
        they will be guessed from the data
    mag : float
        magnitude of distortions
    f1, f2, f3 : int, float, int
        filtering parameters.  f1 gives the size of the window, f2 gives
        the high-frequency cutoff, f3 gives the size of the filter

    Returns
    -------
    x, y : ndarrays
        The modified lines
    """
    x = np.asarray(x)
    y = np.asarray(y)

    # get limits for rescaling
    if xlim is None:
        xlim = (x.min(), x.max())
    if ylim is None:
        ylim = (y.min(), y.max())

    if xlim[1] == xlim[0]:
        xlim = ylim

    if ylim[1] == ylim[0]:
        ylim = xlim

    # scale the data
    x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
    y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])

    # compute the total distance along the path
    dx = x_scaled[1:] - x_scaled[:-1]
    dy = y_scaled[1:] - y_scaled[:-1]
    dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))

    # number of interpolated points is proportional to the distance
    Nu = int(200 * dist_tot)
    u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)

    # interpolate curve at sampled points
    k = min(3, len(x) - 1)
    res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
    x_int, y_int = interpolate.splev(u, res[0])

    # we'll perturb perpendicular to the drawn line
    dx = x_int[2:] - x_int[:-2]
    dy = y_int[2:] - y_int[:-2]
    dist = np.sqrt(dx * dx + dy * dy)

    # create a filtered perturbation
    coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
    b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
    response = signal.lfilter(b, 1, coeffs)

    x_int[1:-1] += response * dy / dist
    y_int[1:-1] += response * dx / dist

    # un-scale data
    x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
    y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]

    return x_int, y_int
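A minimal usage sketch for the function above (numpy, scipy.signal and scipy.interpolate imported as np, signal and interpolate, as the body expects):

# Sketch: roughen a smooth sine curve so it looks hand-drawn.
x = np.linspace(0, 10, 100)
y = np.sin(x)
x_hand, y_hand = xkcd_line(x, y, mag=1.0)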
Example no. 47
0
 def lowpass_filter(dataset, lowcut, signal_freq, filter_order):
     nyq = 0.5 * signal_freq
     low = lowcut / nyq
     b, a = butter(filter_order, low, btype="lowpass")
     y = lfilter(b, a, dataset)
     return y
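butter expects the cutoff normalized by the Nyquist rate, which is exactly what lowcut / nyq computes. A hedged usage sketch with made-up numbers:

# Sketch: keep the sub-30 Hz band of a 250 Hz recording.
import numpy as np
from scipy.signal import butter, lfilter

fs = 250.0
t = np.arange(0, 1, 1 / fs)
sig = np.sin(2 * np.pi * 5 * t) + np.sin(2 * np.pi * 80 * t)
sig_lp = lowpass_filter(sig, lowcut=30.0, signal_freq=fs, filter_order=4)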
Example no. 48
0
if firstlist[0].get('time', None) is None:
    lasttime = time.time()
    exit()
else:
    lasttime = datetime.datetime.timestamp(firstlist[0].get(
        'time', datetime.datetime.now()))

#print(lasttime)
init_value = []
for i in firstlist:
    init_value.append(i.get('value', 0))
time_th = abs(
    datetime.datetime.timestamp(firstlist[-1].get('time', None)) -
    datetime.datetime.timestamp(firstlist[0].get('time', None)))
print((time_th / len(firstlist)))
init_filt = signal.lfilter([1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7, 1 / 7],
                           1, (init_value - np.mean(init_value))).tolist()
detect = real_time_peak_detection(init_filt, 20, 3, 0.8)
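# Added sketch (not in the original): the seven hand-typed taps above are
# just a 7-sample moving average, equivalently written as np.ones(7) / 7.
vals = np.asarray(init_value, dtype=float)
init_filt_alt = signal.lfilter(np.ones(7) / 7, 1, vals - vals.mean()).tolist()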
time_rate_go_up = 0
while 1:
    try:
        x_time = []
        y_value = []
        datalist = list(
            db.real_time.find({
                "_id": {
                    "$gt": last_oid
                }
            }).max_time_ms(500).limit(200))
        if (datalist == []):
            continue
        for i in datalist:
Example no. 49
0
def inv_preemphasis(x):
    return signal.lfilter([1], [1, -hparams.preemphasis], x)
Example no. 50
0
from matplotlib import pyplot as plt
from pyfftw.interfaces.numpy_fft import fft
import time

if __name__ == '__main__':

    # Fix the seed
    np.random.seed(12354)
    # Choose size of data
    n_data = 2**14
    # Generate Gaussian white noise
    noise = np.random.normal(loc=0.0, scale=1.0, size=n_data)
    # Apply filtering to turn it into colored noise
    r = 0.01
    b, a = signal.butter(3, 0.1 / 0.5, btype='high', analog=False)
    n = signal.lfilter(b, a, noise, axis=-1, zi=None) + noise * r
    # Generate periodic deterministic signal
    t = np.arange(0, n_data)
    f0 = 1e-2
    a0 = 5e-3
    s = a0 * np.sin(2 * np.pi * f0 * t)
    # Create a mask vector indicating missing data points
    mask = np.ones(n_data)
    n_gaps = 30
    gapstarts = (n_data * np.random.random(n_gaps)).astype(int)
    gaplength = 10
    gapends = (gapstarts + gaplength).astype(int)
    for k in range(n_gaps):
        mask[gapstarts[k]:gapends[k]] = 0
    # Create the masked data vector
    y = mask * (s + n)
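    # Added sketch (not in the original): sanity-check the noise coloring.
    # The PSD of n should fall toward low frequencies (highpass Butterworth)
    # on top of the small white floor contributed by the r * noise term.
    f_psd, pxx = signal.welch(n, fs=1.0, nperseg=1024)
    plt.loglog(f_psd, pxx)
    plt.xlabel('normalized frequency')
    plt.ylabel('PSD of colored noise')
    plt.show()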
Example no. 51
0
def AweightPower_extract(y, sr):
    b, a = a_weighting_coeffs_design(sr)
    k = lfilter(b, a, y)
    a_weighted_power = librosa.feature.rms(y=k)
    return a_weighted_power
Example no. 52
0
nsample = 2000
x = np.arange(nsample)
X1 = sm.add_constant(x, prepend=False)

wnoise = noiseratio * np.random.randn(nsample + nlags)
# .. noise = noise[1:] + rhotrue*noise[:-1] # wrong this is not AR

# .. find my drafts for univariate ARMA functions
# generate AR(p)
if np.size(rhotrue) == 1:
    # replace with scipy.signal.lfilter, keep for testing
    arnoise = np.zeros(nsample + 1)
    for i in range(1, nsample + 1):
        arnoise[i] = rhotrue * arnoise[i - 1] + wnoise[i]
    noise = arnoise[1:]
    an = signal.lfilter([1], np.hstack((1, -rhotrue)), wnoise[1:])
    print('simulate AR(1) difference', np.max(np.abs(noise - an)))
else:
    noise = signal.lfilter([1], np.hstack((1, -rhotrue)), wnoise)[nlags:]

# generate GLS model with AR noise
y1 = np.dot(X1, beta) + noise

if 1 in examples:
    print('\nExample 1: iterative_fit and repeated calls')
    mod1 = GLSAR(y1, X1, 1)
    res = mod1.iterative_fit()
    print(res.params)
    print(mod1.rho)
    mod1 = GLSAR(y1, X1, 2)
    for i in range(5):
    def calcNewBurstBranchMetrics(self, y, paths, pathmetrics, n):
        # Path length
        pathlen = paths.shape[1]
        
        # Allocate branchmetrics
        branchmetrics = np.zeros(self.newBurstPretransitions.shape) + np.inf
        shortbranchmetrics = np.zeros_like(branchmetrics) + np.inf
        
        # Preallocate vectors
        guess = np.zeros(pathlen, dtype=paths.dtype)
        upguess = np.zeros(pathlen * self.up, dtype=paths.dtype)
        
        
        print("First symbol of next burst, n = %d" % (n))
        # Iterate over only the allowed start idxes
        for p in self.allowedStartIdx:

            # Now loop over the pre-transitions (default is all are possible)
            for t in np.arange(len(self.newBurstPretransitions[p])):
                print("Calculating for alphabet idx %d, from previous burst alphabet idx %d" % (p, self.newBurstPretransitions[p,t]))
                
                # As usual, check if the pre-transition has a valid path metric from the previous burst
                if pathmetrics[self.newBurstPretransitions[p,t]] == np.inf:
                    print("Skipped due to invalid pre-transition path metric")
                    branchmetrics[p,t] = np.inf
                    shortbranchmetrics[p,t] = np.inf
                    continue
                
                # As usual, form a guess now by copying the existing path
                guess[:] = paths[self.newBurstPretransitions[p,t]] # like this
                guess[n] = self.alphabet[p]
                
                print(guess[:n+1])

                # Upsample the guess
                upguess[:] = 0 # zero out first
                upguess[::self.up] = guess

                # Loop over all sources; but now in order to properly add a branch over the indices we skipped,
                # we must consider a longer section (see below, N = pulselen)
                #
                #  BURST 0              GUARD            BURST 1
                # | ...    | 0 ......              ...0 | n | 0..... 
                # |N-1 elem| numGuardSyms * up elem     | 1 | N-1 elem
                #
                
                # Calculate the upsampled guard len
                guardlen = self.numGuardSyms * self.up
                
                # We now start here, to include the guard period 0s
                s = np.max([(n-self.numGuardSyms)*self.up - self.pulselen + 1,0])
                x_all = np.zeros((self.L, guardlen + self.pulselen), dtype=np.complex128)
                
                # Convenience indexing for extraction with reference to original signal length
                extractionIdx = np.arange((n-self.numGuardSyms)*self.up, n*self.up + self.pulselen)
                shortextractionIdx = np.arange((n-self.numGuardSyms)*self.up, (n+1)*self.up)
                
                # Loop over sources
                for i in np.arange(self.L): 
                    
                    # As usual, extract from upguess and pad it
                    upguesspad = np.pad(upguess[s:n*self.up+1], (0,self.pulselen-1)) # pad zeros to pulselen-1
                    xc = sps.lfilter(self.pulses[i], 1, upguesspad)[-(self.pulselen + guardlen):]
                    
                    # And now we extract
                    xcs = self.omegavectors[i,extractionIdx] * xc
                    
                    x_all[i,:] = xcs
                    


                summed = np.sum(x_all, axis=0)
                
                # print("Writing to branchmetrics[%d,%d]" % (p,t))
                branchmetrics[p,t] = np.linalg.norm(y[extractionIdx] - summed)**2
                shortbranchmetrics[p,t] = np.linalg.norm(y[shortextractionIdx] - summed[:guardlen + self.up])**2
                
        # Complete
        print(branchmetrics)
        print(shortbranchmetrics)
        return branchmetrics, shortbranchmetrics
Example no. 54
0
                                        preprocess.spike_detector_label +
                                        X + '.gdf'))
    sptrains = preprocess.compute_time_binned_sptrains(X, spikes,
                                                       preprocess.time_bins_rs,
                                                       dtype=np.uint8)
    binned_sptrains = preprocess.compute_pos_binned_sptrains(positions_corrected[X],
                                                           sptrains,
                                                           dtype=np.uint16).toarray()

    # To not introduce a temporal shift between the downsampled LFP and
    # temporally binned spike trains with bin width \Delta t = 1 ms on intervals
    # [k*\Delta t, (k+1)*\Delta t), filter each kernel with a [0, \Delta t)
    # asymmetric boxcar filter
    b = np.ones(int(1./network.dt))*network.dt
    a = 1.
    binned_sptrains = ss.lfilter(b, a, binned_sptrains)
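    # (Added note: with b = ones(int(1/dt)) * dt and a = 1, lfilter is a
    # causal moving sum scaled by dt, i.e. it integrates the dt-resolution
    # spike counts over one 1 ms bin, matching the [0, \Delta t) boxcar
    # described above.)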
    

    # for j, Y in enumerate(preprocess.X[:-1]):
    # Set up container for LFP signal of each postsynaptic population
    # due to presynaptic activity
    if X not in LFP_h.keys():
        LFP_h[X] = np.zeros(binned_sptrains.shape)
        
    # np.convolve can only deal with 1D sequences, so we have to recursively
    # iterate over all local and non-local rate bins.
    for k in range(x.size):
        #iterate over distances.
        for l, d in enumerate(r):
            # compute rate-bin distance to other bins, taking into account
            # periodic boundary conditions
def butter_apply_filter(data, cutoff, fs, order=5, btype='low'):
    b, a = butter_build_filter(cutoff, fs, order=order, btype=btype)
    # y = filtfilt(b, a, data, method="gust", axis=0)
    y = lfilter(b, a, data, axis=0)
    return y
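The commented-out filtfilt call points at the usual trade-off; a short sketch (not from the original) contrasting the two:

# Sketch: lfilter is causal and adds phase lag; filtfilt runs the filter
# forward and backward for zero phase, at the cost of being offline.
import numpy as np
from scipy.signal import butter, lfilter, filtfilt

b, a = butter(5, 0.1)
x = np.sin(np.linspace(0, 20 * np.pi, 2000))
y_causal = lfilter(b, a, x)      # delayed relative to x
y_zerophase = filtfilt(b, a, x)  # aligned with x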
Example no. 56
0
            F = np.ones(3) * np.nan  # formant frequencies
            # BW = np.zeros(3)  # bandwidth
            F[0:np.min([3, len(y)])] = y[0:np.min([3, len(y)])]  # keep at most the first 3 formants
            # BW[0: np.min([3, len(y)])] = bw[0:np.min([3, len(y)])]
            fmt[:, m] = F / fs * 2  # normalized frequency

    return fmt


if __name__ == '__main__':
    filename = 'vowels8.wav'
    speech = Speech()
    xx, fs = speech.audioread(filename, None)  # read audio data
    x = xx - np.mean(xx)  # remove DC offset
    y = lfilter(b=np.array([1, -0.99]), a=1, x=x)  # pre-emphasis
    wlen = 200  # frame length
    inc = 80  # frame shift
    xy = speech.enframe(y, wlen, inc).T  # enframe
    fn = xy.shape[1]  # frame number
    Nx = len(y)  # data length
    time = np.arange(0, Nx) / fs  # time scale
    frameTime = speech.FrameTime(fn, wlen, inc, fs)  # frame to time
    T1 = 0.1
    miniL = 20  # voice segment minimal frame number
    voiceseg, vosl, SF, Ef = VAD().pitch_vad1(xy, fn, T1, miniL)  # VAD
    Msf = np.tile(SF.reshape((len(SF), 1)), (1, 3))  # SF ---> fn x 3
    Fsamps = 256  # frequency range length
    Tsamps = fn  # time range length
    ct = 0
    numiter = 10  # loop times
 def sample_trajectory(self,
                       env_id_chosen,
                       schedule_local_time_step,
                       num_repeat,
                       get_state_kwargs=None):
     """
     Sample Trajectories
     Args:
     env_id_chosen : list
     env_id_chosen = list of id for environments where we will sample
     schedule_local_time_step : int
      schedule_local_time_step = scheduled number of local time steps; the actual number can be smaller
     num_repeat : int
     num_repeat = number of repeated actions
     get_state_kwargs : dictionary
     get_state_kwargs = arguments for get_state
     Returns:
     total_step : int
     total_step = total of steps sampled in this batch
     finished_episode : int
     finished_episode = number of finished episodes in this batch
     trajectory_set : dictionary
     trajectory_set = a set of trajectories
         'state' : numpy.ndarray
         'state' = input states, shape [seq len * batch_size] + state_shape
         'lstm_state_input' : dictionary
         'lstm_state_input' = input batch of lstm state, when value_output, policy_output, policy_logits are not None, this item is ignored, indexed by name in self.lstm_layer()
                         each element is (h0, c0)
                         h0, c0 are numpy.ndarray
                         shape of h0, c0 is (1, batch size, hidden size)
         'action_index' : numpy.ndarray
         'action_index' = selected action indices, shape [seq len * batch_size]
         'target_value' : numpy.ndarray
         'target_value' = target values, shape [seq len * batch_size]
         'advantage' : numpy.ndarray
         'advantage' = advantage estimation, shape [seq len * batch_size]
     """
     self.model.eval()
     if (get_state_kwargs is None):
         get_state_kwargs = {}
     n_chosen = len(env_id_chosen)
     env_set_chosen = [self.env_set[env_id] for env_id in env_id_chosen]
     if (self.model.contain_lstm()):
         trajectory_set = {
             'lstm_state_input': self.get_lstm_state(env_id_chosen, True)
         }
     else:
         trajectory_set = {}
     state_batch = []  # [sequence size][batch size] + state_shape
     action_batch = []  # [sequence size][batch size]
     reward_list = []  # [sequence size][batch size]
     value_batch = []  # [sequence size][batch size]
     total_step = 0
     finished_episode = 0
     ep_done = False
     for i_step in range(schedule_local_time_step):
         state_input_step = np.stack(
             [env.get_state(**get_state_kwargs) for env in env_set_chosen],
             axis=0)
         if (self.model.contain_lstm()):
             lstm_state_input_step = self.get_lstm_state(env_id_chosen)
         else:
             lstm_state_input_step = None
         # sample action
         action_info = self.model.sample_action(state_input_step,
                                                lstm_state_input_step)
         state_batch.append(state_input_step)
         action_batch.append(action_info['action_index'])
         value_batch.append(action_info['state_value'])
         # update lstm state if necessary
         if (self.model.contain_lstm()):
             self.update_lstm_state(action_info['lstm_state_output'],
                                    env_id_chosen)
         # apply action
         reward_step = []
         for i_chosen, env_chosen in enumerate(env_set_chosen):
             reward = env_chosen.apply_action(
                 env_chosen.action_set()[action_info['action_index']
                                         [i_chosen]], num_repeat)
             reward_step.append(reward)
             if (env_chosen.episode_end()):
                 ep_done = True
         reward_list.append(np.array(reward_step))
         total_step += n_chosen
         if (ep_done):
             break
     # compute target and advantage
     reward_list = np.stack(reward_list,
                            axis=0)  # [sequence size, batch size]
     value_batch = np.stack(value_batch,
                            axis=0)  # [sequence size, batch size]
     target_batch = []  # [batch size][sequence size]
     adv_batch = []  # [batch size][sequence size]
     for i_chosen, env_chosen in enumerate(env_set_chosen):
         id_chosen = env_id_chosen[i_chosen]
         if (env_chosen.episode_end()):
             bootstrap = 0
             finished_episode += 1
             env_chosen.new_episode()
             self.reset_lstm_state(id_chosen)
         else:
             if (self.model.contain_lstm()):
                 bootstrap = self.model.sample_action(
                     env_chosen.get_state(**get_state_kwargs),
                     self.get_lstm_state(id_chosen))['state_value']
             else:
                 bootstrap = self.model.sample_action(
                     env_chosen.get_state(**get_state_kwargs),
                     None)['state_value']
         reward_plus_bootstrap = np.concatenate(
             [reward_list[:, i_chosen], [bootstrap]], axis=0)
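          # (Added note: filtering the time-reversed rewards with
          # a = [1, -gamma] implements y[t] = x[t] + gamma * y[t + 1],
          # so a single lfilter pass yields every discounted return
          # sum_k gamma**k * x[t + k]; the same trick computes GAE below.)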
         target_value = lfilter([1], [1, -self.gamma],
                                reward_plus_bootstrap[::-1])[::-1]
         target_value = target_value[:-1]
         target_batch.append(target_value)
         value_plus_bootstrap = np.concatenate(
             [value_batch[:, i_chosen], [bootstrap]], axis=0)
         td_error = reward_plus_bootstrap[:-1] +\
                         self.gamma * value_plus_bootstrap[1:] -\
                         value_plus_bootstrap[:-1]
         gae = lfilter([1], [1, -self.gamma * self.lambda_gae],
                       td_error[::-1])[::-1]
         adv_batch.append(gae)
     target_batch = np.stack(target_batch,
                             axis=1)  # [sequence size, batch size]
     adv_batch = np.stack(adv_batch, axis=1)  # [sequence size, batch size]
     # return
     state_batch = np.stack(
         state_batch, axis=0)  # [sequence size, batch size] + state_shape
     trajectory_set['state'] = state_batch.reshape(
         [-1] + list(self.model.state_shape))
     trajectory_set['action_index'] = np.concatenate(action_batch, axis=0)
     trajectory_set['target_value'] = target_batch.reshape(-1)
     trajectory_set['advantage'] = adv_batch.reshape(-1)
     return total_step, finished_episode, trajectory_set
    def calcAllBranchMetrics(self, y, paths, pathmetrics, n):
        '''
        Calculate branches leading to next symbol at index n.
        '''
        
        if (y.ndim > 1):
            raise ValueError("Please flatten y before input.")
        
        # Path length
        pathlen = paths.shape[1]
        
        # Allocate branchmetrics
        branchmetrics = np.zeros(self.pretransitions.shape)
        shortbranchmetrics = np.zeros_like(branchmetrics)
        
        # Preallocate vectors
        guess = np.zeros(pathlen, dtype=paths.dtype)
        upguess = np.zeros(pathlen * self.up, dtype=paths.dtype)
        
        # Select the current symbol
        for p in np.arange(paths.shape[0]):
            # Select a valid pre-transition path
            for t in np.arange(len(self.pretransitions[p])):
                # if self.pretransitions[p,t] != 0: # DEBUG
                #     continue
                
                
                if pathmetrics[self.pretransitions[p,t]] == np.inf:
                    # print("Pretransition is inf, skipping!")
                    branchmetrics[p,t] = np.inf
                    shortbranchmetrics[p,t] = np.inf
                    continue
                
                # print("Alphabet %d->%d at index %d" % (self.pretransitions[p,t],p,n))
                
                # guess = np.copy(paths[self.pretransitions[p,t]]) # move this out of the loop without a copy, set values in here
                guess[:] = paths[self.pretransitions[p,t]] # like this
                guess[n] = self.alphabet[p]
                # print("Guess:")
                # print(guess)
                # KEEP IT SIMPLE FOR NOW, UPSAMPLE THE WHOLE PATH
                # upguess = np.zeros(pathlen * self.up, dtype=paths.dtype) # move this out of the loop and set values
                upguess[:] = 0 # zero out first
                upguess[::self.up] = guess
                # print(upguess[:n*self.up+1:self.up])
                # assert(np.all(upguess[::self.up] == guess))
                
                # Loop over all sources
                s = np.max([n*self.up - self.pulselen + 1,0])
                x_all = np.zeros((self.L, self.pulselen), dtype=np.complex128)
                for i in np.arange(self.L): 
                    
                    # # this is equivalent, as tested below
                    upguesspad = np.pad(upguess[s:n*self.up+1], (0,self.pulselen-1)) # pad zeros to pulselen-1
                    xc = sps.lfilter(self.pulses[i], 1, upguesspad)[-self.pulselen:]
                    
                    # # original
                    # xc2 = np.convolve(self.pulses[i], upguess[s:n*self.up+1])[-self.pulselen:]
                    # if (not np.all(xc==xc2)):
                    #     print("What$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
                    
                    # xc = np.convolve(self.pulses[i], upguess[n*self.up-self.pulselen:n*self.up+1])[-self.pulselen:]
                    # xcs = np.exp(1j*(-self.omegas[i]*np.arange(n*self.up,n*self.up+self.pulselen))) * xc
                    xcs = self.omegavectors[i,n*self.up:n*self.up+self.pulselen] * xc
                    
                    x_all[i,:] = xcs
                    


                summed = np.sum(x_all, axis=0)
                
                # print("Writing to branchmetrics[%d,%d]" % (p,t))
                branchmetrics[p,t] = np.linalg.norm(y[self.up*n:self.up*n+self.pulselen] - summed)**2
                shortbranchmetrics[p,t] = np.linalg.norm(y[self.up*n:self.up*(n+1)] - summed[:self.up])**2
                
        # Complete
        return branchmetrics, shortbranchmetrics
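Both branch-metric methods use sps.lfilter with a denominator of 1 as a fast FIR filter; a standalone sketch of the identity being relied on:

# Sketch: with a = 1, lfilter(h, 1, x) is FIR filtering, i.e. the full
# convolution truncated to the input length.
import numpy as np
from scipy.signal import lfilter

h = np.array([1.0, 0.5, 0.25])
x = np.random.randn(32)
assert np.allclose(lfilter(h, 1, x), np.convolve(h, x)[:len(x)])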
 def butter_bandpass_filter(self, data, lowcut, highcut, fs, order=5):
     b, a = self.butter_bandpass(lowcut, highcut, fs, order=order)
     y = signal.lfilter(b, a, data)
     return y
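The method calls a butter_bandpass helper that the snippet does not show; a sketch of the conventional scipy-cookbook designer it presumably wraps (an assumption, not the original code):

# Sketch (assumed helper): standard Butterworth band-pass design with
# cutoffs normalized by the Nyquist rate.
from scipy.signal import butter

def butter_bandpass(lowcut, highcut, fs, order=5):
    nyq = 0.5 * fs
    return butter(order, [lowcut / nyq, highcut / nyq], btype="band")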
Example no. 60
0
def build_test_data(sample_set=0,
                    precision=None,
                    f_start=100,
                    f_stop=4e3,
                    filt_Q=2,
                    n_filt=20):
    """
    Loads a test data set and returns results
    :param sample_set: index of sample set
        0 : UrbanSound
        1 : KitchenSound
        2 : 15dB NOIZEUS
        3 : 10dB NOIZEUS
        4 :  5dB NOIZEUS
        5 :  0dB NOIZEUS
        6 : example
    :param precision: number of bits to show the input file as
    :param f_start: BPF start frequency
    :param f_stop: BPF stop frequency
    :param filt_Q: BPF Q (fc/bw)
    :param n_filt: number of BPF filters
    :returns: numpy arrays
    """

    f_center = np.logspace(np.log10(f_start), np.log10(f_stop), n_filt)

    audio_data = np.zeros((0, n_filt))
    audio_class = np.zeros((0, 1))

    if sample_set == 0:
        data_dir = r'C:\Users\brady\GitHub\MinVAD\data\test\negative\urban_test'
        data_class = 0

    elif sample_set == 1:
        data_dir = r'C:\Users\brady\GitHub\MinVAD\data\test\negative\kitchen_test'
        data_class = 0

    elif sample_set == 2:
        data_dir = r'C:\Users\brady\GitHub\MinVAD\data\test\positive\15dB'
        data_class = 1

    elif sample_set == 3:
        data_dir = r'C:\Users\brady\GitHub\MinVAD\data\test\positive\10dB'
        data_class = 1

    elif sample_set == 4:
        data_dir = r'C:\Users\brady\GitHub\MinVAD\data\test\positive\5dB'
        data_class = 1

    elif sample_set == 5:
        data_dir = r'C:\Users\brady\GitHub\MinVAD\data\test\positive\0dB'
        data_class = 1

    elif sample_set == 6:
        data_dir = r'C:\Users\brady\GitHub\MinVAD\data\test\example'
        data_class = 0

    else:
        return

    for fname in os.listdir(data_dir):
        # load a random positive sample file
        file = os.path.join(data_dir, fname)
        file, fs, data = fe.parse_file(file)

        if precision is not None:
            data = quantize(data, precision)

        # preload positive trained data classes
        file = ntpath.basename(file)
        if data_class and file.startswith('sp'):
            fpath = os.path.join(TRAIN_LABELS, file[0:4])
            with open(fpath + '.csv', 'r') as f:
                classes_str = f.readline()
                classes = [int(c) for c in classes_str.split(',')]

        # this is inefficient but without knowing sampling rate
        # i'm not sure how to improve it
        # maybe make a list at the top with filt_bank_8k,16k,44,1k etc?
        # throw error if invalid?
        filt_bank = [
            fe.createFilter(f_center[n], filt_Q, fs) for n in range(n_filt)
        ]

        # process a single frame
        frame_len = fe.getFrameSize(fs)
        for frame_cnt in range(int(len(data) / frame_len)):
            if (frame_cnt + 1) * frame_len > len(data):
                end_pt = len(data)
            else:
                end_pt = (frame_cnt + 1) * frame_len
            frame = data[frame_len * frame_cnt:end_pt]

            frame_avg = np.zeros((1, n_filt))
            for i, bpf in enumerate(filt_bank):
                filt_frame = signal.lfilter(bpf[0], bpf[1], frame)
                frame_avg[0][i] = fe.calcFrameAvg(filt_frame)

            # add data features
            audio_data = np.append(audio_data, frame_avg, axis=0)

            # append data class
            if data_class:
                if file.startswith('sp'):
                    audio_class = np.append(audio_class, classes[frame_cnt])
                else:
                    # this is ONLY VALID IF THE POSITIVE DATA IS NOISELESS
                    audio_class = np.append(audio_class,
                                            fe.aboveFrameThreshold(frame))
            else:
                audio_class = np.append(audio_class, 0)

    return audio_data, audio_class