Example #1
    def test1(self):

        # expected AR polynomials for orders 1-8 (index 0 is a placeholder)
        r = [
            None,
            np.array([1., -0.94200795]),
            np.array([1., -1.82697124, 0.93944355]),
            np.array([1., -2.7532477, 2.74080825, -0.98598416]),
            np.array([1., -3.73744923, 5.47666121, -3.73425407, 0.99819203]),
            np.array([
                1., -4.73317429, 9.2016863, -9.19738005, 4.72640438,
                -0.99752856
            ]),
            np.array([
                1., -5.51461918, 12.90426156, -16.40243257, 11.93483034,
                -4.70540723, 0.78338097
            ]),
            np.array([
                1., -5.39214752, 12.16863091, -14.53657345, 9.37051837,
                -2.68798986, -0.07875969, 0.1563373
            ]),
            np.array([
                1., -5.4893526, 12.21760094, -12.86527508, 3.54425679,
                6.35034463, -7.64479007, 3.5089866, -0.62176513
            ])
        ]

        # test_data is a module-level path to whitespace-separated samples
        with open(test_data) as f:
            data = [float(v) for v in f.read().split()]
        for order in range(1, 9):
            assert_array_almost_equal(arburg(data, order), r[order])
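
For reference, these expected arrays follow the convention that arburg returns the full AR polynomial, leading 1 included, so the model reads x[n] + a1*x[n-1] + ... + ap*x[n-p] = e[n]. Below is a minimal sketch of Burg's recursion under that convention; it is an independent illustration under assumed conventions, not the arburg implementation these tests exercise.

import numpy as np

def burg_ar(x, order):
    """Minimal Burg recursion returning [1, a1, ..., a_order].

    Illustrative only; the arburg under test may differ in details
    such as normalization or handling of short inputs.
    """
    x = np.asarray(x, dtype=float)
    a = np.array([1.0])
    f = x[1:].copy()   # forward prediction errors
    b = x[:-1].copy()  # backward prediction errors
    for _ in range(order):
        # reflection coefficient minimizing forward + backward error power
        k = -2.0 * np.dot(f, b) / (np.dot(f, f) + np.dot(b, b))
        # Levinson update of the AR polynomial
        a = np.concatenate([a, [0.0]]) + k * np.concatenate([[0.0], a[::-1]])
        # update both error sequences using the old values, then shrink
        # the window by one sample at the appropriate end
        f, b = (f + k * b)[1:], (b + k * f)[:-1]
    return a

If the conventions really do match, burg_ar([1, 2, 3, 4, 5, 6, 7, -8], 4) should land close to the expected array in Example #2 below.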
Example #2
    def test0(self):
        # expected order-4 AR polynomial for this input
        d = np.array([1.00000e+00,
                      -3.2581e-01,
                      3.4571e-04,
                      3.3790e-02,
                      9.8853e-02])
        r = arburg([1, 2, 3, 4, 5, 6, 7, -8], 4)
        np.testing.assert_array_almost_equal(r, d, decimal=5)
Example #3
    def fit_baseline(self, x, fs=60., _tsplot=False):
        """
        fit_baseline(self, x[, fs=60.][, _tsplot=False])

        Finds AR coefficients to build prediction errors and estimates baseline
        prediction errors distribution. Returns the baseline entropy.

        Parameters
        ----------
        x : array_like
            Input data. Should specify steering wheel angle over at least 120 s.
            x[:N/2] is used to find the AR coefficients.
            x[N/2:] is used to build the baseline prediction error distribution.

        fs : float
            Sampling rate of x in Hz. Default is 60 Hz. Be sure to specify
            fs if x was not sampled at 60 Hz.
        _tsplot : bool
            If True, also return a diagnostic time-series plot of the
            baseline fit (see _baseline_tsplot).

        Returns
        -------
        hbas : float
            baseline entropy
        """

        # unpack relevant parameters
        N = len(x)
        M = self.M
        alpha = self.alpha
        resample_fs = self.resample_fs

        # apply LP and downsample signal
        _x = self._resample(x, fs)
        Nx = len(_x)

        if Nx < resample_fs * 60:
            raise ValueError('Need at least 60 seconds of steering '
                             'data to calculate baseline\n(ideally '
                             'need ~ 120 seconds of data).')

        # use first half of x to find coefficients of AR model
        # using Burg's method
        #
        # sign on coeffs returned by arburg is reversed
        b_pe = arburg(_x[:Nx // 2], 3)

        # use second half of x to calculate baseline entropy
        # use AR coefficients to build prediction errors
        PE = lfilter(b_pe, 1, _x[Nx // 2:])

        # need to build bins to approximate the PE distribution
        cdf = percentile(PE)  # cdf is a CDF object
        pe_alpha = 0.5 * (abs(cdf.find(alpha)) + abs(cdf.find(1. - alpha)))
        bin_edges = [-10e12] + list(
            np.linspace(-M, M, 2 * M + 1) * pe_alpha) + [10e12]

        # now we can calculate the baseline entropy
        # np.histogram returns a tuple of histogram counts and bin_edges
        Pk = np.histogram(PE, bins=bin_edges)[0] / float(len(PE))

        # replace small values to avoid excessively high entropy
        Pk[Pk < 1e-3] = 1e-3

        # and the baseline entropy is
        hbas = np.sum(np.multiply(-Pk, np.log2(Pk)))

        # store the relevant information
        self.bin_edges = bin_edges
        self.b_pe = b_pe
        self.cdf = cdf
        self.pkbas = Pk
        self.hbas = hbas

        if _tsplot:
            return hbas, self._baseline_tsplot(x[N // 2:], _x[Nx // 2:], PE, fs)

        return hbas
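
The entropy step at the end of fit_baseline is compact enough to isolate. Here is a hedged sketch of just that step, reusing the bin construction from the method above; pe stands in for any array of prediction errors, and pe_alpha and M play the same roles as in fit_baseline:

import numpy as np

def baseline_entropy(pe, pe_alpha, M):
    # interior edges at -M..M scaled by pe_alpha, plus far-out sentinels
    edges = [-10e12] + list(np.linspace(-M, M, 2 * M + 1) * pe_alpha) + [10e12]
    # np.histogram returns (counts, edges); normalize counts to proportions
    pk = np.histogram(pe, bins=edges)[0] / float(len(pe))
    # floor tiny proportions, as fit_baseline does, to cap their entropy cost
    pk = np.maximum(pk, 1e-3)
    return -np.sum(pk * np.log2(pk))

A useful sanity check: with 2*M + 2 bins, a uniform distribution of prediction errors gives the maximum entropy log2(2*M + 2), so hbas should never exceed that value by more than the small bias the 1e-3 floor introduces.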