Example #1
def test_model_fit():
    """
    Test the model fitting procedure of the nitime.analysis.granger module.
    """
    # Start by generating some MAR processes (according to Ding and Bressler):
    a1 = np.array([[0.9, 0],
                   [0.16, 0.8]])

    a2 = np.array([[-0.5, 0],
                   [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov],
                    [xy_cov, y_var]])

    # Number of realizations of the process:
    N = 500
    # Length of each realization:
    L = 1024

    order = am.shape[0]
    n_lags = order + 1

    n_process = am.shape[-1]

    z = np.empty((N, n_process, L))
    nz = np.empty((N, n_process, L))

    for i in range(N):
        z[i], nz[i] = utils.generate_mar(am, cov, L)

    # First we test that the model fitting procedure recovers the coefficients,
    # on average:
    Rxx = np.empty((N, n_process, n_process, n_lags))
    coef = np.empty((N, n_process, n_process, order))
    ecov = np.empty((N, n_process, n_process))

    for i in range(N):
        this_order, this_Rxx, this_coef, this_ecov = gc.fit_model(z[i][0],
                                                                  z[i][1],
                                                                  order=2)
        Rxx[i] = this_Rxx
        coef[i] = this_coef
        ecov[i] = this_ecov

    npt.assert_almost_equal(cov, np.mean(ecov, axis=0), decimal=1)
    npt.assert_almost_equal(am, np.mean(coef, axis=0), decimal=1)

    # Next we test that the automatic model order estimation procedure works:
    est_order = []
    for i in range(N):
        this_order, this_Rxx, this_coef, this_ecov = gc.fit_model(z[i][0],
                                                                  z[i][1])
        est_order.append(this_order)

    npt.assert_almost_equal(order, np.mean(est_order), decimal=1)
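
A note on the sign convention used above: Ding and Bressler write the MAR(2)
process as

.. math::

   Z(t) = a_1 Z(t-1) + a_2 Z(t-2) + \epsilon(t)

with noise covariance cov, while the nitime routines expect coefficients for
the equivalent form :math:`Z(t) + \sum_{k=1}^{2} a_k Z(t-k) = \epsilon(t)`,
hence am = np.array([-a1, -a2]). (This reading of the convention is inferred
from the accompanying nitime examples, not something the test itself asserts.)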
Example #2
def test_GrangerAnalyzer():
    """
    Testing the GrangerAnalyzer class, which simplifies calculations of related
    quantities.
    """

    # Start by generating some MAR processes (according to Ding and Bressler):
    a1 = np.array([[0.9, 0], [0.16, 0.8]])

    a2 = np.array([[-0.5, 0], [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov], [xy_cov, y_var]])

    L = 1024
    z, nz = utils.generate_mar(am, cov, L)

    # Move on to testing the Analyzer object itself:
    ts1 = ts.TimeSeries(data=z, sampling_rate=np.pi)
    g1 = gc.GrangerAnalyzer(ts1)

    # Check that things have the right shapes:
    npt.assert_equal(g1.frequencies.shape[-1], g1._n_freqs // 2 + 1)
    npt.assert_equal(g1.causality_xy[0, 1].shape, g1.causality_yx[0, 1].shape)

    # Test inputting ij:
    g2 = gc.GrangerAnalyzer(ts1, ij=[(0, 1), (1, 0)])

    # x => y for one is like y => x for the other:
    npt.assert_almost_equal(g1.causality_yx[1, 0], g2.causality_xy[0, 1])
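
Beyond the shape checks, a typical use of the analyzer is to average the
per-frequency causality over a band of interest. A minimal sketch, assuming
causality_xy[i, j] is indexed over the entries of frequencies (the band
edges are arbitrary, for illustration only):

freq_idx = np.where((g1.frequencies > 0) & (g1.frequencies <= 0.5))[0]
mean_xy = np.mean(g1.causality_xy[0, 1][freq_idx])
mean_yx = np.mean(g1.causality_yx[0, 1][freq_idx])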
Example #3
def test_GrangerAnalyzer():
    """
    Testing the GrangerAnalyzer class, which simplifies calculations of related
    quantities.
    """

    # Start by generating some MAR processes (according to Ding and Bressler):
    a1 = np.array([[0.9, 0],
                   [0.16, 0.8]])

    a2 = np.array([[-0.5, 0],
                   [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov],
                    [xy_cov, y_var]])

    L = 1024
    z, nz = utils.generate_mar(am, cov, L)

    # Move on to testing the Analyzer object itself:
    ts1 = ts.TimeSeries(data=z, sampling_rate=np.pi)
    g1 = gc.GrangerAnalyzer(ts1)

    # Check that things have the right shapes:
    npt.assert_equal(g1.frequencies.shape[-1], g1._n_freqs // 2 + 1)
    npt.assert_equal(g1.causality_xy[0, 1].shape, g1.causality_yx[0, 1].shape)

    # Test inputting ij:
    g2 = gc.GrangerAnalyzer(ts1, ij=[(0, 1), (1, 0)])

    # g1 agrees with g2
    npt.assert_almost_equal(g1.causality_xy[0, 1], g2.causality_xy[0, 1])
    npt.assert_almost_equal(g1.causality_yx[0, 1], g2.causality_yx[0, 1])

    # Within one analyzer, x => y for pair (i, j) is y => x for pair (j, i):
    npt.assert_almost_equal(g2.causality_yx[1, 0], g2.causality_xy[0, 1])
    npt.assert_almost_equal(g2.causality_xy[1, 0], g2.causality_yx[0, 1])
Example #4
def test_information_criteria():
    """

    Test the implementation of information criteria:

    """
    a1 = np.array([[0.9, 0],
                   [0.16, 0.8]])

    a2 = np.array([[-0.5, 0],
                   [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov],
                    [xy_cov, y_var]])

    N = 10
    L = 100

    z = np.empty((N, 2, L))
    nz = np.empty((N, 2, L))
    for i in range(N):
        z[i], nz[i] = utils.generate_mar(am, cov, L)

    AIC = []
    BIC = []
    AICc = []
    for i in range(10):
        AIC.append(utils.akaike_information_criterion(z, i))
        AICc.append(utils.akaike_information_criterion_c(z, i))
        BIC.append(utils.bayesian_information_criterion(z, i))

    # The model has order 2, so this should minimize at 2.
    # AIC/AICc are not asserted here, because they sometimes fail to
    # minimize at the true order (see Ding and Bressler):
    # nt.assert_equal(np.argmin(AIC), 2)
    # nt.assert_equal(np.argmin(AICc), 2)
    nt.assert_equal(np.argmin(BIC), 2)
Example #5
def test_information_criteria():
    """

    Test the implementation of information criteria:

    """
    a1 = np.array([[0.9, 0], [0.16, 0.8]])

    a2 = np.array([[-0.5, 0], [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov], [xy_cov, y_var]])

    # Number of realizations of the process:
    N = 500
    # Length of each realization:
    L = 1024

    order = am.shape[0]
    n_process = am.shape[-1]

    z = np.empty((N, n_process, L))
    nz = np.empty((N, n_process, L))

    for i in range(N):
        z[i], nz[i] = utils.generate_mar(am, cov, L)

    AIC = []
    BIC = []
    AICc = []

    # The total number of data points available for estimation:
    Ntotal = L * n_process

    for n_lags in range(1, 10):

        Rxx = np.empty((N, n_process, n_process, n_lags))

        for i in range(N):
            Rxx[i] = utils.autocov_vector(z[i], nlags=n_lags)

        Rxx = Rxx.mean(axis=0)
        Rxx = Rxx.transpose(2, 0, 1)

        a, ecov = alg.lwr_recursion(Rxx)

        IC = utils.akaike_information_criterion(ecov, n_process, n_lags,
                                                Ntotal)
        AIC.append(IC)

        IC = utils.akaike_information_criterion(ecov,
                                                n_process,
                                                n_lags,
                                                Ntotal,
                                                corrected=True)
        AICc.append(IC)

        IC = utils.bayesian_information_criterion(ecov, n_process, n_lags,
                                                  Ntotal)
        BIC.append(IC)

    # The model has order 2, so this should minimize at 2.

    # We do not test this for AIC/AICc, because these sometimes fail to
    # minimize at the true order (see Ding and Bressler):
    # nt.assert_equal(np.argmin(AIC), 2)
    # nt.assert_equal(np.argmin(AICc), 2)
    nt.assert_equal(np.argmin(BIC), 2)
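
For reference, the criteria compared in this test have the standard
multivariate forms (e.g. Lütkepohl; the expressions in utils may differ by
constant factors):

.. math::

   AIC(m) = \ln\det\Sigma(m) + \frac{2 p^2 m}{N_{total}}

.. math::

   BIC(m) = \ln\det\Sigma(m) + \frac{p^2 m \ln(N_{total})}{N_{total}}

where :math:`\Sigma(m)` is the noise covariance estimated at order
:math:`m` and :math:`p` is n_process.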
Example #6
# Number of realizations of the process:
N = 500
# Length of each realization:
L = 1024

order = am.shape[0]
n_lags = order + 1

n_process = am.shape[-1]

z = np.empty((N, n_process, L))
nz = np.empty((N, n_process, L))

for i in range(N):
    z[i], nz[i] = utils.generate_mar(am, cov, L)

"""

We can estimate the 2nd-order AR coefficients by averaging together N
estimates of the auto-covariance at lags k=0, 1, 2.

Each $R^{xx}(k)$ has the shape (2,2), where:

.. math::

   R^{xx}_{00}(k) = E( Z_0(t)Z_0^*(t-k) )

.. math::

   R^{xx}_{01}(k) = E( Z_0(t)Z_1^*(t-k) )
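
A minimal numpy sketch of the lag-k estimate that these expectations
describe (assuming zero-mean data; utils.autocov_vector is what the
examples actually use):

"""


def autocov_at_lag(z, k):
    # Empirical E( Z(t) Z^*(t-k) ) for z of shape (n_process, L), averaged
    # over the L - k available time points (some conventions divide by L):
    L = z.shape[1]
    return np.dot(z[:, k:], z[:, :L - k].conj().T) / (L - k)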
Example #7
N = 500
L = 100


"""

Generate the instances of the time-series based on the coefficients:

"""

za = np.empty((N, 3, L))
zb = np.empty((N, 3, L))
ea = np.empty((N, 3, L))
eb = np.empty((N, 3, L))
for i in range(N):
    za[i], ea[i] = utils.generate_mar(a, cov, L)
    zb[i], eb[i] = utils.generate_mar(b, cov, L)

"""

Try to estimate the 2nd-order (m)AR coefficients: average together N
estimates of the auto-covariance at lags k=0, 1, 2.

"""

Raxx = np.empty((N, 3, 3, 3))
Rbxx = np.empty((N, 3, 3, 3))

for i in range(N):
    Raxx[i] = utils.autocov_vector(za[i], nlags=3)
    Rbxx[i] = utils.autocov_vector(zb[i], nlags=3)
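"""

The averaging step described above, sketched the same way the
information-criteria example does it (alg.lwr_recursion as used there; the
*_est names are illustrative, not part of the original snippet):

"""

Raxx_avg = Raxx.mean(axis=0).transpose(2, 0, 1)
Rbxx_avg = Rbxx.mean(axis=0).transpose(2, 0, 1)

a_est, ecov_a = alg.lwr_recursion(Raxx_avg)
b_est, ecov_b = alg.lwr_recursion(Rbxx_avg)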
Example #8
def test_MAR_est_LWR():
    """

    Test the LWR MAR estimator against the power of the signal

    This also tests the functions: transfer_function_xy, spectral_matrix_xy,
    coherence_from_spectral and granger_causality_xy

    """

    # These are the same processes as those in doc/examples/ar_est_2vars.py:
    a1 = np.array([[0.9, 0], [0.16, 0.8]])

    a2 = np.array([[-0.5, 0], [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov], [xy_cov, y_var]])

    n_freqs = 1024
    w, Hw = tsa.transfer_function_xy(am, n_freqs=n_freqs)
    Sw = tsa.spectral_matrix_xy(Hw, cov)

    # This many realizations of the process:
    N = 500
    # Each one this long
    L = 1024

    order = am.shape[0]
    n_lags = order + 1

    n_process = am.shape[-1]

    z = np.empty((N, n_process, L))
    nz = np.empty((N, n_process, L))

    for i in range(N):
        z[i], nz[i] = utils.generate_mar(am, cov, L)

    a_est = []
    cov_est = []

    # This loop runs MAR_est_LWR on each realization:
    for i in range(N):
        est = tsa.MAR_est_LWR(z[i], order=n_lags)
        a_est.append(est[0])
        cov_est.append(est[1])

    a_est = np.mean(a_est, 0)
    cov_est = np.mean(cov_est, 0)

    # This tests transfer_function_xy and spectral_matrix_xy:
    w, Hw_est = tsa.transfer_function_xy(a_est, n_freqs=n_freqs)
    Sw_est = tsa.spectral_matrix_xy(Hw_est, cov_est)

    # coherence_from_spectral:
    c = tsa.coherence_from_spectral(Sw)
    c_est = tsa.coherence_from_spectral(Sw_est)

    # granger_causality_xy:

    w, f_x2y, f_y2x, f_xy, Sw = tsa.granger_causality_xy(am,
                                                         cov,
                                                         n_freqs=n_freqs)

    w, f_x2y_est, f_y2x_est, f_xy_est, Sw_est = tsa.granger_causality_xy(
        a_est, cov_est, n_freqs=n_freqs)

    # interdependence_xy
    i_xy = tsa.interdependence_xy(Sw)
    i_xy_est = tsa.interdependence_xy(Sw_est)

    # This is all very approximate:
    npt.assert_almost_equal(Hw, Hw_est, decimal=1)
    npt.assert_almost_equal(Sw, Sw_est, decimal=1)
    npt.assert_almost_equal(c, c_est, 1)
    npt.assert_almost_equal(f_xy, f_xy_est, 1)
    npt.assert_almost_equal(f_x2y, f_x2y_est, 1)
    npt.assert_almost_equal(f_y2x, f_y2x_est, 1)
    npt.assert_almost_equal(i_xy, i_xy_est, 1)
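
For orientation, the quantities asserted at the end are related through
Geweke's decomposition of the total interdependence (assuming f_xy is the
instantaneous term and i_xy the total):

.. math::

   F_{x,y} = F_{x \rightarrow y} + F_{y \rightarrow x} + F_{x \cdot y}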
Example #9
"""

L = 1024

order = am.shape[0]
n_lags = order + 1

n_process = am.shape[-1]

z = np.empty((N, n_process, L))
nz = np.empty((N, n_process, L))

np.random.seed(1981)
for i in range(N):
    z[i], nz[i] = utils.generate_mar(am, cov, L)


"""

We start by estimating the order of the model from the data:

"""

est_order = []
for i in range(N):
    this_order, this_Rxx, this_coef, this_ecov = gc.fit_model(z[i][0], z[i][1])
    est_order.append(this_order)

order = int(np.round(np.mean(est_order)))
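"""

With the order in hand, a natural next step (a sketch mirroring the
model-fit test above, not part of the original snippet) is to refit each
realization at that order and average the coefficient estimates:

"""

coef = np.empty((N, n_process, n_process, order))
for i in range(N):
    _, _, coef[i], _ = gc.fit_model(z[i][0], z[i][1], order=order)

coef_mean = np.mean(coef, axis=0)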