Example 1
def fit_model(x1,
              x2,
              order=None,
              max_order=10,
              criterion=utils.bayesian_information_criterion):
    """
    Fit the auto-regressive model used in calculation of Granger 'causality'.

    Parameters
    ----------

    x1,x2: float arrays (n)
        x1,x2 bivariate combination.
    order: int (optional)
        If known, the order of the autoregressive process
    max_order: int (optional)
        If the order is not known, this will be the maximal order to fit.
    criterion: callable
        A function which defines an information criterion, used to determine
        the order of the model.

    """
    c_old = np.inf
    n_process = 2
    Ntotal = n_process * x1.shape[-1]

    # If model order was provided as an input:
    if order is not None:
        lag = order + 1
        Rxx = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)
        coef, ecov = alg.lwr_recursion(np.array(Rxx).transpose(2, 0, 1))

    # If the model order is not known and was not provided as input:
    else:
        for lag in range(1, max_order):
            Rxx_new = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)
            coef_new, ecov_new = alg.lwr_recursion(
                np.array(Rxx_new).transpose(2, 0, 1))
            order_new = coef_new.shape[0]
            c_new = criterion(ecov_new, n_process, order_new, Ntotal)
            if c_new > c_old:
                # Keep the values you got in the last round and break out:
                break

            else:
                # Replace the output values with the new calculated values and
                # move on to the next order:
                c_old = c_new
                order = order_new
                Rxx = Rxx_new
                coef = coef_new
                ecov = ecov_new
        else:
            e_s = (
                "Model estimation order did not converge at max_order = %s" %
                max_order)
            raise ValueError(e_s)

    return order, Rxx, coef, ecov
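For context, a minimal usage sketch follows (an addition, not part of the
original example; it assumes `np`, `utils` and `alg` are bound to numpy,
nitime.utils and nitime.algorithms, as in the module this function comes
from, and the data are synthetic):

import numpy as np
import nitime.utils as utils
import nitime.algorithms as alg

# Hypothetical bivariate data: x2 is a lagged, noisy copy of x1.
np.random.seed(0)
x1 = np.random.randn(1024)
x2 = np.zeros_like(x1)
x2[1:] = 0.5 * x1[:-1]
x2 += 0.1 * np.random.randn(1024)

# Let the information criterion choose the model order (up to max_order):
order, Rxx, coef, ecov = fit_model(x1, x2, max_order=10)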
Example 2
def fit_model(x1, x2, order=None, max_order=10,
              criterion=utils.bayesian_information_criterion):
    """
    Fit the auto-regressive model used in calculation of Granger 'causality'.

    Parameters
    ----------

    x1,x2: float arrays (n)
        x1,x2 bivariate combination.
    order: int (optional)
        If known, the order of the autoregressive process
    max_order: int (optional)
        If the order is not known, this will be the maximal order to fit.
    criterion: callable
        A function which defines an information criterion, used to determine
        the order of the model.

    """
    c_old = np.inf
    n_process = 2
    Ntotal = n_process * x1.shape[-1]

    # If model order was provided as an input:
    if order is not None:
        lag = order + 1
        Rxx = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)
        coef, ecov = alg.lwr_recursion(np.array(Rxx).transpose(2, 0, 1))

    # If the model order is not known and was not provided as input:
    else:
        for lag in range(1, max_order):
            Rxx_new = utils.autocov_vector(np.vstack([x1, x2]), nlags=lag)
            coef_new, ecov_new = alg.lwr_recursion(
                                        np.array(Rxx_new).transpose(2, 0, 1))
            order_new = coef_new.shape[0]
            c_new = criterion(ecov_new, n_process, order_new, Ntotal)
            if c_new > c_old:
                # Keep the values you got in the last round and break out:
                break

            else:
                # Replace the output values with the new calculated values and
                # move on to the next order:
                c_old = c_new
                order = order_new
                Rxx = Rxx_new
                coef = coef_new
                ecov = ecov_new
        else:
            e_s = ("Model estimation order did not converge at max_order = %s"
                                                                  % max_order)
            raise ValueError(e_s)

    return order, Rxx, coef, ecov
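When the model order is already known, it can be passed in directly and the
criterion search is skipped entirely (a sketch, reusing the hypothetical
`x1` and `x2` from the note after Example 1):

order, Rxx, coef, ecov = fit_model(x1, x2, order=2)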
Example 3
def test_lwr():
    "test solution of lwr recursion"
    for trial in xrange(3):
        nc = np.random.randint(2, high=10)
        P = np.random.randint(2, high=6)
        # nc is channels, P is lags (order)
        r = np.random.randn(P + 1, nc, nc)
        r[0] = np.dot(r[0], r[0].T)  # force r0 to be symmetric

        a, Va = tsa.lwr_recursion(r)
        # Verify the "orthogonality" principle of the mAR system
        # Set up a system in blocks to compute, for each k
        #   sum_{i=1}^{P} A(i)R(k-i) = -R(k) k > 0
        # = sum_{i=1}^{P} R(k-i)^T A(i)^T = -R(k)^T
        # = sum_{i=1}^{P} R(i-k)A(i)^T = -R(k)^T
        rmat = np.zeros((nc * P, nc * P))
        for k in range(1, P + 1):
            for i in range(1, P + 1):
                im = i - k
                if im < 0:
                    r1 = r[-im].T
                else:
                    r1 = r[im]
                rmat[(k - 1) * nc:k * nc, (i - 1) * nc:i * nc] = r1

        rvec = np.zeros((nc * P, nc))
        avec = np.zeros((nc * P, nc))
        for m in range(P):
            rvec[m * nc:(m + 1) * nc] = -r[m + 1].T
            avec[m * nc:(m + 1) * nc] = a[m].T

        l2_d = np.dot(rmat, avec) - rvec
        l2_d = (l2_d ** 2).sum() ** 0.5
        l2_r = (rvec ** 2).sum() ** 0.5

        # compute |Ax-b| / |b| metric
        npt.assert_almost_equal(l2_d / l2_r, 0, decimal=5)
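The same property can also be checked in reverse: since the loop above
assembles the full block Yule-Walker system, the coefficients can be
recovered by solving it directly and compared against the LWR output (a
sketch added here, not part of the original test; it would sit inside the
trial loop and assumes rmat is well-conditioned):

        avec_direct = np.linalg.solve(rmat, rvec)
        # Up to numerical tolerance, this reproduces the stacked, transposed
        # coefficients returned by tsa.lwr_recursion:
        assert np.allclose(avec_direct, avec, atol=1e-4)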
Example 4
def test_lwr():
    "test solution of lwr recursion"
    for trial in range(3):
        nc = np.random.randint(2, high=10)
        P = np.random.randint(2, high=6)
        # nc is channels, P is lags (order)
        r = np.random.randn(P + 1, nc, nc)
        r[0] = np.dot(r[0], r[0].T)  # force r0 to be symmetric

        a, Va = tsa.lwr_recursion(r)
        # Verify the "orthogonality" principle of the mAR system
        # Set up a system in blocks to compute, for each k
        #   sum_{i=1}^{P} A(i)R(k-i) = -R(k) k > 0
        # = sum_{i=1}^{P} R(k-i)^T A(i)^T = -R(k)^T
        # = sum_{i=1}^{P} R(i-k)A(i)^T = -R(k)^T
        rmat = np.zeros((nc * P, nc * P))
        for k in range(1, P + 1):
            for i in range(1, P + 1):
                im = i - k
                if im < 0:
                    r1 = r[-im].T
                else:
                    r1 = r[im]
                rmat[(k - 1) * nc:k * nc, (i - 1) * nc:i * nc] = r1

        rvec = np.zeros((nc * P, nc))
        avec = np.zeros((nc * P, nc))
        for m in range(P):
            rvec[m * nc:(m + 1) * nc] = -r[m + 1].T
            avec[m * nc:(m + 1) * nc] = a[m].T

        l2_d = np.dot(rmat, avec) - rvec
        l2_d = (l2_d**2).sum()**0.5
        l2_r = (rvec**2).sum()**0.5

        # compute |Ax-b| / |b| metric
        npt.assert_almost_equal(l2_d / l2_r, 0, decimal=5)
Example 5
def test_information_criteria():
    """

    Test the implementation of information criteria:

    """
    a1 = np.array([[0.9, 0], [0.16, 0.8]])

    a2 = np.array([[-0.5, 0], [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov], [xy_cov, y_var]])

    # Number of realizations of the process:
    N = 500
    # Length of each realization:
    L = 1024

    order = am.shape[0]
    n_process = am.shape[-1]

    z = np.empty((N, n_process, L))
    nz = np.empty((N, n_process, L))

    for i in range(N):
        z[i], nz[i] = utils.generate_mar(am, cov, L)

    AIC = []
    BIC = []
    AICc = []

    # The total number of data points available for estimation:
    Ntotal = L * n_process

    for n_lags in range(1, 10):

        Rxx = np.empty((N, n_process, n_process, n_lags))

        for i in range(N):
            Rxx[i] = utils.autocov_vector(z[i], nlags=n_lags)

        Rxx = Rxx.mean(axis=0)
        Rxx = Rxx.transpose(2, 0, 1)

        a, ecov = alg.lwr_recursion(Rxx)

        IC = utils.akaike_information_criterion(ecov, n_process, n_lags,
                                                Ntotal)
        AIC.append(IC)

        IC = utils.akaike_information_criterion(ecov,
                                                n_process,
                                                n_lags,
                                                Ntotal,
                                                corrected=True)
        AICc.append(IC)

        IC = utils.bayesian_information_criterion(ecov, n_process, n_lags,
                                                  Ntotal)
        BIC.append(IC)

    # The model has order 2, so the BIC should be minimized at the index
    # corresponding to order 2:

    # We do not test this for AIC/AICc, because these sometimes do not minimize
    # (see Ding and Bressler)
    # nt.assert_equal(np.argmin(AIC), 2)
    # nt.assert_equal(np.argmin(AICc), 2)
    nt.assert_equal(np.argmin(BIC), 2)
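For reference, a sketch of how the selected order is read off the curve (an
addition, not part of the original test). The loop evaluates n_lags = 1..9,
and since autocov_vector with nlags = order + 1 covers lags 0 through order
(cf. fit_model above), index 2 of BIC corresponds to n_lags = 3, i.e. model
order 2:

    best_idx = np.argmin(BIC)      # index into the n_lags = 1..9 grid
    best_nlags = best_idx + 1      # the winning nlags value
    best_order = best_nlags - 1    # nlags spans lags 0 .. order
    assert best_order == 2         # the order of the simulated model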
Example 6
def test_information_criteria():
    """

    Test the implementation of information criteria:

    """
    a1 = np.array([[0.9, 0],
                   [0.16, 0.8]])

    a2 = np.array([[-0.5, 0],
                   [-0.2, -0.5]])

    am = np.array([-a1, -a2])

    x_var = 1
    y_var = 0.7
    xy_cov = 0.4
    cov = np.array([[x_var, xy_cov],
                    [xy_cov, y_var]])

    # Number of realizations of the process:
    N = 500
    # Length of each realization:
    L = 1024

    order = am.shape[0]
    n_process = am.shape[-1]

    z = np.empty((N, n_process, L))
    nz = np.empty((N, n_process, L))

    for i in range(N):
        z[i], nz[i] = utils.generate_mar(am, cov, L)

    AIC = []
    BIC = []
    AICc = []

    # The total number of data points available for estimation:
    Ntotal = L * n_process

    for n_lags in range(1, 10):

        Rxx = np.empty((N, n_process, n_process, n_lags))

        for i in range(N):
            Rxx[i] = utils.autocov_vector(z[i], nlags=n_lags)

        Rxx = Rxx.mean(axis=0)
        Rxx = Rxx.transpose(2, 0, 1)

        a, ecov = alg.lwr_recursion(Rxx)

        IC = utils.akaike_information_criterion(ecov, n_process, n_lags, Ntotal)
        AIC.append(IC)

        IC = utils.akaike_information_criterion(ecov, n_process, n_lags,
                                                Ntotal, corrected=True)
        AICc.append(IC)

        IC = utils.bayesian_information_criterion(ecov, n_process, n_lags, Ntotal)
        BIC.append(IC)

    # The model has order 2, so the BIC should be minimized at the index
    # corresponding to order 2:

    # We do not test this for AIC/AICc, because these sometimes do not minimize
    # (see Ding and Bressler)
    # nt.assert_equal(np.argmin(AIC), 2)
    # nt.assert_equal(np.argmin(AICc), 2)
    nt.assert_equal(np.argmin(BIC), 2)
Example 7
Rxx = Rxx.mean(axis=0)

R0 = Rxx[..., 0]
Rm = Rxx[..., 1:]

Rxx = Rxx.transpose(2, 0, 1)


"""

We use the Levinson-Whittle(-Wiggins) and Robinson algorithm, as described in [Morf1978]_
, in order to estimate the MAR coefficients and the covariance matrix:

"""

a, ecov = alg.lwr_recursion(Rxx)

"""

Next, we use the calculated coefficients and covariance matrix, in order to
calculate Granger 'causality':

"""

w, f_x2y, f_y2x, f_xy, Sw = alg.granger_causality_xy(a,
                                                     ecov,
                                                     n_freqs=n_freqs)
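"""

As an aside (not part of the original script), the returned values can be
inspected directly: ``w`` is the frequency grid, ``f_x2y`` and ``f_y2x`` are
the frequency-resolved Granger terms in each direction, ``f_xy`` is the
instantaneous term and ``Sw`` is the spectral matrix. A minimal plotting
sketch, assuming matplotlib is available:

"""

import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot(w, f_x2y, label='x -> y')
ax.plot(w, f_y2x, label='y -> x')
ax.set_xlabel('Frequency')
ax.set_ylabel('Granger causality')
ax.legend()
plt.show()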


"""
Example 8
xyRa = extract_ij(0, 1, Raxx)
xzRa = extract_ij(0, 2, Raxx)
yzRa = extract_ij(1, 2, Raxx)

Rbxx = Rbxx.mean(axis=0)
xyRb = extract_ij(0, 1, Rbxx)
xzRb = extract_ij(0, 2, Rbxx)
yzRb = extract_ij(1, 2, Rbxx)

"""

Now estimate mAR coefficients and covariance from the full and pairwise relationships:

"""

Raxx = Raxx.transpose(2, 0, 1)
a_est, cov_est1 = alg.lwr_recursion(Raxx)
a_xy_est, cov_xy_est1 = alg.lwr_recursion(xyRa.transpose(2, 0, 1))
a_xz_est, cov_xz_est1 = alg.lwr_recursion(xzRa.transpose(2, 0, 1))
a_yz_est, cov_yz_est1 = alg.lwr_recursion(yzRa.transpose(2, 0, 1))

Rbxx = Rbxx.transpose(2, 0, 1)
b_est, cov_est2 = alg.lwr_recursion(Rbxx)
b_xy_est, cov_xy_est2 = alg.lwr_recursion(xyRb.transpose(2, 0, 1))
b_xz_est, cov_xz_est2 = alg.lwr_recursion(xzRb.transpose(2, 0, 1))
b_yz_est, cov_yz_est2 = alg.lwr_recursion(yzRb.transpose(2, 0, 1))


"""

We proceed to visualize these relationships:
Example 9
xyRa = extract_ij(0, 1, Raxx)
xzRa = extract_ij(0, 2, Raxx)
yzRa = extract_ij(1, 2, Raxx)

Rbxx = Rbxx.mean(axis=0)
xyRb = extract_ij(0, 1, Rbxx)
xzRb = extract_ij(0, 2, Rbxx)
yzRb = extract_ij(1, 2, Rbxx)
"""

Now estimate mAR coefficients and covariance from the full and pairwise relationships:

"""

Raxx = Raxx.transpose(2, 0, 1)
a_est, cov_est1 = alg.lwr_recursion(Raxx)
a_xy_est, cov_xy_est1 = alg.lwr_recursion(xyRa.transpose(2, 0, 1))
a_xz_est, cov_xz_est1 = alg.lwr_recursion(xzRa.transpose(2, 0, 1))
a_yz_est, cov_yz_est1 = alg.lwr_recursion(yzRa.transpose(2, 0, 1))

Rbxx = Rbxx.transpose(2, 0, 1)
b_est, cov_est2 = alg.lwr_recursion(Rbxx)
b_xy_est, cov_xy_est2 = alg.lwr_recursion(xyRb.transpose(2, 0, 1))
b_xz_est, cov_xz_est2 = alg.lwr_recursion(xzRb.transpose(2, 0, 1))
b_yz_est, cov_yz_est2 = alg.lwr_recursion(yzRb.transpose(2, 0, 1))
"""

We proceed to visualize these relationships:

"""