Example #1
def do_mvar_evaluation(X, morder, whit_max=3., whit_min=1., thr_cons=0.8):
    '''
    Fit an MVAR model to the data using SCoT and perform some basic checks
    (residual whiteness, consistency, and stability).

    X: array (trials, channels, samples)
    morder: the model order

    Returns:
    (is_white, consistency, is_stable); whiteness and stability are
    returned as stringified booleans.
    '''
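    # assumed context: numpy is imported as np, and the MVGC-style helpers
    # (_tsdata_to_var, dw_whiteness, consistency) are defined in this module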
    print('starting checks and MVAR fitting...')
    # tsdata_to_var from MVGC requires sources x samples x trials
    # X is of shape trials x sources x samples (which is what ScoT uses)

    X_trans = X.transpose(1, 2, 0)

    A, SIG, E = _tsdata_to_var(X_trans, morder)
    del A, SIG

    whi = False
    dw, pval = dw_whiteness(X_trans, E)
    if np.all(dw < whit_max) and np.all(dw > whit_min):
        whi = True
    cons = consistency(X_trans, E)
    del dw, pval, E

    from scot.var import VAR
    mvar = VAR(morder)
    mvar.fit(X)  # SCoT's VAR.fit expects shape (trials, channels, samples)
    is_st = mvar.is_stable()
    if cons < thr_cons or is_st is False or whi is False:
        print('ERROR: model order not ideal - check parameters!')

    return str(whi), cons, str(is_st)
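
A minimal, hypothetical usage sketch (not part of the original file); it assumes the helpers noted above are available and reuses the VAR(2) coefficients from the test examples below:

# hypothetical usage sketch for do_mvar_evaluation:
import numpy as np
from scot.var import VAR

model = VAR(2)
model.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0.0]])
X = model.simulate([500, 20])  # (trials, channels, samples)
is_white, cons, is_stable = do_mvar_evaluation(X, morder=2)
print(is_white, cons, is_stable)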
Example #2
    def test_residuals(self):
        l = 100000
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate(l)

        var = VAR(2)
        var.fit(x)

        self.assertEqual(x.shape, var.residuals.shape)

        self.assertTrue(np.allclose(var.rescov, np.eye(var.rescov.shape[0]), 1e-2, 1e-2))
Example #3
    def test_fit(self):
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        l = 100000
        x = var0.simulate(l)
        y = x.copy()

        var = VAR(2)
        var.fit(x)

        # make sure the input remains unchanged
        self.assertTrue(np.all(x == y))

        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.all(np.abs(var0.coef - var.coef) < 0.02))
Example #4
def compute_order(X, m_max, verbose=True):
    """
    Estimate VAR order with the Bayesian Information Criterion (BIC).

    Parameters
    ----------
    X : ndarray, shape (trials, n_channels, n_samples)

    m_max : int
        The maximum model order to test

    Reference
    ---------
    Based on the BIC from [1]; as implemented here:
    BIC(m) = log[det(Σ)] + (p**2) * m * log(N*n) / (N*n),
    where Σ is the residual (noise) covariance matrix, p is the number of
    channels, N is the number of trials, n is the number of samples per
    trial, and m is the model order.

    [1] Mingzhou Ding, Yonghong Chen. "Granger Causality: Basic Theory and
    Application to Neuroscience." Elsevier Science, 7 February 2008.

    URL: https://gist.github.com/dongqunxi/b23d1679b9bffa8e458c11f93bd8d6ff


    Returns
    -------
    o_m : int
        Estimated order
    bic : list
        List with the BICs for the orders from 1 to m_max.
    """
    from scot.var import VAR
    from scipy import linalg
    import numpy as np

    N, p, n = X.shape
    bic = []
    for m in range(m_max):
        mvar = VAR(m + 1)
        mvar.fit(X)
        sigma = mvar.rescov
        m_bic = np.log(linalg.det(sigma))
        m_bic += (p**2) * (m + 1) * np.log(N * n) / (N * n)
        bic.append(m_bic)
        if verbose:
            print('model order: %d, BIC value: %.2f' % (m + 1, bic[m]))

    o_m = np.argmin(bic) + 1
    return o_m, bic
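
An added, runnable sketch for compute_order, reusing the VAR(2) coefficients from the tests above:

# hypothetical usage sketch for compute_order:
import numpy as np
from scot.var import VAR

model = VAR(2)
model.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0.0]])
X = model.simulate([500, 20])  # (trials, channels, samples)
best_order, bics = compute_order(X, m_max=8)
print('estimated order:', best_order)  # should typically recover 2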
Example #5
    def test_fit_regularized(self):
        l = 100000
        var0 = VAR(2)
        var0.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0]])
        x = var0.simulate(l)
        y = x.copy()

        var = VAR(10, delta=1)
        var.fit(x)

        # make sure the input remains unchanged
        self.assertTrue(np.all(x == y))

        b0 = np.zeros((2, 20))
        b0[:, 0:2] = var0.coef[:, 0:2]
        b0[:, 10:12] = var0.coef[:, 2:4]

        # that limit is rather generous, but we don't want tests to fail due to random variation
        self.assertTrue(np.all(np.abs(b0 - var.coef) < 0.02))
Example #6
File: pca.py  Project: cbrnr/scot
# imports assumed for this snippet (the original file elides them)
import numpy as np

from scot.pca import pca
from scot.var import VAR

# Generate data from a VAR(1) process
model0 = VAR(1)
model0.coef = np.array([[0.3, -0.6], [0, -0.9]])
x = model0.simulate(10000).squeeze()

# Transform data with PCA
w, v = pca(x)
y = np.dot(w.T, x)

# Verify that transformed data y is decorrelated
print("Covariance of x:\n", np.cov(x.squeeze()))
print("\nCovariance of y:\n", np.cov(y.squeeze()))

model1, model2 = VAR(1), VAR(1)

# Fit model1 to the original data
model1.fit(x)

# Fit model2 to the PCA transformed data
model2.fit(y)

# The coefficients estimated on x (2) should equal the back-transformed
# coefficients estimated on y (4) up to numerical precision
print("\n(1) True VAR coefficients:\n", model0.coef)
print("\n(2) VAR coefficients estimated on x:\n", model1.coef)
print("\n(3) VAR coefficients estimated on y:\n", model2.coef)
print("\n(4) VAR coefficients estimated on y and transformed back:\n", w.dot(model2.coef).dot(w.T))

print("\n(5) Check if (2) and (4) are equal:\n", np.isclose(model1.coef, w.dot(model2.coef).dot(w.T)))
Example #7
    def testFunctionality(self):
        """ generate VAR signals, and apply the api to them
            do this for every backend """
        np.random.seed(3141592)

        # original model coefficients
        b01 = np.zeros((3, 6))
        b02 = np.zeros((3, 6))
        b01[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
                         [-0.7, 0.0, 0.9, 0.0]]
        b02[0:3, 2:6] = [[0.4, 0.0, 0.0, 0.0],
                         [0.4, 0.0, 0.4, 0.0],
                         [0.0, 0.0, 0.4, 0.0]]
        m0 = b01.shape[0]
        cl = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1, 0])
        l = 200
        t = len(cl)

        # generate VAR sources with a non-Gaussian innovation process,
        # otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3 / 1e3

        var = VAR(2)
        var.coef = b01
        sources1 = var.simulate([l, sum(cl == 0)], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, sum(cl == 1)], noisefunc)

        var.fit(sources1)
        var.fit(sources2)

        sources = np.zeros((t, m0, l))

        sources[cl == 0, :, :] = sources1
        sources[cl == 1, :, :] = sources2

        # simulate volume conduction... 3 sources smeared over 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(np.transpose(mix), sources)
        data += np.random.randn(*data.shape) * 0.001  # add small noise

        for backend_name, backend_gen in scot.backend.items():
            np.random.seed(3141592)  # reset random seed so we're independent of module order

            api = scot.Workspace({'model_order': 2}, reducedim=3, backend=backend_gen())

            api.set_data(data)

            api.do_ica()

            self.assertEqual(api.mixing_.shape, (3, 7))
            self.assertEqual(api.unmixing_.shape, (7, 3))

            api.do_mvarica()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))

            api.set_data(data)

            api.fit_var()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (3, 3, 512, (l-100)//50))
            
            tfc1 = api.get_tf_connectivity('PDC', 100, 5, baseline=None)        # no baseline
            tfc2 = api.get_tf_connectivity('PDC', 100, 5, baseline=[110, -10])  # invalid baseline
            tfc3 = api.get_tf_connectivity('PDC', 100, 5, baseline=[0, 0])      # one-window baseline
            tfc4 = tfc1 - tfc1[:, :, :, [0]]
            tfc5 = api.get_tf_connectivity('PDC', 100, 5, baseline=[-np.inf, np.inf])  # full trial baseline
            tfc6 = tfc1 - np.mean(tfc1, axis=3, keepdims=True)
            self.assertTrue(np.allclose(tfc1, tfc2))
            self.assertTrue(np.allclose(tfc3, tfc4))
            self.assertTrue(np.allclose(tfc5, tfc6, rtol=1e-05, atol=1e-06))

            api.set_data(data, cl)
            
            self.assertFalse(np.any(np.isnan(api.data_)))
            self.assertFalse(np.any(np.isinf(api.data_)))
            
            api.do_cspvarica()
            
            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))
            
            for c in np.unique(cl):
                api.set_used_labels([c])

                api.fit_var()
                fc = api.get_connectivity('S')
                self.assertEqual(fc.shape, (3, 3, 512))

                tfc = api.get_tf_connectivity('S', 100, 50)
                self.assertEqual(tfc.shape, (3, 3, 512, (l-100)//50))

            api.set_data(data)
            api.remove_sources([0, 2])
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, (l-100)//50))

            try:
                api.optimize_var()
            except NotImplementedError:
                pass
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, (l-100)//50))
Example #8
    def testFunctionality(self):
        """ generate VAR signals, and apply the api to them
            do this for every backend """
        np.random.seed(3141592)

        # original model coefficients
        b01 = np.zeros((3, 6))
        b02 = np.zeros((3, 6))
        b01[1:3, 2:6] = [[0.4, -0.2, 0.3, 0.0],
                         [-0.7, 0.0, 0.9, 0.0]]
        b02[0:3, 2:6] = [[0.4, 0.0, 0.0, 0.0],
                         [0.4, 0.0, 0.4, 0.0],
                         [0.0, 0.0, 0.4, 0.0]]
        m0 = b01.shape[0]
        cl = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1, 0])
        l = 200
        t = len(cl)

        # generate VAR sources with a non-Gaussian innovation process,
        # otherwise ICA won't work
        noisefunc = lambda: np.random.normal(size=(1, m0)) ** 3

        var = VAR(2)
        var.coef = b01
        sources1 = var.simulate([l, sum(cl == 0)], noisefunc)
        var.coef = b02
        sources2 = var.simulate([l, sum(cl == 1)], noisefunc)

        var.fit(sources1)
        var.fit(sources2)

        sources = np.zeros((l, m0, t))

        sources[:, :, cl == 0] = sources1
        sources[:, :, cl == 1] = sources2

        # simulate volume conduction... 3 sources measured with 7 channels
        mix = [[0.5, 1.0, 0.5, 0.2, 0.0, 0.0, 0.0],
               [0.0, 0.2, 0.5, 1.0, 0.5, 0.2, 0.0],
               [0.0, 0.0, 0.0, 0.2, 0.5, 1.0, 0.5]]
        data = datatools.dot_special(sources, mix)

        backend_modules = [import_module('scot.' + b) for b in scot.backends]

        for bm in backend_modules:
            np.random.seed(3141592)  # reset random seed so we're independent of module order

            api = scot.Workspace({'model_order': 2}, reducedim=3, backend=bm.backend)

            api.set_data(data)

            api.do_ica()

            self.assertEqual(api.mixing_.shape, (3, 7))
            self.assertEqual(api.unmixing_.shape, (7, 3))

            api.do_mvarica()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))

            api.set_data(data)

            api.fit_var()

            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (3, 3, 512, (l-100)//50))

            api.set_data(data, cl)
            
            self.assertFalse(np.any(np.isnan(api.data_)))
            self.assertFalse(np.any(np.isinf(api.data_)))
            
            api.do_cspvarica()
            
            self.assertEqual(api.get_connectivity('S').shape, (3, 3, 512))

            self.assertFalse(np.any(np.isnan(api.activations_)))
            self.assertFalse(np.any(np.isinf(api.activations_)))
            
            for c in np.unique(cl):
                api.set_used_labels([c])

                api.fit_var()
                fc = api.get_connectivity('S')
                self.assertEqual(fc.shape, (3, 3, 512))

                tfc = api.get_tf_connectivity('S', 100, 50)
                self.assertEqual(tfc.shape, (3, 3, 512, (l-100)//50))

            api.set_data(data)
            api.remove_sources([0, 2])
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, (l-100)//50))

            try:
                api.optimize_var()
            except NotImplementedError:
                pass
            api.fit_var()
            self.assertEqual(api.get_connectivity('S').shape, (1, 1, 512))
            self.assertEqual(api.get_tf_connectivity('S', 100, 50).shape, (1, 1, 512, (l-100)//50))
Example #9
# imports assumed for this snippet (the original file elides them)
import time

import numpy as np

from scot.datasets import fetch
from scot.datatools import cut_segments
from scot.var import VAR


# Prevent execution of the main script in worker threads
if __name__ == "__main__":

    midata = fetch("mi")[0]

    raweeg = midata["eeg"]
    triggers = midata["triggers"]
    classes = midata["labels"]
    fs = midata["fs"]
    locs = midata["locations"]

    # Prepare data
    #
    # Here we cut out segments from 3s to 4s after each trigger. This is right
    # in the middle of the motor imagery period.
    data = cut_segments(raweeg, triggers, 3 * fs, 4 * fs)

    # only use every 10th trial to make the example run faster
    data = data[::10]

    var = VAR(model_order=5)
    var.fit(data)
    for n_jobs in [-1, None, 1, 2, 3, 4, 5, 6, 7, 8]:
        # Set random seed for repeatable results
        np.random.seed(42)
        var.n_jobs = n_jobs
        start = time.perf_counter()
        p = var.test_whiteness(10, repeats=1000)
        time1 = time.perf_counter()
        print('n_jobs: {:>4s}, whiteness test: {:.2f}s, p = {}'.format(str(n_jobs), time1 - start, p))
Example #10
# imports assumed for this snippet (the original file elides them)
import numpy as np

from scot.pca import pca
from scot.var import VAR

# Generate data from a VAR(1) process
model0 = VAR(1)
model0.coef = np.array([[0.3, -0.6], [0, -0.9]])
x = model0.simulate(10000).squeeze()

# Transform data with PCA
w, v = pca(x)
y = x.dot(w)

print('Covariance of x:\n', np.cov(x.squeeze().T))
print('\nCovariance of y:\n', np.cov(y.squeeze().T))

model1, model2 = VAR(1), VAR(1)

# Fit model1 to the original data
model1.fit(x)

# Fit model2 to the PCA transformed data
model2.fit(y)

# The coefficients estimated on x (2) should equal the back-transformed
# coefficients estimated on y (4) up to numerical precision
print('\n(1) True VAR coefficients:\n', model0.coef)
print('\n(2) VAR coefficients estimated on x:\n', model1.coef)
print('\n(3) VAR coefficients estimated on y:\n', model2.coef)
print('\n(4) VAR coefficients estimated on y and transformed back:\n',
      w.dot(model2.coef).dot(w.T))
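
# Added sketch (not in the original file): mirror step (5) of the other
# pca.py example above and verify numerically that (2) and (4) agree.
print('\n(5) Check if (2) and (4) are equal:\n',
      np.isclose(model1.coef, w.dot(model2.coef).dot(w.T)))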
Example #11
def compute_order_extended(X,
                           m_max,
                           m_min=1,
                           m_step=1,
                           n_jobs=None,
                           verbose=True):
    """
    Estimate VAR order with the Bayesian Information Criterion (BIC).

    Parameters
    ----------
    X : ndarray, shape (trials, n_channels, n_samples)

    m_max : int
        The maximum model order to test.
    m_min : int
        The minimum model order to test.
    m_step : int
        The step size for checking the model order interval
        given by m_min and m_max.
    n_jobs : None | int, optional
        Number of jobs to run in parallel for various tasks (e.g. whiteness
        testing). If set to None, joblib is not used at all. Note that the main
        script must be guarded with `if __name__ == '__main__':` when using
        parallelization.
    verbose : bool
        Print results for the other information criteria as well.

    Returns
    -------
    o_m : int
        Estimated order using BIC2.
    morders : np.array of shape ((m_max - m_min) // m_step + 1,)
        The model orders corresponding to the entries in the following
        results arrays.
    ics : np.array of shape (6, (m_max - m_min) // m_step + 1)
        The information criteria for the different model orders:
        [AIC1, BIC1, AIC2, BIC2, lnFPE, HQIC].
    stability : np.array of shape ((m_max - m_min) // m_step + 1,)
        Indicates whether the MVAR model describes a stable (covariance
        stationary) process.
    p_white_scot : np.array of shape ((m_max - m_min) // m_step + 1,)
        p-values that the residuals are white based on the Li-McLeod
        Portmanteau test implemented in SCoT. Reject the hypothesis of white
        residuals if p is smaller than the critical p-value.
    p_white_dw : np.array of shape ((m_max - m_min) // m_step + 1, n_rois)
        Uncorrected p-values that the residuals are white based on the
        Durbin-Watson test as implemented by Barnett and Seth (2012). Reject
        the hypothesis of white residuals if all p's are smaller than the
        critical p-value.
    dw : np.array of shape ((m_max - m_min) // m_step + 1, n_rois)
        The Durbin-Watson statistics.
    consistency : np.array of shape ((m_max - m_min) // m_step + 1,)
        Results of the MVAR consistency estimation.

    References
    ----------
    [1] provides the BIC variant implemented here as BIC1:
    BIC(m) = 2*log[det(Σ)] + 2*(p**2)*m*log(N*n)/(N*n),
    where Σ is the residual (noise) covariance matrix, p is the number of
    channels, N is the number of trials, n is the number of samples per
    trial, and m is the model order.

    [1] Mingzhou Ding, Yonghong Chen (2008). "Granger Causality: Basic Theory
    and Application to Neuroscience." Elsevier Science.

    [2] Nicoletta Nicolaou and Julius Georgiou (2013). "Autoregressive Model
    Order Estimation Criteria for Monitoring Awareness during Anaesthesia."
    IFIP Advances in Information and Communication Technology 412.

    [3] Helmut Lütkepohl (2005). "New Introduction to Multiple Time Series
    Analysis." 1st ed. Berlin: Springer-Verlag Berlin Heidelberg.

    URL: https://gist.github.com/dongqunxi/b23d1679b9bffa8e458c11f93bd8d6ff
    """
    from scot.var import VAR
    from scipy import linalg
    import numpy as np

    N, p, n = X.shape

    aic1 = []
    bic1 = []
    aic2 = []
    bic2 = []
    lnfpe = []
    hqic = []

    morders = []
    stability = []
    p_white_scot = []
    p_white_dw = []
    dw = []

    consistency = []

    # TODO: should this be n_total = N * n * p ???
    # total number of data points: n_trials * n_samples
    # Esther Florin (2010): N_total is number of time points contained in each time series
    n_total = N * n

    # check model order min/max/step input
    if m_min >= m_max:
        m_min = m_max - 1
    if m_min < 1:
        m_min = 1
    if m_step < 1:
        m_step = 1
    if m_step >= m_max:
        m_step = m_max

    for m in range(m_min, m_max + 1, m_step):
        morders.append(m)
        mvar = VAR(m, n_jobs=n_jobs)
        mvar.fit(X)

        stable = mvar.is_stable()
        stability.append(stable)

        p_white_scot_ = mvar.test_whiteness(h=m,
                                            repeats=100,
                                            get_q=False,
                                            random_state=None)
        white_scot_ = p_white_scot_ >= 0.05

        p_white_scot.append(p_white_scot_)

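        # check_whiteness_and_consistency is an external helper assumed to be
        # defined elsewhere in this module (Durbin-Watson + consistency checks)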
        white_dw_, cons, dw_, pval = check_whiteness_and_consistency(
            X.transpose(1, 2, 0),
            mvar.residuals.transpose(1, 2, 0),
            alpha=0.05)
        dw.append(dw_)
        p_white_dw.append(pval)
        consistency.append(cons)

        sigma = mvar.rescov

        ########################################################################
        # from [1]
        ########################################################################
        m_aic = 2 * np.log(linalg.det(sigma)) + 2 * (p**2) * m / n_total
        m_bic = 2 * np.log(
            linalg.det(sigma)) + 2 * (p**2) * m / n_total * np.log(n_total)
        aic1.append(m_aic)
        bic1.append(m_bic)

        ########################################################################
        # from [2]
        ########################################################################

        m_aic2 = np.log(linalg.det(sigma)) + 2 * (p**2) * m / n_total
        m_bic2 = np.log(
            linalg.det(sigma)) + (p**2) * m / n_total * np.log(n_total)

        aic2.append(m_aic2)
        bic2.append(m_bic2)

        ########################################################################
        # from [3]
        ########################################################################
        # Akaike's final prediction error
        m_ln_fpe3 = np.log(linalg.det(sigma)) + p * np.log(
            (n_total + m * p + 1) / (n_total - m * p - 1))
        # Hannan-Quinn criterion
        m_hqc3 = np.log(linalg.det(sigma)) + 2 * (p**2) * m / n_total * np.log(
            np.log(n_total))

        lnfpe.append(m_ln_fpe3)
        hqic.append(m_hqc3)

        if verbose:
            results = 'Model order: ' + str(m).zfill(2)
            results += '    AIC1: %.2f' % m_aic
            results += '    BIC1: %.2f' % m_bic
            results += '    AIC2: %.2f' % m_aic2
            results += '    BIC2: %.2f' % m_bic2
            results += '  lnFPE3: %.2f' % m_ln_fpe3
            results += '    HQC3: %.2f' % m_hqc3
            results += '  stable: %s' % str(stable)
            results += '  white1: %s' % str(white_scot_)
            results += '  white2: %s' % str(white_dw_)
            results += '   DWmin: %.2f' % dw_.min()
            results += '   DWmax: %.2f' % dw_.max()
            results += ' consistency: %.4f' % cons

            print(results)

    morders = np.array(morders)
    o_m = morders[np.argmin(bic2)]
    if verbose:
        print('>>> Optimal model order according to BIC2 = %d' % o_m)

    ics = [aic1, bic1, aic2, bic2, lnfpe, hqic]
    ics = np.asarray(ics)

    stability = np.array(stability)
    p_white_scot = np.array(p_white_scot)
    p_white_dw = np.array(p_white_dw)
    dw = np.array(dw)
    consistency = np.array(consistency)

    return o_m, morders, ics, stability, p_white_scot, p_white_dw, dw, consistency
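
A hypothetical call sketch for compute_order_extended (not runnable stand-alone, since it assumes the external check_whiteness_and_consistency helper referenced in the loop is defined):

# hypothetical usage sketch for compute_order_extended:
import numpy as np
from scot.var import VAR

model = VAR(2)
model.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0.0]])
X = model.simulate([400, 25])  # (trials, channels, samples)

(o_m, morders, ics, stability,
 p_white_scot, p_white_dw, dw, cons) = compute_order_extended(X, m_max=6)
print('BIC2-optimal order:', o_m)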
Example #12
def check_model_order(X, p, whit_min=1.5, whit_max=2.5, check_stability=True):
    """
    Check whiteness, consistency, and stability for all model
    orders k <= p.

    Computationally intensive but for high model orders probably
    faster than do_mvar_evaluation().

    Parameters:
    -----------
    X : narray, shape (n_epochs, n_sources, n_times)
        The data to estimate the model order for.
    p : int
        The maximum model order.
    whit_min : float
        Lower boundary for the Durbin-Watson test.
    whit_max : float
        Upper boundary for the Durbin-Watson test.
    check_stability : bool
        Check the stability condition. Time intensive since
        it fits a second MVAR model from scot.var.VAR
    Returns:
    --------
    A: array, coefficients of the specified model
    SIG:array, recovariance of this model
    E:  array, noise covariance of this model
    """

    assert p >= 1, "The model order must be greater or equal to 1."

    from scot.var import VAR
    import numpy as np

    X_orig = X.copy()
    X = X.transpose(1, 2, 0)

    n, m, N = X.shape
    p1 = p + 1
    q1n = p1 * n
    I = np.eye(n)
    XX = np.zeros((n, p1, m + p, N))
    for k in range(p1):
        XX[:, k, k:k + m, :] = X
    AF = np.zeros((n, q1n))
    AB = np.zeros((n, q1n))
    k = 1
    kn = k * n
    M = N * (m - k)
    kf = list(range(0, kn))
    kb = list(range(q1n - kn, q1n))
    XF = np.reshape(XX[:, 0:k, k:m, :], (kn, M), order='F')
    XB = np.reshape(XX[:, 0:k, k - 1:m - 1, :], (kn, M), order='F')
    CXF = np.linalg.cholesky(XF.dot(XF.T)).T
    CXB = np.linalg.cholesky(XB.dot(XB.T)).T
    AF[:, kf] = np.linalg.solve(CXF.T, I)
    AB[:, kb] = np.linalg.solve(CXB.T, I)

    del p1, XF, XB, CXF, CXB

    while k <= p:

        tempF = np.reshape(XX[:, 0:k, k:m, :], (kn, M), order='F')
        af = AF[:, kf]
        EF = af.dot(tempF)

        del af, tempF

        tempB = np.reshape(XX[:, 0:k, k - 1:m - 1, :], (kn, M), order='F')
        ab = AB[:, kb]
        EB = ab.dot(tempB)

        del ab, tempB

        CEF = np.linalg.cholesky(EF.dot(EF.T)).T
        CEB = np.linalg.cholesky(EB.dot(EB.T)).T
        R = np.dot(np.linalg.solve(CEF.T, EF.dot(EB.T)), np.linalg.inv(CEB))

        del EB, CEF, CEB

        RF = np.linalg.cholesky(I - R.dot(R.T)).T
        RB = np.linalg.cholesky(I - (R.T).dot(R)).T
        k = k + 1
        kn = k * n
        M = N * (m - k)
        kf = np.arange(kn)
        kb = list(range(q1n - kn, q1n))
        AFPREV = AF[:, kf]
        ABPREV = AB[:, kb]
        AF[:, kf] = np.linalg.solve(RF.T, AFPREV - R.dot(ABPREV))
        AB[:, kb] = np.linalg.solve(RB.T, ABPREV - R.T.dot(AFPREV))

        del RF, RB, ABPREV

        # check MVAR model properties

        E = np.linalg.solve(AFPREV[:, :n], EF)
        E = np.reshape(E, (n, m - k + 1, N), order='F')

        if k > 1:

            whi, cons, _, _ = check_whiteness_and_consistency(
                X, E, whit_min, whit_max)

            if check_stability:
                mvar = VAR(k - 1)
                # SCoT's VAR.fit expects shape (trials, channels, samples)
                mvar.fit(X_orig)
                is_st = mvar.is_stable()
                is_st = mvar.is_stable()

            output = 'morder %d:' % (k - 1)
            output += ' white: %s' % str(whi)
            output += '; consistency: %.4f' % cons
            if check_stability:
                output += '; stable: %s' % str(is_st)
            print(output)
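
A similar hypothetical sketch for check_model_order (again assuming the check_whiteness_and_consistency helper is available):

# hypothetical usage sketch for check_model_order:
import numpy as np
from scot.var import VAR

model = VAR(2)
model.coef = np.array([[0.2, 0.1, 0.4, -0.1], [0.3, -0.2, 0.1, 0.0]])
X = model.simulate([300, 30])  # (n_epochs, n_sources, n_times)
check_model_order(X, p=5)  # prints whiteness/consistency/stability per order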